text stringlengths 38 1.54M |
|---|
"Desenvolvido por Flávia"
divida = int(input())
pagamento = int(input())
while divida > 0 :
print ("(antes) {}" .format(divida))
if divida <= pagamento:
divida = 0
else:
divida -= pagamento
print ("(depois) {}" .format(divida)) |
def rgb(t):
    """Convert a list of colour components to a hex string like "#040F21".

    Each component is rendered as exactly two uppercase hex digits; values
    outside 0-255 wrap modulo 256 (matching the original two-digit behaviour).
    Unlike the original, the input list is NOT mutated, and the builtins
    ``dict``/``hex`` are no longer shadowed.
    """
    digits = "0123456789ABCDEF"
    parts = []
    for value in t:
        high = (value // 16) % 16   # upper hex digit
        low = value % 16            # lower hex digit
        parts.append(digits[high] + digits[low])
    return "#" + "".join(parts)


print(rgb([4, 15, 33]))
|
__author__ = "zhou"
__date__ = "2019-06-06 22:16"
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, String, Integer
db = SQLAlchemy()
class User(db.Model):
    # ORM model for application users (table name defaults to "user").
    id = Column(Integer, primary_key=True, autoincrement=True)  # surrogate key
    user_name = Column(String(50), nullable=False, unique=True)  # unique login name
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-30 20:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: sets the human-readable verbose names on the
    # Narration model ('\u53d9\u8ff0' is the Chinese word for "narration").
    dependencies = [
        ('blog', '0002_narration'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='narration',
            options={'verbose_name': '\u53d9\u8ff0', 'verbose_name_plural': '\u53d9\u8ff0'},
        ),
    ]
|
def solve():
    """Decode the hiragana-substitution ciphertext and return the flag."""
    ciphertext = 'くさこお{たふふたぬしいやむかひめぬかやしいさふしめひわやいかぬらかるめいらふむに}'
    table = {
        "お": "1",
        "こ": "2",
        "か": "a",
        "く": "c",
        "さ": "d",
        "し": "e",
        "た": "g",
        "に": "k",
        "ぬ": "l",
        "ひ": "n",
        "ふ": "o",
        "む": "r",
        "め": "s",
        "や": "t",
        "ら": "w",
        "る": "y",
        "わ": "'",
        "い": "-",
        "{": "{",
        "}": "}",
    }
    # One-to-one substitution; a KeyError would signal an unmapped symbol.
    return ''.join(table[symbol] for symbol in ciphertext)


if __name__ == "__main__":
    print(solve())
|
from .utils import safe_script
from django.conf import settings
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
@safe_script
def run():
    """Create the 'paywei' database on the default Django DB connection.

    Autocommit is required because PostgreSQL does not allow CREATE DATABASE
    inside a transaction block.  Fixed: the connection and cursor are now
    closed even when an exception occurs (the original leaked both on error),
    and the pointless ``con = None`` pre-assignment is gone.
    """
    default = settings.DATABASES['default']
    con = connect(dbname=default['NAME'], user=default['USER'],
                  host=default['HOST'], password=default['PASSWORD'])
    try:
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = con.cursor()
        try:
            cur.execute('CREATE DATABASE paywei')
        finally:
            cur.close()
    finally:
        con.close()
|
# coding: utf8
from django import forms
from personalform.models import User
class UserForm(forms.Form):
    """Registration form.

    Validates that the two password fields match and that the username is
    not already taken.  Fixed: removed Python-2-only debug ``print``
    statements (they broke the module under Python 3) and redundant ``pass``
    statements.
    """
    username = forms.CharField(max_length=30, required=True, label='用户名')
    password = forms.CharField(max_length=25, required=True, label='密码', widget=forms.PasswordInput)
    checkpassword = forms.CharField(max_length=25, required=True, label='确认密码', widget=forms.PasswordInput)
    email = forms.EmailField(required=True, label='邮箱', widget=forms.EmailInput)

    def clean_checkpassword(self):
        """Reject the confirmation password when it differs from the password."""
        checkpassword = self.cleaned_data['checkpassword']
        # .get avoids a KeyError when the password field itself failed
        # validation; the mismatch error is then raised instead.
        password = self.cleaned_data.get('password')
        if checkpassword != password:
            raise forms.ValidationError("密码不一致,请确认后重新输入")
        return checkpassword

    def clean_username(self):
        """Reject usernames that already exist in the database."""
        username = self.cleaned_data['username']
        if User.objects.filter(username=username).count() > 0:
            raise forms.ValidationError("用户名已存在,请重新输入.")
        return username
class LoginForm(forms.Form):
    """Login form: verifies the user exists and the password matches.

    NOTE(review): passwords are stored and compared in plain text here; this
    should use Django's auth framework with hashed passwords.
    """
    username = forms.CharField(max_length=30, required=True, label='用户名')
    password = forms.CharField(max_length=25, required=True, label='密码', widget=forms.PasswordInput)

    def clean_password(self):
        # .get avoids a KeyError when the username field failed validation.
        username = self.cleaned_data.get('username')
        password = self.cleaned_data['password']
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            raise forms.ValidationError('不存在该用户,请确认用户名后再重新输入')
        # Reuse the fetched user instead of querying the table a second time
        # (the original issued an identical User.objects.get again).
        if password != user.password:
            raise forms.ValidationError('密码错误,请确认后重新输入')
        return password
class UploadFileForm(forms.Form):
    # Simple upload form: a display title plus the file payload.
    title = forms.CharField(max_length=50)
    file = forms.FileField()
|
import os, pdb, sys, logging, shutil, threading, time, yaml
import unittest as test

from nistoar.testing import *
from nistoar.pdr.preserv.service import service as serv
from nistoar.pdr.preserv.service import status
from nistoar.pdr.preserv.service.siphandler import SIPHandler, MIDASSIPHandler
from nistoar.pdr.exceptions import PDRException, StateException
from nistoar.pdr import config
# datadir = nistoar/preserv/data
datadir = os.path.join( os.path.dirname(os.path.dirname(__file__)), "data" )
loghdlr = None
rootlog = None
def setUpModule():
    # Route all module-wide logging into a file under the test temp dir, and
    # point the pdr config module at the same handler/paths so code under
    # test logs to a known, inspectable location.
    global loghdlr
    global rootlog
    ensure_tmpdir()
    rootlog = logging.getLogger()
    loghdlr = logging.FileHandler(os.path.join(tmpdir(),"test_pressvc.log"))
    loghdlr.setLevel(logging.INFO)
    rootlog.addHandler(loghdlr)
    config._log_handler = loghdlr
    config.global_logdir = tmpdir()
    config.global_logfile = os.path.join(tmpdir(),"test_pressvc.log")
def tearDownModule():
    # Detach the file handler installed by setUpModule and remove the temp dir.
    global loghdlr
    if loghdlr:
        if rootlog:
            rootlog.removeHandler(loghdlr)
        loghdlr = None
    rmtmpdir()
class TestThreadedPreservationService(test.TestCase):
    """Tests for serv.ThreadedPreservationService driven by the sample MIDAS SIP.

    Fixed: setUp used the Python-2-only ``except Exception, e`` form, which is
    a syntax error under Python 3 and inconsistent with the ``except ... as``
    form used elsewhere in this module; the bound exception was unused.
    """
    sipdata = os.path.join(datadir, "midassip", "review", "1491")
    midasid = '3A1EE2F169DD3B8CE0531A570681DB5D1491'

    def setUp(self):
        # Build a scratch directory tree mirroring the preservation layout
        # (review/working/staging/mdserv/store/status) plus a notification
        # archive dir, then construct the service under test.
        self.tf = Tempfiles()
        self.narch = self.tf.mkdir("notify")
        self.troot = self.tf.mkdir("siphandler")
        self.revdir = os.path.join(self.troot, "review")
        os.mkdir(self.revdir)
        self.workdir = os.path.join(self.troot, "working")
        os.mkdir(self.workdir)
        self.stagedir = os.path.join(self.troot, "staging")
        # os.mkdir(self.stagedir)   # deliberately absent: the service creates it
        self.mdserv = os.path.join(self.troot, "mdserv")
        os.mkdir(self.mdserv)
        self.store = os.path.join(self.troot, "store")
        os.mkdir(self.store)
        self.statusdir = os.path.join(self.troot, "status")
        os.mkdir(self.statusdir)
        shutil.copytree(self.sipdata, os.path.join(self.revdir, "1491"))
        with open(os.path.join(datadir, "bagger_conf.yml")) as fd:
            baggercfg = yaml.safe_load(fd)
        self.config = {
            "working_dir": self.workdir,
            "store_dir": self.store,
            "id_registry_dir": self.workdir,
            "sip_type": {
                "midas": {
                    "common": {
                        "review_dir": self.revdir,
                        "id_minter": { "shoulder_for_edi": "edi0" },
                    },
                    "mdserv": {
                        "working_dir": self.mdserv
                    },
                    "preserv": {
                        "bagparent_dir": "_preserv",
                        "staging_dir": self.stagedir,
                        "bagger": baggercfg,
                        "status_manager": { "cachedir": self.statusdir },
                    }
                }
            },
            "notifier": {
                "channels": [
                    {
                        "name": "arch",
                        "type": "archive",
                        "dir": self.narch
                    }
                ],
                "targets": [
                    {
                        "name": "archive",
                        "type": "archive",
                        "channel": "arch"
                    }
                ],
                "alerts": [
                    {
                        "type": "preserve.failure",
                        "targets": [ "archive" ]
                    },
                    {
                        "type": "preserve.success",
                        "targets": [ "archive" ]
                    }
                ]
            }
        }
        try:
            self.svc = serv.ThreadedPreservationService(self.config)
        except Exception:
            # FIX: was the Python-2-only "except Exception, e" form
            self.tearDown()
            raise

    def tearDown(self):
        self.svc = None
        self.tf.clean()

    def test_ctor(self):
        self.assertTrue(self.svc)
        self.assertTrue(os.path.exists(self.workdir))
        self.assertTrue(os.path.exists(self.store))
        self.assertEqual(self.svc.siptypes, ['midas'])
        self.assertIsNotNone(self.svc._notifier)

    def test_make_handler(self):
        # the handler should be a MIDASSIPHandler wired to our scratch dirs
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        self.assertTrue(hndlr.bagger)
        self.assertTrue(isinstance(hndlr, SIPHandler),
                        "hndlr is not an SIPHandler")
        self.assertTrue(isinstance(hndlr, MIDASSIPHandler),
                        "hndlr wrong type for 'midas': "+str(type(hndlr)))
        self.assertIsNotNone(hndlr.notifier)
        self.assertEqual(hndlr.cfg['working_dir'], self.workdir)
        self.assertEqual(hndlr.cfg['store_dir'], self.store)
        self.assertEqual(hndlr.cfg['id_registry_dir'], self.workdir)
        self.assertEqual(hndlr.cfg['review_dir'], self.revdir)
        self.assertEqual(hndlr.cfg['id_minter']['shoulder_for_edi'], 'edi0')
        self.assertEqual(hndlr.cfg['bagparent_dir'], '_preserv')
        self.assertEqual(hndlr.cfg['metadata_bags_dir'], self.mdserv)
        self.assertEqual(hndlr.cfg['bagger']['relative_to_indir'], True)
        self.assertEqual(hndlr.cfg['status_manager']['cachedir'], self.statusdir)
        self.assertTrue(os.path.exists(self.workdir))
        self.assertTrue(os.path.exists(self.stagedir))
        self.assertTrue(os.path.exists(self.mdserv))
        self.assertTrue(isinstance(hndlr.status, dict))
        self.assertEqual(hndlr.state, status.FORGOTTEN)

    def test_make_handler_badtype(self):
        with self.assertRaises(PDRException):
            hndlr = self.svc._make_handler(self.midasid, 'goob')

    def test_launch_sync(self):
        # a generous timeout makes the launch behave synchronously
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        self.assertEqual(hndlr.state, status.FORGOTTEN)
        (stat, thrd) = self.svc._launch_handler(hndlr, 5)
        self.assertEqual(stat['state'], status.SUCCESSFUL)
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                         self.midasid+".1_0_0.mbag0_4-0.zip")))
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                  self.midasid+".1_0_0.mbag0_4-0.zip.sha256")))
        self.assertEqual(hndlr.state, status.SUCCESSFUL)

    def test_launch_async(self):
        # zero timeout: the launch returns before preservation completes
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        self.assertEqual(hndlr.state, status.FORGOTTEN)
        self.assertTrue(hndlr.isready())
        self.assertEqual(hndlr.state, status.READY)
        self.assertFalse(os.path.exists(hndlr._status._cachefile))
        (stat, thrd) = self.svc._launch_handler(hndlr, 0)
        try:
            self.assertNotEqual(stat['state'], status.SUCCESSFUL)
        finally:
            thrd.join()
        self.assertEqual(hndlr.state, status.SUCCESSFUL)
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                         self.midasid+".1_0_0.mbag0_4-0.zip")))
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                  self.midasid+".1_0_0.mbag0_4-0.zip.sha256")))

    def test_preserve(self):
        # a successful preservation should drop a notification in the archive
        self.assertFalse(os.path.exists(os.path.join(self.narch,"archive.txt")))
        try:
            stat = self.svc.preserve(self.midasid, 'midas', 2)
            self.assertEqual(stat['state'], status.SUCCESSFUL)
        finally:
            # make sure any background preservation thread has finished
            for t in threading.enumerate():
                if t.name == self.midasid:
                    t.join()
        self.assertTrue(os.path.exists(os.path.join(self.narch,"archive.txt")))

    def test_preserve_noupdate(self):
        # a second preservation request for the same SIP must be rejected
        try:
            stat = self.svc.preserve(self.midasid, 'midas', 2)
            self.assertEqual(stat['state'], status.SUCCESSFUL)
        finally:
            for t in threading.enumerate():
                if t.name == self.midasid:
                    t.join()
        try:
            with self.assertRaises(serv.RerequestException):
                stat = self.svc.preserve(self.midasid, 'midas', 2)
        finally:
            for t in threading.enumerate():
                if t.name == self.midasid:
                    t.join()

    def test_preserve_inprog(self):
        # a request for a SIP already being preserved must be rejected
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        self.assertEqual(hndlr.state, status.FORGOTTEN)
        hndlr.set_state(status.IN_PROGRESS)
        try:
            with self.assertRaises(serv.RerequestException):
                stat = self.svc.preserve(self.midasid, 'midas', 2)
        finally:
            for t in threading.enumerate():
                if t.name == self.midasid:
                    t.join()

    def test_status2(self):
        # unknown SIPs report NOT_FOUND (or FAILED once a review dir exists)
        # without creating a status cache file
        stat = self.svc.status("FFFFFFFFFF", "midas")
        self.assertEqual(stat['state'], status.NOT_FOUND)
        self.assertTrue(not os.path.exists(os.path.join(self.statusdir,
                                                        "FFFFFFFFFF.json")))
        os.mkdir(os.path.join(self.revdir, "FFFFFFFFFF"))
        stat = self.svc.status("FFFFFFFFFF")
        self.assertEqual(stat['state'], status.FAILED)
        self.assertTrue(not os.path.exists(os.path.join(self.statusdir,
                                                        "FFFFFFFFFF.json")))

    def test_status(self):
        # walk the SIP through its state transitions and check reporting
        stat = self.svc.status(self.midasid, "midas")
        self.assertEqual(stat['state'], status.READY)
        self.assertTrue(not os.path.exists(os.path.join(self.statusdir,
                                                        self.midasid+".json")))
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        hndlr.set_state(status.IN_PROGRESS)
        stat = self.svc.status(self.midasid, "midas")
        self.assertEqual(stat['state'], status.IN_PROGRESS)
        hndlr._status.reset()
        stat = self.svc.status(self.midasid, "midas")
        self.assertEqual(stat['state'], status.PENDING)
        with self.assertRaises(serv.RerequestException):
            self.svc.preserve(self.midasid, 'midas', 2)
        hndlr.set_state(status.READY)
        try:
            self.svc.preserve(self.midasid, 'midas', 2)
        finally:
            for t in threading.enumerate():
                if t.name == self.midasid:
                    t.join()
        stat = self.svc.status(self.midasid, "midas")
        self.assertEqual(stat['state'], status.SUCCESSFUL)
        # if there is no longer a cached status file, ensure that we notice
        # when there is bag in the store dir
        os.remove(os.path.join(self.statusdir, self.midasid+'.json'))
        stat = self.svc.status(self.midasid, "midas")
        self.assertEqual(stat['state'], status.SUCCESSFUL)
        self.assertIn('orgotten', stat['message'])

    def test_status_badtype(self):
        stat = self.svc.status(self.midasid, 'goob')
        self.assertEqual(stat['state'], status.FAILED)

    def test_requests(self):
        # requests() lists only SIPs with active work, optionally by type
        reqs = self.svc.requests()
        self.assertEqual(len(reqs), 0)
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        hndlr.set_state(status.IN_PROGRESS)
        reqs = self.svc.requests()
        self.assertIn(self.midasid, reqs)
        self.assertEqual(reqs[self.midasid], 'midas')
        self.assertEqual(len(reqs), 1)
        reqs = self.svc.requests('goob')
        self.assertEqual(len(reqs), 0)
        reqs = self.svc.requests('midas')
        self.assertIn(self.midasid, reqs)
        self.assertEqual(reqs[self.midasid], 'midas')
        self.assertEqual(len(reqs), 1)
class TestMultiprocPreservationService(test.TestCase):
    """Tests for serv.MultiprocPreservationService driven by the sample MIDAS SIP.

    Fixed: test_launch_async compared the whole ``stat`` dict against the
    ``status.FAILED`` constant, which can never be equal, making the
    assertion vacuous; it now checks ``stat['state']`` like the rest of the
    module.
    """
    sipdata = os.path.join(datadir, "midassip", "review", "1491")
    midasid = '3A1EE2F169DD3B8CE0531A570681DB5D1491'

    def setUp(self):
        # Build a scratch directory tree mirroring the preservation layout,
        # then construct the multi-process service under test.
        self.tf = Tempfiles()
        self.troot = self.tf.mkdir("siphandler")
        self.revdir = os.path.join(self.troot, "review")
        os.mkdir(self.revdir)
        self.workdir = os.path.join(self.troot, "working")
        os.mkdir(self.workdir)
        self.stagedir = os.path.join(self.troot, "staging")
        # os.mkdir(self.stagedir)   # deliberately absent: the service creates it
        self.mdserv = os.path.join(self.troot, "mdserv")
        os.mkdir(self.mdserv)
        self.store = os.path.join(self.troot, "store")
        os.mkdir(self.store)
        self.statusdir = os.path.join(self.troot, "status")
        os.mkdir(self.statusdir)
        shutil.copytree(self.sipdata, os.path.join(self.revdir, "1491"))
        with open(os.path.join(datadir, "bagger_conf.yml")) as fd:
            baggercfg = yaml.safe_load(fd)
        self.config = {
            "working_dir": self.workdir,
            "store_dir": self.store,
            "logdir": self.troot,
            "id_registry_dir": self.workdir,
            "announce_subproc": False,
            "sip_type": {
                "midas": {
                    "common": {
                        "review_dir": self.revdir,
                        "id_minter": { "shoulder_for_edi": "edi0" },
                    },
                    "mdserv": {
                        "working_dir": self.mdserv
                    },
                    "preserv": {
                        "bagparent_dir": "_preserv",
                        "staging_dir": self.stagedir,
                        "bagger": baggercfg,
                        "status_manager": { "cachedir": self.statusdir }
                    }
                }
            }
        }
        try:
            self.svc = serv.MultiprocPreservationService(self.config)
        except Exception as e:
            self.tearDown()
            raise

    def tearDown(self):
        self.svc = None
        self.tf.clean()

    def test_ctor(self):
        self.assertTrue(self.svc)
        self.assertTrue(os.path.exists(self.workdir))
        self.assertTrue(os.path.exists(self.store))
        self.assertEqual(self.svc.siptypes, ['midas'])

    def test_launch_sync(self):
        # forcing synchronous mode returns no child process handle
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        self.assertEqual(hndlr.state, status.FORGOTTEN)
        self.assertTrue(hndlr.isready())
        self.assertEqual(hndlr.state, status.READY)
        proc = None
        (stat, proc) = self.svc._launch_handler(hndlr, 10, True)
        self.assertIsNone(proc)
        self.assertEqual(stat['state'], status.SUCCESSFUL)
        self.assertEqual(hndlr.state, status.SUCCESSFUL)
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                         self.midasid+".1_0_0.mbag0_4-0.zip")))
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                  self.midasid+".1_0_0.mbag0_4-0.zip.sha256")))

    def test_launch_async(self):
        # asynchronous launch returns a child process to wait on
        hndlr = self.svc._make_handler(self.midasid, 'midas')
        self.assertEqual(hndlr.state, status.FORGOTTEN)
        self.assertTrue(hndlr.isready())
        self.assertEqual(hndlr.state, status.READY)
        proc = None
        (stat, proc) = self.svc._launch_handler(hndlr, 10)
        # FIX: was assertNotEqual(stat, status.FAILED, ...), which compared
        # the whole status dict to a constant and could never fail
        self.assertNotEqual(stat['state'], status.FAILED,
                         "Unexpected handler failure: "+hndlr.status['message'])
        self.assertIsNotNone(proc)
        proc.join()
        self.assertFalse(proc.is_alive())
        self.assertEqual(stat['state'], status.SUCCESSFUL,
                  "Unsuccessful state: %s: %s" % (stat['state'], stat['message']))
        self.assertEqual(hndlr.state, status.SUCCESSFUL)
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                         self.midasid+".1_0_0.mbag0_4-0.zip")))
        self.assertTrue(os.path.exists(os.path.join(self.store,
                                  self.midasid+".1_0_0.mbag0_4-0.zip.sha256")))

    def test_subprocess_handle(self):
        try:
            serv._subprocess_handle(self.svc.cfg, "SUBDIR/pres.log", self.midasid, "MIDAS-SIP", False, 5)
            hndlr = self.svc._make_handler(self.midasid, 'midas')
            self.assertEqual(hndlr.state, status.SUCCESSFUL)
            self.assertTrue(os.path.exists(os.path.join(self.store,
                                         self.midasid+".1_0_0.mbag0_4-0.zip")))
            self.assertTrue(os.path.exists(os.path.join(self.store,
                                  self.midasid+".1_0_0.mbag0_4-0.zip.sha256")))
            plog = os.path.join(self.troot, "preservation.log")
            self.assertTrue(os.path.isfile(plog), "Missing preservation logfile: "+plog)
        finally:
            # _subprocess_handle replaces the logging setup; restore it so
            # later tests in this module still log to the expected file
            rootlogger = logging.getLogger()
            rootlogger.removeHandler(config._log_handler)
            setUpModule()
if __name__ == '__main__':
test.main()
|
import hashlib
import os
import string
import random
import math
import requests
from django.shortcuts import render
from django.urls import reverse
from .models import User
from .forms import LoginForm, SignupForm
from django.http import HttpResponseRedirect, HttpResponse, StreamingHttpResponse
from datetime import timedelta
from django.utils import timezone
from .models import User, File
from django.shortcuts import get_list_or_404, get_object_or_404
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
def signup(request):
    """Render the signup page; create the user and redirect to login on
    valid POST.  An existing username re-renders the page with an error.
    """
    error_msg = ''
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            if User.objects.filter(username=username):
                # duplicate user: show the error on the signup page
                error_msg = 'user %s exists' % username
                return render(request, 'app/signup.html',
                              context={'error_msg': error_msg})
            # NOTE(review): password is stored in plain text
            User.objects.create(username=username, password=password)
            return HttpResponseRedirect(redirect_to=reverse('app:login'))
    else:
        form = SignupForm()
    context = {'form': form, 'error_msg': error_msg,
               'title': 'Sign up', 'submit_value': 'Sign up'}
    return render(request, 'app/signup.html', context=context)
def gen_sid(text):
    """Return the SHA-1 hex digest of *text*, used as a session id."""
    digest = hashlib.sha1()
    digest.update(text.encode())
    return digest.hexdigest()
def login(request):
    """Login view: verify the captcha, then the username/password pair.

    On success a session entry keyed by a SHA-1 session id is stored and
    ``sid``/``name`` cookies are set to expire after one day.

    Fixed: the misspelled ``HttpRseponse`` raised a NameError whenever the
    captcha was missing from the session; also renamed the misspelled
    ``exprie_time`` local.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            # compare the posted captcha with the value saved in the session
            posted_captcha = form.cleaned_data['captcha']
            saved_captcha = request.session.get('captcha')
            if saved_captcha is None:
                return HttpResponse('验证码错误')
            if posted_captcha.lower() == saved_captcha:
                username = form.cleaned_data['username']
                password = form.cleaned_data['password']
                # NOTE(review): plain-text password comparison; should use
                # Django auth with hashed passwords
                if User.objects.filter(username=username, password=password):
                    # login succeeded: record a session entry
                    delta = timedelta(days=1)
                    expire_time = timezone.now() + delta
                    # NOTE: '%s' (epoch seconds) is platform-specific strftime
                    session_data = expire_time.strftime('%s')
                    session_id = gen_sid('%s:%s' % (username, session_data))
                    request.session[session_id] = session_data
                    index_url = reverse('app:index')
                    response = HttpResponseRedirect(redirect_to=index_url)
                    response.set_cookie('sid', session_id,
                                        int(delta.total_seconds()))
                    response.set_cookie('name', username)
                    return response
            else:
                form.add_error('captcha', '验证码不匹配')
    else:
        form = LoginForm()
    context = {'form': form, 'title': 'login', 'submit_value': 'Login'}
    return render(request, "app/login.html", context=context)
def make_image(char):
    '''
    Draw *char* on a small dark image and shear it slightly — the captcha glyph.
    '''
    im_size = (70, 40)   # output size in pixels
    font_size = 28
    bg = (0, 0, 0)       # black background
    offset = (1, 1)      # text origin
    im = Image.new('RGB', size=im_size, color=bg)
    font = ImageFont.truetype('app/ubuntu.ttf', font_size)
    draw = ImageDraw.ImageDraw(im)
    draw.text(offset, char, fill='yellow', font=font)
    # affine shear to make the text harder to OCR
    im = im.transform(im_size, Image.AFFINE, (1, -0.3, 0, -0.1, 1, 0), Image.BILINEAR)
    return im
def captcha(request):
    '''
    Serve a captcha PNG: generate 4 random letters, remember their lowercase
    form in the session for later verification by login(), and return the
    rendered image bytes.
    '''
    text = gentext(4)
    request.session['captcha'] = text.lower()# save lowercased captcha in session for the login check
    im = make_image(text)
    imgout = BytesIO()
    im.save(imgout, format='png')
    img_bytes = imgout.getvalue()
    return HttpResponse(img_bytes, content_type='image/png')
def gentext(n):
    """Return a string of *n* random ASCII letters (the captcha text)."""
    return ''.join(random.choice(string.ascii_letters) for _ in range(n))
def is_login(request):
    '''
    Return True when the request carries a 'sid' cookie whose session entry
    exists and has not yet expired.
    '''
    sid = request.COOKIES.get('sid', None)
    if sid is None:
        return False
    try:
        expire_second = int(request.session[sid])
    except KeyError:
        # no server-side session entry for this sid
        return False
    # the session stores the expiry as epoch seconds; reject past expiries
    if int(timezone.now().strftime('%s')) > expire_second:
        return False
    return True
def page(request, page):
    """Paginated file listing, five files per page.

    Renders an error page when *page* falls outside [1, total_pages].
    Fixed: removed a large block of commented-out dead code and hoisted the
    repeated ``int(page)`` conversions.
    """
    files = File.objects.all().order_by('filename')
    name = request.COOKIES.get('name', None)
    pages = math.ceil(len(files) / 5)
    pageno = int(page)
    if pageno < 1 or pageno > pages:
        context = {'error_msg': '找不到页面'}
        return render(request, 'app/page.html', context)
    # flags controlling whether "previous"/"next" links are shown
    fis_page = pageno != 1
    las_page = pageno != pages
    # NOTE(review): 'tiele' looks like a typo for 'title' but is kept
    # because the template may reference it as-is — confirm before renaming
    context = {'data': files[(pageno - 1) * 5:pageno * 5],
               'tiele': 'page', 'name': name, 'upper': pageno - 1,
               'lower': pageno + 1, 'fis_page': fis_page, 'las_page': las_page}
    return render(request, 'app/page.html', context=context)
def index(request):
    """Home page: list files; logged-in users see only the first five plus
    their display name."""
    login_is = is_login(request)
    files = get_list_or_404(File)
    if login_is:
        context = {'data': files[0:5], 'title': 'index', 'login_is': login_is,
                   'name': request.COOKIES.get('name', None)}
    else:
        context = {'data': files, 'login_is': login_is}
    return render(request, 'app/index.html', context=context)
def detail(request, pk):
    """Detail page for one uploaded file (404 when *pk* is unknown)."""
    logged_in = is_login(request)
    file = get_object_or_404(File, pk=pk)
    if logged_in:
        context = {'data': file, 'title': 'detail',
                   'name': request.COOKIES.get('name', None),
                   'login_is': logged_in}
    else:
        context = {'data': file, 'title': 'detail'}
    return render(request, 'app/detail.html', context=context)
def logout(request):
    '''
    Log out: delete the server-side session entry named by the 'sid'
    cookie, then bounce back to the index.  The cookies themselves are left
    in place and simply become useless.
    '''
    session_key = request.COOKIES.get('sid', None)
    if session_key is not None:
        del request.session[session_key]
    return HttpResponseRedirect(redirect_to=reverse('app:index'))
def file_md5(path):
    """Return the hex MD5 digest of the file at *path*.

    Fixed: the original opened the file without ever closing it; the file is
    now read in chunks under a ``with`` block, so large uploads are neither
    leaked nor loaded wholly into memory.
    """
    m = hashlib.md5()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(65536), b''):
            m.update(chunk)
    return m.hexdigest()
def upload(request):
    """Handle a file upload from a logged-in user.

    Saves the payload under /tmp/files/, records name/size/status/md5 in the
    database, then redirects to the index.  Fixes: the output file is now
    closed via ``with`` even on error; the client-supplied filename is
    reduced to its basename (path-traversal guard); a logged-in GET no
    longer falls off the end returning None — it now redirects to the index.
    """
    if not is_login(request):
        return HttpResponseRedirect(redirect_to=reverse('app:login'))
    if request.method == 'POST':
        myFile = request.FILES.get("myfile", None)
        if myFile is None:
            return render(request, 'app/index.html',
                          context={'erorr': '文件不存在'})
        # SECURITY: strip directory components from the untrusted client
        # filename so it cannot escape /tmp/files/
        safe_name = os.path.basename(myFile.name)
        filepath = os.path.join("/tmp/files/", safe_name)
        with open(filepath, 'wb+') as out:
            for chunk in myFile.chunks(chunk_size=1024):
                out.write(chunk)
        # status records whether the bytes on disk match the reported size
        if os.path.getsize(filepath) == myFile.size:
            status = 1
        else:
            status = 0
        md5 = file_md5(filepath)
        username = User.objects.get(username=request.COOKIES.get('name'))
        File.objects.create(filename=myFile.name, owner=username,
                            size=myFile.size, status=status, path=filepath,
                            md5=md5)
    return HttpResponseRedirect(redirect_to=reverse('app:index'))
def download(request, pk):
    """Stream a stored file back to a logged-in user as an attachment.

    Fixed: removed the large commented-out chunked-reader dead code.
    """
    if not is_login(request):
        return HttpResponseRedirect(redirect_to=reverse('app:login'))
    file = get_object_or_404(File, pk=pk)
    # NOTE(review): the open file object is handed to StreamingHttpResponse;
    # Django is expected to close file-like bodies when the response ends —
    # confirm for the Django version in use.
    response = StreamingHttpResponse(open(file.path, 'rb'))
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename="{0}"'.format(file.filename)
    return response
|
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.core.validators import RegexValidator, MinValueValidator, MaxValueValidator
from django.db import models
# Create your models here.
class User(AbstractUser):
    """Custom user with company/contact details, replacing the default user."""
    username = models.CharField(max_length=200, blank=False, unique=True)
    first_name = models.CharField(max_length=200, blank=False)
    last_name = models.CharField(max_length=200, blank=False)
    # NOTE(review): overriding AbstractUser.password shrinks the field from
    # its 128-char default to 200 — confirm this override is intentional
    password = models.CharField(max_length=200)
    email = models.EmailField(max_length=250, unique=True)
    company_name = models.CharField(max_length=200, blank=False)
    office_address = models.CharField(max_length=200, blank=False)
    office_telephone = models.CharField(max_length=200, blank=True, null=True)
    def __str__(self):
        return self.username
    class Meta:
        # newest accounts first
        ordering = ("-date_joined",)
class Client(models.Model):
    """A customer that invoices are issued to, owned by the creating User."""
    full_name = models.CharField(max_length=200, blank=False)
    company_name = models.CharField(max_length=200, blank=False)
    telephone = models.CharField(max_length=200, blank=True, null=True)
    about = models.TextField(null=True, blank=True)
    address = models.CharField(max_length=200, blank=False)
    city = models.CharField(max_length=200, blank=False)
    state = models.CharField(max_length=200, blank=False)
    country = models.CharField(max_length=200, blank=False)
    zipcode = models.CharField(max_length=200, blank=False)
    date_created = models.DateField(auto_now_add=True)
    # deleting the creating user cascades to their clients
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    def __str__(self):
        return self.full_name
    class Meta:
        # newest clients first
        ordering = ("-date_created",)
class Invoice(models.Model):
    """An invoice issued to a Client, identified by a unique invoice_id."""
    # payment-term choices ("Quater" spelling kept: stored values must match)
    PAYMENT_TERM = (
        ("End of Month", "End of Month"),
        ("End of Quater", "End of Quater"),
        ("End of Year", "End of Year"),
    )
    client = models.ForeignKey(Client, on_delete=models.CASCADE)
    invoice_id = models.CharField(max_length=10, blank=False, unique=True)
    date_created = models.DateField(auto_now_add=True)
    due_date = models.DateField()
    # NOTE(review): amount stored as free text, not a DecimalField — confirm
    amount = models.CharField(max_length=100, null=True, blank=True)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='invoice', null=True)
    payment_term = models.CharField(max_length=200, choices=PAYMENT_TERM, default="End of Month")
    shipping_address = models.TextField(blank=False)
    # VAT as a whole-number percentage, 0-100
    vat = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)], blank=True)
    dispatch_personnel = models.CharField(max_length=200, blank=False)
    def __str__(self):
        return self.invoice_id
    class Meta:
        # newest invoices first
        ordering = ("-date_created",)
|
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import pandas as pd
# Importing the dataset (50 startups: spend columns, State, and Profit target)
dataset = pd.read_csv('50_Startups.csv')
print(dataset.describe())
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# Encoding categorical data: label-encode then one-hot the 'State' column (index 3)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
labelencoder_X = LabelEncoder()
X[:,3] = labelencoder_X.fit_transform(X[:,3])
ct = ColumnTransformer([("State", OneHotEncoder(), [3])], remainder = 'passthrough')
X = ct.fit_transform(X)
print(X)
# Avoiding the Dummy Variable Trap: drop the first one-hot column
X = X[:, 1:]
print("X",X)
print(type(y),y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print(type(X_train),type(y_train[0]))
from sklearn.linear_model import LinearRegression
lr=LinearRegression()
lr.fit(X_train,y_train)
# predicting on the held-out test set
y_pred=lr.predict(X_test)
# evaluating the model on several regression metrics
from sklearn import metrics as mt
print("MAE=",mt.mean_absolute_error(y_test,y_pred))
print("MSE=",mt.mean_squared_error(y_test,y_pred))
print("Root Mean squared error ",np.sqrt(mt.mean_squared_error(y_test,y_pred)))
print("variance regression score function",mt.explained_variance_score(y_test,y_pred))
# closer to one is better
print("Maximum resudual error =",mt.max_error(y_test,y_pred))
# Building the optimal model using backward elimination:
# prepend an intercept column of ones for the OLS design matrix
# import statsmodels.formula.api as sm
X=np.append(arr=np.ones((50,1)).astype(int),values=X,axis=1)
# X_opt=X[:,[0,1,2,3,4,5]]
#
# regressor_OLS=sm.ols(endog=y,exog=X_opt).fit()
# print(regressor_OLS.summray())
#
import statsmodels.formula.api as sm
def backwardElimination(x, SL):
    """Backward-elimination feature selection on design matrix *x*.

    Repeatedly refits OLS and drops the predictor with the highest p-value
    while that p-value exceeds the significance level *SL*; if dropping a
    column lowers the adjusted R^2, the column is restored and the previous
    design matrix returned.  Fixed: the rollback scratch array was
    hard-coded to shape (50, 6); it now matches *x*.

    NOTE(review): relies on the module-level response vector ``y`` and on
    ``sm.ols`` accepting array arguments; recent statsmodels exposes this as
    ``statsmodels.api.OLS`` (the formula API expects a formula string) —
    confirm against the installed version.
    """
    numVars = len(x[0])
    # scratch copy of dropped columns so a removal can be rolled back
    temp = np.zeros(np.shape(x)).astype(int)
    for i in range(0, numVars):
        regressor_OLS = sm.ols(y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        adjR_before = regressor_OLS.rsquared_adj.astype(float)
        if maxVar > SL:
            for j in range(0, numVars - i):
                if (regressor_OLS.pvalues[j].astype(float) == maxVar):
                    temp[:, j] = x[:, j]
                    x = np.delete(x, j, 1)
                    tmp_regressor = sm.ols(y, x).fit()
                    adjR_after = tmp_regressor.rsquared_adj.astype(float)
                    if (adjR_before >= adjR_after):
                        # removal hurt adjusted R^2: restore the column,
                        # report the previous model, and stop
                        x_rollback = np.hstack((x, temp[:, [0, j]]))
                        x_rollback = np.delete(x_rollback, j, 1)
                        print(regressor_OLS.summary())
                        return x_rollback
                    else:
                        continue
    return x
# significance threshold for backward elimination
SL = 0.05
# all five predictors plus the intercept column
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
X_Modeled = backwardElimination(X_opt, SL)
|
from __future__ import with_statement
from __future__ import division
from __future__ import absolute_import
from collections import deque
import os
import csv
import time
import operator
import math
from io import open
# convertir a coordenadas
def xconverter(tamano, cuadros, x):
    """Map grid column *x* to a pixel x-coordinate; the cell size is
    int(tamano / cuadros) + 1 pixels."""
    celda = int(tamano / cuadros) + 1
    return int(celda * x)
def yconverter(tamano, cuadros, y):
    """Map grid row *y* to a pixel y-coordinate, flipping the axis because
    screen y grows downward."""
    celda = int(tamano / cuadros) + 1
    return int(tamano - celda * y)
def astar(mapa,resolution,a,b,c,d,angle):
    """Grid path search on a CSV obstacle map: BFS flood fill from the
    start, then a greedy best-first walk back toward the goal, finally
    converting the path into (rotation, distance) robot commands.

    NOTE(review): this function uses ``os``, ``csv``, ``deque``, ``time``
    and the Python-2-only ``xrange``, none of which are imported in the
    visible part of this file — confirm the missing imports / Python
    version before running.

    Args:
        mapa: map id (0-3) selecting a map0X.csv obstacle file.
        resolution: grid cells per map unit.
        a, b: start coordinates (map units).
        c, d: goal coordinates (map units).
        angle: initial robot heading in degrees (used for the first move).

    Returns:
        tuple: (path, mov, acciones) — the chosen path cells, the
        per-step grid moves, and the (rotation, distance) command list.
    """
    # circle radius (leftover from a visual version of this routine)
    radio = 10
    # pause between node expansions
    tiempo = 0.005
    # window setup
    cuadriculade = 10
    winsize = 700
    winsize = winsize + cuadriculade - 1
    if mapa == 1:
        mapa = u'map01.csv'
    elif mapa == 2:
        mapa = u'map02.csv'
    elif mapa == 3:
        mapa = u'map03.csv'
    elif mapa == 0:
        mapa = u'map00.csv'
    # list of grid coordinates occupied by obstacles
    obs = []
    filepath = os.getcwd()
    # build the obstacle set from the CSV map (each row: x, y, width, height)
    with open(filepath + "/" + mapa, u'r') as file:
        reader = csv.reader(file)
        for row in reader:
            ox, oy, w, h = float(row[0]), float(row[1]), float(row[2]), float(row[3])
            xo = xconverter(winsize, cuadriculade, ox)
            yo = 700 + cuadriculade - 1 - xconverter(winsize, cuadriculade, oy)
            for xi in xrange(0, int(h * resolution + 1)):
                for xa in xrange(0, int(w * resolution + 1)):
                    obs.append((ox * resolution + xa, oy * resolution + xi))
    # a single (0, 0) obstacle entry means "no obstacles at all"
    if (obs[0] == (0.0, 0.0) and len(obs) == 1):
        obs[:] = []
    # asking for information
    xi = a
    yi = b
    xf = c
    yf = d
    # Forward search (BFS flood fill over the free cells)
    Q = deque([])
    Q.append((xi * resolution, yi * resolution))
    visited = []
    visited.append((xi * resolution, yi * resolution))
    acciones = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
    found = False
    while (len(Q) > 0):
        ct = Q.popleft()
        if found == True:  # ct[0] == xf*resolution and ct[1]*resolution == yf:
            break
        for accion in acciones:
            nueva = (ct[0] + accion[0], ct[1] + accion[1])
            # expand only in-bounds, unvisited, obstacle-free neighbours
            if (((nueva[0] <= 10 * resolution) and (nueva[0] >= 0 * resolution) and (nueva[1] <= 10 * resolution)) and ((
                    nueva[1] >= 0 * resolution))) and nueva not in visited and nueva not in obs:
                visited.append(nueva)
                Q.append(nueva)
                time.sleep(tiempo)
                if nueva[0] == xf * resolution and nueva[1] == yf * resolution:
                    found = True
                    break
    # this section handles situations where xi == xf or yi == yf: keep only
    # the half-plane of visited cells on the majority side of the line
    izqui=[]
    dere=[]
    arriba=[]
    abajo=[]
    if xi==xf:
        for x in visited:
            if x[0]<xi*resolution:
                izqui.append(x)
            else:
                dere.append(x)
        if len(dere)>len(izqui):
            visited=dere
        else:
            visited=izqui
    if yi==yf:
        for y in visited:
            if y[1]<yi*resolution:
                abajo.append(y)
            else:
                arriba.append(y)
        if len(arriba)>len(abajo):
            visited=arriba
        else:
            visited=abajo
    # ------------------------------------------------------------------
    # Greedy best-first walk toward the goal over the visited cells
    # (labelled "A*" by the author, but there is no g-cost accumulation)
    dicti = {}
    path = []
    path.append((xi * resolution, yi * resolution))
    visited.append((xf * resolution, yf * resolution))  # * resolution
    for x in visited:
        x = path[-1]
        for y in acciones:
            new = (x[0] + y[0], x[1] + y[1])
            old_price = ((xf * resolution - path[-1][0]) ** 2 + (yf * resolution - path[-1][1]) ** 2) ** (1 / 2)
            pasado = ((xi * resolution - new[0]) ** 2 + (yi * resolution - new[1]) ** 2) ** (1 / 2)
            new_price = ((xf * resolution - new[0]) ** 2 + (yf * resolution - new[1]) ** 2) ** (1 / 2)
            if xi==xf:
                if(new[1]!=yf*resolution):  # not yet at the goal height
                    if new in visited and new not in path and new[1]!=yi*resolution and abs(yf*resolution-new[1])<=abs(yf*resolution-path[-1][1]): #and new[1]>=path[-1][1]:
                        dicti[new] = new_price
                    else:
                        dicti[new] = 10000000000000
                else:  # not yet at the goal horizontal distance
                    if new in visited and new not in path and new[1]!=yi*resolution and abs(xf*resolution-new[0])<=abs(xf*resolution-path[-1][0]): #and new[1]>=path[-1][1]:
                        dicti[new] = new_price
                    else:
                        dicti[new] = 10000000000000
            elif yi==yf:
                if(new[0]!=xf*resolution):  # not yet at the goal horizontal distance
                    if new in visited and new not in path and new[0]!=xi*resolution and abs(xf*resolution-new[0])<=abs(xf*resolution-path[-1][0]):
                        dicti[new] = new_price
                    else:
                        dicti[new] = 10000000000000
                else:  # not yet at the goal height
                    if new in visited and new not in path and new[0]!=xi*resolution and abs(yf*resolution-new[1])<=abs(yf*resolution-path[-1][1]):
                        dicti[new] = new_price
                    else:
                        dicti[new] = 10000000000000
            else:
                if new in visited and new not in path:
                    dicti[new] = new_price
                else:
                    dicti[new] = 10000000000000
        t = min(dicti.items(), key=operator.itemgetter(1))[0]
        # append the best neighbour to the path
        path.append((t[0], t[1]))
        if t == (xf * resolution, yf * resolution):
            break
        # clear the candidate dictionary for the next step
        dicti.clear()
    # For the robot implementation: map each grid move to a heading
    angulos = {
        (0, 0): angle,
        (0, 1): 90,
        (1, 1): 45,
        (1, 0): 0,
        (1, -1): -45,
        (0, -1): -90,
        (-1, -1): -135,
        (-1, 0): 180,
        (-1, 1): 135
    }
    acciones = []
    mov = []
    for x in xrange(0, len(path) - 1):
        mov.append((path[x + 1][0] - path[x][0], path[x + 1][1] - path[x][1]))
    mov.insert(0, (0, 0))
    for x in xrange(0, len(mov) - 1):
        # diagonal steps travel sqrt(2) cell lengths, straight steps one
        if (abs(mov[x + 1][0]) + abs(mov[x + 1][1])) == 2:
            acciones.append((angulos[mov[x + 1]] - angulos[mov[x]], (((1 / resolution) ** 2) * 2) ** (1 / 2)))
        else:
            acciones.append((angulos[mov[x + 1]] - angulos[mov[x]], 1 / resolution))
    # ----- merge consecutive straight moves into single fluid motions -----
    accionesfluidas = []
    x = 0
    while (x < len(acciones) - 1):
        rota = acciones[x][0]
        trasla = acciones[x][1]
        for w in xrange(x + 1, len(acciones)):
            if acciones[x][1] == acciones[w][1] and acciones[w][0] == 0:
                trasla = trasla + acciones[w][1]
                x = w
            else:
                break
        accionesfluidas.append((rota, trasla))
        x = x + 1
    # ------------------------------------------------------------------
    # total travelled distance (not returned; see commented print below)
    costofinal = 0
    for x in mov:
        if x == (1, 1) or x == (-1, -1) or x == (-1, 1) or x == (1, -1):
            costofinal = costofinal + (((1 / resolution) ** 2) * 2) ** (1 / 2)
        else:
            costofinal = costofinal + (1 / resolution)
    info=path,mov,acciones
    # print(mov)
    #print(path)
    # print(acciones)
    # print(visited)
    return(info)
    #print(accionesfluidas)
    #print('El cossto final del recorrido es: ', costofinal)
|
# http://codeforces.com/contest/1553/problem/D
def main():
    """Solve one test case of Codeforces 1553D.

    Reads two strings s and t from stdin.  Greedy from the front: skip
    the first (n - m) % 2 characters of s so parities line up, then walk
    s matching t in order; on a mismatch the k flag skips the following
    character as well (characters are removable only in pairs).

    Returns:
        bool: True iff all of t was matched.
    """
    s = list(input().rstrip())
    t = list(input().rstrip())
    n = len(s)
    m = len(t)
    # t cannot be longer than s
    if n<m: return False
    p = (n-m)%2
    q, k = 0, 0
    for i in range(p, n):
        if k==1:
            # partner of a previously mismatched character: skip it
            k = 0
            continue
        if q<m and s[i]==t[q]:
            q += 1
        else:
            k = 1
    return q==m
q = int(input())  # number of test cases
for i in range(q):
    # main() reads one (s, t) pair and reports whether t is reachable
    if main():
        print("YES")
    else:
        print("NO")
# TLE
# from queue import Queue
# def main():
# s = list(input().rstrip())
# t = list(input().rstrip())
# # print(s)
# # print(t)
# ans = "NO"
# if len(s)<len(t):
# print(ans)
# return
# q = Queue()
# q.put([len(s)-1, len(t)-1])
# while not q.empty():
# si, ti = q.get()
# # print(si, ti)
# if ti<0:
# ans = "YES"
# break
# if si<ti: continue
# if s[si]==t[ti]:
# q.put([si-1, ti-1])
# if si>=2:
# q.put([si-2, ti])
# print(ans)
# return
# q = int(input())
# for i in range(q):
# main()
# Memory limit exceeded
# import sys
# sys.setrecursionlimit(10 ** 7)
# def main():
# s = list(input().rstrip())
# t = list(input().rstrip())
# # print(s)
# # print(t)
# ans = ["NO"]
# if len(s)<len(t):
# print(ans[0])
# return
# def dfs(si, ti):
# print(si, ti, ans[0])
# if ans[0]=="YES": return
# if ti<0:
# ans[0] = "YES"
# # print(si, ti, ans[0])
# return
# if si<ti: return
# if s[si]==t[ti]:
# dfs(si-1, ti-1)
# dfs(si-2, ti)
# return
# dfs(len(s)-1, len(t)-1)
# print(ans[0])
# return
# q = int(input())
# for i in range(q):
# main()
# WA
# def main():
# s = list(input().rstrip())
# t = list(input().rstrip())
# # print(s)
# # print(t)
# ans = "NO"
# if len(s)<len(t):
# print(ans)
# return
# for i in range(len(s)-1, -1, -1):
# if s[i]!=t[-1]: continue
# si = i
# for ti in range(len(t)-1, -1, -1):
# while si>=0 and s[si]!=t[ti]:
# si -= 2
# si -= 1
# if si<ti: break
# else:
# ans = "YES"
# break
# print(ans)
# return
# q = int(input())
# for i in range(q):
# main() |
from wtforms import StringField, validators, SubmitField
from flask_wtf import FlaskForm
class ImageLinkForm(FlaskForm):
    """Minimal Flask-WTF form: one text field for an image URL."""
    link = StringField('ImageLink')  # raw link text; no validators attached
    submit = SubmitField('Submit')
|
"""Test model."""
import argparse
from torchvision import transforms
from torch.utils.data import DataLoader
from model.model import Generator, Discriminator
from util.datasets import CelebAHQDataset
from util.custom_transforms import Normalize, CenterSquareMask, \
ScaleNRotate, ToTensor
parser = argparse.ArgumentParser()
# NOTE(review): the default is a machine-specific absolute path; override
# --data_dir when running anywhere else.
parser.add_argument('--data_dir',
                    default= '/home/whikwon/Documents/github/download-celebA-HQ/tfrecord/', # noqa E501
                    help='dataset directory')
args = parser.parse_args()  # parsed at import time
def test_all_level_yes_mask_yes_attr(args):
    """Test model with input image, mask and attributes.

    Smoke-tests Generator/Discriminator output shapes at every
    progressive-growing level (integer level = stable training state,
    half level = fade-in transition state).

    Args:
        args: parsed CLI namespace; only ``args.data_dir`` is used.
    """
    transform = transforms.Compose([Normalize(0.5, 0.5),
                                    CenterSquareMask(),
                                    ScaleNRotate(),
                                    ToTensor()])
    batch_size = 1
    num_attrs = 40
    resolutions_to = [4, 8, 8, 16, 16, 32, 32, 64, 64,
                      128, 128, 256, 256]  # 512, 512]
    levels = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5,
              6, 6.5, 7]  # 7.5, 8]
    data_shape = [batch_size, 3, 512, 512]
    G = Generator(data_shape, num_attrs=num_attrs)
    D = Discriminator(data_shape, num_attrs=num_attrs)
    for res, lev in zip(resolutions_to, levels):
        dataset = CelebAHQDataset(args.data_dir, res, transform)
        dataloader = DataLoader(dataset, batch_size, True)
        # BUG FIX: ``iter(dataloader).next()`` is Python 2 only and raises
        # AttributeError on Python 3; use the builtin next() instead.
        sample = next(iter(dataloader))
        image = sample['image']
        masked_image = sample['masked_image']
        mask = sample['mask']
        attr = sample['attr']
        print(f"level: {lev}, resolution: {res}, image: {masked_image.shape}, \
        mask: {mask.shape}")
        # Generator
        if isinstance(lev, int):
            # training state
            fake_image1 = G(masked_image, attr, mask, lev)
            assert list(fake_image1.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        else:
            # transition state
            fake_image2 = G(masked_image, attr, mask, lev)
            assert list(fake_image2.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        # Discriminator
        if isinstance(lev, int):
            # training state
            cls1, attr1 = D(image, lev)
            assert list(cls1.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
            assert list(attr1.shape) == [batch_size, num_attrs], \
                f'{res, lev} test failed'
        else:
            # transition state
            cls2, attr2 = D(image, lev)
            assert list(cls2.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
            assert list(attr2.shape) == [batch_size, num_attrs], \
                f'{res, lev} test failed'
def test_all_level_no_mask_yes_attr(args):
    """Test model with input image and attributes (no mask input).

    Same shape smoke test as the mask+attr variant, but the Generator is
    built with ``use_mask=False`` and is called without the mask tensor.

    Args:
        args: parsed CLI namespace; only ``args.data_dir`` is used.
    """
    transform = transforms.Compose([Normalize(0.5, 0.5),
                                    CenterSquareMask(),
                                    ScaleNRotate(),
                                    ToTensor()])
    batch_size = 1
    num_attrs = 40
    resolutions_to = [4, 8, 8, 16, 16, 32, 32, 64, 64,
                      128, 128, 256, 256]  # 512, 512]
    levels = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5,
              6, 6.5, 7]  # 7.5, 8]
    data_shape = [batch_size, 3, 512, 512]
    G = Generator(data_shape, use_mask=False, num_attrs=num_attrs)
    D = Discriminator(data_shape, num_attrs=num_attrs)
    for res, lev in zip(resolutions_to, levels):
        dataset = CelebAHQDataset(args.data_dir, res, transform)
        dataloader = DataLoader(dataset, batch_size, True)
        # BUG FIX: Python 2 ``.next()`` replaced with builtin next()
        sample = next(iter(dataloader))
        image = sample['image']
        masked_image = sample['masked_image']
        mask = sample['mask']
        attr = sample['attr']
        print(f"level: {lev}, resolution: {res}, image: {masked_image.shape}, \
        mask: {mask.shape}")
        # Generator
        if isinstance(lev, int):
            # training state
            fake_image1 = G(masked_image, attr, cur_level=lev)
            assert list(fake_image1.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        else:
            # transition state
            fake_image2 = G(masked_image, attr, cur_level=lev)
            assert list(fake_image2.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        # Discriminator
        if isinstance(lev, int):
            # training state
            cls1, attr1 = D(image, lev)
            assert list(cls1.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
            assert list(attr1.shape) == [batch_size, num_attrs], \
                f'{res, lev} test failed'
        else:
            # transition state
            cls2, attr2 = D(image, lev)
            assert list(cls2.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
            assert list(attr2.shape) == [batch_size, num_attrs], \
                f'{res, lev} test failed'
def test_all_level_yes_mask_no_attr(args):
    """Test model with input image and mask (no attribute input).

    Both networks are built with ``use_attrs=False``; the Discriminator
    therefore returns only the realness score.

    Args:
        args: parsed CLI namespace; only ``args.data_dir`` is used.
    """
    transform = transforms.Compose([Normalize(0.5, 0.5),
                                    CenterSquareMask(),
                                    ScaleNRotate(),
                                    ToTensor()])
    batch_size = 1
    resolutions_to = [4, 8, 8, 16, 16, 32, 32, 64, 64,
                      128, 128, 256, 256]  # 512, 512]
    levels = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5,
              6, 6.5, 7]  # 7.5, 8]
    data_shape = [batch_size, 3, 512, 512]
    G = Generator(data_shape, use_attrs=False)
    D = Discriminator(data_shape, use_attrs=False)
    for res, lev in zip(resolutions_to, levels):
        dataset = CelebAHQDataset(args.data_dir, res, transform)
        dataloader = DataLoader(dataset, batch_size, True)
        # BUG FIX: Python 2 ``.next()`` replaced with builtin next()
        sample = next(iter(dataloader))
        image = sample['image']
        masked_image = sample['masked_image']
        mask = sample['mask']
        print(f"level: {lev}, resolution: {res}, image: {image.shape}, \
        mask: {mask.shape}")
        # Generator
        if isinstance(lev, int):
            # training state
            fake_image1 = G(masked_image, None, mask, cur_level=lev)
            assert list(fake_image1.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        else:
            # transition state
            fake_image2 = G(masked_image, None, mask, cur_level=lev)
            assert list(fake_image2.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        # Discriminator
        if isinstance(lev, int):
            # training state
            cls1 = D(image, lev)
            assert list(cls1.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
        else:
            # transition state
            cls2 = D(image, lev)
            assert list(cls2.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
def test_all_level_no_mask_no_attr(args):
    """Test model with input image only (no mask, no attributes).

    Args:
        args: parsed CLI namespace; only ``args.data_dir`` is used.
    """
    transform = transforms.Compose([Normalize(0.5, 0.5),
                                    CenterSquareMask(),
                                    ScaleNRotate(),
                                    ToTensor()])
    batch_size = 1
    resolutions_to = [4, 8, 8, 16, 16, 32, 32, 64, 64,
                      128, 128, 256, 256]  # 512, 512]
    levels = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5,
              6, 6.5, 7]  # 7.5, 8]
    data_shape = [batch_size, 3, 512, 512]
    G = Generator(data_shape, use_attrs=False, use_mask=False)
    D = Discriminator(data_shape, use_attrs=False)
    for res, lev in zip(resolutions_to, levels):
        dataset = CelebAHQDataset(args.data_dir, res, transform)
        dataloader = DataLoader(dataset, batch_size, True)
        # BUG FIX: Python 2 ``.next()`` replaced with builtin next()
        sample = next(iter(dataloader))
        image = sample['image']
        masked_image = sample['masked_image']
        mask = sample['mask']
        print(f"level: {lev}, resolution: {res}, image: {image.shape}, \
        mask: {mask.shape}")
        # Generator
        if isinstance(lev, int):
            # training state
            fake_image1 = G(masked_image, cur_level=lev)
            assert list(fake_image1.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        else:
            # transition state
            fake_image2 = G(masked_image, cur_level=lev)
            assert list(fake_image2.shape) == [batch_size, 3, res, res], \
                f'{res, lev} test failed'
        # Discriminator
        if isinstance(lev, int):
            # training state
            cls1 = D(image, lev)
            assert list(cls1.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
        else:
            # transition state
            cls2 = D(image, lev)
            assert list(cls2.shape) == [batch_size, 1], \
                f'{res, lev} test failed'
if __name__ == "__main__":
    # run every shape smoke test in sequence; each builds its own models
    test_all_level_yes_mask_yes_attr(args)
    test_all_level_yes_mask_no_attr(args)
    test_all_level_no_mask_yes_attr(args)
    test_all_level_no_mask_no_attr(args)
|
# Interactive demo: square a number typed by the user.
print("hello")
user_input = int(input("Give me a number! "))
# NOTE(review): user_input is already an int, so the int() casts below are redundant
result = int (user_input) * int(user_input)
print (result)
name = input ('What is your name')  # NOTE(review): 'name' is collected but never used
## https://leetcode.com/problems/single-number
class Solution:
    def singleNumber(self, nums: list[int]) -> int:
        """Return the value that appears exactly once in *nums*.

        Every other value appears exactly twice.  Sorts in place
        (mutates *nums*), then scans adjacent pairs; the first broken
        pair exposes the singleton.  O(n log n) time, O(1) extra space.

        FIX: annotation changed from ``List[int]`` to the builtin
        generic ``list[int]`` — ``List`` was never imported, so defining
        the class raised NameError.
        """
        ### Method 1 ###
        nums.sort()
        i = 0
        while i < len(nums) - 1:
            if nums[i] == nums[i + 1]:
                i += 2  # matched pair: skip both
            else:
                return nums[i]  # first unpaired element
        return nums[i]  # singleton is the last element
### Method 2 ###
class Solution:
    def singleNumber(self, nums: list[int]) -> int:
        """Return the value that appears exactly once in *nums*.

        XOR of all elements: pairs cancel (x ^ x == 0), leaving the
        singleton.  O(n) time, O(1) space.

        FIX: annotation changed from the undefined ``List[int]`` to the
        builtin generic ``list[int]``; iteration switched to the
        idiomatic direct loop instead of ``range(len(...))``.
        """
        bit = 0
        for value in nums:
            bit ^= value
        return bit
|
## Ch05 P 5.31
def color_to_value(temp):
if temp >=0 and temp <=25:
red = 0
blue = 255
green = color_number(temp)
elif temp >= 25 and temp <= 50:
red = 0
green = 255
blue = color_number(25-(temp-25))
elif temp >= 50 and temp <= 75:
blue = 0
green = 255
red = color_number(temp-50)
elif temp >= 75 and temp <= 100:
blue = 0
red = 255
green = color_number(25-(temp-75))
color = 65536 * red + 256 * green + blue
return round(color)
def color_number(temp):
color = 255/25*temp
return color
# demo: colours just below, at, and above temp=75 on the green->red ramp
print(color_to_value(74))
print(color_to_value(75))
print(color_to_value(76))
import maya.cmds as cmds
class ToggleAxis():
    """Tiny Maya UI: one window with one button that toggles local-axis
    display on the currently selected joints."""

    def __init__(self):
        # window name doubles as the UI identifier until create() replaces
        # it with the handle returned by cmds.window
        self.stringWindow = 'Toggle Axis'
        self.main_row = ""
        self.buttonIn = ""

    def delete(self):
        """Close the window if a previous instance is still open."""
        if cmds.window(self.stringWindow, exists=True):
            cmds.deleteUI(self.stringWindow)

    def create(self):
        """(Re)build the window with a single full-width button."""
        self.delete()
        self.stringWindow = cmds.window(self.stringWindow,
                                        title='Toggle Axis',
                                        widthHeight=[300,100])
        self.main_row = cmds.rowLayout(parent=self.stringWindow,
                                       numberOfColumns=1)
        self.buttonIn = cmds.button(parent=self.main_row,
                                    width=300,
                                    c=lambda *x: self.Toggled(),
                                    label="Toggle Axis")
        cmds.showWindow(self.stringWindow)

    def Toggled(self):
        """Toggle local-axis display and expose joint-orient channels for
        every selected node."""
        sels = cmds.ls(selection=True)
        cmds.toggle(la=True)
        for s in sels:
            # make the jointOrient / local-axis attributes visible in the
            # channel box (cb=True)
            cmds.setAttr(s + ".jointOrientX", cb=True)
            cmds.setAttr(s + ".jointOrientY", cb=True)
            cmds.setAttr(s + ".jointOrientZ", cb=True)
            cmds.setAttr(s + ".displayLocalAxis", cb=True)
|
from unittest import TestCase
from brill_tagger.rules import *
class TestRule(TestCase):
    """Unit tests for Rule / Rules from brill_tagger.rules."""

    def test_single_rule_creation(self):
        """Rules are created independently and keep their own fields."""
        first = Rule('VBZ', 'VB', 'TO')
        second = Rule('VB', 'VBZ', 'TO')
        for rule, (old, new, cond) in ((first, ('VBZ', 'VB', 'TO')),
                                       (second, ('VB', 'VBZ', 'TO'))):
            self.assertEqual(rule.old_tag, old)
            self.assertEqual(rule.new_tag, new)
            self.assertEqual(rule.condition, cond)

    def test_rules_creation(self):
        """Separate Rules containers do not share state."""
        bag_a = Rules()
        bag_b = Rules()
        bag_a.enqueue(Rule('VBZ', 'VB', 'TO'))
        self.assertEqual(len(bag_a), 1)
        self.assertEqual(len(bag_b), 0)

    def test_single_rule_printing(self):
        """str() of a rule is the space-joined tag triple."""
        self.assertEqual(str(Rule('VBZ', 'VB', 'TO')), 'VBZ VB TO')

    def test_rules_printing(self):
        """str() of a container is one line per rule, newline-terminated."""
        bag = Rules()
        for _ in range(3):
            bag.enqueue(Rule('VBZ', 'VB', 'TO'))
        self.assertEqual(str(bag), "VBZ VB TO\nVBZ VB TO\nVBZ VB TO\n")

    def test_rule_equality(self):
        """Default-constructed rules compare equal; distinct ones do not."""
        blank_a = Rule()
        blank_b = Rule()
        filled = Rule('VBZ', 'VB', 'TO')
        self.assertTrue(blank_a == blank_b)
        self.assertFalse(blank_b == filled)
|
#!/usr/bin/env python
# Reproductions/tests for crashes/read errors in TiffDecode.c
# When run in python, all of these images should fail for
# one reason or another, either as a buffer overrun,
# unrecognized datastream, or truncated image file.
# There shouldn't be any segfaults.
#
# if run like
# `valgrind --tool=memcheck python check_tiff_crashes.py 2>&1 | grep TiffDecode.c`
# the output should be empty. There may be python issues
# in the valgrind especially if run in a debug python
# version.
from PIL import Image
repro_read_strip = (
    "images/crash_1.tif",
    "images/crash_2.tif",
)

# Each file is expected to FAIL to load; we only care that the failure is a
# Python exception (printed below) rather than a native crash in TiffDecode.c.
for path in repro_read_strip:
    with Image.open(path) as im:
        try:
            im.load()
        except Exception as msg:
            print(msg)
|
#!/usr/bin/python
# Report which Meraki clients with OS "Slingbox" were seen in the last 7 days
# and e-mail the resulting CSV.  Python 2 script (old email.MIME* import
# paths, unbuffered open(..., "w", 0)).

#imports
import requests
import json
import os
import time
import sys
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders

#Import the CRED module from a separate directory
sys.path.insert(0,'../CRED')
import cred

#main varaiables to be anonymized later before upload to github:
# BUG FIX: these cred.* lookups originally appeared BEFORE "import cred",
# which raised a NameError at startup; they now follow the import.
organization = cred.organization
key = cred.key
email_server = cred.email_server
me = cred.me
you1 = cred.you1

#Main URL for the Meraki Platform
dashboard = "https://api.meraki.com"
#api token and other data that needs to be uploaded in the header
headers = {'X-Cisco-Meraki-API-Key': (key), 'Content-Type': 'application/json'}
# open files for writing (buffering=0 -> write through immediately)
avery_status = open("avery_status.csv", "w", 0)
error_status = open("error_status.csv", "w", 0)
#pull back all of the networks for the organization
get_network_url = dashboard + '/api/v0/organizations/%s/networks' % organization
#request the network data
get_network_response = requests.get(get_network_url, headers=headers)
#puts the data into a json format
get_network_json = get_network_response.json()
for network in get_network_json:
    time.sleep(1)
    # timespan=604800 seconds = 7 days of client history
    get_client_url = dashboard + '/api/v0/networks/%s/clients?timespan=604800' % network["id"]
    get_client_response = requests.get(get_client_url, headers=headers)
    get_client_json = get_client_response.json()
    for client in get_client_json:
        time.sleep(1)
        try:
            if (client["os"]) == "Slingbox":
                avery_status.write(network["name"] + ", " + str(client["lastSeen"] + "\n"))
        except TypeError:
            error_status.write(network["name"] + "\n")
            #pass
msg = MIMEMultipart()
msg['Subject'] = 'Current connection status of Avery Guns'
msg['From'] = me
#used when sending email to groups vs a single user.
#msg['To'] = ', '.join(you1)
msg['To'] = you1
part = MIMEBase('application', "octet-stream")
part.set_payload(open("avery_status.csv", "rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="avery_status.csv"')
msg.attach(part)
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP(email_server)
s.sendmail(me, you1, msg.as_string())
s.quit()
# BUG FIX: close was referenced without parentheses, so the files were
# never actually closed.
avery_status.close()
error_status.close()
|
# use ord() and chr() to convert
def caesar(str, shift=13):
    """Shift each letter of *str* by *shift* places (after upper-casing).

    Non-alphabetic characters pass through unchanged.  The result is
    printed and returned.
    """
    shifted = []
    for ch in str.upper():
        if ch.isalpha():
            code = ord(ch) + shift
            if code > 90:  # wrap past 'Z'
                code -= 26
            ch = chr(code)
        shifted.append(ch)
    new_word = ''.join(shifted)
    print(new_word)
    return new_word


caesar("SERR PBQR PNZC")
|
import UB_BB
import os
import sys
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB, GaussianNB # GaussianNB
from sklearn.feature_extraction import text
def naiveBayes(trainingData, labels):
    """Fit and return a multinomial naive-Bayes classifier (alpha=0.25)."""
    classifier = MultinomialNB(alpha=0.25)
    classifier.fit(trainingData, labels)
    return classifier
def nGram(gramTuple):
    """Train and score a naive-Bayes text classifier on n-gram counts.

    NOTE(review): reads the module globals ``trainSet``, ``evalSet`` and
    ``labels`` assigned in the __main__ block — confirm before reusing
    this function elsewhere.

    Args:
        gramTuple: (min_n, max_n) passed to CountVectorizer's ngram_range.

    Returns:
        list[str]: one CSV-style result line per algorithm (only NB here).
    """
    trainingData = UB_BB.getString(trainSet) # String representation of all the training files in the format[0,1,2,3]
    my_stop_words = text.ENGLISH_STOP_WORDS
    vectorizer = CountVectorizer(ngram_range=gramTuple, stop_words = my_stop_words, strip_accents="ascii", max_df=.8) # Get the count vectorizer
    unigramTrainCounts = vectorizer.fit_transform(trainingData) # Fit the vectorizer with the training string
    testingData = UB_BB.getString(evalSet) # String representation of the evaluation data for testing
    evalCounts = vectorizer.transform(testingData)
    trainNB = naiveBayes(unigramTrainCounts, labels)
    algorithmArray = [trainNB]
    testCases = UB_BB.genTestCases(evalSet, labels)
    strQueries = testCases[0]
    answers = testCases[1]
    queries = []
    # vectorize every query document individually
    for category in strQueries:
        for txt in category:
            queries.append(vectorizer.transform([txt]))
    predictionArray = []
    for algorithm in algorithmArray:
        predictionArray.append(UB_BB.generatePredictions(queries, algorithm)) # Array of arrays, [nb, log, svm, rf]
    # print(predictionArray)
    algOutput = ["NB"]
    UBoutput = []
    baseline = "UB"
    config = ["(Multinomial, Alpha=.25, Stopwords, Strip Accents Ascii, max_df=.8)"]
    # one "<algo>,<config>,<scores>" line per algorithm
    for x in range(len(algOutput)):
        UBoutput.append("%s,%s,%s" % (algOutput[x],config[x], UB_BB.tripleScore(answers, predictionArray[x])))
    return UBoutput
if __name__ == '__main__':
    # usage: script <train_dir> <eval_dir> <output_file>
    trainSet = sys.argv[1]
    evalSet = sys.argv[2]
    output = sys.argv[3]
    labels = os.listdir(trainSet)
    uniOut = nGram((1, 1))
    # print(uniOut)
    # FIX: use a context manager so the output file is flushed and closed
    # even on error (the original never closed it).
    with open(output, "w") as f:
        for line in uniOut:
            f.write(line)
            f.write("\n")
def solution():
    """Sum the even Fibonacci numbers below four million (Project Euler 2)."""
    return sum(fib for fib in fib_limit(4E6) if fib % 2 == 0)


def fib_limit(limit):
    """Yield Fibonacci numbers (1, 1, 2, 3, ...) strictly below *limit*.

    FIX: the docstring promised a generator but the original built and
    returned a list; it now genuinely yields lazily.  Backward compatible
    since callers only iterate the result.
    """
    prev, current = 0, 1
    while current < limit:
        yield current
        prev, current = current, current + prev


print(solution())
|
from django.urls import reverse
from django.http import HttpResponseRedirect, Http404, JsonResponse
from django.views.generic.edit import FormView
from django.views.generic.base import TemplateView, View
from django.views.generic.list import ListView
from .models import Document
from .forms import DocumentForm
# Create your views here.
class IndexView(FormView):
    """Upload form plus a sidebar of the 15 most recent shared documents."""
    template_name = 'web/index.html'
    form_class = DocumentForm
    success_url = 'web:tenso'  # URL pattern name, resolved in form_valid

    def form_valid(self, form):
        # save the upload, then redirect to its detail page
        form.save()
        return HttpResponseRedirect(reverse(self.get_success_url(),
                                            kwargs={'pk': form.instance.pk}))

    def get_context_data(self, **kwargs):
        context = super(IndexView, self).get_context_data(**kwargs)
        # latest shared documents, newest first
        tensos = Document.objects.filter(share=True).order_by('-created')[:15]
        context['tensos'] = tensos
        return context
class TensoView(TemplateView):
    """Detail page for a single Document, looked up by primary key."""
    template_name = 'web/result.html'
    context_object_name = 'tenso'  # NOTE(review): not used by TemplateView

    def get_context_data(self, **kwargs):
        pk = self.kwargs.get('pk')
        # absolute URL of this detail page, exposed for sharing
        url = self.request.build_absolute_uri(reverse('web:tenso', args=(pk, )))
        context = super(TensoView, self).get_context_data(**kwargs)
        try:
            tenso = Document.objects.get(pk=pk)
            context['tenso'] = tenso
            context['url'] = url
        except Document.DoesNotExist:
            raise Http404
        return context
class ShareView(View):
    """POST endpoint that flags a document as shared and returns JSON."""
    template_name = 'web/result.html'  # NOTE(review): unused by the bare View

    def post(self, request):
        try:
            tenso_pk = request.POST.get('pk')
            tenso = Document.objects.get(pk=tenso_pk)
            tenso.share = True
            tenso.save()
            response = {
                'status': 'Success',
                'pk': tenso.pk
            }
            return JsonResponse(response)
        except Document.DoesNotExist:
            raise Http404
class TensoListView(ListView):
    """Paginated list of all shared documents, newest first."""
    queryset = Document.objects.filter(share=True).order_by('-created')
    template_name = 'web/tenso_list.html'
    context_object_name = 'tenso_list'
    paginate_by = 20
class PrivacyView(TemplateView):
    """Static privacy-policy page."""
    template_name = 'web/privacy.html'
class URLView(TemplateView):
    """Renders a JavaScript template (URL routes exposed to the client)."""
    template_name = 'web/urls.js'
|
from plenum.common.constants import TXN_TYPE, STATE_PROOF, DOMAIN_LEDGER_ID
from plenum.common.util import get_utc_epoch
from plenum.test.helper import sendRandomRequests, waitForSufficientRepliesForRequests
from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \
client1, wallet1, client1Connected
nodeCount = 4
nodes_wth_bls = 0
def test_make_proof_bls_disabled(looper, txnPoolNodeSet,
                                 client1, client1Connected, wallet1):
    """With no BLS-enabled nodes (nodes_wth_bls == 0) no node can build a
    state proof for a committed request."""
    reqs = sendRandomRequests(wallet1, client1, 1)
    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)
    req = reqs[0]
    for node in txnPoolNodeSet:
        req_handler = node.get_req_handler(DOMAIN_LEDGER_ID)
        key = req_handler.prepare_buy_key(req.identifier, req.reqId)
        proof = req_handler.make_proof(key)
        assert not proof  # no BLS multi-signature available -> no proof
def test_make_result_bls_disabled(looper, txnPoolNodeSet,
                                  client1, client1Connected, wallet1):
    """Without BLS signatures the reply result must not carry STATE_PROOF."""
    reqs = sendRandomRequests(wallet1, client1, 1)
    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)
    req = reqs[0]
    for node in txnPoolNodeSet:
        req_handler = node.get_req_handler(DOMAIN_LEDGER_ID)
        key = req_handler.prepare_buy_key(req.identifier, req.reqId)
        proof = req_handler.make_proof(key)
        result = req_handler.make_result(req,
                                         {TXN_TYPE: "buy"},
                                         2,
                                         get_utc_epoch(),
                                         proof)
        assert STATE_PROOF not in result
|
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from dateutil.rrule import rrulestr, rrule
from dateutil.parser import parse
from datetime import datetime
import utils
import matplotlib.pyplot as plt
import numpy as np
import csv
def load_arguments(**kwargs):
    """Build the CLI argument parser, letting keyword overrides patch the
    declared defaults.

    Args:
        **kwargs: dest-name -> value pairs overriding parser defaults.
            For 'type_pas' the aliases 'months'/'days'/'years' are
            translated to rrule frequency names.

    Returns:
        argparse.ArgumentParser: parser with (possibly) patched defaults.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-pc","--fichier_cours", help="Chemin vers le fichier des historiques de cours", type=str, default=r'./data/Histo_Cours.csv')
    parser.add_argument("-pb", "--fichier_bench", help="Chemin vers le fichier de l'historiqe du benchmark", type=str, default=r'./data/Histo_Bench.csv')
    parser.add_argument("-pp", "--fichier_prtfs", help="Chemin vers le fichier del'historique des portefeuilles", type=str, default=r'./output/prtfs.csv')
    parser.add_argument("-dd", "--date_debut", help="Date de début du calcul des performances", type=str, default='01/01/2019')
    parser.add_argument("-df", "--date_fin", help="Date de fin du calcul des performances", type=str, default='01/04/2019')
    parser.add_argument("-tp", "--type_pas", help="Type de pas entre chaque calcul de perf", type=str, choices=['DAILY','WEEKLY','MONTHLY','YEARLY'], default='MONTHLY')
    parser.add_argument("-nl", "--nb_pas", help="Nombre de pas entre chaque calcul de perf", type=int, default=1)
    parser.add_argument("-b", "--base", help="Base du graphique", type=int, default=100)
    parser.add_argument("-g", "--graphique", help="Créé le graphique", action="store_true")
    # NOTE(review): walks argparse's private _actions list to patch
    # defaults; parser.set_defaults(**kwargs) would be the supported way.
    for key, value in kwargs.items():
        for action in parser._actions:
            if action.dest == key:
                if action.dest == 'type_pas':
                    # translate dateutil-style unit names to rrule frequencies
                    if value == 'months':
                        value = 'MONTHLY'
                    elif value == 'days':
                        value = 'DAILY'
                    elif value == 'years':
                        value = 'YEARLY'
                # NOTE(review): assigning a plain string to action.choices
                # makes argparse accept each CHARACTER as a choice — this
                # probably should be a list; confirm with callers.
                action.choices = value
                action.default = value
    return parser
def Calc_Perf(**kwargs):
    """Compute the performance of a rebalanced portfolio against its
    benchmark over a date grid, optionally plotting both series.

    Args:
        **kwargs: overrides forwarded to load_arguments; may also contain
            'Portefeuilles', a DataFrame of holdings (otherwise they are
            read from args.fichier_prtfs).

    Returns:
        dict: {'Perf_PRTF': ..., 'Perf_Bench': ...} total returns over
        the whole period.
    """
    parser = load_arguments(**kwargs)
    args = parser.parse_args()
    dfCours = pd.read_csv(args.fichier_cours,header=[0], sep=';',index_col=0, parse_dates=True)
    dfBench = pd.read_csv(args.fichier_bench, header=[0], sep=';',index_col=0, parse_dates=True)
    # if the portfolio was not passed as an argument, read it from the csv file
    dfPrtfs = kwargs.get('Portefeuilles',pd.DataFrame())
    if dfPrtfs.empty:
        dfPrtfs = pd.read_csv(args.fichier_prtfs,header=[0], sep=';', parse_dates=['DATE_PRTF'])
    # collect every portfolio (rebalancing) date
    dfPrtfs.sort_values(by=['DATE_PRTF'],ascending=True, inplace=True)
    dtPrtfs = [x.astype('M8[ms]').astype('O') for x in dfPrtfs['DATE_PRTF'].unique()]
    dtDeb = datetime.strptime(args.date_debut,'%d/%m/%Y')
    dtFin = datetime.strptime(args.date_fin,'%d/%m/%Y')
    # vector of performance-calculation dates
    strRule = "FREQ=" + str(args.type_pas) + ";INTERVAL=" + str(args.nb_pas) + ";UNTIL=" + datetime.strftime(dtFin,"%Y%m%d")
    setDates = set(rrulestr(strRule, dtstart=dtDeb))
    # make sure each portfolio date is present in the calculation-date set
    setDates.update(dtPrtfs)
    vDates = list(setDates)
    vDates.sort()
    # initialise a three-column matrix: Ticker, Weight, Price
    # drop the first date from lstDates since it corresponds to the base
    # initialise a vector of portfolio prices
    base = args.base
    newMatCalc = pd.DataFrame()#columns=['TICKER','POIDS','COURS'])
    vPrtf = []
    lBench = []
    for dtCalc in vDates:
        # portfolio dates at or before the calculation date
        dtList = [x for x in dtPrtfs if x <= dtCalc]
        # if at least one portfolio exists on or before the calculation date
        if len(dtList) > 0:
            dtLastPrtf = max(dtList)
            # if the calculation date is a portfolio rebalancing date
            if dtCalc == dtLastPrtf:
                # no calculation matrix yet: this is the base date
                if len(newMatCalc) ==0 :
                    newMatCalc = utils.GetLastPrtf(dfPrtfs,dfCours,dtCalc)
                    vPrtf.append(base)
                else:
                    oldMatCalc = newMatCalc
                    # matrix with the tickers/weights of the old portfolio
                    # and the prices as of the calculation date (dtCalc)
                    newMatCalc = utils.GetLastCours(oldMatCalc, dfCours, dtCalc)
                    # weighted performance of each line
                    perfPrtf = sum((newMatCalc['COURS']/oldMatCalc['COURS'] - 1)*oldMatCalc['POIDS'])
                    # append the latest portfolio price
                    derPrtfCours = vPrtf[len(vPrtf)-1]
                    derPrtfCours = derPrtfCours * (1+perfPrtf)
                    vPrtf.append(derPrtfCours)
                    # load the new (rebalanced) portfolio
                    newMatCalc = utils.GetLastPrtf(dfPrtfs,dfCours,dtCalc)
            else:
                oldMatCalc = newMatCalc
                # matrix with the tickers/weights of the old portfolio
                # and the prices as of the calculation date (dtCalc)
                newMatCalc = utils.GetLastCours(oldMatCalc, dfCours, dtCalc)
                # weighted performance of each line
                perfPrtf = sum((newMatCalc['COURS']/oldMatCalc['COURS'] - 1)*oldMatCalc['POIDS'])
                # append the latest portfolio price
                derPrtfCours = vPrtf[len(vPrtf)-1]
                derPrtfCours = derPrtfCours * (1+perfPrtf)
                vPrtf.append(derPrtfCours)
                # drift the weights inside the (unchanged) portfolio
                sNewPoids = newMatCalc['COURS']/oldMatCalc['COURS']*oldMatCalc['POIDS']
                # renormalise the weights to 100%
                sNewPoids = sNewPoids / sum(sNewPoids)
                sNewPoids.rename('POIDS', inplace=True)
                # attach the new weights to the new calculation matrix
                newMatCalc.drop(labels='POIDS', axis=1, inplace=True)
                newMatCalc = pd.concat([newMatCalc,sNewPoids], axis=1)
        else:
            vPrtf.append(base)
        # benchmark price series, rebased to the same base value
        utils.AddBenchPerf(lBench,dfBench,dtCalc,base)
    vBench = [x for x,y in lBench]
    if args.graphique:
        plt.plot(vDates,vPrtf, label='PRTF')
        plt.plot(vDates,vBench, label='Bench')
        plt.title("PRTF vs Bench")
        plt.legend()
        plt.show()
    output = {}
    output['Perf_PRTF'] = vPrtf[len(vPrtf)-1] / vPrtf[0] - 1
    # NOTE(review): vBench is indexed with len(vPrtf) here — this works only
    # if both vectors have vDates' length; confirm intentional.
    output['Perf_Bench'] = vBench[len(vPrtf)-1] / vBench[0] - 1
    return output
if __name__ == '__main__':
    # prints {'Perf_PRTF': ..., 'Perf_Bench': ...} computed with default args
    print(Calc_Perf())
from variables import msgs as M
from helpers import checkWrongAnswer
taskStr = 'task'
def getActInfo(actNum, tasks):
    """Prompt the user for every activity's data and store it in *tasks*.

    Args:
        actNum: number of activities to read from stdin.
        tasks: dict the activities are stored into (mutated in place).
    """
    for ac in range(actNum):
        num = ac + 1
        print(M['msg2'] + str(num))
        act = str(input(M['msg3']))   # activity description
        pred = str(input(M['msg4']))  # comma-separated predecessor ids
        dur = str(input(M['msg5']))   # duration
        dur = checkWrongAnswer(dur)   # validated by helpers.checkWrongAnswer
        print('\n')
        addTask(num, act, pred, dur, tasks)
def addTask(id, act, pred, dur, tasks):
    """Store one activity record in *tasks* under the key 'task<id>'.

    Args:
        id: numeric activity id.
        act: activity description.
        pred: comma-separated predecessor ids ('' means none).
        dur: activity duration.
        tasks: destination dict (mutated in place).
    """
    record = {
        'id': str(id),
        'name': act,
        # '-1' is the sentinel meaning "no predecessors"
        'pred': pred.split(',') if pred != '' else ['-1'],
        'dur': dur,
    }
    addDefault(record)
    tasks[taskStr + str(id)] = record
def addDefault(actInfo):
    """Initialise the CPM scheduling fields of an activity record.

    Sets earliest/latest start and finish, float and the critical flag
    to their zero/False defaults.

    Args:
        actInfo: the activity dict, mutated in place.
    """
    actInfo.update(ES=0, EF=0, LS=0, LF=0, float=0, isCritical=False)
def forwardPass(tasks):
    """CPM forward pass: compute earliest start (ES) / earliest finish (EF).

    Args:
        tasks: dict of activity records, mutated in place.
    """
    for task in tasks:
        if('-1' in tasks[task]['pred']):
            # no predecessors: the activity can start immediately
            tasks[task]['ES'] = 0
            tasks[task]['EF'] = (tasks[task]['dur'])
        else:
            # NOTE(review): for every task with predecessors this re-sweeps
            # the whole dict; it relies on insertion order so predecessors
            # are processed before their successors — confirm inputs do.
            for k in tasks.keys():
                current = tasks[k]
                for pred in current['pred']:
                    if(pred != '-1' and len(current['pred']) == 1):
                        current['ES'] = int(tasks[taskStr + pred]['EF'])
                        current['EF'] = int(current['ES']) + \
                            int(current['dur'])
                    elif(pred != '-1'):
                        # several predecessors: keep the latest finish time
                        if(int(tasks[taskStr + pred]['EF']) > int(current['ES'])):
                            current['ES'] = int(
                                tasks[taskStr + pred]['EF'])
                            current['EF'] = int(
                                current['ES']) + int(current['dur'])
def backwardPass(reversedTasks, tasks):
    """ Backward pass of the CPM algorithm.

    Walks the activities in reverse insertion order and fills in the late
    finish (LF), late start (LS) and float of every predecessor.

    Args:
        reversedTasks: list with the task keys in reversed order
        tasks: dictionary holding the activities (mutated in place)
    """
    for task in reversedTasks:
        current = tasks[task]
        # First entry of the reversed list is the final activity: its late
        # dates equal its early dates by definition.
        if(reversedTasks.index(task) == 0):
            current['LF'] = current['EF']
            current['LS'] = current['ES']
        for pred in current['pred']:
            if(pred != '-1'):
                currPred = tasks[taskStr + str(pred)]
                # LF == 0 means the predecessor has not been visited yet;
                # otherwise only tighten LF when this successor starts earlier.
                if(currPred['LF'] == 0):
                    currPred['LF'] = int(current['LS'])
                    currPred['LS'] = int(currPred['LF']) - \
                        int(currPred['dur'])
                    currPred['float'] = int(
                        currPred['LF']) - int(currPred['EF'])
                elif(int(currPred['LF']) > int(current['LS'])):
                    currPred['LF'] = int(current['LS'])
                    currPred['LS'] = int(currPred['LF']) - \
                        int(currPred['dur'])
                    currPred['float'] = int(
                        currPred['LF']) - int(currPred['EF'])
def findCriticalPath(tasks):
    """Run the full CPM computation and print the resulting schedule.

    Performs the forward pass, then the backward pass over the task keys in
    reverse insertion order, and finally prints the schedule matrix.

    Args:
        tasks: dictionary holding the activities (mutated in place)
    """
    forwardPass(tasks)
    # list(reversed(...)) replaces the manual append-then-reverse loop;
    # a real list is required because backwardPass calls .index() on it.
    reversedTasks = list(reversed(list(tasks.keys())))
    backwardPass(reversedTasks, tasks)
    printData(tasks)
def printData(tasks):
    """Print the schedule matrix with the CPM results.

    Marks activities with zero float as critical and blanks out the '-1'
    placeholder predecessor list before printing one row per activity.
    Note: mutates ``tasks`` ('isCritical' and 'pred') as a side effect.

    (The original file defined printData twice; the first definition, whose
    body was only a docstring, was dead code immediately shadowed by the
    second and has been removed.)

    Args:
        tasks: dictionary holding the activities
    """
    # Header kept in Spanish to match the rest of the user-facing output.
    print('ID\tNombre\tDur\tES\tEF\tLS\tLF\tHolgura\tEs Crítica\tPredecesores')
    for task in tasks:
        curr = tasks[task]
        if curr['float'] == 0:
            curr['isCritical'] = True
        if curr['pred'] == ['-1']:
            curr['pred'] = ''
        print(str(curr['id']) + '\t' + str(curr['name']) + '\t' + str(curr['dur']) + '\t' + str(curr['ES']) + '\t' + str(curr['EF']) + '\t' +
              str(curr['LS']) + '\t' + str(curr['LF']) + '\t' + str(curr['float']) + '\t' + str(curr['isCritical']) + '\t' + '\t' + str(curr['pred']))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-08-04 02:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: add the five contest-stat counters
    (beauty, cool, cute, smart, tough) to the ``adopt`` model, each an
    IntegerField defaulting to 0 so existing rows need no data migration."""

    dependencies = [
        ('pokedex', '0031_box_count'),
    ]
    operations = [
        migrations.AddField(
            model_name='adopt',
            name='beauty',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='adopt',
            name='cool',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='adopt',
            name='cute',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='adopt',
            name='smart',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='adopt',
            name='tough',
            field=models.IntegerField(default=0),
        ),
    ]
|
from typing import List
import numpy as np
import pandas as pd
from perfana.monte_carlo._utility import infer_frequency
from perfana.types import Vector
from ._types import Frequency
from .returns import annualized_returns_m
from .risk import cvar_m, volatility_m
__all__ = ["sensitivity_m", "sensitivity_cvar_m", "sensitivity_returns_m", "sensitivity_vol_m"]
def sensitivity_m(data: np.ndarray,
                  weights: Vector,
                  freq: Frequency,
                  shock: float = 0.05,
                  geometric: bool = True,
                  rebalance: bool = True,
                  cov: np.ndarray = None,
                  cvar_cutoff: int = 3,
                  cvar_data: np.ndarray = None,
                  alpha=0.95,
                  invert=True,
                  names: List[str] = None,
                  leveraged=False,
                  distribute=True) -> pd.DataFrame:
    """
    Calculates the sensitivity of adding and removing from the asset class on the portfolio.

    This is a wrapper for the 3 granular sensitivity calculations (returns,
    volatility, CVaR); use the base functions for finer control.

    Notes
    -----
    With a positive shock and ``distribute=True``, each asset class in turn
    receives the shock funded proportionately by the other asset classes, so
    weights still sum to 1 (e.g. ``[0.1, 0.2, 0.3, 0.4]`` shocked 5% on the
    first asset becomes ``[0.15, 0.19, 0.28, 0.38]``). A negative shock
    removes from the asset class and adds to the others proportionately.
    With ``distribute=False`` the shocked weight changes in place and the
    weights no longer sum to 1. Unless ``leveraged`` is True, shocked
    weights are capped to the [0, 1] range.

    Parameters
    ----------
    data: ndarray
        Monte carlo simulation cube (time x trial x asset).
    weights: array_like
        Portfolio weights, 1-D, matching the cube's last axis.
    freq: Frequency
        Data frequency: a string ('week', 'month', 'quarter', 'semi-annual',
        'year') or the integer number of periods per year.
    shock: float
        Absolute shock applied to each asset class in turn.
    geometric: bool
        If True, calculates the geometric mean, otherwise the arithmetic mean.
    rebalance: bool
        If True, the portfolio is assumed to be rebalanced at every step.
    cov: ndarray
        Asset covariance matrix; when None, volatility is derived from ``data``.
    cvar_cutoff: int
        Number of years the data cube is trimmed to for the CVaR calculation.
    cvar_data: ndarray
        Optional alternative cube for the CVaR calculation.
    alpha: float
        Confidence level for the CVaR calculation.
    invert: bool
        Whether to invert the confidence interval level.
    names: list of str
        Asset class names; defaults to ``Asset_1..Asset_n``.
    leveraged: bool
        If True, shocked weights may go below 0 and above 1.
    distribute: bool
        If True, the shock is funded proportionately by the other assets.

    Returns
    -------
    DataFrame
        Indexed by asset name with columns (ret, vol, cvar) for returns,
        volatility and CVaR respectively.

    Raises
    ------
    ValueError
        If ``cvar_cutoff`` is not a positive integer.

    Examples
    --------
    >>> from perfana.datasets import load_cube
    >>> from perfana.monte_carlo import sensitivity_m
    >>> data = load_cube()[..., :7]
    >>> weights = [0.25, 0.18, 0.13, 0.11, 0.24, 0.05, 0.04]
    >>> sensitivity_m(data, weights, 'quarterly', 0.05).columns.tolist()
    ['ret', 'vol', 'cvar']
    """
    # Validate eagerly with a real exception: ``assert`` is stripped under
    # ``python -O`` and would let a bad cutoff through silently.
    if not isinstance(cvar_cutoff, int) or cvar_cutoff <= 0:
        raise ValueError("cvar_cutoff must be a positive integer")
    cov_or_data = data if cov is None else cov
    if cvar_data is None:
        cvar_data = data
    # Trim the CVaR cube to the first ``cvar_cutoff`` years of periods.
    cvar_data = cvar_data[:cvar_cutoff * infer_frequency(freq)]
    ret = sensitivity_returns_m(data, weights, freq, shock, geometric, rebalance, names, leveraged, distribute)
    vol = sensitivity_vol_m(cov_or_data, weights, freq, shock, names, leveraged, distribute)
    cvar = sensitivity_cvar_m(cvar_data, weights, shock, alpha, rebalance, invert, names, leveraged, distribute)
    return pd.merge(ret, vol, left_index=True, right_index=True).merge(cvar, left_index=True, right_index=True)
def sensitivity_cvar_m(data: np.ndarray,
                       weights: Vector,
                       shock: float = 0.05,
                       alpha=0.95,
                       rebalance: bool = True,
                       invert=True,
                       names: List[str] = None,
                       leveraged=False,
                       distribute=True) -> pd.Series:
    """
    Calculates the sensitivity of a shock to the CVaR of the portfolio.

    Each asset class in turn is shocked by ``shock`` (absolute). With
    ``distribute=True`` the change is funded proportionately by the other
    asset classes so weights still sum to 1; otherwise the shocked weight is
    changed in place. Unless ``leveraged`` is True, shocked weights are
    capped to the [0, 1] range.

    Parameters
    ----------
    data: ndarray
        Monte carlo simulation cube (time x trial x asset).
    weights: array_like
        Portfolio weights, 1-D, matching the cube's last axis.
    shock: float
        Absolute shock applied to each asset class in turn.
    alpha: float
        Confidence level for the CVaR calculation.
    rebalance: bool
        If True, the portfolio is assumed to be rebalanced at every step.
    invert: bool
        Whether to invert the confidence interval level.
    names: list of str
        Asset class names; defaults to ``Asset_1..Asset_n``.
    leveraged: bool
        If True, shocked weights may go below 0 and above 1.
    distribute: bool
        If True, the shock is funded proportionately by the other assets.

    Returns
    -------
    Series
        CVaR per shocked asset class, indexed by asset name (name "cvar").
    """
    shocked_weights = _setup(weights, shock, leveraged, distribute)
    labels = _setup_names(weights, names)
    values = [cvar_m(data, w, alpha, rebalance, invert) for w in shocked_weights]
    return pd.Series(values, labels, name="cvar")
def sensitivity_returns_m(data: np.ndarray,
                          weights: Vector,
                          freq: Frequency,
                          shock: float = 0.05,
                          geometric: bool = True,
                          rebalance: bool = True,
                          names: List[str] = None,
                          leveraged=False,
                          distribute=True) -> pd.Series:
    """
    Calculates the sensitivity of a shock to the annualized returns of the portfolio.

    Each asset class in turn is shocked by ``shock`` (absolute). With
    ``distribute=True`` the change is funded proportionately by the other
    asset classes so weights still sum to 1; otherwise the shocked weight is
    changed in place. Unless ``leveraged`` is True, shocked weights are
    capped to the [0, 1] range.

    Parameters
    ----------
    data: ndarray
        Monte carlo simulation cube (time x trial x asset).
    weights: array_like
        Portfolio weights, 1-D, matching the cube's last axis.
    freq: Frequency
        Data frequency: a string ('week', 'month', 'quarter', 'semi-annual',
        'year') or the integer number of periods per year.
    shock: float
        Absolute shock applied to each asset class in turn.
    geometric: bool
        If True, calculates the geometric mean, otherwise the arithmetic mean.
    rebalance: bool
        If True, the portfolio is assumed to be rebalanced at every step.
    names: list of str
        Asset class names; defaults to ``Asset_1..Asset_n``.
    leveraged: bool
        If True, shocked weights may go below 0 and above 1.
    distribute: bool
        If True, the shock is funded proportionately by the other assets.

    Returns
    -------
    Series
        Annualized return per shocked asset class, indexed by asset name
        (name "ret").
    """
    shocked_weights = _setup(weights, shock, leveraged, distribute)
    labels = _setup_names(weights, names)
    values = [annualized_returns_m(data, w, freq, geometric, rebalance) for w in shocked_weights]
    return pd.Series(values, labels, name="ret")
def sensitivity_vol_m(cov_or_data: np.ndarray,
                      weights: Vector,
                      freq: Frequency = None,
                      shock: float = 0.05,
                      names: List[str] = None,
                      leveraged=False,
                      distribute=True) -> pd.Series:
    """
    Calculates the sensitivity of a shock to the annualized volatility of the portfolio.

    Parameters
    ----------
    cov_or_data
        Monte carlo simulation cube (time x trial x asset, in which case
        ``freq`` must be given) or an asset covariance matrix.
    weights: array_like
        Portfolio weights, 1-D, matching the data's last axis.
    freq: Frequency
        Data frequency: a string ('week', 'month', 'quarter', 'semi-annual',
        'year') or the integer number of periods per year.
    shock: float
        Absolute shock applied to each asset class in turn.
    names: list of str
        Asset class names; defaults to ``Asset_1..Asset_n``.
    leveraged: bool
        If True, shocked weights may go below 0 and above 1.
    distribute: bool
        If True, the shock is funded proportionately by the other assets.

    Returns
    -------
    Series
        Annualized volatility per shocked asset class, indexed by asset name
        (name "vol").
    """
    shocked_weights = _setup(weights, shock, leveraged, distribute)
    labels = _setup_names(weights, names)
    values = [volatility_m(cov_or_data, w, freq) for w in shocked_weights]
    return pd.Series(values, labels, name="vol")
def _setup(weights: Vector,
shock: float = 0.05,
leveraged=False,
distribute=True):
"""Common setup for sensitivity analytics"""
assert -1 <= shock <= 1, "shock must be between [-1, 1]"
weights = np.ravel(weights)
# if leverage is True:
# Prevents shocks beyond the asset's current allocation. That is if the shock is
# -5% and the asset only has 2% allocation, then the shock is effectively -2%.
# And prevent shocks that bring the assets beyond 100% allocation.
shocks = np.array([
shock if (0 <= w + shock <= 1) or leveraged else
-w if shock < 0 else 1 - w
for w in weights
])
n = len(weights)
matrix = np.tile(weights, (n, 1)) + np.diag(shocks)
if distribute:
weight_matrix = np.tile(weights, (n, 1))
np.fill_diagonal(weight_matrix, 0)
matrix -= weight_matrix * (shocks / weight_matrix.sum(1))[:, None]
return matrix
def _setup_names(weights: np.ndarray, names: List[str] = None):
if names is None:
return [f"Asset_{i + 1}" for i in range(len(weights))]
names = list(names)
assert len(names) == len(weights), "number of names given is not equal to length of weight vector"
return names
|
#!/usr/bin/env python3
#
# by Kendall Weaver <kendall@peppermintos.com>
# for Peppermint OS <http://peppermintos.com>
#
# Peppermint Control Center is a Python/GTK+3 GUI wrapper to handle a
# number of desktop configuration options from a number of different
# applications/scripts and put them all in one place.
#
#############
### To Do ###
#############
#
# 1. Improve keyboard shortcut support.
#
# 2. Add gettext support and begin working on translations.
#
# 3. Push to github.
#
import os
import sys
import subprocess
from gi.repository import Gtk
import xml.etree.ElementTree as ET
# --- First-run bootstrap: create the config directory and default files. ---
# NOTE(review): os.system reports failure only through its (ignored) return
# code, so the except blocks below are unlikely to ever fire — verify.
if not os.path.exists(os.path.expanduser("~/.config/peppermint-control-center/")):
    try:
        os.system("mkdir -p ~/.config/peppermint-control-center")
    except:
        print("ERROR: Configuration directory does not exist and could not be created.")
        sys.exit(1)
# Default pointer script: acceleration, button order, touchpad settings.
if not os.path.exists(os.path.expanduser("~/.config/peppermint-control-center/pointer")):
    try:
        os.system("echo '#!/bin/bash\n#Autogenerated script - Do not edit\nxset m 2 2\nxmodmap -e \"pointer = 1 2 3\"\nsynclient TouchPadOff=0\nsynclient VertEdgeScroll=0\nsynclient HorizEdgeScroll=0\nsynclient TapButton1=1\nsynclient VertTwoFingerScroll=1\nsynclient HorizTwoFingerScroll=0' >> ~/.config/peppermint-control-center/pointer")
        os.system("chmod +x ~/.config/peppermint-control-center/pointer")
    except:
        print("ERROR: pointer file does not exist and could not be generated.")
        sys.exit(1)
# Default keyboard script: repeat delay and rate.
if not os.path.exists(os.path.expanduser("~/.config/peppermint-control-center/keyboard")):
    try:
        os.system("echo '#!/bin/bash\n#Autogenerated script - Do not edit\nxset r rate 500 20' >> ~/.config/peppermint-control-center/keyboard")
        os.system("chmod +x ~/.config/peppermint-control-center/keyboard")
    except:
        print("ERROR: keyboard file does not exist and could not be generated.")
        sys.exit(1)
# Seed the xfce4 keyboard shortcuts file from the packaged default.
if not os.path.exists(os.path.expanduser("~/.config/xfce4/xfconf/xfce-perchannel-xml/xfce4-keyboard-shortcuts.xml")):
    try:
        os.system("cp /usr/share/peppermint/peppermint-control-center/xfce4-keyboard-shortcuts.xml ~/.config/xfce4/xfconf/xfce-perchannel-xml/xfce4-keyboard-shortcuts.xml")
    except:
        print("ERROR: xfce4-keyboard-shortcuts.xml does not exist and could not be generated.")
        sys.exit(1)
# Seed the xbindkeys configuration from the packaged default.
if not os.path.exists(os.path.expanduser("~/.config/peppermint-control-center/xbindkeys.conf")):
    try:
        os.system("cp /usr/share/peppermint/peppermint-control-center/xbindkeys.conf ~/.config/peppermint-control-center/xbindkeys.conf")
    except:
        print("ERROR: xbindkeys.conf does not exist and could not be generated.")
        sys.exit(1)
# Seed the window-manager settings file from the packaged default.
# Fix: the copy destination was missing the "xfconf/" path component
# (~/.config/xfce4/xfce-perchannel-xml/...), so it never satisfied this
# existence check and the open() of xfwm4.xml below could still fail.
if not os.path.exists(os.path.expanduser("~/.config/xfce4/xfconf/xfce-perchannel-xml/xfwm4.xml")):
    try:
        os.system("cp /usr/share/peppermint/peppermint-control-center/xfwm4.xml ~/.config/xfce4/xfconf/xfce-perchannel-xml/xfwm4.xml")
    except:
        print("ERROR: xfwm4.xml does not exist and could not be generated.")
        sys.exit(1)
# Open, read, and close the configuration files.
# The line lists below are module-level caches that all the get_* accessors
# later in this file parse; they are read once at startup.
try:
    pntr_file = open(os.path.expanduser("~/.config/peppermint-control-center/pointer"), 'r')
    keys_file = open(os.path.expanduser("~/.config/peppermint-control-center/keyboard"), 'r')
    xfks_file = open(os.path.expanduser("~/.config/xfce4/xfconf/xfce-perchannel-xml/xfce4-keyboard-shortcuts.xml"), 'r')
    bind_file = open(os.path.expanduser("~/.config/peppermint-control-center/xbindkeys.conf"), 'r')
    xfwm_file = open(os.path.expanduser("~/.config/xfce4/xfconf/xfce-perchannel-xml/xfwm4.xml"), 'r')
except:
    print("ERROR: Could not open the necessary configuration files.")
    sys.exit(1)
try:
    pntr_data = pntr_file.readlines()
    keys_data = keys_file.readlines()
    xfks_data = xfks_file.readlines()
    bind_data = bind_file.readlines()
    xfwm_data = xfwm_file.readlines()
except:
    print("ERROR: One or more configuration files is corrupt.")
    sys.exit(1)
pntr_file.close()
keys_file.close()
xfks_file.close()
bind_file.close()
xfwm_file.close()
###########################################################
### Generic methods for getting/setting boolean values. ###
###########################################################
def value_from_xml(a):
    """Return the value="..." attribute of the first cached xfwm4.xml line
    containing ``a``, or None when no line matches."""
    for line in xfwm_data:
        if a not in line:
            continue
        token = line.split()[3]
        pieces = token.replace(" ", "_").replace('"', ' ').split()
        return pieces[1]
def get_generic(a):
    """Read the xfwm4 boolean property ``a`` from the cached xml data."""
    return value_from_xml('name="{0}"'.format(a)) == "true"
def set_generic(widget, prop):
    """Write the xfwm4 boolean property ``prop`` from a toggle widget's state."""
    state = "true" if widget.get_active() else "false"
    os.system("xfconf-query -c xfwm4 -p /general/{0} -s {1}".format(prop, state))
##########################################
### Methods for 'Window Manager' page. ###
##########################################
# xfwm4 theme methods.
def get_theme():
    # Current xfwm4 theme name as recorded in the cached xfwm4.xml.
    return value_from_xml('name="theme"')
def theme_list():
    """Return the sorted list of installed xfwm4 themes, scanning the system,
    per-user and /usr/local theme directories for an xfwm4/themerc file."""
    themelist = []
    syslist = os.listdir("/usr/share/themes")
    for item in syslist:
        if os.path.exists("/usr/share/themes/{0}/xfwm4/themerc".format(item)):
            themelist.append(item)
    if os.path.exists(os.path.expanduser("~/.themes/")):
        usrlist = os.listdir(os.path.expanduser("~/.themes"))
        for item in usrlist:
            if os.path.exists(os.path.expanduser("~/.themes/{0}/xfwm4/themerc".format(item))):
                themelist.append(item)
    if os.path.exists(os.path.expanduser("/usr/local/share/themes/")):
        usrlist = os.listdir(os.path.expanduser("/usr/local/share/themes"))
        for item in usrlist:
            if os.path.exists(os.path.expanduser("/usr/local/share/themes/{0}/xfwm4/themerc".format(item))):
                themelist.append(item)
    return sorted(themelist)
def theme_index():
    """Index of the current theme within theme_list(), falling back to
    'Xfwm4_Fallback' when the configured theme is not installed."""
    try:
        return theme_list().index(get_theme())
    except ValueError as e:
        print("DEBUG: Caught exception '{0}', using 'Xfwm4_Fallback' as default.".format(e))
        return theme_list().index('Xfwm4_Fallback')
def set_theme(widget):
    # ``theme_box`` is a module-level combo box created by the GUI setup code.
    os.system("xfconf-query -c xfwm4 -p /general/theme -s {0}".format(theme_box.get_active_text()))
# Title font methods.
def get_title_font():
    # Strip the fixed xml wrapper around the title_font value; returns None
    # when the property line is absent from the cached data.
    for line in xfwm_data:
        if 'name="title_font"' in line:
            a = line.replace(' <property name="title_font" type="string" value="', '')
            b = a.replace('"/>', '')
            return b.replace("\n", "")
def set_title_font(widget):
    """Persist the font chosen in the font-button widget and update its label."""
    font_name = widget.get_font_name()
    os.system('xfconf-query -c xfwm4 -p /general/title_font -s "{0}"'.format(font_name))
    widget.set_title(font_name)
# Title alignment methods.
def get_title_alignment():
    return value_from_xml('name="title_alignment"')
def alignment_index():
    """Map the stored alignment string to the combo-box index (0/1/2)."""
    indices = {
        'left': 0,
        'center': 1,
        'right': 2
    }
    a = get_title_alignment()
    try:
        return indices[a]
    except KeyError:
        print("ERROR: Alignment index out of range.")
        sys.exit(1)
def set_title_alignment(widget):
    # ``alignment_box`` is a module-level combo box created by the GUI setup code.
    indices = ['left', 'center', 'right']
    a = alignment_box.get_active_text()
    if a.lower() in indices:
        os.system("xfconf-query -c xfwm4 -p /general/title_alignment -s {0}".format(a.lower()))
    else:
        print("ERROR: Alignment index out of range.")
        sys.exit(1)
# Workspace count methods:
# The get_* functions below read the cached xfwm4.xml; the set_* functions
# write back through xfconf-query. Widget names (focus_delay, raise_delay,
# snap_width, wrap_resistance, double_click_action, ...) are module-level
# GTK widgets created by the GUI setup code.
def get_workspace_count():
    return int(value_from_xml('name="workspace_count"'))
def set_workspace_count(button):
    os.system("xfconf-query -c xfwm4 -p /general/workspace_count -s {0}".format(str(int(button.get_value()))))
# Click to focus methods.
def get_click_to_focus():
    return value_from_xml('name="click_to_focus"')
def set_click_to_focus(button, value):
    # Only the active radio button writes; focus-follows-mouse ("false")
    # additionally enables the focus-delay spinner.
    if button.get_active():
        os.system("xfconf-query -c xfwm4 -p /general/click_to_focus -s {0}".format(value))
    if value == "false":
        focus_delay_box.set_sensitive(True)
    if value == "true":
        focus_delay_box.set_sensitive(False)
# Focus Delay methods.
def get_focus_delay():
    return int(value_from_xml('name="focus_delay"'))
def set_focus_delay(widget):
    os.system("xfconf-query -c xfwm4 -p /general/focus_delay -s {0}".format(str(int(focus_delay.get_value()))))
# New window focus methods.
def get_focus_new():
    return get_generic("focus_new")
def set_focus_new(widget):
    set_generic(widget, "focus_new")
# Raise on focus methods.
def get_raise_on_focus():
    return get_generic("raise_on_focus")
def set_raise_on_focus(widget):
    set_generic(widget, "raise_on_focus")
# Methods for focus raise delay.
def get_raise_delay():
    return int(value_from_xml('name="raise_delay"'))
def set_raise_delay(widget):
    os.system("xfconf-query -c xfwm4 -p /general/raise_delay -s {0}".format(str(int(raise_delay.get_value()))))
# Raise on click methods.
def get_raise_on_click():
    return get_generic("raise_on_click")
def set_raise_on_click(widget):
    set_generic(widget, "raise_on_click")
# Window snapping methods.
def get_snap_to_border():
    return get_generic("snap_to_border")
def set_snap_to_border(widget):
    set_generic(widget, "snap_to_border")
def get_snap_to_windows():
    return get_generic("snap_to_windows")
def set_snap_to_windows(widget):
    set_generic(widget, "snap_to_windows")
def get_snap_width():
    return int(value_from_xml('name="snap_width"'))
def set_snap_width(widget):
    os.system("xfconf-query -c xfwm4 -p /general/snap_width -s {0}".format(str(int(snap_width.get_value()))))
# Workspace wrapping methods.
def get_wrap_workspaces():
    return get_generic("wrap_workspaces")
def set_wrap_workspaces(widget):
    set_generic(widget, "wrap_workspaces")
def get_wrap_windows():
    return get_generic("wrap_windows")
def set_wrap_windows(widget):
    set_generic(widget, "wrap_windows")
def get_wrap_resistance():
    return int(value_from_xml('name="wrap_resistance"'))
def set_wrap_resistance(widget):
    os.system("xfconf-query -c xfwm4 -p /general/wrap_resistance -s {0}".format(str(int(wrap_resistance.get_value()))))
# Methods for hiding window content during transformations.
def get_box_move():
    return get_generic("box_move")
def set_box_move(widget):
    set_generic(widget, "box_move")
def get_box_resize():
    return get_generic("box_resize")
def set_box_resize(widget):
    set_generic(widget, "box_resize")
# Title bar double click methods.
def get_double_click_action():
    return value_from_xml('name="double_click_action"')
def set_double_click_action(button):
    # Translate the visible combo-box label into the xfconf value.
    a = double_click_action.get_active_text()
    if a == "Shade window":
        b = "shade"
    elif a == "Hide window":
        b = "hide"
    elif a == "Maximize window":
        b = "maximize"
    elif a == "Fill window":
        b = "fill"
    elif a == "Nothing":
        b = "none"
    else:
        print("ERROR: Double click action index out of range.")
        sys.exit(1)
    os.system("xfconf-query -c xfwm4 -p /general/double_click_action -s {0}".format(b))
def double_click_action_index():
    """Map the stored double-click action to the combo-box index (0-4)."""
    indices = {
        'shade': 0,
        'hide': 1,
        'maximize': 2,
        'fill': 3,
        'none': 4
    }
    a = get_double_click_action()
    try:
        return indices[a]
    except KeyError:
        print("ERROR: Double click action index out of range.")
        sys.exit(1)
###########################################
### Methods for 'Pointer Options' page. ###
###########################################
# These accessors parse the cached ``pntr_data`` lines of the generated
# pointer script (see pntr_apply below for the writer). Fixed line/char
# offsets correspond to that exact layout:
#   line 2: "xset m <accel> <threshold>"
#   line 3: 'xmodmap -e "pointer = 1 2 3"'   (char 22 = first button digit)
#   line 4: "synclient TouchPadOff=0"        (char 22 = value)
#   lines 5-9: remaining synclient settings (char after '=').
def get_acceleration():
    return int(pntr_data[2].split()[2])
def get_threshold():
    return int(pntr_data[2].split()[3])
def get_lefthanded():
    # "3" as the first button means the order was written as "3 2 1".
    return pntr_data[3][22] == "3"
def get_touchenable():
    # TouchPadOff=0 means the touchpad is enabled.
    return pntr_data[4][22] == "0"
def get_taptoclick():
    return pntr_data[7][21] == "1"
def get_vertedge():
    return pntr_data[5][25] == "1"
def get_horizedge():
    return pntr_data[6][26] == "1"
def get_vtwofinger():
    return pntr_data[8][30] == "1"
def get_htwofinger():
    return pntr_data[9][31] == "1"
# 'Apply Pointer Options' button.
def pntr_apply(widget):
    """Collect the current pointer widget states, regenerate the pointer
    script and apply it via ppmcc-load."""
    accel = str(int(acceleration.get_value()))
    thresh = str(int(threshold.get_value()))
    button_order = "3 2 1" if lefthanded.get_active() else "1 2 3"
    # TouchPadOff is inverted: the checkbox means "touchpad enabled".
    touch = "0" if touchenable.get_active() else "1"
    tap = "1" if taptoclick.get_active() else "0"
    vedge = "1" if vertedge.get_active() else "0"
    hedge = "1" if horizedge.get_active() else "0"
    vtwo = "1" if vtwofinger.get_active() else "0"
    htwo = "1" if htwofinger.get_active() else "0"
    script_lines = [
        "#!/bin/bash\n",
        "#Autogenerated script - Do not edit\n",
        "xset m {0} {1}\n".format(accel, thresh),
        'xmodmap -e "pointer = {0}"\n'.format(button_order),
        "synclient TouchPadOff={0}\n".format(touch),
        "synclient VertEdgeScroll={0}\n".format(vedge),
        "synclient HorizEdgeScroll={0}\n".format(hedge),
        "synclient TapButton1={0}\n".format(tap),
        "synclient VertTwoFingerScroll={0}\n".format(vtwo),
        "synclient HorizTwoFingerScroll={0}\n".format(htwo),
    ]
    try:
        with open(os.path.expanduser("~/.config/peppermint-control-center/pointer"), 'w') as conf:
            conf.truncate()
            conf.writelines(script_lines)
    except:
        print("ERROR: Could not save new configuration.")
        sys.exit(1)
    try:
        os.system("ppmcc-load")
    except:
        print("ERROR: Could not load new configuration.")
        sys.exit(1)
def get_delay():
return int(keys_data[2].split()[3])
def get_interval():
return int(keys_data[2].split()[4])
def lxkeymap(widget):
os.system("lxkeymap &")
def layoutsetter(widget):
win = SetLayout()
win.show_all()
def keys_apply(widget):
delay_value = str(int(delay.get_value()))
interval_value = str(int(interval.get_value()))
try:
with open(os.path.expanduser("~/.config/peppermint-control-center/keyboard"), 'w') as conf:
conf.truncate()
conf.write("#!/bin/bash\n")
conf.write("#Autogenerated script - Do not edit\n")
conf.write("xset r rate {0} {1}\n".format(delay_value, interval_value))
except:
print("ERROR: Could not save new configuration.")
sys.exit(1)
try:
os.system("ppmcc-load")
except:
print("ERROR: Could not load new configuration.")
sys.exit(1)
##############################################
### Methods for 'Keyboard Shortcuts' page. ###
##############################################
# Swap out encoded strings for human readable characters.
def encodereplace(a):
    """Swap out XML-encoded entities for human readable characters.

    The shortcuts xml stores modifier brackets as ``&lt;``/``&gt;``; this
    restores the literal ``<`` and ``>``. Fix: the original replaced ``<``
    with ``<`` and ``>`` with ``>`` — both no-ops; the entity strings were
    evidently lost, contradicting the function's stated purpose.
    """
    b = a.replace("&lt;", "<")
    return b.replace("&gt;", ">")
# Swap out human readable characters for encoded strings.
def unencodereplace(a):
    """Swap out human readable characters for XML-encoded entities.

    Inverse of encodereplace: encodes ``<`` and ``>`` as ``&lt;``/``&gt;``.
    Fix: the original replacements were identity no-ops (the entity strings
    were evidently lost).
    """
    b = a.replace("<", "&lt;")
    return b.replace(">", "&gt;")
# Format the xml data into a more human readable form.
def xmlformat(a):
    """Format one shortcut line from xfce4-keyboard-shortcuts.xml into a
    human readable "command keys" string."""
    tokens = a.split()
    # Token 3 carries value="<command>", token 1 carries name="<keys>".
    command = tokens[3].replace('"', ' ').split()[1]
    keys = tokens[1].replace('"', ' ').split()[1]
    pretty = "{0} {1}".format(command, keys).replace("KP_", "")
    return encodereplace(pretty).replace("Primary", "Control")
# Match the xbindkeys format to the xfce4 format.
def bindformat(a):
    """Convert an xbindkeys modifier string into the xfce4 <Modifier> form
    and drop the '+' separators."""
    out = a
    for plain, tagged in (("Alt", "<Alt>"),
                          ("Mod4", "<Super>"),
                          ("Control", "<Control>"),
                          ("Shift", "<Shift>")):
        out = out.replace(plain, tagged)
    return out.replace("+", "")
# Return the xfce4 format to native xbindkeys format.
def unbindformat(a):
    """Convert an xfce4 <Modifier> string back to the native xbindkeys
    "Modifier + " form."""
    out = a
    for tagged, plain in (("<Alt>", "Alt + "),
                          ("<Super>", "Mod4 + "),
                          ("<Control>", "Control + "),
                          ("<Shift>", "Shift + ")):
        out = out.replace(tagged, plain)
    return out
# Return a list of all valid shortcuts in xfce4-keyboard-shortcuts.xml.
def get_xfce_shortcuts():
    """Return every valid shortcut from the xfce4 shortcut xml as
    'command keys' strings.

    Iterates the module-level ``xfks_data`` lines; lines whose layout does
    not match what xmlformat() expects are skipped.
    """
    xfceline = []
    for line in xfks_data:
        if 'type="string"' in line:
            try:
                xfceline.append(xmlformat(line))
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # are no longer swallowed; malformed lines are still skipped.
                continue
    return xfceline
# This works as long as xbindkeys-config is not used and the user does
# not edit the config file. We should launch xbindkeys in Peppermint
# using a custom config file and this package should probably provide
# xbindkeys-config in the debian/control file in order to prevent any
# conflicts.
def get_xbindkeys():
    """Parse the user's xbindkeys config into 'command\\nshortcut' strings.

    Reads the module-level ``bind_data`` lines. A quoted line starts a new
    entry (the command); the indented line after it carries the key combo.
    """
    cleaned = []
    for raw in bind_data:
        # Skip comments and blank lines; keep everything else newline-free.
        if raw[0] == "#" or raw[0] == "\n":
            continue
        cleaned.append(raw.replace("\n", ""))
    pending_command = ""
    entries = []
    for entry in cleaned:
        if entry[0] == '"':
            pending_command = entry.replace('"', '')
        elif entry[0] == " ":
            combo = entry.replace(" ", "")
            entries.append(pending_command + "\n" + bindformat(combo))
    return entries
def edit_xfce_shortcut(selection, path, column):
    """Row-activated handler for the xfce4 shortcut TreeView.

    Records the command/old-shortcut pair being edited in module-level
    globals and opens the KeyboardShortcut dialog for the row.
    """
    global workspace
    # Tells KeyboardShortcut.set_shortcut() to write through xfconf-query.
    workspace = "xfce"
    store = selection.get_model()
    global command
    command = store[path][0]
    global oldcommand
    oldcommand = store[path][1]
    treeiter = store.get_iter(path)
    shortcut = KeyboardShortcut(store, treeiter)
    shortcut.show_all()
def edit_bind_shortcut(selection, path, column):
    """Row-activated handler for the xbindkeys shortcut TreeView.

    Records the command/old-shortcut pair being edited in module-level
    globals and opens the KeyboardShortcut dialog for the row.
    """
    global workspace
    # Tells KeyboardShortcut.set_shortcut() to rewrite the xbindkeys config.
    workspace = "bind"
    store = selection.get_model()
    global command
    command = store[path][0]
    global oldcommand
    oldcommand = store[path][1]
    treeiter = store.get_iter(path)
    shortcut = KeyboardShortcut(store, treeiter)
    shortcut.show_all()
def set_xfce_shortcut(command, shortcut):
    """Rebind a custom xfwm4 shortcut: drop the old key-combo property and
    create the new one.

    Relies on the module-level ``oldcommand`` global (previous key combo)
    set by edit_xfce_shortcut(). xfconf uses 'Primary' for Control and a
    lowercase 'space'.
    """
    new0 = shortcut.replace("Control", "Primary")
    new1 = new0.replace("Space", "space")
    os.system('xfconf-query -c xfce4-keyboard-shortcuts -p "/xfwm4/custom/{0}" -r'.format(oldcommand))
    # The -s value is now quoted so commands containing spaces survive the
    # shell (it was previously passed bare).
    os.system('xfconf-query -c xfce4-keyboard-shortcuts -p "/xfwm4/custom/{0}" -n -t string -s "{1}"'.format(
        new1, command))
################################################################################################
### To Do: This will require more formatting as it has a bazillion possible breaking points. ###
################################################################################################
def set_bind_shortcut(command, shortcut, store):
    """Rewrite the user's xbindkeys config with *shortcut* assigned to
    *command*, then restart xbindkeys so the change takes effect.

    The row being edited is matched through the module-level ``oldcommand``
    global (its previous key combo).
    """
    bindlist = []
    for item in store:
        # Replace the shortcut only on the row being edited; copy the rest.
        if command in item[0] and oldcommand in item[1]:
            bindlist.append([item[0], shortcut])
            continue
        bindlist.append([item[0], item[1]])
    with open(os.path.expanduser("~/.config/peppermint-control-center/xbindkeys.conf"), 'w') as conf:
        conf.truncate()  # redundant with mode 'w'; kept as-is
        for line in bindlist:
            # Convert the '<Mod>' markup back to native xbindkeys 'Mod+' syntax.
            new0 = line[1]
            new1 = new0.replace("<Control>", "Control+")
            new2 = new1.replace("<Alt>", "Alt+")
            new3 = new2.replace("<Super>", "Mod4+")
            new4 = new3.replace("<Shift>", "Shift+")
            conf.write('"{0}"\n {1}\n\n'.format(line[0], new4))
    os.system("killall xbindkeys")
    os.system("xbindkeys -f ~/.config/peppermint-control-center/xbindkeys.conf")
def refresh_bind_shortcuts():
    """Rewrite xbindkeys.conf from the current contents of the module-level
    ``bindstore`` and restart xbindkeys so edits/deletions take effect."""
    bindlist = []
    for item in bindstore:
        bindlist.append([item[0], item[1]])
    with open(os.path.expanduser("~/.config/peppermint-control-center/xbindkeys.conf"), 'w') as conf:
        conf.truncate()  # redundant with mode 'w'; kept as-is
        for line in bindlist:
            # Convert the '<Mod>' markup back to native xbindkeys 'Mod+' syntax.
            new0 = line[1]
            new1 = new0.replace("<Control>", "Control+")
            new2 = new1.replace("<Alt>", "Alt+")
            new3 = new2.replace("<Super>", "Mod4+")
            new4 = new3.replace("<Shift>", "Shift+")
            conf.write('"{0}"\n {1}\n\n'.format(line[0], new4))
    os.system("killall xbindkeys")
    os.system("xbindkeys -f ~/.config/peppermint-control-center/xbindkeys.conf")
def action_bind_new(widget, liststore):
    """Open the dialog for adding a brand-new xbindkeys shortcut."""
    dialog = NewShortcut(liststore)
    dialog.show_all()
def action_bind_restore(widget):
    """Ask for confirmation, then restore the stock xbindkeys config."""
    restore_cmd = "cp -f /usr/share/peppermint/peppermint-control-center/xbindkeys.conf ~/.config/peppermint-control-center/xbindkeys.conf"
    dialog = AreYouSure("bind-restore", restore_cmd)
    dialog.show_all()
def action_xfce_restore(widget):
    """Ask for confirmation before restoring xfce shortcuts (not implemented)."""
    dialog = AreYouSure("xfce-restore", "")
    dialog.show_all()
###########################################
### Methods for 'Desktop Effects' page. ###
###########################################
def get_use_compositing():
    """Read the xfwm4 'use_compositing' setting."""
    return get_generic("use_compositing")
def set_use_compositing(widget):
    """Persist the compositing toggle and grey out dependent effect controls.

    The extra effect options only make sense while compositing is enabled,
    so effects_box tracks the CheckButton state directly (replaces the old
    `== True` / `== False` branch pair).
    """
    set_generic(widget, "use_compositing")
    effects_box.set_sensitive(widget.get_active())
# CheckButtons
def get_unredirect_overlays():
    """Read the xfwm4 'unredirect_overlays' setting."""
    return get_generic("unredirect_overlays")
def set_unredirect_overlays(widget):
    """Persist the 'unredirect_overlays' CheckButton state."""
    set_generic(widget, "unredirect_overlays")
def get_sync_to_vblank():
    """Read the xfwm4 'sync_to_vblank' setting."""
    return get_generic("sync_to_vblank")
def set_sync_to_vblank(widget):
    """Persist the 'sync_to_vblank' CheckButton state."""
    set_generic(widget, "sync_to_vblank")
def get_show_popup_shadow():
    """Read the xfwm4 'show_popup_shadow' setting."""
    return get_generic("show_popup_shadow")
def set_show_popup_shadow(widget):
    """Persist the 'show_popup_shadow' CheckButton state."""
    set_generic(widget, "show_popup_shadow")
def get_show_dock_shadow():
    """Read the xfwm4 'show_dock_shadow' setting."""
    return get_generic("show_dock_shadow")
def set_show_dock_shadow(widget):
    """Persist the 'show_dock_shadow' CheckButton state."""
    set_generic(widget, "show_dock_shadow")
def get_show_frame_shadow():
    """Read the xfwm4 'show_frame_shadow' setting."""
    return get_generic("show_frame_shadow")
def set_show_frame_shadow(widget):
    """Persist the 'show_frame_shadow' CheckButton state."""
    set_generic(widget, "show_frame_shadow")
# HScales
def _set_xfwm4_general_int(prop, widget):
    """Write *widget*'s value as an integer to /general/<prop> in xfwm4.

    Private helper that replaces five identical copies of the same
    xfconf-query invocation.
    """
    os.system("xfconf-query -c xfwm4 -p /general/{0} -s {1}".format(prop, str(int(widget.get_value()))))
def get_frame_opacity():
    """Current opacity (percent) of window frames."""
    return int(value_from_xml('name="frame_opacity"'))
def set_frame_opacity(widget):
    """Persist the frame-opacity HScale value."""
    _set_xfwm4_general_int("frame_opacity", widget)
def get_inactive_opacity():
    """Current opacity (percent) of unfocused windows."""
    return int(value_from_xml('name="inactive_opacity"'))
def set_inactive_opacity(widget):
    """Persist the inactive-window opacity HScale value."""
    _set_xfwm4_general_int("inactive_opacity", widget)
def get_move_opacity():
    """Current opacity (percent) of windows while being moved."""
    return int(value_from_xml('name="move_opacity"'))
def set_move_opacity(widget):
    """Persist the move-opacity HScale value."""
    _set_xfwm4_general_int("move_opacity", widget)
def get_resize_opacity():
    """Current opacity (percent) of windows while being resized."""
    return int(value_from_xml('name="resize_opacity"'))
def set_resize_opacity(widget):
    """Persist the resize-opacity HScale value."""
    _set_xfwm4_general_int("resize_opacity", widget)
def get_popup_opacity():
    """Current opacity (percent) of popup windows."""
    return int(value_from_xml('name="popup_opacity"'))
def set_popup_opacity(widget):
    """Persist the popup-opacity HScale value."""
    _set_xfwm4_general_int("popup_opacity", widget)
####################################
### Methods for 'Advanced' page. ###
####################################
# Window cycling
def get_cycle_minimum():
    """Read the xfwm4 'cycle_minimum' setting."""
    return get_generic("cycle_minimum")
def set_cycle_minimum(widget):
    """Persist the 'cycle_minimum' CheckButton state."""
    set_generic(widget, "cycle_minimum")
def get_cycle_hidden():
    """Read the xfwm4 'cycle_hidden' setting."""
    return get_generic("cycle_hidden")
def set_cycle_hidden(widget):
    """Persist the 'cycle_hidden' CheckButton state."""
    set_generic(widget, "cycle_hidden")
def get_cycle_workspaces():
    """Read the xfwm4 'cycle_workspaces' setting."""
    return get_generic("cycle_workspaces")
def set_cycle_workspaces(widget):
    """Persist the 'cycle_workspaces' CheckButton state."""
    set_generic(widget, "cycle_workspaces")
def get_cycle_draw_frame():
    """Read the xfwm4 'cycle_draw_frame' setting."""
    return get_generic("cycle_draw_frame")
def set_cycle_draw_frame(widget):
    """Persist the 'cycle_draw_frame' CheckButton state."""
    set_generic(widget, "cycle_draw_frame")
# Window focus
def get_prevent_focus_stealing():
    """Read the xfwm4 'prevent_focus_stealing' setting."""
    return get_generic("prevent_focus_stealing")
def set_prevent_focus_stealing(widget):
    """Persist the 'prevent_focus_stealing' CheckButton state."""
    set_generic(widget, "prevent_focus_stealing")
def get_focus_hint():
    """Read the xfwm4 'focus_hint' setting."""
    return get_generic("focus_hint")
def set_focus_hint(widget):
    """Persist the 'focus_hint' CheckButton state."""
    set_generic(widget, "focus_hint")
def get_activate_action():
    """Return the raw 'activate_action' value from the xfwm4 settings dump."""
    return value_from_xml('name="activate_action"')
def set_activate_action(button, value):
    """RadioButton handler: only the button becoming active writes *value*."""
    if button.get_active():
        os.system("xfconf-query -c xfwm4 -p /general/activate_action -s {0}".format(value))
# Accessibility
def get_raise_with_any_button():
    """Read the xfwm4 'raise_with_any_button' setting."""
    return get_generic("raise_with_any_button")
def set_raise_with_any_button(widget):
    """Persist the 'raise_with_any_button' CheckButton state."""
    set_generic(widget, "raise_with_any_button")
def get_borderless_maximize():
    """Read the xfwm4 'borderless_maximize' setting."""
    return get_generic("borderless_maximize")
def set_borderless_maximize(widget):
    """Persist the 'borderless_maximize' CheckButton state."""
    set_generic(widget, "borderless_maximize")
def get_restore_on_move():
    """Read the xfwm4 'restore_on_move' setting."""
    return get_generic("restore_on_move")
def set_restore_on_move(widget):
    """Persist 'restore_on_move'; tiling-on-move only applies when enabled.

    tile_on_move tracks the CheckButton state directly (replaces the old
    `== True` / `== False` branch pair).
    """
    set_generic(widget, "restore_on_move")
    tile_on_move.set_sensitive(widget.get_active())
def get_tile_on_move():
    """Read the xfwm4 'tile_on_move' setting."""
    return get_generic("tile_on_move")
def set_tile_on_move(widget):
    """Persist the 'tile_on_move' CheckButton state."""
    set_generic(widget, "tile_on_move")
def get_snap_resist():
    """Read the xfwm4 'snap_resist' setting."""
    return get_generic("snap_resist")
def set_snap_resist(widget):
    """Persist the 'snap_resist' CheckButton state."""
    set_generic(widget, "snap_resist")
def get_urgent_blink():
    """Read the xfwm4 'urgent_blink' setting."""
    return get_generic("urgent_blink")
def set_urgent_blink(widget):
    """Persist 'urgent_blink'; disabling it also clears the repeat option."""
    set_generic(widget, "urgent_blink")
    if widget.get_active():
        repeat_urgent_blink.set_sensitive(True)
    else:
        repeat_urgent_blink.set_sensitive(False)
        repeat_urgent_blink.set_active(False)
        set_repeat_urgent_blink_off()
def get_repeat_urgent_blink():
    """Read the xfwm4 'repeat_urgent_blink' setting."""
    return get_generic("repeat_urgent_blink")
def set_repeat_urgent_blink(widget):
    """Persist the 'repeat_urgent_blink' CheckButton state."""
    set_generic(widget, "repeat_urgent_blink")
def set_repeat_urgent_blink_off():
    """Force the xfwm4 'repeat_urgent_blink' property to false."""
    os.system("xfconf-query -c xfwm4 -p /general/repeat_urgent_blink -s false")
def get_mousewheel_rollup():
    """Read the xfwm4 'mousewheel_rollup' setting."""
    return get_generic("mousewheel_rollup")
def set_mousewheel_rollup(widget):
    """Persist the 'mousewheel_rollup' CheckButton state."""
    set_generic(widget, "mousewheel_rollup")
# Workspaces
def get_scroll_workspaces():
    """Read the xfwm4 'scroll_workspaces' setting."""
    return get_generic("scroll_workspaces")
def set_scroll_workspaces(widget):
    """Persist the 'scroll_workspaces' CheckButton state."""
    set_generic(widget, "scroll_workspaces")
def get_wrap_layout():
    """Read the xfwm4 'wrap_layout' setting."""
    return get_generic("wrap_layout")
def set_wrap_layout(widget):
    """Persist the 'wrap_layout' CheckButton state."""
    set_generic(widget, "wrap_layout")
# Window placement
def get_placement_ratio():
    """Current 'placement_ratio' as an int."""
    return int(value_from_xml("placement_ratio"))
def set_placement_ratio(widget):
    """Write the HScale value as the xfwm4 'placement_ratio'."""
    os.system("xfconf-query -c xfwm4 -p /general/placement_ratio -s {0}".format(str(int(widget.get_value()))))
def get_placement_mode():
    """Return the raw 'placement_mode' value."""
    return value_from_xml("placement_mode")
def set_placement_mode(button, value):
    """RadioButton handler: only the button becoming active writes *value*."""
    if button.get_active():
        os.system("xfconf-query -c xfwm4 -p /general/placement_mode -s {0}".format(value))
# 'Are you sure?' window.
class AreYouSure(Gtk.Window):
    """Small yes/no confirmation dialog.

    *action* selects what the 'Yes' button does; *command* is either a shell
    command string (for "bind-restore") or a Gtk.TreeIter (for "delete").
    """
    def close_it(self, widget):
        """'No' handler: dismiss the dialog without doing anything."""
        self.close()
    def confirm(self, widget, action, command):
        """'Yes' handler: perform the confirmed action."""
        if action == "bind-restore":
            # Restore the stock xbindkeys config, restart xbindkeys and
            # repopulate the shortcut list from the restored file.
            os.system(command)
            os.system("killall xbindkeys")
            os.system("xbindkeys -f ~/.config/peppermint-control-center/xbindkeys.conf")
            bindview.set_model(None)
            bindstore.clear()
            get_bind = get_xbindkeys()
            for line in get_bind:
                a = line.split("\n")
                bindstore.append([a[0], a[1]])
            bindview.set_model(bindstore)
            self.close()
        elif action == "xfce-restore":
            os.system('zenity --error --text "This feature has not yet been implemented."')
        elif action == "delete":
            # Here *command* is a Gtk.TreeIter identifying the row to remove.
            bindstore.remove(command)
            refresh_bind_shortcuts()
            self.close()
    def __init__(self, action, command):
        Gtk.Window.__init__(self, title="Confirm")
        self.set_size_request(250, 120)
        self.set_icon_from_file("/usr/share/pixmaps/peppermint-control-center.png")
        label = Gtk.Label()
        label.set_markup("<b>Are you sure you want to perform this action?</b>")
        buttons = Gtk.HBox()
        void = Gtk.Label()
        no = Gtk.Button.new_from_stock(Gtk.STOCK_NO)
        no.connect("clicked", self.close_it)
        yes = Gtk.Button.new_from_stock(Gtk.STOCK_YES)
        yes.connect("clicked", self.confirm, action, command)
        buttons.pack_start(void, True, True, 0)
        buttons.pack_start(no, False, False, 10)
        buttons.pack_start(yes, False, False, 0)
        vbox = Gtk.VBox()
        vbox.pack_start(label, True, True, 0)
        vbox.pack_start(buttons, False, False, 10)
        hbox = Gtk.HBox()
        hbox.pack_start(vbox, True, True, 10)
        self.add(hbox)
class SetLayout(Gtk.Window):
    """Keyboard layout/variant chooser backed by setxkbmap.

    Layouts and variants are read from the X11 xkb registry
    (/usr/share/X11/xkb/rules/base.xml); the chosen layout is applied with
    setxkbmap and persisted to a small autostart script.
    """
    def close_it(self, widget):
        """Close button handler: dismiss the dialog."""
        self.close()
    def revert_it(self, widget, command):
        """Reapply and persist the layout active when the dialog was opened."""
        execute = "setxkbmap " + command
        os.system(execute)
        os.system('touch ~/.config/peppermint-control-center/keyboard_layout')
        os.system('chmod +x ~/.config/peppermint-control-center/keyboard_layout')
        os.system('echo "#!/bin/sh\n{0}" > ~/.config/peppermint-control-center/keyboard_layout'.format(execute))
    def getlist(self):
        """Return a sorted list of [layout, [[command, description], ...]]
        read from the xkb registry, including every variant of each layout."""
        layoutfile = ET.parse("/usr/share/X11/xkb/rules/base.xml")
        root = layoutfile.getroot()
        layoutlist = []
        for child in root.iter('layout'):
            name = ""
            sublist = []
            for item in child.findall('configItem'):
                name = item.find('name').text
                command = name
                desc = item.find('description').text
                sublist.append([command, desc])
            for variantList in child.findall('variantList'):
                for variant in variantList.findall('variant'):
                    for item in variant.findall('configItem'):
                        varname = item.find('name').text
                        # Variants are applied as "<layout> <variant>".
                        command = name + " " + varname
                        vardesc = item.find('description').text
                        sublist.append([command, vardesc])
            layoutlist.append([name, sublist])
        return sorted(layoutlist)
    def fill_variants(self, widget):
        """Selection-changed handler: repopulate the variant list for the
        selected layout and highlight the currently active variant."""
        variantstore.clear()
        layouts = self.getlist()
        select = layoutview.get_selection()
        model, treeiter = select.get_selected()
        for layout in layouts:
            if layout[0] == model[treeiter][0]:
                for variant in layout[1]:
                    variantstore.append([variant[0], variant[1]])
        x = 0
        # cm holds the layout+variant command active at dialog startup.
        for obj in variantstore:
            if str(obj[0]) == cm:
                variantview.set_cursor(x)
                variantview.scroll_to_cell(x)
            x += 1
    def set_layout(self, widget, var):
        """Apply the selected variant with setxkbmap and persist it."""
        try:
            select = variantview.get_selection()
            model, treeiter = select.get_selected()
            execute = "setxkbmap {0}".format(model[treeiter][0])
            os.system(execute)
            os.system('touch ~/.config/peppermint-control-center/keyboard_layout')
            os.system('chmod +x ~/.config/peppermint-control-center/keyboard_layout')
            os.system('echo "#!/bin/sh\n{0}" > ~/.config/peppermint-control-center/keyboard_layout'.format(execute))
        except:
            # NOTE(review): bare except silently ignores every failure,
            # including a missing selection -- consider narrowing.
            pass
    def get_current(self):
        """Query setxkbmap for the active layout and variant.

        Returns (layout, command) where command is "<layout>[ <variant>]".
        NOTE(review): if the 'layout:' line is absent from the setxkbmap
        output, current_layout is never assigned and the return statement
        raises NameError -- confirm whether that can happen in practice.
        """
        command = ""
        cl = subprocess.Popen('setxkbmap -query | grep layout', shell=True, stdout=subprocess.PIPE)
        curlay = str(cl.stdout.read())
        if "layout:" in curlay:
            cl1 = curlay.split()
            current_layout = cl1[1].replace("\\n'", "")
            command = command + current_layout
        cv = subprocess.Popen('setxkbmap -query | grep variant', shell=True, stdout=subprocess.PIPE)
        curvar = str(cv.stdout.read())
        if "variant:" in curvar:
            cv1 = curvar.split()
            current_variant = cv1[1].replace("\\n'", "")
            command = "{0} {1}".format(command, current_variant)
        return current_layout, command
    def __init__(self):
        Gtk.Window.__init__(self, title="Select Keyboard Layout")
        self.set_size_request(600, 480)
        self.set_icon_from_file("/usr/share/pixmaps/peppermint-control-center.png")
        global cl, cm
        # cl = active layout name, cm = full "layout variant" command.
        cl, cm = self.get_current()
        oldcommand = cm
        global layoutstore
        layoutstore = Gtk.ListStore(str)
        global variantstore
        variantstore = Gtk.ListStore(str, str)
        global layoutview
        layoutview = Gtk.TreeView(layoutstore)
        layout = Gtk.CellRendererText()
        layoutcolumn = Gtk.TreeViewColumn("Layout", layout, text=0)
        layoutview.append_column(layoutcolumn)
        global variantview
        variantview = Gtk.TreeView(variantstore)
        variant = Gtk.CellRendererText()
        description = Gtk.CellRendererText()
        variantcolumn = Gtk.TreeViewColumn("Variant")
        variantcolumn.pack_start(variant, True)
        variantcolumn.pack_start(description, True)
        variantcolumn.add_attribute(variant, "text", 0)
        variantcolumn.add_attribute(description, "text", 1)
        variantview.append_column(variantcolumn)
        layouts = self.getlist()
        for layout in layouts:
            layoutstore.append([layout[0]])
        select = layoutview.get_selection()
        select.connect("changed", self.fill_variants)
        x = 0
        # Pre-select the currently active layout.
        for obj in layoutstore:
            if str(obj[0]) == cl:
                layoutview.set_cursor(x)
                layoutview.scroll_to_cell(x)
            x += 1
        layoutbox = Gtk.HBox()
        layoutscroll = Gtk.ScrolledWindow()
        variantscroll = Gtk.ScrolledWindow()
        layoutscroll.add(layoutview)
        variantscroll.add(variantview)
        layoutbox.pack_start(layoutscroll, True, True, 0)
        layoutbox.pack_start(variantscroll, True, True, 5)
        entryt = Gtk.Entry()
        entryt.set_width_chars(35)
        entryt.set_placeholder_text("Type here to test the layout.")
        void = Gtk.Label()
        applyb = Gtk.Button.new_from_stock(Gtk.STOCK_APPLY)
        applyb.connect("clicked", self.set_layout, "us")
        revert = Gtk.Button.new_from_stock(Gtk.STOCK_REVERT_TO_SAVED)
        revert.connect("clicked", self.revert_it, oldcommand)
        cancel = Gtk.Button.new_from_stock(Gtk.STOCK_CLOSE)
        cancel.connect("clicked", self.close_it)
        buttons = Gtk.HBox()
        buttons.pack_start(entryt, False, False, 0)
        buttons.pack_start(void, True, True, 0)
        buttons.pack_start(applyb, False, False, 0)
        buttons.pack_start(revert, False, False, 10)
        buttons.pack_start(cancel, False, False, 0)
        vbox = Gtk.VBox()
        vbox.pack_start(layoutbox, True, True, 0)
        vbox.pack_start(buttons, False, False, 10)
        hbox = Gtk.HBox()
        hbox.pack_start(vbox, True, True, 10)
        self.add(hbox)
# Add new shortcut window.
class NewShortcut(Gtk.Window):
    """Dialog that asks for a command to run, then opens the
    shortcut-assignment dialog for the freshly added xbindkeys row."""
    def close_it(self, widget):
        """Cancel handler: dismiss without adding anything."""
        self.close()
    def add_shortcut(self, widget, liststore):
        """OK handler: prepend the new command row and ask for its shortcut."""
        global oldcommand
        oldcommand = ""
        global command
        command = newcommand.get_text()
        global workspace
        # New shortcuts are always written to the xbindkeys backend.
        workspace = "bind"
        liststore.prepend([newcommand.get_text(), ""])
        treeiter = liststore.get_iter(0)
        # (Leftover debug print statements removed here.)
        self.close()
        a = KeyboardShortcut(liststore, treeiter)
        a.show_all()
    def __init__(self, liststore):
        Gtk.Window.__init__(self, title="Add New Shortcut")
        self.set_size_request(250, 120)
        self.set_icon_from_file("/usr/share/pixmaps/peppermint-control-center.png")
        label = Gtk.Label("Enter the command to execute:")
        global newcommand
        newcommand = Gtk.Entry()
        buttons = Gtk.HBox()
        button_void = Gtk.Label()
        button_okay = Gtk.Button.new_from_stock(Gtk.STOCK_OK)
        button_okay.connect("clicked", self.add_shortcut, liststore)
        button_cancel = Gtk.Button.new_from_stock(Gtk.STOCK_CANCEL)
        button_cancel.connect("clicked", self.close_it)
        buttons.pack_start(button_void, True, True, 0)
        buttons.pack_start(button_cancel, False, False, 10)
        buttons.pack_start(button_okay, False, False, 0)
        vbox = Gtk.VBox()
        vbox.pack_start(label, True, True, 10)
        vbox.pack_start(newcommand, False, False, 0)
        vbox.pack_start(buttons, False, False, 10)
        hbox = Gtk.HBox()
        hbox.pack_start(vbox, True, True, 10)
        self.add(hbox)
# Keyboard Shortcut selection window.
class KeyboardShortcut(Gtk.Window):
    """Dialog for assigning a new key combination to a shortcut row.

    Works against either backend; the module-level ``workspace`` global
    ("xfce" or "bind") selects where the new shortcut is written.
    """
    def close_it(self, widget):
        """Cancel handler: dismiss the dialog unchanged."""
        self.close()
    def action_bind_del(self, widget, store, treeiter):
        """Ask for confirmation before deleting the selected xbindkeys row."""
        action = "delete"
        command = treeiter
        self.close()
        confirm = AreYouSure(action, command)
        confirm.show_all()
    def set_shortcut(self, widget, store, treeiter):
        """Validate the typed key, build the modifier markup and apply it."""
        newkey = keybox_entry.get_text()
        # Keys accepted as the non-modifier part of a shortcut.
        # (Fixed: "F11" was previously missing from this list.)
        array = ["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", "F11", "F12", "`", "1", "2", "3",
                 "4", "5", "6", "7", "8", "9", "0", "-", "=", "q", "w", "e", "r", "t", "y", "u", "i", "o", "p",
                 "[", "]", "a", "s", "d", "f", "g", "h", "j", "k", "l", ";", "'", "z", "x", "c", "v", "b", "n",
                 "m", ",", ".", "/", "\\", "Space", "Tab", "Up", "Down", "Left", "Right"]
        newshortcut = ""
        if newkey == " ":
            newkey = "Space"
        if newkey in array:
            if _shift.get_active():
                newshortcut = newshortcut + "<Shift>"
            if _control.get_active():
                newshortcut = newshortcut + "<Control>"
            if _super.get_active():
                newshortcut = newshortcut + "<Super>"
            if _alt.get_active():
                newshortcut = newshortcut + "<Alt>"
            if not newshortcut:
                # Empty markup means no modifier toggle was active; a bare
                # key is not accepted as a shortcut.
                assign_void.set_markup("<b>Specify modifier(s)</b>")
            else:
                newshortcut = newshortcut + newkey
                store.set_value(treeiter, 1, newshortcut)
                if workspace == "xfce":
                    set_xfce_shortcut(command, newshortcut)
                elif workspace == "bind":
                    set_bind_shortcut(command, newshortcut, store)
                else:
                    print("ERROR: Unknown workspace type.")
                    sys.exit(1)
                self.close()
        else:
            assign_void.set_markup("<b>Invalid character</b>")
    def __init__(self, store, treeiter):
        Gtk.Window.__init__(self, title="Assign New Shortcut")
        self.set_size_request(250, 120)
        self.set_icon_from_file("/usr/share/pixmaps/peppermint-control-center.png")
        buttons = Gtk.HBox()
        # Modifier toggles are module-level so set_shortcut() can read them.
        global _shift
        _shift = Gtk.ToggleButton("Shift")
        global _control
        _control = Gtk.ToggleButton("Control")
        global _super
        _super = Gtk.ToggleButton("Super")
        global _alt
        _alt = Gtk.ToggleButton("Alt")
        buttons.pack_start(_shift, True, True, 5)
        buttons.pack_start(_control, True, True, 5)
        buttons.pack_start(_super, True, True, 5)
        buttons.pack_start(_alt, True, True, 5)
        keybox = Gtk.HBox()
        keybox_label = Gtk.Label("Enter a valid character:")
        keybox_space = Gtk.Label(" ")
        global keybox_entry
        keybox_entry = Gtk.Entry()
        keybox.pack_start(keybox_label, False, False, 0)
        keybox.pack_start(keybox_space, False, False, 5)
        keybox.pack_start(keybox_entry, True, True, 0)
        assign = Gtk.HBox()
        global assign_void
        assign_void = Gtk.Label()
        bind_delete = Gtk.Button.new_from_stock(Gtk.STOCK_DELETE)
        bind_delete.connect("clicked", self.action_bind_del, store, treeiter)
        assign_cancel = Gtk.Button.new_from_stock(Gtk.STOCK_CANCEL)
        assign_cancel.connect("clicked", self.close_it)
        assign_apply = Gtk.Button.new_from_stock(Gtk.STOCK_APPLY)
        assign_apply.connect("clicked", self.set_shortcut, store, treeiter)
        assign.pack_start(assign_void, True, True, 0)
        assign.pack_start(bind_delete, False, False, 0)
        assign.pack_start(assign_cancel, False, False, 5)
        assign.pack_start(assign_apply, False, False, 0)
        shortvbox = Gtk.VBox()
        shortvbox.pack_start(buttons, True, False, 5)
        shortvbox.pack_start(keybox, True, False, 5)
        shortvbox.pack_start(assign, True, False, 5)
        shorthbox = Gtk.HBox()
        shorthbox.pack_start(shortvbox, True, True, 10)
        self.add(shorthbox)
# Window and window layout.
class ControlCenter(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Peppermint Control Center")
self.set_size_request(600, 500)
self.set_icon_from_file("/usr/share/pixmaps/peppermint-control-center.png")
##############################
### 'Window Manager' page. ###
##############################
page_wman_lab = Gtk.Label("Window Manager")
page_wman = Gtk.VBox()
page_wman_hbox = Gtk.HBox()
page_wman_hbox.pack_start(page_wman, True, True, 10)
scroll_wman = Gtk.ScrolledWindow()
scroll_wman.add(page_wman_hbox)
theme_label = Gtk.Label()
theme_label.set_markup("<b>Theme</b>")
theme_void = Gtk.Label(" ")
theme_label_wrapper = Gtk.HBox()
theme_label_wrapper.pack_start(theme_label, False, False, 10)
theme_label_wrapper.pack_start(theme_void, True, True, 0)
global theme_box
theme_box = Gtk.ComboBoxText()
theme_box.connect("changed", set_theme)
for theme in theme_list():
theme_box.append_text(theme)
theme_box.set_active(theme_index())
theme_box_wrapper = Gtk.HBox()
theme_box_wrapper.pack_start(theme_box, True, True, 20)
title_font_label = Gtk.Label()
title_font_label.set_markup("<b>Title font</b>")
title_font_void = Gtk.Label(" ")
title_font_label_wrapper = Gtk.HBox()
title_font_label_wrapper.pack_start(title_font_label, False, False, 10)
title_font_label_wrapper.pack_start(title_font_void, True, True, 0)
global title_font
title_font = Gtk.FontButton()
title_font.set_font_name(get_title_font())
title_font.connect("font-set", set_title_font)
title_font_wrapper = Gtk.HBox()
title_font_wrapper.pack_start(title_font, True, True, 20)
title_alignment_label = Gtk.Label()
title_alignment_label.set_markup("<b>Title alignment</b>")
title_alignment_void = Gtk.Label(" ")
title_alignment_label_wrapper = Gtk.HBox()
title_alignment_label_wrapper.pack_start(title_alignment_label, False, False, 10)
title_alignment_label_wrapper.pack_start(title_alignment_void, True, True, 0)
global alignment_box
alignment_box = Gtk.ComboBoxText()
alignment_box.connect("changed", set_title_alignment)
alignment_box.append_text("Left")
alignment_box.append_text("Center")
alignment_box.append_text("Right")
alignment_box.set_active(alignment_index())
alignment_box_wrapper = Gtk.HBox()
alignment_box_wrapper.pack_start(alignment_box, True, True, 20)
workspace_count_label = Gtk.Label()
workspace_count_label.set_markup("<b>Number of workspaces:</b>")
workspace_count_adj = Gtk.Adjustment(0, 1, 100, 1, 10, 0)
global workspace_count
workspace_count = Gtk.SpinButton()
workspace_count.set_adjustment(workspace_count_adj)
workspace_count.set_value(get_workspace_count())
workspace_count.set_numeric(True)
workspace_count.connect("changed", set_workspace_count)
workspace_count_box = Gtk.HBox()
workspace_count_box.pack_start(workspace_count_label, False, False, 10)
workspace_count_box.pack_start(workspace_count, False, False, 10)
global focus_delay_box
focus_delay_box = Gtk.HBox()
focus_delay_wrapper = Gtk.HBox()
focus_delay_wrapper.pack_start(focus_delay_box, True, True, 50)
click_to_focus_label_box = Gtk.HBox()
click_to_focus_label = Gtk.Label()
click_to_focus_label.set_markup("<b>Focus model</b>")
click_to_focus_void = Gtk.Label(" ")
click_to_focus_label_box.pack_start(click_to_focus_label, False, False, 10)
click_to_focus_label_box.pack_start(click_to_focus_void, True, True, 0)
ctf_button1 = Gtk.RadioButton.new_with_label_from_widget(None, "Click to focus")
ctf_button1.connect("toggled", set_click_to_focus, "true")
if get_click_to_focus() == "true":
ctf_button1.set_active(True)
ctf_button2 = Gtk.RadioButton.new_from_widget(ctf_button1)
ctf_button2.set_label("Focus follows mouse")
ctf_button2.connect("toggled", set_click_to_focus, "false")
if get_click_to_focus() == "false":
ctf_button2.set_active(True)
click_to_focus = Gtk.HBox()
click_to_focus.pack_start(ctf_button1, True, True, 30)
click_to_focus.pack_start(ctf_button2, True, True, 30)
focus_delay_label_box = Gtk.HBox()
focus_delay_label = Gtk.Label("Delay before window receives focus:")
focus_delay_void = Gtk.Label(" ")
focus_delay_label_box.pack_start(focus_delay_label, False, False, 30)
focus_delay_label_box.pack_start(focus_delay_void, True, True, 0)
focus_delay_short = Gtk.Label()
focus_delay_short.set_markup("<i>Short</i>")
focus_delay_long = Gtk.Label()
focus_delay_long.set_markup("<i>Long</i>")
global focus_delay
focus_delay = Gtk.HScale()
focus_delay.set_range(5, 2000)
focus_delay.set_increments(1, 1)
focus_delay.set_digits(0)
focus_delay.set_value(get_focus_delay())
focus_delay.connect("value-changed", set_focus_delay)
focus_delay_box.pack_start(focus_delay_short, False, False, 0)
focus_delay_box.pack_start(focus_delay, True, True, 10)
focus_delay_box.pack_start(focus_delay_long, False, False, 0)
if ctf_button1.get_active():
focus_delay_box.set_sensitive(False)
focus_new_label_box = Gtk.HBox()
focus_new_label = Gtk.Label()
focus_new_label.set_markup("<b>New window focus</b>")
focus_new_void = Gtk.Label(" ")
focus_new_label_box.pack_start(focus_new_label, False, False, 10)
focus_new_label_box.pack_start(focus_new_void, True, True, 0)
global focus_new
focus_new = Gtk.CheckButton("Automatically give focus to newly created windows")
focus_new.set_active(get_focus_new())
focus_new.connect("toggled", set_focus_new)
focus_new_wrapper = Gtk.HBox()
focus_new_wrapper.pack_start(focus_new, True, True, 30)
global raise_delay_box
raise_delay_box = Gtk.HBox()
raise_delay_wrapper = Gtk.HBox()
raise_delay_wrapper.pack_start(raise_delay_box, True, True, 50)
raise_on_focus_label_box = Gtk.HBox()
raise_on_focus_label = Gtk.Label()
raise_on_focus_label.set_markup("<b>Raise on focus</b>")
raise_on_focus_void = Gtk.Label(" ")
raise_on_focus_label_box.pack_start(raise_on_focus_label, False, False, 10)
raise_on_focus_label_box.pack_start(raise_on_focus_void, True, True, 0)
global raise_on_focus
raise_on_focus = Gtk.CheckButton("Automatically raise windows when they receive focus")
raise_on_focus.set_active(get_raise_on_focus())
raise_on_focus.connect("toggled", set_raise_on_focus)
raise_on_focus_wrapper = Gtk.HBox()
raise_on_focus_wrapper.pack_start(raise_on_focus, True, True, 30)
raise_delay_label_box = Gtk.HBox()
raise_delay_label_wrapper = Gtk.HBox()
raise_delay_label_wrapper.pack_start(raise_delay_label_box, True, True, 30)
raise_delay_label = Gtk.Label("Delay before raising focused window:")
raise_delay_void = Gtk.Label(" ")
raise_delay_label_box.pack_start(raise_delay_label, False, False, 0)
raise_delay_label_box.pack_start(raise_delay_void, True, True, 0)
raise_delay_short = Gtk.Label()
raise_delay_short.set_markup("<i>Short</i>")
raise_delay_long = Gtk.Label()
raise_delay_long.set_markup("<i>Long</i>")
global raise_delay
raise_delay = Gtk.HScale()
raise_delay.set_range(5, 2000)
raise_delay.set_increments(1, 1)
raise_delay.set_digits(0)
raise_delay.set_value(get_raise_delay())
raise_delay.connect("value-changed", set_raise_delay)
raise_delay_box.pack_start(raise_delay_short, False, False, 0)
raise_delay_box.pack_start(raise_delay, True, True, 10)
raise_delay_box.pack_start(raise_delay_long, False, False, 0)
raise_on_click_label_box = Gtk.HBox()
raise_on_click_label = Gtk.Label()
raise_on_click_label.set_markup("<b>Raise on click</b>")
raise_on_click_void = Gtk.Label(" ")
raise_on_click_label_box.pack_start(raise_on_click_label, False, False, 10)
raise_on_click_label_box.pack_start(raise_on_click_void, True, True, 0)
global raise_on_click
raise_on_click = Gtk.CheckButton("Raise window when clicking inside an application window")
raise_on_click.set_active(get_raise_on_click())
raise_on_click.connect("toggled", set_raise_on_click)
raise_on_click_wrapper = Gtk.HBox()
raise_on_click_wrapper.pack_start(raise_on_click, True, True, 30)
snap_label_box = Gtk.HBox()
snap_label = Gtk.Label()
snap_label.set_markup("<b>Window Snapping</b>")
snap_void = Gtk.Label(" ")
snap_label_box.pack_start(snap_label, False, False, 10)
snap_label_box.pack_start(snap_void, True, True, 0)
global snap_to_border
snap_to_border = Gtk.CheckButton("To screen borders")
snap_to_border.set_active(get_snap_to_border())
snap_to_border.connect("toggled", set_snap_to_border)
global snap_to_windows
snap_to_windows = Gtk.CheckButton("To other windows")
snap_to_windows.set_active(get_snap_to_windows())
snap_to_windows.connect("toggled", set_snap_to_windows)
snap_to_wrapper = Gtk.HBox()
snap_to_wrapper.pack_start(snap_to_border, True, True, 30)
snap_to_wrapper.pack_start(snap_to_windows, True, True, 30)
global snap_width_box
snap_width_box = Gtk.HBox()
snap_width_wrapper = Gtk.HBox()
snap_width_wrapper.pack_start(snap_width_box, True, True, 50)
snap_width_label = Gtk.Label("Distance:")
snap_width_void = Gtk.Label(" ")
snap_width_label_wrapper = Gtk.HBox()
snap_width_label_wrapper.pack_start(snap_width_label, False, False, 30)
snap_width_label_wrapper.pack_start(snap_width_void, True, True, 0)
snap_width_small = Gtk.Label()
snap_width_small.set_markup("<i>Small</i>")
snap_width_wide = Gtk.Label()
snap_width_wide.set_markup("<i>Wide</i>")
global snap_width
snap_width = Gtk.HScale()
snap_width.set_range(5, 100)
snap_width.set_increments(1, 1)
snap_width.set_digits(0)
snap_width.set_value(get_snap_width())
snap_width.connect("value-changed", set_snap_width)
snap_width_box.pack_start(snap_width_small, False, False, 0)
snap_width_box.pack_start(snap_width, True, True, 10)
snap_width_box.pack_start(snap_width_wide, False, False, 0)
wrap_label_box = Gtk.HBox()
wrap_label = Gtk.Label()
wrap_label.set_markup("<b>Wrap workspaces when reaching the screen edge</b>")
wrap_void = Gtk.Label(" ")
wrap_label_box.pack_start(wrap_label, False, False, 10)
wrap_label_box.pack_start(wrap_void, True, True, 0)
global wrap_workspaces
wrap_workspaces = Gtk.CheckButton("With the mouse pointer")
wrap_workspaces.set_active(get_wrap_workspaces())
wrap_workspaces.connect("toggled", set_wrap_workspaces)
global wrap_windows
wrap_windows = Gtk.CheckButton("To other windows")
wrap_windows.set_active(get_wrap_windows())
wrap_windows.connect("toggled", set_wrap_windows)
wrap_type_wrapper = Gtk.HBox()
wrap_type_wrapper.pack_start(wrap_workspaces, True, True, 30)
wrap_type_wrapper.pack_start(wrap_windows, True, True, 30)
global wrap_resistance_box
wrap_resistance_box = Gtk.HBox()
wrap_resistance_wrapper = Gtk.HBox()
wrap_resistance_wrapper.pack_start(wrap_resistance_box, True, True, 50)
wrap_resistance_label = Gtk.Label("Edge Resistance:")
wrap_resistance_void = Gtk.Label(" ")
wrap_resistance_label_wrapper = Gtk.HBox()
wrap_resistance_label_wrapper.pack_start(wrap_resistance_label, False, False, 30)
wrap_resistance_label_wrapper.pack_start(wrap_resistance_void, True, True, 0)
wrap_resistance_small = Gtk.Label()
wrap_resistance_small.set_markup("<i>Small</i>")
wrap_resistance_wide = Gtk.Label()
wrap_resistance_wide.set_markup("<i>Wide</i>")
global wrap_resistance
wrap_resistance = Gtk.HScale()
wrap_resistance.set_range(5, 100)
wrap_resistance.set_increments(1, 1)
wrap_resistance.set_digits(0)
wrap_resistance.set_value(get_wrap_resistance())
wrap_resistance.connect("value-changed", set_wrap_resistance)
wrap_resistance_box.pack_start(wrap_resistance_small, False, False, 0)
wrap_resistance_box.pack_start(wrap_resistance, True, True, 10)
wrap_resistance_box.pack_start(wrap_resistance_wide, False, False, 0)
box_label_box = Gtk.HBox()
box_label = Gtk.Label()
box_label.set_markup("<b>Hide content of windows</b>")
box_void = Gtk.Label(" ")
box_label_box.pack_start(box_label, False, False, 10)
box_label_box.pack_start(box_void, True, True, 0)
global box_move
box_move = Gtk.CheckButton("When moving")
box_move.set_active(get_box_move())
box_move.connect("toggled", set_box_move)
global box_resize
box_resize = Gtk.CheckButton("When resizing")
box_resize.set_active(get_box_resize())
box_resize.connect("toggled", set_box_resize)
box_type_wrapper = Gtk.HBox()
box_type_wrapper.pack_start(box_move, True, True, 30)
box_type_wrapper.pack_start(box_resize, True, True, 30)
double_click_label_box = Gtk.HBox()
double_click_label = Gtk.Label()
double_click_label.set_markup("<b>Title bar double click action</b>")
double_click_void = Gtk.Label(" ")
double_click_label_box.pack_start(double_click_label, False, False, 10)
double_click_label_box.pack_start(double_click_void, True, True, 0)
global double_click_action
double_click_action = Gtk.ComboBoxText()
double_click_action.connect("changed", set_double_click_action)
double_click_action.append_text("Shade window")
double_click_action.append_text("Hide window")
double_click_action.append_text("Maximize window")
double_click_action.append_text("Fill window")
double_click_action.append_text("Nothing")
double_click_action.set_active(double_click_action_index())
double_click_box = Gtk.HBox()
double_click_box.pack_start(double_click_action, True, True, 30)
wmimage = Gtk.Image()
wmimage.set_from_file("/usr/lib/peppermint/peppermint-control-center/images/wm.png") # temporary
wmimage_label = Gtk.Label("Configure window manager\nbehavior and options")
wmimage_label.set_justify(Gtk.Justification.CENTER)
box_wmimage = Gtk.VBox()
box_wmimage.pack_start(wmimage, True, True, 10)
box_wmimage.pack_start(wmimage_label, True, True, 0)
box_theme = Gtk.VBox()
box_theme.pack_start(theme_label_wrapper, False, False, 4)
box_theme.pack_start(theme_box_wrapper, False, False, 4)
box_theme.pack_start(title_font_label_wrapper, False, False, 4)
box_theme.pack_start(title_font_wrapper, False, False, 4)
box_theme.pack_start(title_alignment_label_wrapper, False, False, 4)
box_theme.pack_start(alignment_box_wrapper, False, False, 4)
box_theme.pack_start(workspace_count_box, False, False, 10)
box_top = Gtk.HBox()
box_top.pack_start(box_wmimage, False, False, 30)
box_top.pack_start(box_theme, True, True, 0)
box_focus = Gtk.VBox()
box_focus.pack_start(click_to_focus_label_box, False, False, 4)
box_focus.pack_start(click_to_focus, False, False, 4)
box_focus.pack_start(focus_delay_label_box, False, False, 4)
box_focus.pack_start(focus_delay_wrapper, False, False, 4)
box_focus_new = Gtk.VBox()
box_focus_new.pack_start(focus_new_label_box, False, False, 4)
box_focus_new.pack_start(focus_new_wrapper, False, False, 4)
box_raise = Gtk.VBox()
box_raise.pack_start(raise_on_focus_label_box, False, False, 4)
box_raise.pack_start(raise_on_focus_wrapper, False, False, 4)
box_raise.pack_start(raise_delay_label_wrapper, False, False, 4)
box_raise.pack_start(raise_delay_wrapper, False, False, 4)
box_click = Gtk.VBox()
box_click.pack_start(raise_on_click_label_box, False, False, 4)
box_click.pack_start(raise_on_click_wrapper, False, False, 4)
box_snap = Gtk.VBox()
box_snap.pack_start(snap_label_box, False, False, 4)
box_snap.pack_start(snap_to_wrapper, False, False, 4)
box_snap.pack_start(snap_width_label_wrapper, False, False, 4)
box_snap.pack_start(snap_width_wrapper, False, False, 4)
box_wrap = Gtk.VBox()
box_wrap.pack_start(wrap_label_box, False, False, 4)
box_wrap.pack_start(wrap_type_wrapper, False, False, 4)
box_wrap.pack_start(wrap_resistance_label_wrapper, False, False, 4)
box_wrap.pack_start(wrap_resistance_wrapper, False, False, 4)
box_box = Gtk.VBox()
box_box.pack_start(box_label_box, False, False, 4)
box_box.pack_start(box_type_wrapper, False, False, 4)
box_dclick = Gtk.VBox()
box_dclick.pack_start(double_click_label_box, False, False, 4)
box_dclick.pack_start(double_click_box, False, False, 4)
page_wman.pack_start(box_top, True, True, 6)
page_wman.pack_start(box_focus, True, True, 4)
page_wman.pack_start(box_focus_new, True, True, 4)
page_wman.pack_start(box_raise, True, True, 4)
page_wman.pack_start(box_click, True, True, 4)
page_wman.pack_start(box_snap, True, True, 4)
page_wman.pack_start(box_wrap, True, True, 4)
page_wman.pack_start(box_box, True, True, 4)
page_wman.pack_start(box_dclick, True, True, 4)
###############################
### 'Pointer Options' page. ###
###############################
page_point_lab = Gtk.Label("Keyboard & Pointer")
page_point = Gtk.VBox()
scroll_point = Gtk.ScrolledWindow()
scroll_point.add(page_point)
keyopt_label = Gtk.Label()
keyopt_label.set_markup("<b>Keyboard options</b>")
keyopt_void = Gtk.Label(" ")
keyopt_label_box = Gtk.HBox()
keyopt_label_box.pack_start(keyopt_label, False, False, 10)
keyopt_label_box.pack_start(keyopt_void, True, True, 0)
delay_label = Gtk.Label("Delay:")
global delay
delay = Gtk.HScale()
delay.set_range(10, 1000)
delay.set_increments(1, 1)
delay.set_digits(0)
try:
delay.set_value(get_delay())
except:
delay.set_value(500)
delay.connect("value-changed", keys_apply)
delay_label_box = Gtk.HBox()
delay_label_box.pack_start(delay_label, False, False, 20)
delay_label_box.pack_start(delay, True, True, 0)
interval_label = Gtk.Label("Interval:")
global interval
interval = Gtk.HScale()
interval.set_range(1, 50)
interval.set_increments(1, 1)
interval.set_digits(0)
try:
interval.set_value(get_interval())
except:
interval.set_value(20)
interval.connect("value-changed", keys_apply)
interval_label_box = Gtk.HBox()
interval_label_box.pack_start(interval_label, False, False, 20)
interval_label_box.pack_start(interval, True, True, 0)
kbimage = Gtk.Image()
kbimage.set_from_file("/usr/lib/peppermint/peppermint-control-center/images/keyboard.png")
layout_button = Gtk.Button("Select Keyboard Layout")
layout_button.connect("clicked", layoutsetter)
layout_button_left = Gtk.Label(" ")
layout_button_right = Gtk.Label(" ")
layout_button_box = Gtk.HBox()
layout_button_box.pack_start(layout_button_left, True, True, 30)
layout_button_box.pack_start(layout_button, True, True, 0)
layout_button_box.pack_start(layout_button_right, True, True, 30)
pntopt_label = Gtk.Label()
pntopt_label.set_markup("<b>Pointer options</b>")
pntopt_void = Gtk.Label(" ")
pntopt_label_box = Gtk.HBox()
pntopt_label_box.pack_start(pntopt_label, False, False, 10)
pntopt_label_box.pack_start(pntopt_void, True, True, 0)
acceleration_label = Gtk.Label("Acceleration:")
global acceleration
acceleration = Gtk.HScale()
acceleration.set_range(0, 10)
acceleration.set_increments(1, 1)
acceleration.set_digits(0)
try:
acceleration.set_value(get_acceleration())
except:
acceleration.set_value(2)
acceleration.connect("value-changed", pntr_apply)
acceleration_label_box = Gtk.HBox()
acceleration_label_box.pack_start(acceleration_label, False, False, 20)
acceleration_label_box.pack_start(acceleration, True, True, 0)
threshold_label = Gtk.Label("Threshold")
global threshold
threshold = Gtk.HScale()
threshold.set_range(0, 10)
threshold.set_increments(1, 1)
threshold.set_digits(0)
try:
threshold.set_value(get_threshold())
except:
threshold.set_value(2)
threshold.connect("value-changed", pntr_apply)
threshold_label_box = Gtk.HBox()
threshold_label_box.pack_start(threshold_label, False, False, 20)
threshold_label_box.pack_start(threshold, True, True, 0)
pntimage = Gtk.Image()
pntimage.set_from_file("/usr/lib/peppermint/peppermint-control-center/images/mouse.png")
global lefthanded
lefthanded = Gtk.CheckButton("Left handed button layout")
lefthanded.set_active(get_lefthanded())
lefthanded.connect("toggled", pntr_apply)
lefthanded_left = Gtk.Label(" ")
lefthanded_right = Gtk.Label(" ")
lefthanded_box = Gtk.HBox()
lefthanded_box.pack_start(lefthanded_left, True, True, 0)
lefthanded_box.pack_start(lefthanded, False, False, 0)
lefthanded_box.pack_start(lefthanded_right, True, True, 0)
touch_label = Gtk.Label()
touch_label.set_markup("<b>Touchpad options</b>")
touch_void = Gtk.Label(" ")
touch_label_box = Gtk.HBox()
touch_label_box.pack_start(touch_label, False, False, 30)
touch_label_box.pack_start(touch_void, True, True, 0)
global touchenable
touchenable = Gtk.CheckButton("Touchpad Enabled")
touchenable.set_active(get_touchenable())
touchenable.connect("toggled", pntr_apply)
global taptoclick
taptoclick = Gtk.CheckButton("Tap-to-Click Enabled")
taptoclick.set_active(get_taptoclick())
taptoclick.connect("toggled", pntr_apply)
global vertedge
vertedge = Gtk.CheckButton("Vertical Edge Scrolling")
vertedge.set_active(get_vertedge())
vertedge.connect("toggled", pntr_apply)
global horizedge
horizedge = Gtk.CheckButton("Horizontal Edge Scrolling")
horizedge.set_active(get_horizedge())
horizedge.connect("toggled", pntr_apply)
global vtwofinger
vtwofinger = Gtk.CheckButton("Two Finger Vertical Scrolling")
vtwofinger.set_active(get_vtwofinger())
vtwofinger.connect("toggled", pntr_apply)
global htwofinger
htwofinger = Gtk.CheckButton("Two Finger Horizontal Scrolling")
htwofinger.set_active(get_htwofinger())
htwofinger.connect("toggled", pntr_apply)
kbimage_box = Gtk.VBox()
kbimage_box.pack_start(kbimage, True, True, 20)
kboptions_box = Gtk.VBox()
kboptions_box.pack_start(keyopt_label_box, False, False, 4)
kboptions_box.pack_start(delay_label_box, False, False, 4)
kboptions_box.pack_start(interval_label_box, False, False, 4)
kboptions_box.pack_start(layout_button_box, False, False, 8)
kb_box = Gtk.HBox()
kb_box.pack_start(kbimage_box, False, False, 40)
kb_box.pack_start(kboptions_box, True, True, 20)
pntimage_box = Gtk.VBox()
pntimage_box.pack_start(pntimage, True, True, 20)
pntoptions_box = Gtk.VBox()
pntoptions_box.pack_start(pntopt_label_box, False, False, 4)
pntoptions_box.pack_start(acceleration_label_box, False, False, 4)
pntoptions_box.pack_start(threshold_label_box, False, False, 4)
pntoptions_box.pack_start(lefthanded_box, False, False, 4)
pnt_box = Gtk.HBox()
pnt_box.pack_start(pntoptions_box, True, True, 20)
pnt_box.pack_start(pntimage_box, False, False, 40)
touchleft_box = Gtk.VBox()
touchleft_box.pack_start(touchenable, False, False, 0)
touchleft_box.pack_start(vertedge, False, False, 0)
touchleft_box.pack_start(horizedge, False, False, 0)
touchright_box = Gtk.VBox()
touchright_box.pack_start(taptoclick, False, False, 0)
touchright_box.pack_start(vtwofinger, False, False, 0)
touchright_box.pack_start(htwofinger, False, False, 0)
touch_box = Gtk.HBox()
touch_box.pack_start(touchleft_box, False, False, 40)
touch_box.pack_start(touchright_box, False, False, 0)
page_point.pack_start(kb_box, False, False, 10)
page_point.pack_start(pnt_box, False, False, 0)
page_point.pack_start(touch_label_box, False, False, 10)
page_point.pack_start(touch_box, False, False, 10)
##################################
### 'Keyboard Shortcuts' page. ###
##################################
page_short_lab = Gtk.Label("Keyboard Shortcuts")
page_short = Gtk.VBox()
scroll_short = Gtk.ScrolledWindow()
scroll_short.add(page_short)
xfce_label = Gtk.Label()
xfce_label.set_markup("<b>Window Manager Shortcuts</b>")
xfce_void = Gtk.Label(" ")
xfce_restore = Gtk.Button("Restore Defaults")
xfce_restore.connect("clicked", action_xfce_restore)
xfce_label_box = Gtk.HBox()
xfce_label_box.pack_start(xfce_label, False, False, 10)
xfce_label_box.pack_start(xfce_void, True, True, 0)
xfce_label_box.pack_start(xfce_restore, False, False, 10)
xfcestore = Gtk.ListStore(str, str)
get_xfce = get_xfce_shortcuts()
move_directions = ['move_window_workspace', 'move_window_right_key', 'move_window_left_key',
'move_window_up_key', 'up_workspace_key', 'down_workspace_key', 'switch_window_key',
'raise_window_key']
for line in get_xfce:
a = line.split()
for direction in move_directions:
if direction in a[0]:
continue
xfcestore.append([a[0], a[1]])
xfceview = Gtk.TreeView(model=xfcestore)
xfce_renderer_action = Gtk.CellRendererText()
xfce_column_action = Gtk.TreeViewColumn("Action", xfce_renderer_action, text=0)
xfceview.append_column(xfce_column_action)
xfce_renderer_shortcut = Gtk.CellRendererText()
xfce_column_shortcut = Gtk.TreeViewColumn("Shortcut", xfce_renderer_shortcut, text=1)
xfceview.append_column(xfce_column_shortcut)
xfceview.connect("row-activated", edit_xfce_shortcut)
global bindstore
bindstore = Gtk.ListStore(str, str)
get_bind = get_xbindkeys()
for line in get_bind:
a = line.split("\n")
bindstore.append([a[0], a[1]])
bind_label = Gtk.Label()
bind_label.set_markup("<b>System Shortcuts</b>")
bind_void = Gtk.Label(" ")
bind_new = Gtk.Button("Add New")
bind_new.connect("clicked", action_bind_new, bindstore)
bind_restore = Gtk.Button("Restore Defaults")
bind_restore.connect("clicked", action_bind_restore)
bind_label_box = Gtk.HBox()
bind_label_box.pack_start(bind_label, False, False, 10)
bind_label_box.pack_start(bind_void, True, True, 0)
bind_label_box.pack_start(bind_new, False, False, 0)
bind_label_box.pack_start(bind_restore, False, False, 10)
global bindview
bindview = Gtk.TreeView(model=bindstore)
bind_renderer_action = Gtk.CellRendererText()
bind_column_action = Gtk.TreeViewColumn("Action", bind_renderer_action, text=0)
bindview.append_column(bind_column_action)
bind_renderer_shortcut = Gtk.CellRendererText()
bind_column_shortcut = Gtk.TreeViewColumn("Shortcut", bind_renderer_shortcut, text=1)
bindview.append_column(bind_column_shortcut)
bindview.connect("row-activated", edit_bind_shortcut)
page_short.pack_start(bind_label_box, False, False, 10)
page_short.pack_start(bindview, True, True, 0)
page_short.pack_start(xfce_label_box, False, False, 10)
page_short.pack_start(xfceview, True, True, 0)
###############################
### 'Desktop Effects' page. ###
###############################
page_effect_lab = Gtk.Label("Desktop Effects")
page_effect = Gtk.VBox()
scroll_effect = Gtk.ScrolledWindow()
scroll_effect.add(page_effect)
use_compositing = Gtk.CheckButton()
use_compositing.set_active(get_use_compositing())
use_compositing.connect("toggled", set_use_compositing)
use_compositing_label = Gtk.Label()
use_compositing_label.set_markup("<b>Enable desktop effects</b>")
use_compositing_void = Gtk.Label(" ")
use_compositing_box = Gtk.HBox()
use_compositing_box.pack_start(use_compositing, False, False, 10)
use_compositing_box.pack_start(use_compositing_label, False, False, 0)
use_compositing_box.pack_start(use_compositing_void, True, True, 0)
unredirect_overlays = Gtk.CheckButton("Display fullscreen overlay windows directly")
unredirect_overlays.set_active(get_unredirect_overlays())
unredirect_overlays.connect("toggled", set_unredirect_overlays)
sync_to_vblank = Gtk.CheckButton("Synchronize drawing to the vertical blank")
sync_to_vblank.set_active(get_sync_to_vblank())
sync_to_vblank.connect("toggled", set_sync_to_vblank)
show_popup_shadow = Gtk.CheckButton("Show shadows under popup windows")
show_popup_shadow.set_active(get_show_popup_shadow())
show_popup_shadow.connect("toggled", set_show_popup_shadow)
show_dock_shadow = Gtk.CheckButton("Show shadows under dock windows")
show_dock_shadow.set_active(get_show_dock_shadow())
show_dock_shadow.connect("toggled", set_show_dock_shadow)
show_frame_shadow = Gtk.CheckButton("Show shadows under regular windows")
show_frame_shadow.set_active(get_show_frame_shadow())
show_frame_shadow.connect("toggled", set_show_frame_shadow)
frame_opacity_label = Gtk.Label("Opacity of window decorations:")
frame_opacity_void = Gtk.Label(" ")
frame_opacity_label_box = Gtk.HBox()
frame_opacity_label_box.pack_start(frame_opacity_label, False, False, 0)
frame_opacity_label_box.pack_start(frame_opacity_void, True, True, 0)
frame_opacity_left = Gtk.Label()
frame_opacity_left.set_markup("<i>Transparent</i>")
frame_opacity_right = Gtk.Label()
frame_opacity_right.set_markup("<i>Opaque</i>")
frame_opacity = Gtk.HScale()
frame_opacity.set_range(0, 100)
frame_opacity.set_increments(1, 1)
frame_opacity.set_digits(0)
try:
frame_opacity.set_value(get_frame_opacity())
except:
frame_opacity.set_value(100)
frame_opacity.connect("value-changed", set_frame_opacity)
frame_opacity_box = Gtk.HBox()
frame_opacity_box.pack_start(frame_opacity_left, False, False, 15)
frame_opacity_box.pack_start(frame_opacity, True, True, 0)
frame_opacity_box.pack_start(frame_opacity_right, False, False, 15)
inactive_opacity_label = Gtk.Label("Opacity of inactive windows:")
inactive_opacity_void = Gtk.Label(" ")
inactive_opacity_label_box = Gtk.HBox()
inactive_opacity_label_box.pack_start(inactive_opacity_label, False, False, 0)
inactive_opacity_label_box.pack_start(inactive_opacity_void, True, True, 0)
inactive_opacity_left = Gtk.Label()
inactive_opacity_left.set_markup("<i>Transparent</i>")
inactive_opacity_right = Gtk.Label()
inactive_opacity_right.set_markup("<i>Opaque</i>")
inactive_opacity = Gtk.HScale()
inactive_opacity.set_range(0, 100)
inactive_opacity.set_increments(1, 1)
inactive_opacity.set_digits(0)
try:
inactive_opacity.set_value(get_inactive_opacity())
except:
inactive_opacity.set_value(100)
inactive_opacity.connect("value-changed", set_inactive_opacity)
inactive_opacity_box = Gtk.HBox()
inactive_opacity_box.pack_start(inactive_opacity_left, False, False, 15)
inactive_opacity_box.pack_start(inactive_opacity, True, True, 0)
inactive_opacity_box.pack_start(inactive_opacity_right, False, False, 15)
move_opacity_label = Gtk.Label("Opacity of windows during move:")
move_opacity_void = Gtk.Label(" ")
move_opacity_label_box = Gtk.HBox()
move_opacity_label_box.pack_start(move_opacity_label, False, False, 0)
move_opacity_label_box.pack_start(move_opacity_void, True, True, 0)
move_opacity_left = Gtk.Label()
move_opacity_left.set_markup("<i>Transparent</i>")
move_opacity_right = Gtk.Label()
move_opacity_right.set_markup("<i>Opaque</i>")
move_opacity = Gtk.HScale()
move_opacity.set_range(0, 100)
move_opacity.set_increments(1, 1)
move_opacity.set_digits(0)
try:
move_opacity.set_value(get_move_opacity())
except:
move_opacity.set_value(100)
move_opacity.connect("value-changed", set_move_opacity)
move_opacity_box = Gtk.HBox()
move_opacity_box.pack_start(move_opacity_left, False, False, 15)
move_opacity_box.pack_start(move_opacity, True, True, 0)
move_opacity_box.pack_start(move_opacity_right, False, False, 15)
resize_opacity_label = Gtk.Label("Opacity of windows during resize:")
resize_opacity_void = Gtk.Label(" ")
resize_opacity_label_box = Gtk.HBox()
resize_opacity_label_box.pack_start(resize_opacity_label, False, False, 0)
resize_opacity_label_box.pack_start(resize_opacity_void, True, True, 0)
resize_opacity_left = Gtk.Label()
resize_opacity_left.set_markup("<i>Transparent</i>")
resize_opacity_right = Gtk.Label()
resize_opacity_right.set_markup("<i>Opaque</i>")
resize_opacity = Gtk.HScale()
resize_opacity.set_range(0, 100)
resize_opacity.set_increments(1, 1)
resize_opacity.set_digits(0)
try:
resize_opacity.set_value(get_resize_opacity())
except:
resize_opacity.set_value(100)
resize_opacity.connect("value-changed", set_resize_opacity)
resize_opacity_box = Gtk.HBox()
resize_opacity_box.pack_start(resize_opacity_left, False, False, 15)
resize_opacity_box.pack_start(resize_opacity, True, True, 0)
resize_opacity_box.pack_start(resize_opacity_right, False, False, 15)
popup_opacity_label = Gtk.Label("Opacity of popup windows:")
popup_opacity_void = Gtk.Label(" ")
popup_opacity_label_box = Gtk.HBox()
popup_opacity_label_box.pack_start(popup_opacity_label, False, False, 0)
popup_opacity_label_box.pack_start(popup_opacity_void, True, True, 0)
popup_opacity_left = Gtk.Label()
popup_opacity_left.set_markup("<i>Transparent</i>")
popup_opacity_right = Gtk.Label()
popup_opacity_right.set_markup("<i>Opaque</i>")
popup_opacity = Gtk.HScale()
popup_opacity.set_range(0, 100)
popup_opacity.set_increments(1, 1)
popup_opacity.set_digits(0)
try:
popup_opacity.set_value(get_popup_opacity())
except:
popup_opacity.set_value(100)
popup_opacity.connect("value-changed", set_popup_opacity)
popup_opacity_box = Gtk.HBox()
popup_opacity_box.pack_start(popup_opacity_left, False, False, 15)
popup_opacity_box.pack_start(popup_opacity, True, True, 0)
popup_opacity_box.pack_start(popup_opacity_right, False, False, 15)
deimage = Gtk.Image()
deimage.set_from_file("/usr/lib/peppermint/peppermint-control-center/images/de.png")
deimage_box = Gtk.HBox()
deimage_box.pack_start(deimage, True, True, 0)
effects_left = Gtk.VBox()
effects_left.pack_start(unredirect_overlays, False, False, 0)
effects_left.pack_start(sync_to_vblank, False, False, 0)
effects_left.pack_start(show_popup_shadow, False, False, 0)
effects_left.pack_start(show_dock_shadow, False, False, 0)
effects_left.pack_start(show_frame_shadow, False, False, 0)
effects_top = Gtk.HBox()
effects_top.pack_start(effects_left, False, False, 0)
effects_top.pack_start(deimage_box, True, True, 0)
global effects_box
effects_box = Gtk.VBox()
if get_use_compositing() == False:
effects_box.set_sensitive(False)
effects_box.pack_start(effects_top, False, False, 10)
effects_box.pack_start(frame_opacity_label_box, False, False, 0)
effects_box.pack_start(frame_opacity_box, False, False, 0)
effects_box.pack_start(inactive_opacity_label_box, False, False, 0)
effects_box.pack_start(inactive_opacity_box, False, False, 0)
effects_box.pack_start(move_opacity_label_box, False, False, 0)
effects_box.pack_start(move_opacity_box, False, False, 0)
effects_box.pack_start(resize_opacity_label_box, False, False, 0)
effects_box.pack_start(resize_opacity_box, False, False, 0)
effects_box.pack_start(popup_opacity_label_box, False, False, 0)
effects_box.pack_start(popup_opacity_box, False, False, 0)
effects_wrapper = Gtk.HBox()
effects_wrapper.pack_start(effects_box, True, True, 30)
page_effect.pack_start(use_compositing_box, False, False, 10)
page_effect.pack_start(effects_wrapper, True, True, 0)
########################
### 'Advanced' page. ###
########################
page_adv_lab = Gtk.Label("Advanced")
page_adv = Gtk.VBox()
scroll_adv = Gtk.ScrolledWindow()
scroll_adv.add(page_adv)
cycling_label = Gtk.Label()
cycling_label.set_markup("<b>Window cycling</b>")
cycling_void = Gtk.Label(" ")
cycling_label_box = Gtk.HBox()
cycling_label_box.pack_start(cycling_label, False, False, 10)
cycling_label_box.pack_start(cycling_void, True, True, 0)
cycle_minimum = Gtk.CheckButton("Skip windows specified to skip the pager/taskbar")
cycle_minimum.set_active(get_cycle_minimum())
cycle_minimum.connect("toggled", set_cycle_minimum)
cycle_hidden = Gtk.CheckButton("Include hidden/iconified windows")
cycle_hidden.set_active(get_cycle_hidden())
cycle_hidden.connect("toggled", set_cycle_minimum)
cycle_workspaces = Gtk.CheckButton("Cycle through windows on all workspaces")
cycle_workspaces.set_active(get_cycle_workspaces())
cycle_workspaces.connect("toggled", set_cycle_workspaces)
cycle_draw_frame = Gtk.CheckButton("Draw frame around selected windows while cycling")
cycle_draw_frame.set_active(get_cycle_draw_frame())
cycle_draw_frame.connect("toggled", set_cycle_draw_frame)
cycling_section = Gtk.VBox()
cycling_section.pack_start(cycle_minimum, False, False, 0)
cycling_section.pack_start(cycle_hidden, False, False, 0)
cycling_section.pack_start(cycle_workspaces, False, False, 0)
cycling_section.pack_start(cycle_draw_frame, False, False, 0)
cycling_section_box = Gtk.HBox()
cycling_section_box.pack_start(cycling_section, False, False, 30)
cycling_box = Gtk.VBox()
cycling_box.pack_start(cycling_label_box, False, False, 4)
cycling_box.pack_start(cycling_section_box, False, False, 4)
focus_label = Gtk.Label()
focus_label.set_markup("<b>Window focus</b>")
focus_void = Gtk.Label(" ")
focus_label_box = Gtk.HBox()
focus_label_box.pack_start(focus_label, False, False, 10)
focus_label_box.pack_start(focus_void, True, True, 0)
prevent_focus_stealing = Gtk.CheckButton("Activate focus stealing prevention")
prevent_focus_stealing.set_active(get_prevent_focus_stealing())
prevent_focus_stealing.connect("toggled", set_prevent_focus_stealing)
focus_hint = Gtk.CheckButton("Honor standard ICCCM focus hint")
focus_hint.set_active(get_focus_hint())
focus_hint.connect("toggled", set_focus_hint)
window_raise_label = Gtk.Label()
window_raise_label.set_markup("<i>When a window raises itself:</i>")
window_raise_void = Gtk.Label(" ")
window_raise_label_box = Gtk.HBox()
window_raise_label_box.pack_start(window_raise_label, False, False, 30)
window_raise_label_box.pack_start(window_raise_void, True, True, 0)
activate_action_box = Gtk.VBox()
activate_action_bring = Gtk.RadioButton.new_with_label_from_widget(None, "Bring window on current workspace")
activate_action_bring.connect("toggled", set_activate_action, "bring")
if get_activate_action() == "bring":
activate_action_bring.set_active(True)
activate_action_switch = Gtk.RadioButton.new_from_widget(activate_action_bring)
activate_action_switch.set_label("Switch to window's workspace")
activate_action_switch.connect("toggled", set_activate_action, "switch")
if get_activate_action() == "switch":
activate_action_switch.set_active(True)
activate_action_none = Gtk.RadioButton.new_from_widget(activate_action_bring)
activate_action_none.set_label("Do nothing")
activate_action_none.connect("toggled", set_activate_action, "none")
if get_activate_action() == "none":
activate_action_none.set_active(True)
activate_action_box.pack_start(activate_action_bring, False, False, 0)
activate_action_box.pack_start(activate_action_switch, False, False, 0)
activate_action_box.pack_start(activate_action_none, False, False, 0)
activate_action_wrapper = Gtk.HBox()
activate_action_wrapper.pack_start(activate_action_box, False, False, 50)
focus_section = Gtk.VBox()
focus_section.pack_start(prevent_focus_stealing, False, False, 0)
focus_section.pack_start(focus_hint, False, False, 0)
focus_section.pack_start(window_raise_label_box, False, False, 10)
focus_section.pack_start(activate_action_wrapper, False, False, 0)
focus_section_box = Gtk.HBox()
focus_section_box.pack_start(focus_section, False, False, 30)
focus_box = Gtk.VBox()
focus_box.pack_start(focus_label_box, False, False, 4)
focus_box.pack_start(focus_section_box, False, False, 4)
accessibility_label = Gtk.Label()
accessibility_label.set_markup("<b>Accessibility</b>")
accessibility_void = Gtk.Label(" ")
accessibility_label_box = Gtk.HBox()
accessibility_label_box.pack_start(accessibility_label, False, False, 10)
accessibility_label_box.pack_start(accessibility_void, True, True, 0)
raise_with_any_button = Gtk.CheckButton("Raise windows when any pointer button is pressed")
raise_with_any_button.set_active(get_raise_with_any_button())
raise_with_any_button.connect("toggled", set_raise_with_any_button)
borderless_maximize = Gtk.CheckButton("Hide frames of maximized windows")
borderless_maximize.set_active(get_borderless_maximize())
borderless_maximize.connect("toggled", set_borderless_maximize)
global restore_on_move
restore_on_move = Gtk.CheckButton("Restore original size of maximized windows when moving")
restore_on_move.set_active(get_restore_on_move())
restore_on_move.connect("toggled", set_restore_on_move)
# Depends on restore_on_move
global tile_on_move
tile_on_move = Gtk.CheckButton("Automatically tile windows when moving toward the screen edge")
tile_on_move.set_active(get_tile_on_move())
tile_on_move.connect("toggled", set_tile_on_move)
if get_restore_on_move() == True:
tile_on_move.set_sensitive(True)
else:
tile_on_move.set_sensitive(False)
snap_resist = Gtk.CheckButton("Use edge resistance instead of window snapping")
snap_resist.set_active(get_snap_resist())
snap_resist.connect("toggled", set_snap_resist)
# Also disable repeat_urgent_blink
global urgent_blink
urgent_blink = Gtk.CheckButton("Notify of urgency with blinking window decorations")
urgent_blink.set_active(get_urgent_blink())
urgent_blink.connect("toggled", set_urgent_blink)
# Depends on urgent_blink
global repeat_urgent_blink
repeat_urgent_blink = Gtk.CheckButton("Keep urgent windows blinking repeatedly")
repeat_urgent_blink.set_active(get_repeat_urgent_blink())
repeat_urgent_blink.connect("toggled", set_repeat_urgent_blink)
if get_urgent_blink() == True:
repeat_urgent_blink.set_sensitive(True)
else:
repeat_urgent_blink.set_active(False)
repeat_urgent_blink.set_sensitive(False)
set_repeat_urgent_blink_off()
mousewheel_rollup = Gtk.CheckButton("Scroll on title bar to roll up the window")
mousewheel_rollup.set_active(get_mousewheel_rollup())
mousewheel_rollup.connect("toggled", set_mousewheel_rollup)
accessibility_section = Gtk.VBox()
accessibility_section.pack_start(raise_with_any_button, False, False, 0)
accessibility_section.pack_start(borderless_maximize, False, False, 0)
accessibility_section.pack_start(restore_on_move, False, False, 0)
accessibility_section.pack_start(tile_on_move, False, False, 0)
accessibility_section.pack_start(snap_resist, False, False, 0)
accessibility_section.pack_start(urgent_blink, False, False, 0)
accessibility_section.pack_start(repeat_urgent_blink, False, False, 0)
accessibility_section.pack_start(mousewheel_rollup, False, False, 0)
accessibility_section_box = Gtk.HBox()
accessibility_section_box.pack_start(accessibility_section, False, False, 30)
accessibility_box = Gtk.VBox()
accessibility_box.pack_start(accessibility_label_box, False, False, 4)
accessibility_box.pack_start(accessibility_section_box, False, False, 4)
workspaces_label = Gtk.Label()
workspaces_label.set_markup("<b>Workspaces</b>")
workspaces_void = Gtk.Label(" ")
workspaces_label_box = Gtk.HBox()
workspaces_label_box.pack_start(workspaces_label, False, False, 10)
workspaces_label_box.pack_start(workspaces_void, True, True, 0)
scroll_workspaces = Gtk.CheckButton("Scroll on desktop to change workspaces")
scroll_workspaces.set_active(get_scroll_workspaces())
scroll_workspaces.connect("toggled", set_scroll_workspaces)
wrap_layout = Gtk.CheckButton("Wrap workspaces")
wrap_layout.set_active(get_wrap_layout())
wrap_layout.connect("toggled", set_wrap_layout)
workspaces_section = Gtk.VBox()
workspaces_section.pack_start(scroll_workspaces, False, False, 0)
workspaces_section.pack_start(wrap_layout, False, False, 0)
workspaces_section_box = Gtk.HBox()
workspaces_section_box.pack_start(workspaces_section, False, False, 30)
workspaces_box = Gtk.VBox()
workspaces_box.pack_start(workspaces_label_box, False, False, 4)
workspaces_box.pack_start(workspaces_section_box, False, False, 4)
placement_label = Gtk.Label()
placement_label.set_markup("<b>Window placement</b>")
placement_void = Gtk.Label(" ")
placement_label_box = Gtk.HBox()
placement_label_box.pack_start(placement_label, False, False, 10)
placement_label_box.pack_start(placement_void, True, True, 0)
placement_ratio_label = Gtk.Label("Minimum window size to trigger smart placement:")
placement_ratio_void = Gtk.Label(" ")
placement_ratio_label_box = Gtk.HBox()
placement_ratio_label_box.pack_start(placement_ratio_label, False, False, 0)
placement_ratio_label_box.pack_start(placement_ratio_void, True, True, 0)
placement_ratio_small = Gtk.Label()
placement_ratio_small.set_markup("<i>Small</i>")
placement_ratio_large = Gtk.Label()
placement_ratio_large.set_markup("<i>Large</i>")
placement_ratio = Gtk.HScale()
placement_ratio.set_range(0, 100)
placement_ratio.set_increments(1, 1)
placement_ratio.set_digits(0)
try:
placement_ratio.set_value(get_placement_ratio())
except:
placement_ratio.set_value(20)
placement_ratio.connect("value-changed", set_placement_ratio)
placement_ratio_box = Gtk.HBox()
placement_ratio_box.pack_start(placement_ratio_small, False, False, 15)
placement_ratio_box.pack_start(placement_ratio, True, True, 0)
placement_ratio_box.pack_start(placement_ratio_large, False, False, 15)
placement_mode_label = Gtk.Label("By default, place windows:")
placement_mode_void = Gtk.Label(" ")
placement_mode_label_box = Gtk.HBox()
placement_mode_label_box.pack_start(placement_mode_label, False, False, 0)
placement_mode_label_box.pack_start(placement_mode_void, False, False, 0)
placement_mode_center = Gtk.RadioButton.new_with_label_from_widget(None, "At the center of the screen")
placement_mode_center.connect("toggled", set_placement_mode, "center")
if get_placement_mode() == "center":
placement_mode_center.set_active(True)
placement_mode_mouse = Gtk.RadioButton.new_from_widget(placement_mode_center)
placement_mode_mouse.set_label("Under the pointer")
placement_mode_mouse.connect("toggled", set_placement_mode, "mouse")
if get_placement_mode() == "mouse":
placement_mode_mouse.set_active(True)
placement_mode_box = Gtk.VBox()
placement_mode_box.pack_start(placement_mode_center, False, False, 0)
placement_mode_box.pack_start(placement_mode_mouse, False, False, 0)
placement_mode_wrapper = Gtk.HBox()
placement_mode_wrapper.pack_start(placement_mode_box, True, True, 10)
placement_section = Gtk.VBox()
placement_section.pack_start(placement_ratio_label_box, False, False, 4)
placement_section.pack_start(placement_ratio_box, False, False, 4)
placement_section.pack_start(placement_mode_label_box, False, False, 4)
placement_section.pack_start(placement_mode_wrapper, False, False, 4)
placement_section_box = Gtk.HBox()
placement_section_box.pack_start(placement_section, True, True, 30)
placement_box = Gtk.VBox()
placement_box.pack_start(placement_label_box, False, False, 4)
placement_box.pack_start(placement_section_box, False, False, 4)
page_adv.pack_start(cycling_box, False, False, 10)
page_adv.pack_start(focus_box, False, False, 0)
page_adv.pack_start(accessibility_box, False, False, 10)
page_adv.pack_start(workspaces_box, False, False, 0)
page_adv.pack_start(placement_box, False, False, 10)
##############################
### Main window structure. ###
##############################
mainbook = Gtk.Notebook()
mainbook.append_page(scroll_wman, page_wman_lab)
mainbook.append_page(scroll_point, page_point_lab)
mainbook.append_page(scroll_short, page_short_lab)
mainbook.append_page(scroll_effect, page_effect_lab)
mainbook.append_page(scroll_adv, page_adv_lab)
mainb = Gtk.VBox(10, 10)
mainb.pack_start(mainbook, True, True, 10)
mainb1 = Gtk.HBox(10, 10)
mainb1.pack_start(mainb, True, True, 10)
self.add(mainb1)
if __name__ == '__main__':
    # Instantiate the control-center window, wire window close to the
    # GTK main-loop shutdown, and block in the event loop.
    main_window = ControlCenter()
    main_window.connect("delete-event", Gtk.main_quit)
    main_window.show_all()
    Gtk.main()
|
import setuptools

# Minimal packaging manifest for the gallocloud_utils helper library.
setuptools.setup(
    name="python-gallocloud-utils",
    version='1.0',
    packages=["gallocloud_utils"],
    # Runtime deps: cron parsing, JSON logging, YAML config, filesystem
    # watching and gitignore-style path matching.
    install_requires=["croniter", "python-json-logger", "PyYAML", "watchdog", "pathspec"]
)
|
from openpyxl import load_workbook
from openpyxl import Workbook
from io import BytesIO
import os
class Excel(object):
    """Helper for reading and writing simple multi-sheet xlsx workbooks."""

    def __init__(self):
        pass

    def read_from_excel(self, fp: str = None, fo: object = None) -> list:
        """Read from excel file or file object

        Keyword Arguments:
            fp {str} -- excel file path (default: {None})
            fo {object} -- excel file object (default: {None})

        Raises:
            ValueError -- if neither fp nor fo is provided

        Returns:
            list -- [{'sheet':sheet_name, 'title':[sheet_title], 'data':[sheet_data]}]
        """
        # BUG FIX: previously `wb` was left unbound (NameError) when both
        # fp and fo were None; fail fast with a clear error instead.
        if fp is None and fo is None:
            raise ValueError("either fp (path) or fo (file object) is required")
        file_data = []
        if fp is not None:
            wb = load_workbook(fp)
        if fo is not None:
            # fo takes precedence over fp, matching the original behavior.
            wb = load_workbook(filename=BytesIO(fo.read()))
        for sheet in wb.sheetnames:
            sheet_data = {'sheet': sheet}
            data_sheet = wb[sheet]
            # First row is the header row; the remaining rows are data.
            title = [c.value for c in list(data_sheet.rows)[0]]
            sheet_data['title'] = title
            data = []
            for r in list(data_sheet.rows)[1:]:
                data.append([c.value for c in r])
            sheet_data['data'] = data
            file_data.append(sheet_data)
        return file_data

    def write_to_excel(self, file_name: str, headers: list, data: dict, path: str) -> str:
        """Write content to excel file

        Arguments:
            file_name {str} -- excel file name
            headers {list} -- execl sheet headers (['name', 'age'])
            data {dict} -- execl sheet data ({'stu1':[['will','18'],['linda','19']], 'stu2':[['will','18'],['linda','18']])
            path {list} -- execl file path to save

        Returns:
            str -- path/file_name
        """
        wb = Workbook()
        for sheet_name, sheet_data in data.items():
            new_sheet = wb.create_sheet(sheet_name)
            new_sheet.append(headers)
            for row_data in sheet_data:
                new_sheet.append(row_data)
        # FIX: Workbook.remove_sheet() is deprecated in openpyxl; remove()
        # is the supported API. This drops the default empty "Sheet".
        wb.remove(wb['Sheet'])
        out_file_name = os.path.join(path, file_name)
        wb.save(out_file_name)
        return out_file_name
|
# -*- coding: utf8 -*-
from bson.objectid import ObjectId
from flask import Flask, request
from flask.json import JSONEncoder
import logging
import os
__all__ = ["app"]
# Select the configuration via the PIGROOT environment variable; fail fast
# on unknown values so the app never boots with an ambiguous config.
env = os.environ.get('PIGROOT')
if env not in ['Local']:
    raise EnvironmentError('The environment variable (PIGROOT) is invalid ')
class CustomJSONEncoder(JSONEncoder):
    """Flask JSON encoder that serializes bson ObjectId values as strings."""

    def default(self, obj):
        # ObjectId is not natively JSON-serializable; everything else is
        # delegated to the stock encoder.
        return str(obj) if isinstance(obj, ObjectId) else JSONEncoder.default(self, obj)
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder  # let jsonify handle ObjectId values
app.config.from_object("config.%s" % (env))  # e.g. config.Local

# Logging: send WARNING and above to the shared application handler.
from wanx.base.log import appHandler
app.logger.setLevel(logging.WARNING)
app.logger.addHandler(appHandler)
|
# from xspec import *
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import scipy.special as special
m_max=19   # number of N_evo_<m>.txt snapshot files to process
nu_obs=1   # frequency at which the local spectral slope is measured
def integ(x):
    """Integral of the modified Bessel function K_{5/3}(y) from x to infinity."""
    value, _abserr = integrate.quad(lambda y: special.kv(5/3, y), x, np.inf)
    return value
def F(x):
    """Synchrotron kernel: F(x) = x * integral_x^inf K_{5/3}(y) dy."""
    return x * integ(x)
def P(nu, gamma, B):
    """Power emitted by one electron of Lorentz factor gamma in field B.

    The characteristic frequency is nu_ch = B * gamma^2 (arbitrary units).
    """
    critical = B * gamma ** 2
    return B * F(nu / critical)
def f_nu(nu, B, gamma_arr, N):
    """Total spectrum at nu: per-electron power weighted by N[n] * gamma[n]."""
    return sum(P(nu, gamma_arr[n], B) * N[n] * gamma_arr[n]
               for n in range(len(gamma_arr)))
slope_loc = []  # local spectral slope at nu_obs, one entry per snapshot
f_loc = []      # flux at nu_obs, one entry per snapshot
for m in range(0, m_max):
    gamma_arr = []
    N = []
    # Each N_evo_<m>.txt row holds: gamma, N(gamma), B. Keep occupied bins only.
    # FIX: open the snapshot in a context manager so the handle is closed.
    with open('N_evo_%s.txt' % m, 'r') as N_evo:
        k = 0
        for line in N_evo:
            row = line.strip()
            row = row.split()
            if float(row[1]) != 0:
                gamma_arr.append(float(row[0]))
                N.append(float(row[1]))
                if k == 0:
                    B_arr = (float(row[2]))  # B repeats on every row; read once
                k += 1
    B = B_arr
    print(B)
    nu_arr = np.logspace(5, 11.5, 50)
    plt.yscale('log')
    plt.xscale('log')
    F_nu = []
    for i in range(0, len(nu_arr)):
        F_nu.append(f_nu(nu_arr[i], B, gamma_arr, N))
    plt.plot(nu_arr, F_nu)
    plt.yscale('log')
    plt.xscale('log')
    # First sampled frequency at or above nu_obs.
    count = 0
    while nu_arr[count] < nu_obs:
        count += 1
    # Local log-log slope of F_nu around nu_obs, expressed as 1 - d(logF)/d(log nu).
    slope_loc.append(1-(np.log10(F_nu[count+1])-np.log10(F_nu[count]))/(np.log10(nu_arr[count+1])-np.log10(nu_arr[count])))
    f_loc.append(F_nu[count])
# BUG FIX: `ax1` was never created (NameError); label the current axes instead.
plt.xlabel('$\\nu$ (arbitrary units)', fontsize=28)
plt.ylabel('$F_{\\nu}$', fontsize=28)
plt.show()
|
#!/usr/bin/env python
"""CLI wrapper: merge inputs f1 and f2 into f3 with PandaCore's DuplicateRemover."""
from sys import argv
f1 = argv[1]  # first input file
f2 = argv[2]  # second input file
f3 = argv[3]  # output file
# NOTE(review): this rebinds only this module's `argv` name — it does NOT
# clear sys.argv; if the intent was to hide the args from ROOT, confirm.
argv = []
import PandaCore.Tools.Load as Load
import ROOT as root
Load.Load('Tools','DuplicateRemover')  # load the compiled C++ helper
dr = root.DuplicateRemover()
dr.Merge(f1,f2,f3)
|
import json
import re
_STRING_BASE_NAME = 'string_field_'  # prefix of every raw survey column name

'''
2011 data type
[
    {
        "string_field_0": "United States of America",
        "string_field_2": "40-50",
        "string_field_3": "11",
        "string_field_45": ">$140,000",
        "string_field_30": null,
        "string_field_31": null,
        "string_field_32": null,
        "string_field_33": null,
        "string_field_34": "Python",
        "string_field_35": null,
        "string_field_36": null,
        "string_field_37": null,
        "string_field_38": null,
        "string_field_39": "C",
        "string_field_40": null,
        "string_field_41": null,
        "string_field_42": null,
        "string_field_44": "I enjoy going to work",
        "string_field_43": "Linux",
        "string_field_5": "Fortune 1000 (1,000+)"
    }
]
'''


class Cleaner():
    '''
    cleans data from JSON files to fit business logic
    '''

    # Bucket boundaries (inclusive) used to map raw numbers onto range labels.
    # NOTE: adjacent buckets share a boundary where the low end repeats the
    # previous high end; a value on the shared boundary resolves to the later
    # bucket because the matching loops below do not break on first hit.
    _salary_ranges = {
        1: [0, 20],
        2: [20, 40],
        3: [40, 60],
        4: [60, 80],
        5: [80, 100],
        6: [100, 120],
        7: [120, 140]
    }
    _company_size_range = {
        1: [1, 25],
        2: [26, 100],
        3: [101, 1000]
    }
    _age_range = {
        1: [0, 20],
        2: [21, 25],
        3: [26, 30],
        4: [31, 40],
        5: [41, 50]
    }
    _experience_range = {
        1: [0, 2],
        2: [3, 5],
        3: [6, 10],
        4: [10, 15],
        5: [15, 20],
        6: [20, 90]
    }

    def __init__(self, data_fields, satisfaction_map, path_to_file):
        '''
        @data_fields: { age': 2, 'experience': 3, 'region': 0, 'salary': 100,
                        'programming_languages': [56, 69], 'satisfaction': 99,
                        'gender': None, 'os': 81, 'company_size': 5
                      }
        @satisfaction_map: {enjoy: 5, hurts: 4, not happy: 1, bills: 3}
        @path_to_file: /path
        '''
        self._data_fields = data_fields
        self._satisfaction_map = satisfaction_map
        self._path_to_file = path_to_file
        self._raw_data = None  # populated lazily by _read_file()

    def _read_file(self):
        # Load the raw survey export: a JSON list of per-respondent dicts.
        with open(self._path_to_file) as f:
            self._raw_data = json.load(f)

    def _create_list_from_field(self, data, field):
        '''Collect the valid values for `field`, whose entry in data_fields
        is either [i] (single column) or [start, end] (inclusive span).'''
        result = []
        pl_indexes = self._data_fields.get(field)
        if len(pl_indexes) == 1:
            # Single column: always return a one-element list (None if invalid).
            value = data.get(f'{_STRING_BASE_NAME}{pl_indexes[0]}')
            is_valid = self._is_valid_value(value)
            if not is_valid:
                value = None
            result.append(value)
            return result
        # Column span: keep only the present, valid values.
        for i in range(pl_indexes[0], pl_indexes[1] + 1):
            value = data.get(f'{_STRING_BASE_NAME}{i}')
            is_valid = self._is_valid_value(value)
            if value is not None and is_valid:
                result.append(value)
        return result

    def _extract_programming_languages(self, data):
        '''List of programming languages the respondent selected.'''
        return self._create_list_from_field(data, 'programming_languages')

    def _extract_os(self, data):
        '''List of operating systems the respondent selected.'''
        return self._create_list_from_field(data, 'os')

    def _extract_upper_limit_num_from_string(self, value):
        # Extracts the last (upper-limit) number in a range string such as
        # "$40,000 - $60,000"; returns None if no digits are present.
        value = value.replace(',', '')
        number = re.findall(r'\d+', value)
        if len(number) == 0:
            return None
        number = number[-1:][0]
        number = float(number)
        return number

    def _get_satisfaction(self, data):
        '''Map the free-text satisfaction answer onto the numeric scale.'''
        field = self._data_fields.get('satisfaction')
        satisfaction_answer = data.get(f'{_STRING_BASE_NAME}{field}')
        is_valid = self._is_valid_value(satisfaction_answer)
        if satisfaction_answer is None or not is_valid:
            return None
        # First satisfaction_map key contained in the answer wins.
        for (key, value) in self._satisfaction_map.items():
            if key in satisfaction_answer:
                return value
        return None

    def _extract_range_operator(self, value):
        # Leading '>' or '<' marks an open-ended range; anything else -> None.
        if value is None:
            return value
        salary_operator = value[:1]
        if salary_operator != '>' and salary_operator != '<':
            salary_operator = None
        return salary_operator

    def _extract_raw_number_from_range(self, value):
        '''Upper-limit number of a range string, nudged past an open bound
        ('>' adds 1, '<' subtracts 1) so bucket matching lands outside it.'''
        range_operator = self._extract_range_operator(value)
        raw_number = self._extract_upper_limit_num_from_string(value)
        if raw_number is None:
            return None
        if range_operator == '>':
            raw_number += 1
        elif range_operator == '<':
            raw_number -= 1
        return raw_number

    def _get_salary_range(self, data):
        '''Salary bucket label in thousands, e.g. '40-60', or '>140k'.'''
        field = self._data_fields.get('salary')
        value = data.get(f'{_STRING_BASE_NAME}{field}')
        is_valid = self._is_valid_value(value)
        if value is None or not is_valid:
            return None
        salary = self._extract_raw_number_from_range(value)
        if salary is None:
            return None
        salary_range = '>140k'  # default when no bucket matches
        salary /= 1000
        for ran in self._salary_ranges.values():
            if salary in range(ran[0], ran[1] + 1):
                salary_range = f'{ran[0]}-{ran[1]}'
        return salary_range

    def _get_company_size_range(self, data):
        '''Company-size bucket label, e.g. '26-100', or '>1000'.'''
        field = self._data_fields.get('company_size')
        value = data.get(f'{_STRING_BASE_NAME}{field}')
        is_valid = self._is_valid_value(value)
        # '/' answers (e.g. "n/a") are rejected outright.
        if value is None or not is_valid or '/' in value:
            return None
        value = self._extract_raw_number_from_range(value)
        if value is None:
            return None
        company_size_range = '>1000'
        for ran in self._company_size_range.values():
            if value in range(ran[0], ran[1] + 1):
                company_size_range = f'{ran[0]}-{ran[1]}'
        return company_size_range

    def _get_age_range(self, data):
        '''Age bucket label, e.g. '21-25', or '>50'.'''
        field = self._data_fields.get('age')
        value = data.get(f'{_STRING_BASE_NAME}{field}')
        is_valid = self._is_valid_value(value)
        if value is None or not is_valid:
            return None
        age_range = '>50'
        age = self._extract_raw_number_from_range(value)
        if age is None:
            return None
        for ran in self._age_range.values():
            if age in range(ran[0], ran[1] + 1):
                age_range = f'{ran[0]}-{ran[1]}'
        return age_range

    def _get_experience_range(self, data):
        '''Experience bucket label, e.g. '6-10'; None when nothing matches.'''
        field = self._data_fields.get('experience')
        value = data.get(f'{_STRING_BASE_NAME}{field}')
        is_valid = self._is_valid_value(value)
        if value is None or not is_valid:
            return None
        experience_range = None
        experience = self._extract_raw_number_from_range(value)
        if experience is None:
            return None
        for ran in self._experience_range.values():
            if experience in range(ran[0], ran[1] + 1):
                experience_range = f'{ran[0]}-{ran[1]}'
        return experience_range

    def _get_raw_value_from_data(self, data, key):
        '''Raw (unbucketed) value for `key`, or None when missing/invalid.'''
        key_index = self._data_fields.get(key, None)
        if key_index is None:
            return None
        value = data.get(f'{_STRING_BASE_NAME}{key_index}')
        is_valid = self._is_valid_value(value)
        if not is_valid:
            value = None
        return value

    def _is_valid_value(self, value):
        '''Reject None and placeholder answers ('?', 'please...', 'response...').'''
        if value is None:
            return False
        is_valid = True
        is_valid = is_valid and ('?' not in value)
        is_valid = is_valid and ('please' not in value.lower())
        is_valid = is_valid and ('response' not in value.lower())
        return is_valid

    def _extract_values(self):
        '''Build the list of cleaned per-respondent dicts.'''
        self._read_file()
        results = []
        for data in self._raw_data:
            fields = {}
            fields.update({'programming_languages': self._extract_programming_languages(data)})
            fields.update({'os': self._extract_os(data)})
            fields.update({'salary_range': self._get_salary_range(data)})
            fields.update({'satisfaction': self._get_satisfaction(data)})
            fields.update({'company_size_range': self._get_company_size_range(data)})
            fields.update({'age_range': self._get_age_range(data)})
            fields.update({'experience_range': self._get_experience_range(data)})
            # BUG FIX: 'gender' was extracted twice (duplicate update removed).
            fields.update({'gender': self._get_raw_value_from_data(data, 'gender')})
            fields.update({'region': self._get_raw_value_from_data(data, 'region')})
            results.append(fields)
        return results

    def clean_and_save(self, file_path_to_save):
        '''Clean the configured input file and write the result as pretty JSON.'''
        results = self._extract_values()
        with open(file_path_to_save, 'w') as outfile:
            json.dump(results, outfile, indent=4, sort_keys=True)
# Field-index map and satisfaction phrase scale for the 2015 survey export.
kwargs = {'data_fields': {'age': 1, 'experience': 4, 'region': 0, 'salary': 105,
                          'programming_languages': [8, 50], 'satisfaction':109,
                          'gender': 2, 'os': [6], 'company_size': None},
          'satisfaction_map': {'Love': 5, 'somewhat satisfied': 4, 'Hate': 1, 'somewhat dissatisfied': 2,'neither': 3, 'Other': None},
          'path_to_file': 'raw_data/2015.json'
          }

# Clean the 2015 raw export and write the result (runs at import time).
cleaner = Cleaner(**kwargs)
cleaner.clean_and_save('clean_files/2015.json')
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import configparser #To read the profiles.
import os #To join paths.
import pytest
## Makes sure that the variants for the Ultimaker 3 Extended are exactly the
# same as for the Ultimaker 3.
#
# Once we have specialised profiles or a mechanism to inherit variants too, we
# may remove this test and have different profiles for the extended where
# needed, but until then we should keep them the same. It's happened all too
# often that we updated the variants for the UM3 but forgot about the UM3E.
@pytest.mark.parametrize("um3_file, um3e_file", [
    #List the corresponding files below.
    ("ultimaker3_aa0.25.inst.cfg", "ultimaker3_extended_aa0.25.inst.cfg"),
    ("ultimaker3_aa0.8.inst.cfg", "ultimaker3_extended_aa0.8.inst.cfg"),
    ("ultimaker3_aa04.inst.cfg", "ultimaker3_extended_aa04.inst.cfg"),
    ("ultimaker3_bb0.8.inst.cfg", "ultimaker3_extended_bb0.8.inst.cfg"),
    ("ultimaker3_bb04.inst.cfg", "ultimaker3_extended_bb04.inst.cfg")
])
def test_ultimaker3extended_variants(um3_file, um3e_file):
    """The UM3E variant must define exactly the same setting keys as the UM3 one."""
    directory = os.path.join(os.path.dirname(__file__), "..", "resources", "variants") #TODO: Hardcoded path relative to this test file.
    # BUG FIX: the profile files were opened without ever being closed;
    # read them inside context managers so the handles are released.
    um3 = configparser.ConfigParser()
    with open(os.path.join(directory, um3_file), encoding = "utf-8") as f:
        um3.read_file(f)
    um3e = configparser.ConfigParser()
    with open(os.path.join(directory, um3e_file), encoding = "utf-8") as f:
        um3e.read_file(f)
    # Compare the key lists of the [values] sections.
    assert [value for value in um3["values"]] == [value for value in um3e["values"]]
|
"""
Given the fixed points fing the equation
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def f(x, t):
    """RHS of the autonomous ODE dx/dt = x(x+1)(x-2); `t` is unused."""
    return x * (x + 1) * (x - 2)
# Sample intervals split at the fixed points x = -1, 0, 2 of f.
x0 = np.linspace(-3.,-1.)
x1 = np.linspace(-1.,0)
x2 = np.linspace(0,2)
x3 = np.linspace(2,3)
xn = np.linspace(-3.,3)
x_int = np.array([-1.1,0.1,2.9])  # initial conditions near each fixed point
# NOTE(review): X is integrated but never plotted below — confirm it is needed.
X = odeint(f, x_int, xn )

# Phase portrait: plot f(x) on each interval (f ignores its second argument).
plt.figure()
plt.plot(x0, f(x0, x_int),
         x1, f(x1, x_int),
         x2, f(x2, x_int),
         x3, f(x3, x_int))
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$\dot{x}$')
plt.axhline()  # zero line: crossings mark the fixed points
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 14:40:06 2017
@author: Jonas Lindemann
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
N = 100  # grid resolution per axis

# Sample z = sin(0.5*x*y) on an N x N grid over [-6, 6]^2.
x = np.linspace(-6.0, 6.0, N)
y = np.linspace(-6.0, 6.0, N)
X, Y = np.meshgrid(x, y)
z = np.sin(0.5*X*Y)

#plt.ion()
fig = plt.figure()
# BUG FIX: Figure.gca(projection='3d') was deprecated and removed in
# Matplotlib 3.6; add_subplot is the supported way to get 3D axes.
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, z, cmap=plt.cm.RdBu)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_zlim(-5, 5)
plt.show()
#plt.draw()
from cards import *
import random
#play poker
def dealHand(self):
    """Deal a two-card hand by drawing twice from the top (end) of the deck."""
    first_card = self.cards.pop()
    second_card = self.cards.pop()
    return (first_card, second_card)
# Monkey-patch the two-card deal onto the imported Deck class.
setattr(Deck, "dealHand", dealHand)
class Player:
    """A poker player: chip stack, current wager, and (once dealt) a 2-card hand."""

    def __init__(self, chips, name, human):
        self.chips = chips
        self.currentBet = 0
        self.name = name
        self.human = human  # True for a human player, False for a CPU

    def printPlayer(self):
        """Print the player's name, stack and human/CPU flag."""
        print("playerName:", self.name)
        print("Chips:", self.chips)
        print("Human:", self.human)
        print()

    def showHand(self):
        """Print both hole cards (requires .hand to have been dealt)."""
        self.hand[0].printCard()
        self.hand[1].printCard()
        print()

    #UNTESTED
    def bet(self, amount):
        """Move `amount` chips from the stack into the current bet.

        Rejects (message printed, no state change) bets that are
        non-positive or larger than the stack.
        """
        # BUG FIX: the original used the bitwise '&' between comparisons;
        # precedence made it parse as `chips >= (amount & amount) > 0`, which
        # happens to work for ints but raises TypeError for float amounts.
        if self.chips >= amount and amount > 0:
            self.chips = self.chips - amount
            self.currentBet = amount
        else:
            print("not a valid bet, no action taken.")
            print("Player chips:", self.chips, "Amount bet: ", amount)

    def hasCard(self, card):
        """Return True if either hole card's name equals `card`."""
        print("Looking at hand")
        if self.hand[0].name == card:
            return True
        elif self.hand[1].name == card:
            return True
        else:
            return False

    def checkValue(self):
        """Return the sum of the two hole-card values."""
        print("Looking at card values")
        return self.hand[0].value + self.hand[1].value

    def checkSuits(self):
        """Return True (and announce it) if both hole cards share a suit."""
        if self.hand[0].suit == self.hand[1].suit:
            print("Hand is suited!")
            return True
        else:
            print("Hand is not suited :(")
            return False

    def checkPair(self):
        """Return True if the two hole cards have equal value (pocket pair)."""
        if self.hand[0].value == self.hand[1].value:
            return True
        else:
            return False

    def evaluateHandStrength(self):
        """Crude opening heuristic: True for any pocket pair, else None."""
        #open any pocket pair
        if self.checkPair():
            return True
class Seat:
    """One position at the table, tracking occupancy and blind/button markers."""

    def __init__(self, name):
        self.name = name
        self.occupied = False  # a Player is attached via Table.addPlayer
        self.bb = False   # big blind
        self.sb = False   # small blind
        self.btn = False  # dealer button

    def printSeat(self):
        """Print the seat name plus whichever blind/button marker it holds."""
        print("seatName:", self.name)
        if self.bb:
            print("BB")
        elif self.sb:
            print("SB + BTN" if self.btn else "SB")
        elif self.btn:
            print("BTN")
class Table:
    """The poker table: up to 9 seats, blind amounts, and a shuffled deck."""

    def __init__(self, players, blinds):
        self.maxSize = 9
        self.currentSize = 0
        self.bb = blinds[1]  # big blind amount
        self.sb = blinds[0]  # small blind amount
        self.deck = Deck()
        self.deck.shuffle()
        self.seats = []
        for x in range(self.maxSize):
            self.seats.append(Seat("seat"+str(x)))
        # Seat every player, up to the table's capacity.
        for x in range(len(players)):
            if self.maxSize > self.currentSize:
                self.addPlayer(players[x])

    def printTable(self):
        """Print every occupied seat plus the blinds and table size."""
        for x in range(self.currentSize):
            self.seats[x].printSeat()
        print("Blinds:", self.sb, "/", self.bb)
        print("Current size:", self.currentSize, "Max Size:", self.maxSize)
        print()

    def addPlayer(self, player):
        """Seat `player` in the first unoccupied seat; return True on success."""
        #will need more complicated checks in future to ensure players can't skip bb
        for x in range(self.maxSize):
            if self.seats[x].occupied == False:
                self.seats[x].player = player
                self.seats[x].occupied = True
                self.currentSize+=1
                return True

    #UNTESTED
    def removePlayer(self, playerName):
        """Free the seat of the named player; return True if they were found."""
        for x in range(self.currentSize):
            if self.seats[x].player.name == playerName:
                # BUG FIX: the original referenced the undefined names
                # `player` and `false` (NameError). Delete the attribute by
                # its string name and use the False constant.
                delattr(self.seats[x], 'player')
                self.seats[x].occupied = False
                return True
        print("Could not find a player named ", playerName)
        return False

    def determineBlinds(self):
        """Assign BB/SB/BTN for the first hand based on the player count."""
        if self.currentSize == 1:
            print("Need more players to play! Try adding a CPU")
        elif self.currentSize == 2:
            # Heads-up: the small blind also holds the button.
            self.seats[0].sb = True
            self.seats[0].btn = True
            self.seats[1].bb = True
        elif self.currentSize > 2:
            self.seats[0].btn = True
            self.seats[1].sb = True
            self.seats[2].bb = True

    def rotateBigBlind(self):
        """Move the big blind one seat clockwise (wrapping around)."""
        for x in range(len(self.seats)):
            if self.seats[x].bb == True:
                self.seats[x].printSeat()
                self.seats[x].bb = False
                self.seats[(x+1) % len(self.seats)].bb = True
                break

    def rotateSmallBlind(self):
        """Move the small blind one seat clockwise (wrapping around)."""
        for x in range(len(self.seats)):
            if self.seats[x].sb:
                self.seats[x].sb = False
                self.seats[(x+1) % len(self.seats)].sb = True
                break

    def rotateButton(self):
        """Move the dealer button one seat clockwise (wrapping around)."""
        for x in range(len(self.seats)):
            if self.seats[x].btn:
                self.seats[x].btn = False
                self.seats[(x+1) % len(self.seats)].btn = True
                break

    def rotateBlinds(self):
        """Advance big blind, small blind and button for the next hand."""
        print()
        self.rotateBigBlind()
        self.rotateSmallBlind()
        self.rotateButton()
def setupGame():
    """Build a table from (currently hard-coded) player counts and stacks.

    Prompts for each human player's name via input(); blinds are derived
    from the starting stack (sb = chips/100, bb = chips/50).
    """
    #create table and players from user input
    #humans = input("Please enter how many human players\n")
    #cpus = input("Please enter how many computer players\n")
    #chips = input("Please enter how many starting chips per player\n")
    humans = 1
    cpus = 3
    chips = 100
    players = []
    for x in range(int(humans)):
        string = "Please enter player " + str(x+1) + "'s name\n"
        name = input(string)
        players.append(Player(chips, name, True))
        print("added human: ", name)
    for x in range(int(cpus)):
        players.append(Player(chips, "cpu"+str(x), False))
        print("added cpu: ", "cpu"+str(x))
    table = Table(players, (int(chips)/100, int(chips)/50))
    table.determineBlinds()
    #still need to set which seat is bb,sb, and btn
    return table
def playGame():
    """Set up the table, deal every seated player a hand, then advance blinds."""
    table = setupGame()
    for x in range(len(table.seats)):
        if table.seats[x].occupied:
            table.seats[x].player.hand = table.deck.dealHand()
            #print humans' hands (but not CPUs')
            if table.seats[x].player.human:
                table.seats[x].player.showHand()
    table.rotateBlinds()
    table.printTable()
    #wagering can start here

playGame()  # NOTE(review): runs at import time; consider a __main__ guard
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 09:15:08 2021
@author: carolyndavis
"""
# =============================================================================
# SCALING EXERCISES
# =============================================================================
#IMPORTS USED FOR LESSON EXERCISES:
import pandas as pd
import numpy as np
import env
import wrangler2
import matplotlib.pyplot as plt
import sklearn.preprocessing
from sklearn.model_selection import train_test_split
# Load the telco data, dedupe on customer_id, and keep the numeric columns.
telco_df = wrangler2.get_telco_data()
telco_df = telco_df.drop_duplicates(subset=["customer_id"]).reset_index(drop=True)
#drop dupes bc of faulty SQL query
telco_df.head(10)
#
telco_df.columns
telco_df.info()

quant_df = telco_df[['customer_id', 'tenure', 'monthly_charges', 'total_charges']].copy()
#this helps with later data manipulation
quant_df

# split the data in train, validate and test (60/20/20 overall)
train, test = train_test_split(quant_df, test_size = 0.2, random_state = 123)
train, validate = train_test_split(train, test_size = 0.25, random_state = 123)

#Looking at the shape:
train.shape, validate.shape, test.shape
#output: ((4225, 4), (1409, 4), (1409, 4))
train.head()
# =============================================================================
# 1.)Apply the scalers we talked about in this lesson to your data and visualize
# the results for the unscaled and scaled distribution .
# =============================================================================
#DEFINE THE THANG
scaler = sklearn.preprocessing.MinMaxScaler()

# Fit the thing (learn min/max from the training split only)
scaler.fit(train[['monthly_charges']])

#transform
scaled_month = scaler.transform(train[['monthly_charges']])

# single step to fit and transform
scaled_month = scaler.fit_transform(train[['monthly_charges']])

#Add a new scaled col to original train df
train['scaled_month_charges'] = scaled_month
train.head()

####### NOW VISUALIZE #########
#plotting the monthly_charges and the scaled monthly charges...
plt.scatter(train.monthly_charges, scaled_month)
plt.xlabel('Monthly_Charges')
plt.ylabel('Scaled_Monthly_Charges')

##### Plotting the Distribution of Monthly Charges ###########
plt.hist(train.monthly_charges)

###Plotting distribution of monthly charges with scaled data
plt.hist(scaled_month)

# Three-panel figure: scatter of original vs scaled, plus both histograms.
fig = plt.figure(figsize = (12,6))
gs = plt.GridSpec(2,2)
ax1 = fig.add_subplot(gs[0, :])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[1,1])
ax1.scatter(train.monthly_charges, scaled_month)
ax1.set(xlabel = 'Monthly_Charges', ylabel = 'Scaled_Monthly_Charges', title = 'Min/Max Scaler')
ax2.hist(train.monthly_charges)
ax2.set(title = 'Original')
ax3.hist(scaled_month)
ax3.set(title = 'Scaled_Charges')
plt.tight_layout();
####################### USING MIN/MAX SCALER ####################################
def visualize_scaled_date(scaler, scaler_name, feature, df=None):
    """Fit/transform `feature` with `scaler` and plot original vs scaled.

    Args:
        scaler: an (unfitted) sklearn scaler instance.
        scaler_name: title for the scatter panel.
        feature: column name to scale.
        df: DataFrame to pull the column from. Defaults to the module-level
            `train` split, preserving the original behavior; the parameter
            generalizes the helper to any split.
    """
    if df is None:
        df = train  # original behavior: use the global training split
    scaled = scaler.fit_transform(df[[feature]])
    fig = plt.figure(figsize = (12,6))
    gs = plt.GridSpec(2,2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1,0])
    ax3 = fig.add_subplot(gs[1,1])
    ax1.scatter(df[[feature]], scaled)
    ax1.set(xlabel = feature, ylabel = 'Scaled_' + feature, title = scaler_name)
    ax2.hist(df[[feature]])
    ax2.set(title = 'Original')
    ax3.hist(scaled)
    ax3.set(title = 'Scaled_Charges')
    plt.tight_layout();
# use the function created above for monthly charges
visualize_scaled_date(sklearn.preprocessing.MinMaxScaler(), 'Min_Max_Scaler', 'monthly_charges')

############## VISUALIZE: Scaling Data for Tenure:
visualize_scaled_date(sklearn.preprocessing.MinMaxScaler(), 'Min_Max_Scaler', 'tenure')

###################### VISUALIZE WITH STANDARD SCALER ####################
# visualize scaling for monthly charges
visualize_scaled_date(sklearn.preprocessing.StandardScaler(), 'Standard_Scaler', 'monthly_charges')

# visualize scaling for tenure
visualize_scaled_date(sklearn.preprocessing.StandardScaler(), 'Standard_Scaler', 'tenure')

################### VISUALIZE WITH ROBUST SCALER ###########################
# BUG FIX: this call uses a RobustScaler but was labeled 'Standard_Scaler';
# correct the plot title so the figure matches the scaler actually applied.
visualize_scaled_date(sklearn.preprocessing.RobustScaler(), 'Robust_Scaler', 'monthly_charges')

#################### STANDARD SCALER: Tenure
visualize_scaled_date(sklearn.preprocessing.StandardScaler(), 'Standard_Scaler', 'tenure')
# =============================================================================
# 2.)Apply the .inverse_transform method to your scaled data. Is the resulting
# dataset the exact same as the original data?
# =============================================================================
train.head()

scaler = sklearn.preprocessing.MinMaxScaler()

#fitting the scaled data to the train set
scaled = scaler.fit_transform(train[['monthly_charges', 'tenure']])
scaled
#output: array([[0.61791045, 0.65277778],
# [0.65771144, 0.68055556],
# [0.85273632, 0.75 ],
# ...,
# [0.87412935, 0.29166667],
# [0.66268657, 0.55555556],
# [0.34726368, 0.29166667]])

#make it into a df for manipulation:
scaled_df = pd.DataFrame(scaled, index = train.index, columns = ['monthly_charges', 'tenure'])
scaled_df.head()
# #output: monthly_charges tenure
# 440 0.617910 0.652778
# 67 0.657711 0.680556
# 600 0.852736 0.750000
# 4883 0.662189 0.013889
# 1258 0.023881 0.666667

# USING THE INVERSE TRANSFORM METHOD #########
# inverse_transform maps the scaled values back to the original units.
scaler.inverse_transform(scaled_df)
# #output array([[ 80.35, 47. ],
# [ 84.35, 49. ],
# [103.95, 54. ],
# ...,
# [106.1 , 21. ],
# [ 84.85, 40. ],
# [ 53.15, 21. ]])

#inverse produced array, changing to DataFrame:
unscaled_df = pd.DataFrame(scaler.inverse_transform(scaled), index = train.index, columns = ['monthly_charges', 'tenure'])
unscaled_df.head()
#output:
# monthly_charges tenure
# 440 80.35 47.0
# 67 84.35 49.0
# 600 103.95 54.0
# 4883 84.80 1.0
# 1258 20.65 48.0

# =============================================================================
# 3.)Read the documentation for sklearn's QuantileTransformer. Use normal for the
# output_distribution and apply this scaler to your data. Visualize the result of
# your data scaling.
# =============================================================================

#visualize monthly charges quantile transformation with /'normal'/ output
visualize_scaled_date(sklearn.preprocessing.QuantileTransformer(output_distribution='normal'), 'Quantile Scaler', 'monthly_charges')

####################### VIZ FOR TENURE
visualize_scaled_date(sklearn.preprocessing.QuantileTransformer(output_distribution='normal'), 'Quantile Scaler', 'tenure')

# =============================================================================
# 4.)Use the QuantileTransformer, but omit the output_distribution argument.
# Visualize your results. What do you notice?
# =============================================================================
####################### VIZ FOR MONTHLY_CHARGES
# (default output_distribution is 'uniform')
visualize_scaled_date(sklearn.preprocessing.QuantileTransformer(), 'Quantile Scaler', 'monthly_charges')
# =============================================================================
# SCALING TAKEAWAYS:
# =============================================================================
# -HANDLE the outlier first unlessyou establish to use nonlinear
# -MIN/MAX scaler transforms each valuue in the col within desireable range of (0,1)
# (USE THIS AS YOUR FIRST CHOICE TO SCALE. PRESERVES SHAPE OF DISTRIBUTION..NO DISTORT)
# -STANDARD SCALER transforms each value in the col to range mean of 0 and std of 1
# (USE ONLY IF YOU KNOW DATA IS NORMALLY DISTRIBUTED)
# -ROBUST SCALER: Have outlier you dont want to discard. USE ROBUST
# (ALTERNATIVELY remove the outliers and use the two scaling methods above)
# GOOD PRACTICE: visualize the distribution of vars after scaling.. make sure tranformation actually happened
# USE NONLINEAR scalers when you realy have to (quanitle transformer, when u must have normally dist data)
# =============================================================================
# 5.)Based on the work you've done, choose a scaling method for your dataset.
# Write a function within your prepare.py that accepts as input the train, validate,
# and test data splits, and returns the scaled versions of each. Be sure to only learn
# the parameters for scaling from your training data!
# =============================================================================
def Standard_Scaler(X_train, X_validate, X_test):
    """
    Takes in X_train, X_validate and X_test dfs with numeric values only
    Returns scaler, X_train_scaled, X_validate_scaled, X_test_scaled dfs
    """
    # Learn the scaling parameters from the training split only, then
    # apply the same transform to all three splits.
    scaler = sklearn.preprocessing.StandardScaler().fit(X_train)
    scaled_splits = [
        pd.DataFrame(scaler.transform(split), index = split.index, columns = split.columns)
        for split in (X_train, X_validate, X_test)
    ]
    return (scaler, *scaled_splits)
def Min_Max_Scaler(X_train, X_validate, X_test):
    """
    Takes in X_train, X_validate and X_test dfs with numeric values only
    Returns scaler, X_train_scaled, X_validate_scaled, X_test_scaled dfs
    """
    # Fit min/max on the training split only; reuse it for every split.
    scaler = sklearn.preprocessing.MinMaxScaler().fit(X_train)

    def _rescale(split):
        # Preserve each split's index and column labels in the scaled frame.
        return pd.DataFrame(scaler.transform(split), index = split.index, columns = split.columns)

    return scaler, _rescale(X_train), _rescale(X_validate), _rescale(X_test)
X_train = train[['monthly_charges', 'tenure']]
# BUG FIX: the validate feature frame was built from `test`; use the
# `validate` split so validation is not performed on test data.
X_validate = validate[['monthly_charges', 'tenure']]
X_test = test[['monthly_charges', 'tenure']]

# Use the Standard_Scaler Function defined above
scaler, X_train_scaled, X_validate_scaled, X_test_scaled = Standard_Scaler(X_train, X_validate, X_test)

X_train_scaled.head()
# output:
# monthly_charges tenure
# 440 0.526056 0.599921
# 67 0.658841 0.681616
# 600 1.309488 0.885854
# 4883 0.673779 -1.279063
# 1258 -1.455763 0.640769
import sys
input = sys.stdin.readline  # NOTE: shadows the builtin for faster reads

# For each CCTV type, every rotation's list of covered directions.
# Direction codes: 0=left, 1=up, 2=right, 3=down (see mapControl).
TYPE = { 1 : [[0], [1], [2], [3]], 2 : [[0,2], [1,3]], 3:[[0,1], [1,2], [2,3], [0,3]], 4: [[0,1,2], [1,2,3], [2,3,0], [3,0,1]], 5:[[0,1,2,3]]}
CCTV_INFO = []  # [type, x, y] for every camera found in the grid
MAP = []
ans = 10e7  # best (minimum) unwatched-cell count so far; sentinel is far above any grid size

n, m = map(int,input().split())  # n rows, m columns
for _ in range(n) :
    tmpM = list(map(int,input().split()))
    MAP.append(tmpM)
    for idx, tmp in enumerate(tmpM) :
        if tmp in [1,2,3,4,5] :  # cell values 1..5 are cameras
            CCTV_INFO.append([tmp, idx, _])
def mapControl(commands, targetMap, CCTV_X, CCTV_Y) :
    """Mark every cell watched by the camera at (CCTV_X, CCTV_Y) in place.

    `commands` lists the directions to sweep (0=left, 1=up, 2=right, 3=down).
    Each ray marks empty cells (0) as watched (-1) and stops only at a wall
    (6), so it passes through other cameras. Grid bounds come from the
    module-level globals n (rows) and m (columns).
    """
    for command in commands :
        cnt = 1
        if command == 0 : #LEFT
            while CCTV_X - cnt >= 0 and targetMap[CCTV_Y][CCTV_X - cnt] != 6 :
                if targetMap[CCTV_Y][CCTV_X - cnt] == 0 :
                    targetMap[CCTV_Y][CCTV_X - cnt] = -1
                cnt += 1
        elif command == 1 : #UP
            while CCTV_Y - cnt >= 0 and targetMap[CCTV_Y-cnt][CCTV_X] != 6 :
                if targetMap[CCTV_Y-cnt][CCTV_X] == 0 :
                    targetMap[CCTV_Y-cnt][CCTV_X] = -1
                cnt += 1
        elif command == 2 : #RIGHT
            while CCTV_X + cnt < m and targetMap[CCTV_Y][CCTV_X + cnt] != 6 :
                if targetMap[CCTV_Y][CCTV_X + cnt] == 0 :
                    targetMap[CCTV_Y][CCTV_X + cnt] = -1
                cnt += 1
        elif command == 3 : #DOWN
            while CCTV_Y + cnt < n and targetMap[CCTV_Y + cnt][CCTV_X] != 6 :
                if targetMap[CCTV_Y + cnt][CCTV_X] == 0 :
                    targetMap[CCTV_Y + cnt][CCTV_X] = -1
                cnt += 1
    '''
    for line in targetMap :
        print(line)
    print('*******')
    '''
def getMin(MAP) :
    """Update the global best with the number of unwatched (0) cells in MAP."""
    global ans
    uncovered = sum(row.count(0) for row in MAP)
    if uncovered < ans :
        ans = uncovered
def dfs(depth, MAP) :
    """Try every rotation of each camera depth-first and record the best.

    MAP is snapshotted before a rotation mutates it, and the local name is
    rebound to that snapshot afterwards so sibling rotations start from the
    unmarked grid.
    """
    if depth == len(CCTV_INFO) :
        # All cameras placed: score this configuration.
        getMin(MAP)
        return
    for i in TYPE[CCTV_INFO[depth][0]] :
        tmpMAP = deepCopy(MAP)  # snapshot before mapControl mutates MAP
        mapControl(i, MAP, CCTV_INFO[depth][1], CCTV_INFO[depth][2])
        dfs(depth+1, MAP)
        MAP = tmpMAP  # restore for the next rotation choice
def deepCopy(copy) :
    """Return a row-wise (one-level-deep) copy of a 2-D list."""
    return [list(row) for row in copy]
# Explore all camera rotations; with no cameras, just count the zeros.
if len(CCTV_INFO) != 0 :
    dfs(0, MAP)
else :
    tmpMin = 0
    for line in MAP :
        tmpMin += line.count(0)
    ans = tmpMin
print(ans)
#!/bin/python3
# Print exponents 0..5 and the matching powers of two in two right-aligned
# columns. (Replaces six copy-pasted print statements with one loop; the
# output is byte-identical.)
for exponent in range(6):
    print('{0:>2}{1:>16}'.format(exponent, 2**exponent))

##
## End of file..
print("SUM OF THE SERIES")
print()
def sum_series(num):
    """Print and return the partial sum S(num) = sum_{i=1..num} i / i!.

    The running factorial is built incrementally so each term costs O(1).
    Returns 0 for num <= 0 (empty sum).
    """
    res = 0
    fact = 1
    for i in range(1, num+1):
        fact = fact*i          # fact == i!
        res = res + (i / fact) # add i / i!
    print ("Sum of this series is:", res)
    # Improvement: also return the value so callers can use it
    # (previously the function only printed and returned None).
    return res
n = int(input("Enter the value of N:"))
sum_series(n) |
from dipy.tracking import utils
from dipy.tracking.streamline import Streamlines
from weighted_tracts import *
subj = all_subj_folders
names = all_subj_names
for s, n in zip(subj[27::], names[27::]):
folder_name = subj_folder + s
dir_name = folder_name + '\streamlines'
gtab, data, affine, labels, white_matter, nii_file, bvec_file = load_dwi_files(folder_name)
tract_path = f'{dir_name}{n}_wholebrain_4d_labmask.trk'
streamlines = load_ft(tract_path, nii_file)
file_list = os.listdir(folder_name)
for file in file_list:
if 'cc_mask' in file and file.endswith('.nii'):
mask_file = os.path.join(folder_name, file)
mask_img = nib.load(mask_file)
cc_mask_mat = mask_img.get_fdata()
mask_include = cc_mask_mat == 1
break
masked_streamlines = utils.target(streamlines, affine, mask_include)
#masked_streamlines = utils.target(masked_streamlines, affine, mask_exclude, include=False)
masked_streamlines = Streamlines(masked_streamlines)
weighting_streamlines(folder_name, streamlines, bvec_file, show=True, weight_by='2_2_AxPasi7',
scale=[3, 10], hue=[0.25, -0.05], saturation=[0.1, 1.0], fig_type='cc')
save_ft(folder_name, n, masked_streamlines, nii_file, file_name='_' + mask_type + '.trk')
|
import sys
import RPi.GPIO as GPIO
import Adafruit_DHT
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import subprocess
# OLED reset pin; the 128x32 helper accepts rst=None.
RST = None
# NOTE(review): DC/SPI constants belong to the SPI constructor, which is not
# used below; pin 23 also appears in all_lights — confirm there is no conflict.
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# 128x32 SSD1306 OLED; only rst is passed, so the library default interface
# is used — confirm against the actual wiring.
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
disp.begin()
disp.clear()
disp.display()
width = disp.width
height = disp.height
# Negative padding lets the default font use the full panel height.
padding = -2
top = padding
bottom = height - padding
x = 0
# 1-bit image buffer that the display is driven from.
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Indicator LED pins (BCM numbering).
all_lights = [18,22,23]
GPIO.setup(all_lights,GPIO.OUT)
font = ImageFont.load_default()
def light_on(pin):
    # Drive the given LED pin high (on).
    GPIO.output(pin,GPIO.HIGH)
def light_off(pin):
    # Drive the given LED pin low (off).
    GPIO.output(pin,GPIO.LOW)
# Start with every LED off; no LED currently lit.
GPIO.output(all_lights,GPIO.LOW)
current_light = None
# Poll the DHT11 once per second, light the LED for the humidity band, and
# refresh the OLED with humidity and Fahrenheit temperature.
while True:
    # DHT11 sensor on GPIO4.
    humidity, temperature = Adafruit_DHT.read_retry(11, 4)
    # NOTE(review): read_retry can return (None, None) after repeated
    # failures, which would make the arithmetic below raise — confirm.
    f_temp = (temperature * (9.0/5.0)) + 32
    #print 'Temp: {0:0.1f}F Humidity: {1:0.1f}%'.format(f_temp, humidity)
    # Turn off whichever indicator LED was lit on the previous pass.
    if current_light:
        light_off(current_light)
    # Pick the LED for the current humidity band.
    if 0 <= humidity <= 24:
        light_on(22)
        current_light = 22
    elif 25 <= humidity <= 29:
        light_on(23)
        current_light = 23
    else:
        light_on(18)
        current_light = 18
    # Redraw the OLED: clear the buffer, write two lines, push to the panel.
    draw.rectangle((0,0,width,height), outline=0, fill=0)
    draw.text((2, top), "Humidity: {0:0.1f}".format(humidity), font=font, fill=255)
    draw.text((2, top+8), "Temp: {0:0.1f}F".format(f_temp), font=font, fill=255)
    disp.image(image)
    disp.display()
    time.sleep(1)
|
#! /usr/bin/env python
"""
Script showing how to create multiple vlans on Cisco IOS Switches by reading VLANS ID and Name from Excel File.
"""
import csv
from jinja2 import Template
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException # Device Unreachable
from netmiko.ssh_exception import SSHException # SSH is not enabled
from netmiko.ssh_exception import AuthenticationException # SSH Authentication error
import pyfiglet
#
# # Prints Banner with tool name
banner = pyfiglet.figlet_format("VLANs Creator", font="doom")
print(banner)
#
# Read Vlan Excel File
# NOTE(review): this default is always overwritten by the prompt below, so
# this assignment is dead — confirm whether the prompt should be optional.
source_file = "vlans-info.csv"
# Read VLAN Template File
vlans_template_file = "switch-vlans-template.j2"
# Asks user for Switch credentials
host_ip = input("Enter your device IP: ")
username = input("Enter device SSH Username: ")
user_password = input("Enter device SSH Password: ")
enable_secret = input("Enter device Enable Password: ")
#
source_file = input("Enter your Excel file name with .csv extension: ")
# Connection parameters consumed by netmiko's ConnectHandler below.
device = {
    "address": host_ip,
    "device_type": "cisco_ios",
    # "ssh_port": 22,
    "username": username,
    "password": user_password,
    "secret": enable_secret
}
# String that will hold final full configuration of all VLANS
vlans_configs = ""
# Open up the Jinja template file (as text) and then create a Jinja Template Object
with open(vlans_template_file) as f:
    vlans_template = Template(f.read(), keep_trailing_newline=True)
# Open up the CSV file containing the data
with open(source_file) as f:
    # Use DictReader to access data from CSV
    reader = csv.DictReader(f)
    # For each row in the CSV, generate a vlan configuration using the jinja template
    for row in reader:
        vlan_config = vlans_template.render(
            vlan=row["ID"],
            name=row["Name"],
        )
        # Append this vlan configuration to the full configuration
        vlans_configs += vlan_config
# Save the final configuration to a file
with open("vlans_configs.txt", "w") as f:
    f.write(vlans_configs)
# Use Netmiko to connect to the device and send the configuration.
# Each exception handler reports the most likely cause for that failure mode.
try:
    print("Try connecting to device:", device["address"])
    with ConnectHandler(ip=device["address"],
                        # port=device["ssh_port"],
                        username=device["username"],
                        password=device["password"],
                        device_type=device["device_type"],
                        secret=device["secret"]) as ch:
        print("Connection Successful, Creating VLANS in progress ... !")
        print("It will print out the result on the terminal and pushes it to your switch automatically")
        # send_config_set expects a list of configuration lines.
        config_set = vlans_configs.split("\n")
        output = ch.send_config_set(config_set)
        print(output)
except (AuthenticationException):
    print("Authentication Failure:", device["address"])
except (NetMikoTimeoutException):
    print("Timeout to device:",
          device["address"], ", Check the host IP and Make sure it's Up and Running")
except(SSHException):
    print("SSH Issue, Are you sure SSH is enabled?", device["address"])
except Exception as unkown_error:
    # Bug fix: this previously read `print("Some other error:"), unkown_error`,
    # which printed only the label and silently discarded the exception object.
    print("Some other error:", unkown_error)
|
#!/usr/bin/python
import sys
from ui.ui import *
class naishocompression:
    # Python 2 helper (print statement / raw_input below) that holds a message
    # byte string and compresses it in place with bz2 or zlib.
    def __init__(self, message):
        self.message = message
    #--------------------------------------------|
    # BZ2 compress                               |
    #--------------------------------------------|------------------------------------------------------------------------------------------------
    def naishobz2com(self):
        # Replace self.message with its bz2-compressed form.
        try:
            import bz2
            self.message = bz2.compress(self.message)
        except:
            # NOTE(review): bare except hides the real error and leaves the
            # message unmodified; the user is only shown a pause prompt.
            raw_input ("[-] Bz2 commpressing error. Data was not encoded")
    #--------------------------------------------|
    # zlib compress                              |
    #--------------------------------------------|------------------------------------------------------------------------------------------------
    def naishozlibcom(self):
        # Replace self.message with its zlib-compressed form (level 6).
        try:
            import zlib
            self.message = zlib.compress(self.message, 6)
        except:
            raw_input ("[-] zlib commpressing error. Data was not encoded")
    #--------------------------------------------|
    # Show Data                                  |
    #--------------------------------------------|------------------------------------------------------------------------------------------------
    def naishoshowdata(self):
        # Print the banner (from ui.ui) and the current message, then wait.
        banner1()
        print self.message + "\n\n"
        raw_input("Press Enter to continue...")
|
# -*- coding:utf8 -*-
# @author = WMaker
'''
13.2物体跟踪
现在我们知道怎样将一幅图像从 BGR 转换到 HSV 了,我们可以利用这
一点来提取带有某个特定颜色的物体。在 HSV 颜色空间中要比在 BGR 空间
中更容易表示一个特定颜色。在我们的程序中,我们要提取的是一个蓝色的物
体。下面就是就是我们要做的几步:
• 从视频中获取每一帧图像
• 将图像转换到 HSV 空间
• 设置 HSV 阈值到蓝色范围。
• 获取蓝色物体,当然我们还可以做其他任何我们想做的事,比如:在蓝色
物体周围画一个圈。
利用cv2.inRange函数设阈值,去除背景部分
mask = cv2.inRange(hsv, lower, upper #lower20===>0,upper200==>0,
函数很简单,参数有三个
第一个参数:hsv指的是原图
第二个参数:lower指的是图像中低于这个lower的值,图像值变为0
第三个参数:upper指的是图像中高于这个upper的值,图像值变为0
而在lower~upper之间的值变成255
'''
import cv2
import numpy as np
cap=cv2.VideoCapture('../images/jiesen.mp4')
# Color-based object tracking: mask every frame to an HSV range and show
# the original, the mask and the masked result until ESC is pressed.
while(1):
    # Grab one frame.
    ret,frame=cap.read()
    # NOTE(review): `ret` is not checked; when the video ends, frame is None
    # and cvtColor below raises.
    # Convert to HSV.
    hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    # Threshold bounds for the target (purple-ish) color.
    # (Convert the image to HSV, sample the target pixel's value, and build
    # the bounds from it — this amounts to building a mask over the HSV image.)
    lower=np.array([60,60,150])
    upper=np.array([170,170,240])
    # Build the mask from the bounds.
    mask=cv2.inRange(hsv,lower,upper)
    # Bitwise-AND the mask with the original frame.
    res=cv2.bitwise_and(frame,frame,mask=mask)
    # Show the images.
    cv2.imshow('frame',frame)
    cv2.imshow('mask',mask)
    cv2.imshow('res',res)
    #k=cv2.waitKey(5)&0xFF
    k=cv2.waitKey(10)&0xFF
    print(k)
    if k==27:
        break
# Close all windows.
cv2.destroyAllWindows()
#----------------------------------------------
# Same HSV-range extraction applied to a single still image.
image = cv2.imread('../images/jiesen.jpg')
# Convert to HSV.
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Threshold bounds for the target (purple-ish) color.
# (Sample the target pixel in HSV space and build the bounds from it.)
lower = np.array([110, 100, 200])
upper = np.array([170, 170, 240])
# Build the mask from the bounds.
mask = cv2.inRange(hsv, lower, upper)
# Bitwise-AND the mask with the original image.
res = cv2.bitwise_and(image, image, mask=mask)
# Show the images until any key is pressed.
cv2.imshow('image', image)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
def greet_user(names):
    """Say hello to every user in *names*, title-casing each name."""
    for person in names:
        print("Hello, {}!".format(person.title()))
# Demo data: greet three usernames, then set up the magician list used below.
usernames = ['redlio89', 'zezema', 'seriousburn']
greet_user(usernames)
magicians = ['houdini', 'zoro', 'some dude']
def show_magicians(magician_names):
    """Print a title-cased greeting line for every magician in the list."""
    for magician in magician_names:
        greeting = "Hello, " + magician.title() + "!"
        print(greeting)
def make_great(magician_names):
    """Append ' the Great!' to each name, modifying the list in place.

    Bug fix: the original body computed ``name + " the Great!"`` and
    discarded the result, so the list was never changed.
    """
    for i, name in enumerate(magician_names):
        magician_names[i] = name + " the Great!"
#make_great(magician_names=magicians)
show_magicians(magician_names=magicians)
# make_great receives a copy (magicians[:]), so the original list is untouched.
make_great(magician_names=magicians[:])
show_magicians(magician_names=magicians) |
#!/usr/bin/env python
from argparse import ArgumentParser
from math import pi, sin
from quad import integrate, sin2
if __name__ == '__main__':
    # Pick one of several quadrature implementations and integrate sin^2 and
    # sin over [0, 3.2*pi] with --n points.
    arg_parser = ArgumentParser(description='compute quadratures')
    arg_parser.add_argument('--n', type=int, default=1000,
                            help='number of integration points')
    arg_parser.add_argument('--version', default='quad',
                            choices=['quad', 'quad_prof', 'quad_prof_indiv',
                                     'quad_pure'],
                            help='version to run')
    options = arg_parser.parse_args()
    # Re-import integrate/sin2 from the chosen module, shadowing the default
    # 'quad' import at the top of the file.
    if options.version == 'quad':
        from quad import integrate, sin2
    elif options.version == 'quad_prof':
        from quad_prof import integrate, sin2
    elif options.version == 'quad_prof_indiv':
        from quad_prof_indiv import integrate, sin2
    elif options.version == 'quad_pure':
        from quad_pure import integrate, sin2
    print(integrate(sin2, 0.0, 3.2*pi, options.n))
    print(integrate(sin, 0.0, 3.2*pi, options.n))
|
from django.shortcuts import render, redirect
from .models import Secret, Like
def index(request):
    """List all secrets (newest first) on GET; create a new secret on POST."""
    if request.method == "GET":
        context = {
            "secrets": Secret.objects.all().order_by('-created_at'),
            "likes": Like.objects.all()
        }
        return render(request, 'first_app/index.html', context)
    elif request.method == "POST":
        Secret.objects.create(secret=request.POST['secret'])
        return redirect('/')
def like(request, secret_id):
    """Record a like for the secret with the given id, then redirect home."""
    # NOTE(review): this passes the raw id as `secret=`; if Like.secret is a
    # ForeignKey this should likely be `secret_id=secret_id` — confirm against
    # the Like model.
    Like.objects.create(secret=secret_id)
    return redirect('/')
|
from blinker import signal
'''
Life cycle
'''
# Blinker signal emitted for the 'mac_initialized' life-cycle event.
initialized = signal('mac_initialized')
'''
Messages
'''
message_received = signal('message_received') # Plain message
command_received = signal('command_received') # !<command> |
import logging
import networkx as nx
from dateutil import parser
# Single-character codes as they appear in event records
# (consumed by PackagesGraph.add_event below).
EDGE_DEPENDENCY = 'p'
EDGE_DEV_DEPENDENCY = 'd'
EVENT_ADD = "a"
EVENT_DELETE = "d"
class PackagesGraph:
    """Package-dependency graph built incrementally from a stream of events.

    Each event adds or deletes a (package -> target) dependency edge, tagged
    as a production or a development dependency.  Edges carry ``prod``/``dev``
    boolean attributes so a single edge can represent both kinds at once.
    """
    def __init__(self, events_source, G=None, directed=True):
        """Wrap an existing networkx graph or create a fresh one.

        events_source: iterable of event records (see add_event).
        G: optional pre-built nx.Graph/nx.DiGraph to continue from.
        directed: when G is None, build a DiGraph (True) or Graph (False).
        Raises Exception for any other G value.
        """
        if isinstance(G, (nx.Graph, nx.DiGraph)):
            self.G = G
        elif G is None:
            self.G = nx.DiGraph() if directed else nx.Graph()
        else:
            raise Exception('Invalid G parameter')
        self.events_source = events_source
    def get_isolated_nodes(self):
        """Return an iterator over nodes that have no edges at all."""
        return nx.isolates(self.G)
    def get_connected_graph_view(self):
        """Return a connected-component subgraph holding >90% of the nodes.

        Falls back to the largest component seen when none is big enough.
        NOTE(review): nx.connected_component_subgraphs was removed in
        networkx 2.4; newer versions need
        (self.G.subgraph(c) for c in nx.connected_components(self.G)).
        """
        graph_size = len(self.G.nodes)
        min_size = graph_size * .9
        views = nx.connected_component_subgraphs(self.G, False)
        largest_size = 0
        largest_view = None
        for view in views:
            view_size = len(view.nodes)
            if view_size > min_size:
                return view
            # Bug fix: logging.warning() was called print-style with several
            # positional arguments, which breaks %-formatting inside logging
            # (the message is lost with a "Logging error" traceback).
            logging.warning(
                'view of %s nodes is smaller than the minimum size: %s',
                view_size, min_size)
            if view_size > largest_size:
                largest_size = view_size
                largest_view = view
        return largest_view
    def add_event(self, event):
        """Apply a single dependency event to the graph.

        ``event`` unpacks as (pkg_name, _, _, event_type, edge_type, target).
        A delete only clears the prod/dev flag when the edge still carries
        the other dependency kind; otherwise the edge is removed.
        """
        pkg_name, _, _, event_type, edge_type, target = event
        u = pkg_name
        v = target
        if event_type == EVENT_ADD:
            if edge_type == EDGE_DEPENDENCY:
                self.G.add_edge(u, v, prod=True)
            elif edge_type == EDGE_DEV_DEPENDENCY:
                self.G.add_edge(u, v, dev=True)
            else:
                self.G.add_edge(u, v)
        elif event_type == EVENT_DELETE:
            edge_data = self.G.get_edge_data(u, v)
            if edge_data is None:
                # Bug fix: logging.error() was called print-style; use a
                # single format string with lazy arguments instead.
                logging.error('network_delete: edge does not exist (%s)',
                              event_type)
            elif edge_type == EDGE_DEPENDENCY and edge_data.get("dev"):
                edge_data["prod"] = False
            elif edge_type == EDGE_DEV_DEPENDENCY and edge_data.get("prod"):
                edge_data["dev"] = False
            else:
                self.G.remove_edge(u, v)
    def build_graph_until(self, time_scope):
        """Consume events dated before the month of *time_scope*.

        Returns True when it stopped at the scope boundary (more events
        remain, the first one past it is stashed in self.last_event),
        False when the event source was exhausted.
        """
        stop_time = time_scope.strftime("%Y-%m-01")
        logging.info('Building graph until: %s', stop_time)
        if hasattr(self, 'last_event'):
            # last_event was the first event past the previous scope: replay it.
            if self.last_event.date[:10] >= stop_time:
                raise Exception("No events to cover the target time scope")
            self.add_event(self.last_event)
        for event in self.events_source:
            if event.date[:10] >= stop_time:
                self.last_event = event
                return True
            self.add_event(event)
        return False
    def get_pagerank(self, sort=False):
        """PageRank score per node (see _get_metrics for the sorted form)."""
        return self._get_metrics(nx.pagerank_scipy, sort)
    def get_in_degree_centrality(self, sort=False):
        """In-degree centrality per node."""
        return self._get_metrics(nx.in_degree_centrality, sort)
    def get_in_degree(self, sort=False):
        """Raw in-degree per node."""
        def in_degree(G):
            return dict(G.in_degree())
        return self._get_metrics(in_degree, sort)
    def get_out_degree(self, sort=False):
        """Out-degree *centrality* per node (despite the method name)."""
        return self._get_metrics(nx.out_degree_centrality, sort)
    def _get_metrics(self, metrics_function, sort=False):
        """Run *metrics_function* over the graph.

        With sort=False return the raw {node: value} mapping.  With
        sort=True return {node: (value, rank, bucket)} where rank is the
        1-based position by descending value and bucket the power-of-ten
        "top list" (10, 100, ...) the rank falls into.
        """
        metrics_result = metrics_function(self.G)
        if not sort:
            return metrics_result
        sorted_list = sorted(metrics_result.items(),
                             key=lambda kv: kv[1], reverse=True)
        dict_result = {}
        index = 0
        top_list = 10
        for name, value in sorted_list:
            index += 1
            if index / top_list > 1:
                top_list *= 10
            dict_result[name] = (value, index, top_list)
        return dict_result
|
from mainframe import run_aermod_framework
__author__ = 'Max'
# run_framework.py is the interface for the framework, and calls the run_framework function that runs mainframe
# mainframe.py is the framework, it checks inputs, writes the input files, runs AERMOD and processes the outputs
# input_script_functions contains all functions to write AERMOD/AERPLOT input files and check inputs
# output_processing_functions contains all functions to process AERMOD output files
# more details about output processing are presented below, refer to the receptor_style and run_aerplot variables
# the overview of the framework is easily allowing one to add emission sources in discrete locations and assigning
# them emission rates, which AERMOD will use to predict concentrations at locations also specified by the user. This
# framework also processes the output data, currently set to hourly averages for a year of data, into a spreadsheet
# to easily be used to create graphs, analyze maxima or anything else. The output spreadsheet is currently configured
# to neatly display the time information and yearly average information. If wanted, one can easily go into excel and
# delete all the columns that don't contain emission concentration data.
# concentration data that aermod produces is in the form *****micrograms per meter cubed******
# meteorological data comes from AERMET processor
# AERMET will be needed to obtain meteorological data used for AERMOD
# processing meteorological data with AERMET is the *only* requirement to run this framework
# AERMOD is currently set to calculate all 1-hour concentration averages for a given year of meteorological data
# To change this, go to the writing control and output lines functions in input_script_functions.py
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@ INPUT OPTIONS @@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Name of the surface observations file
# Mandatory, file type: SFC
# Enter as string data type
surface_observations_file = 'temp.SFC'
# Name of the upper air observations file
# Mandatory, file type: PFL
# Enter as string data type
upper_air_data_file = 'temp.PFL'
# list of pollutant source points coordinates in meters
# INPUT LIST OF X COORDINATES IN source_x_points AND Y COORDINATES IN source_y_points
# INDEXES WILL CORRESPOND TO EACH OTHER IN THE TWO LISTS
# LISTS MUST BE MATCHING LENGTH
source_coordinate_list_x = []
source_coordinate_list_y = []
# list of pollutant source release heights in meters
# can be a single data point in which case the value will be applied to EVERY pollutant source
# if manually entering each height, must be the exact same length as the source_x_points/source_y_points
# each height will be associated to the source with the same index as in the coordinate list
source_release_height_list = []
# list of emission rates that will correspond to the source points list
# can be a single data point in which case the value will be applied to EVERY pollutant source
# if manually entering each rate, must be the exact same length as the source_x_points/source_y_points
# each rate will be associated to the source with the same index as in the coordinate list
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$**** IF USING POINT SOURCE, WILL BE INTERPRETED IN GRAMS PER SECOND ***********************$$
# $$**** IF USING AREA SOURCE, WILL BE INTERPRETED IN GRAMS PER SECOND PER METER SQUARED ******$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
source_emission_rate_list = []
# determines what pollutant source type you want
# data required for simulation depends on what type is chosen
# if point is chosen, all 'source_point_...' data is needed
# if area is chosen, all 'source_area_...' data is needed
# MUST BE EITHER 'point' OR 'area' OR FRAMEWORK WILL NOT WORK
# Enter as string data type
source_type = 'area' # 'point'
# area source dimensions of area source polluters, in meters
# this will determine the size of the pollutant source and the emission rate,
# since emissions for area sources are in g/s/m^2,
# ONLY NEEDED IF USING AREA SOURCE, IF NOT NEEDED SET EQUAL TO 'None'
# can be a single data point in which case the value will be applied to EVERY pollutant source
# if manually entering each value, must be the exact same length as the source_x_points/source_y_points
# each value will be associated to the source with the same index as in the coordinate list
source_area_x_direction_length_list = []
source_area_y_direction_length_list = []
# temperature of gas from the point source as it exits
# UNITS ARE IN ***KELVIN***, ENTER 0 TO JUST USE AMBIENT TEMPERATURE
# ONLY NEEDED IF USING POINT SOURCE, IF NOT NEEDED SET EQUAL TO 'None'
# can be a single data point in which case the value will be applied to EVERY pollutant source
# if manually entering each value, must be the exact same length as the source_x_points/source_y_points
# each value will be associated to the source with the same index as in the coordinate list
source_point_stack_gas_exit_temperature_list = [0]
# velocity of gas from the point source as it exits
# units are in meters/second
# ONLY NEEDED IF USING POINT SOURCE, IF NOT NEEDED SET EQUAL TO 'None'
# can be a single data point in which case the value will be applied to EVERY pollutant source
# if manually entering each value, must be the exact same length as the source_x_points/source_y_points
# each value will be associated to the source with the same index as in the coordinate list
source_point_stack_gas_exit_velocity_list = []
# diameter of pollutant stack from point source emittor
# units are in meters
# ONLY NEEDED IF USING POINT SOURCE, IF NOT NEEDED SET EQUAL TO 'None'
# can be a single data point in which case the value will be applied to EVERY pollutant source
# if manually entering each value, must be the exact same length as the source_x_points/source_y_points
# each value will be associated to the source with the same index as in the coordinate list
source_point_stack_inside_diameter_list = []
# the year the meteorological data starts
# Should reflect meteorological data
met_data_start_year = ''
# there are two options for receptor styles
# the first is discrete in which you put in the coordinates you want AERMOD to calculate concentrations at
# the second is grid in which AERMOD calculates concentrations in a full grid
# grid simulations can take a lot longer since there are much more receptor locations
# either enter 'discrete' or 'grid'
# for discrete you only have to enter the receptor_coordinate_list_... variables
# for grid you have to enter all receptor_grid_... variables
# EXTRACTING CONCENTRATION DATA TO EXCEL SPREADSHEET ONLY WORKS WITH DISCRETE COORDINATES
receptor_style = 'grid' # 'discrete'
# coordinate list of discrete receptors in METERS
# enter as many as you want, enter as lists of numbers
# length of each list must match
# the index of each coordinate in each list will correspond to the index of the coordinate in the other list
# if using grid receptors, enter as None
receptor_coordinate_list_x = []
receptor_coordinate_list_y = []
# the starting coordinate for the x and y receptors
# the first receptor will be at this location, followed by as many receptors specified at the spacing distance
# entered in meters
# hint: enter negative coordinate so receptors form a grid around the origin
receptor_grid_starting_point_x = 0
receptor_grid_starting_point_y = 0
# the number of receptors in the x and y location
# this will determine the size of the grid since there will be as many receptors as specified,
# separated by the spacing distance
receptor_grid_number_receptors_x = 0
receptor_grid_number_receptors_y = 0
# this determines the spacing distance between receptors in the x and y direction
# the distance between each receptor multiplied by the number of receptors minus one in each direction (x/y),
# will determine the size of the grid
# units are in meters
# for example, starting point -1000, number receptors = 21, grid_spacing = 100, the x length of the grid is 2000
receptor_grid_spacing_x = 0
receptor_grid_spacing_y = 0
# the base elevation for the region in the study
# data is MANDATORY by AERMOD
# units are in METERS
base_elevation = ''
# the station number that the upper air data was collected at
# data is MANDATORY by AERMOD
uair_data_station_number = ''
# the station number that the surface data observations were collected at
# data is MANDATORY by AERMOD
surf_data_station_number = ''
# if you ran an AERMAP simulation for this scenario
# enter the names of the source and receptor files that AERMAP outputted
# these files should be in a txt format
# example; receptor_aermap_output_file_name='aermap_receptor.txt'
# if AERMAP was not used set both variables to None and the program will run fine
# Bug fix: both lines previously ended with a stray trailing comma, which made
# each value the one-element tuple (None,) instead of None.
receptor_aermap_output_file_name = None
source_aermap_output_file_name = None
# if you want to run AERPLOT to make contour plots of the results
# all you need is information about the location of the area of interest
# for running aerplot, set run_aerplot to 'yes' or 'no'
# *** aerplot needs more than one receptor or it will throw an error
run_aerplot = 'no'
# the northing and easting **UTM** coordinates of the location of interest
# if you don't care about the location that the plot is overlayed in on google earth
# you can enter 0 for the northing and easting and just see the contour plot on the ocean
# enter the coordinates, or set them to None if not running aerplot
aerplot_northing = ''
aerplot_easting = ''
# the utm zone for the location of interest
# set to None if not using aerplot
aerplot_UTM_zone = ''
# if using aerplot, define if in the northern hemisphere or not
# **ENTER ARGUMENT AS A STRING**
# if not using aerplot, set as None. Otherwise, set *ONLY* to 'True' or 'False' with quotations
aerplot_northern_hemisphere = 'True' # 'False'
####################################################################################
####################################################################################
####################################################################################
################################### FUNCTION CALL ##################################
####################################################################################
####################################################################################
####################################################################################
# Hand every configuration value defined above to the framework entry point.
run_aermod_framework(surface_observations_file=surface_observations_file,
                     upper_air_data_file=upper_air_data_file,
                     source_coordinate_list_x=source_coordinate_list_x,
                     source_coordinate_list_y=source_coordinate_list_y,
                     source_release_height_list=source_release_height_list,
                     source_emission_rate_list=source_emission_rate_list,
                     source_type=source_type,
                     source_area_x_direction_length_list=source_area_x_direction_length_list,
                     source_area_y_direction_length_list=source_area_y_direction_length_list,
                     source_point_stack_gas_exit_temperature_list=source_point_stack_gas_exit_temperature_list,
                     source_point_stack_gas_exit_velocity_list=source_point_stack_gas_exit_velocity_list,
                     source_point_stack_inside_diameter_list=source_point_stack_inside_diameter_list,
                     met_data_start_year=met_data_start_year,
                     receptor_style=receptor_style,
                     receptor_coordinate_list_x=receptor_coordinate_list_x,
                     receptor_coordinate_list_y=receptor_coordinate_list_y,
                     receptor_grid_starting_point_x=receptor_grid_starting_point_x,
                     receptor_grid_starting_point_y=receptor_grid_starting_point_y,
                     receptor_grid_number_receptors_x=receptor_grid_number_receptors_x,
                     receptor_grid_number_receptors_y=receptor_grid_number_receptors_y,
                     receptor_grid_spacing_x=receptor_grid_spacing_x,
                     receptor_grid_spacing_y=receptor_grid_spacing_y,
                     base_elevation=base_elevation,
                     uair_data_station_number=uair_data_station_number,
                     surf_data_station_number=surf_data_station_number,
                     receptor_aermap_output_file_name=receptor_aermap_output_file_name,
                     source_aermap_output_file_name=source_aermap_output_file_name,
                     run_aerplot=run_aerplot,
                     aerplot_northing=aerplot_northing,
                     aerplot_easting=aerplot_easting,
                     aerplot_UTM_zone=aerplot_UTM_zone,
                     aerplot_northern_hemisphere=aerplot_northern_hemisphere
                     )
|
#local media assets agent
import os, string, hashlib
from mp4file import atomsearch, mp4file
from mutagen.id3 import ID3
artExt = ['jpg','jpeg','png','tbn']
artFiles = {'posters': ['poster','default','cover','movie','folder'],
'art': ['fanart']}
subtitleExt = ['utf','utf8','utf-8','sub','srt','smi','rt','ssa','aqt','jss','ass','idx']
class localMediaMovie(Agent.Movies):
    # Secondary Plex agent: attaches movie posters/art, sidecar subtitles and
    # embedded MP4 metadata found next to the media files.
    name = 'Local Media Assets (Movies)'
    languages = [Locale.Language.NoLanguage]
    primary_provider = False
    contributes_to = ['com.plexapp.agents.imdb', 'com.plexapp.agents.none']
    def search(self, results, media, lang):
        # Local assets always apply; return a dummy match with full score.
        results.Append(MetadataSearchResult(id = 'null', score = 100))
    def update(self, metadata, media, lang):
        filename = media.items[0].parts[0].file.decode('utf-8')
        path = os.path.dirname(filename)
        # DVD rips keep media inside VIDEO_TS; look for art one level up.
        if 'video_ts' == path.lower().split('/')[-1]:
            path = '/'.join(path.split('/')[:-1])
        basename = os.path.basename(filename)
        (fileroot, ext) = os.path.splitext(basename)
        # Lowercased name -> actual name map for case-insensitive lookup.
        pathFiles = {}
        for p in os.listdir(path):
            pathFiles[p.lower()] = p
        # Add the filename as a base, and the dirname as a base for poster lookups
        passFiles = {}
        passFiles['posters'] = artFiles['posters'] + [fileroot, path.split('/')[-1]]
        passFiles['art'] = artFiles['art'] + [fileroot + '-fanart']
        # Look for posters and art
        valid_art = []
        valid_posters = []
        for t in ['posters','art']:
            for e in artExt:
                for a in passFiles[t]:
                    f = (a + '.' + e).lower()
                    if f in pathFiles.keys():
                        data = Core.storage.load(os.path.join(path, pathFiles[f]))
                        if t == 'posters':
                            if f not in metadata.posters:
                                metadata.posters[f] = Proxy.Media(data)
                            valid_posters.append(f)
                            Log('Local asset (type: ' + t + ') added: ' + f)
                        elif t == 'art':
                            if f not in metadata.art:
                                metadata.art[f] = Proxy.Media(data)
                            valid_art.append(f)
                            Log('Local asset (type: ' + t + ') added: ' + f)
        # Drop previously-stored art whose file no longer exists on disk.
        metadata.posters.validate_keys(valid_posters)
        metadata.art.validate_keys(valid_art)
        # Look for subtitles
        for i in media.items:
            for part in i.parts:
                FindSubtitles(part)
                getMetadataAtoms(part, metadata, type='Movie')
class localMediaTV(Agent.TV_Shows):
    # Secondary Plex agent: sidecar subtitles and embedded MP4 metadata
    # for TV episodes.
    name = 'Local Media Assets (TV)'
    languages = [Locale.Language.NoLanguage]
    primary_provider = False
    contributes_to = ['com.plexapp.agents.thetvdb', 'com.plexapp.agents.none']
    def search(self, results, media, lang):
        # Local assets always apply; return a dummy match with full score.
        results.Append(MetadataSearchResult(id = 'null', score = 100))
    def update(self, metadata, media, lang):
        # Look for subtitles for each episode.
        for s in media.seasons:
            # If we've got a date based season, ignore it for now, otherwise it'll collide with S/E folders/XML and PMS
            # prefers date-based (why?)
            if int(s) < 1900:
                for e in media.seasons[s].episodes:
                    for i in media.seasons[s].episodes[e].items:
                        for part in i.parts:
                            FindSubtitles(part)
                            getMetadataAtoms(part, metadata, type='TV', episode=metadata.seasons[s].episodes[e])
            else:
                # Whack it in case we wrote it.
                del metadata.seasons[s]
class localMediaAlbum(Agent.Album):
    # Secondary Plex agent: album cover art from sidecar files, embedded ID3
    # APIC frames (mp3) or MP4 coverart atoms (m4a/mp4).
    name = 'Local Media Assets (Albums)'
    languages = [Locale.Language.NoLanguage]
    primary_provider = False
    contributes_to = ['com.plexapp.agents.discogs', 'com.plexapp.agents.lastfm', 'com.plexapp.agents.none']
    def search(self, results, media, lang):
        # Local assets always apply; return a dummy match with full score.
        results.Append(MetadataSearchResult(id = 'null', score = 100))
    def update(self, metadata, media, lang):
        valid_posters = []
        for t in media.tracks:
            for i in media.tracks[t].items:
                for p in i.parts:
                    filename = p.file.decode('utf-8')
                    path = os.path.dirname(filename)
                    (fileroot, fext) = os.path.splitext(filename)
                    # Lowercased name -> actual name map for case-insensitive lookup.
                    pathFiles = {}
                    for pth in os.listdir(path):
                        pathFiles[pth.lower()] = pth
                    # Add the filename as a base, and the dirname as a base for poster lookups
                    passFiles = {}
                    passFiles['posters'] = artFiles['posters'] + [fileroot, path.split('/')[-1]]
                    # Look for posters
                    for e in artExt:
                        for a in passFiles['posters']:
                            f = (a + '.' + e).lower()
                            if f in pathFiles.keys():
                                data = Core.storage.load(os.path.join(path, pathFiles[f]))
                                # Posters are keyed by content hash so duplicates are skipped.
                                posterName = hashlib.md5(data).hexdigest()
                                if posterName not in metadata.posters:
                                    metadata.posters[posterName] = Proxy.Media(data)
                                    valid_posters.append(posterName)
                                    Log('Local asset image added: ' + f + ', for file: ' + filename)
                                else:
                                    Log('skipping add for local art')
                    # Look for embedded id3 APIC images in mp3 files
                    if fext.lower() == '.mp3':
                        f = ID3(filename)
                        for frame in f.getall("APIC"):
                            if (frame.mime == 'image/jpeg') or (frame.mime == 'image/jpg'): ext = 'jpg'
                            elif frame.mime == 'image/png': ext = 'png'
                            elif frame.mime == 'image/gif': ext = 'gif'
                            else: ext = ''
                            posterName = hashlib.md5(frame.data).hexdigest()
                            if posterName not in metadata.posters:
                                Log('Adding embedded APIC art from mp3 file: ' + filename)
                                metadata.posters[posterName] = Proxy.Media(frame.data, ext=ext)
                                valid_posters.append(posterName)
                            else:
                                Log('skipping already added APIC')
                    # Look for coverart atoms in mp4/m4a
                    elif fext.lower() in ['.mp4','.m4a']:
                        mp4fileTags = mp4file.Mp4File(filename)
                        try:
                            data = find_data(mp4fileTags, 'moov/udta/meta/ilst/coverart')
                            posterName = hashlib.md5(data).hexdigest()
                            # NOTE(review): stored under the fixed key 'atom_coverart'
                            # yet validated via the hash name appended below — the
                            # keys disagree, so validate_keys may drop this art; confirm.
                            if posterName not in metadata.posters:
                                metadata.posters['atom_coverart'] = Proxy.Media(data)
                                valid_posters.append(posterName)
                                Log('Adding embedded coverart from m4a/mp4 file: ' + filename)
                        except: pass
        metadata.posters.validate_keys(valid_posters)
def cleanFilename(filename):
    #this will remove any whitespace and punctuation chars and replace them with spaces, strip and return as lowercase
    # NOTE(review): Python 2 idiom — string.translate/string.maketrans on a
    # byte string; under Python 3 this raises AttributeError.
    return string.translate(filename.encode('utf-8'), string.maketrans(string.punctuation + string.whitespace, ' ' * len (string.punctuation + string.whitespace))).strip().lower()
def FindSubtitles(part):
    # Attach sidecar subtitle files whose cleaned name matches this media
    # part's filename; the last name token is treated as the language code.
    filename = part.file.decode('utf-8') #full pathname
    basename = os.path.basename(filename) #filename only (no path)
    (fileroot, ext) = os.path.splitext(basename)
    fileroot = cleanFilename(fileroot)
    ext = ext.lower()
    path = os.path.dirname(filename) #get the path, without filename
    # Get all the files in the path.
    pathFiles = {}
    for p in os.listdir(path):
        pathFiles[p] = p
    #Support for global sub dir.
    if Prefs["enableSubDir"]:
        Log("Searching %s for subs aswell." % Prefs["subDir"])
        for p in os.listdir(Prefs["subDir"]):
            pathFiles[p] = p
    # Start with the existing languages.
    lang_sub_map = {}
    for lang in part.subtitles.keys():
        lang_sub_map[lang] = []
    addAll = False
    for f in pathFiles:
        (froot, fext) = os.path.splitext(f)
        froot = cleanFilename(froot)
        # Skip hidden files; only consider known subtitle extensions.
        if f[0] != '.' and fext[1:].lower() in subtitleExt:
            langCheck = cleanFilename(froot).split(' ')[-1].strip()
            # Remove the language from the filename for comparison purposes.
            frootNoLang = froot[:-(len(langCheck))-1].strip()
            if addAll or ((fileroot == froot) or (fileroot == frootNoLang)):
                Log('Found subtitle file: ' + f + ' language: ' + langCheck)
                lang = Locale.Language.Match(langCheck)
                part.subtitles[lang][f] = Proxy.LocalFile(os.path.join(path, pathFiles[f]))
                # NOTE(review): dict.has_key is Python 2 only.
                if not lang_sub_map.has_key(lang):
                    lang_sub_map[lang] = []
                lang_sub_map[lang].append(f)
    # Now whack subtitles that don't exist anymore.
    for lang in lang_sub_map.keys():
        part.subtitles[lang].validate_keys(lang_sub_map[lang])
def getMetadataAtoms(part, metadata, type, episode=None):
    """Populate Plex metadata from the iTunes-style atoms embedded in an
    mp4/m4v/mov file: cover art, title, summary and (for movies) genres,
    artists-as-roles and release date.

    Each field is wrapped in its own try/except so a missing atom simply
    leaves that field untouched (bare excepts are deliberate here).
    """
    filename = part.file.decode('utf-8')
    file = os.path.basename(filename)
    (file, ext) = os.path.splitext(file)
    if ext.lower() in ['.mp4', '.m4v', '.mov']:
        mp4fileTags = mp4file.Mp4File(filename)
        # Embedded cover art, if any.
        try: metadata.posters['atom_coverart'] = Proxy.Media(find_data(mp4fileTags, 'moov/udta/meta/ilst/coverart'))
        except: pass
        try:
            title = find_data(mp4fileTags, 'moov/udta/meta/ilst/title') #Name
            if type == 'Movie': metadata.title = title
            else: episode.title = title
        except:
            pass
        try:
            # Prefer the long description; fall back to the short one.
            try:
                summary = find_data(mp4fileTags, 'moov/udta/meta/ilst/ldes') #long description
            except:
                summary = find_data(mp4fileTags, 'moov/udta/meta/ilst/desc') #short description
            if type == 'Movie': metadata.summary = summary
            else: episode.summary = summary
        except:
            pass
        if type == 'Movie':
            try:
                genres = find_data(mp4fileTags, 'moov/udta/meta/ilst/genre') #genre
                if len(genres) > 0:
                    genList = genres.split(',')
                    metadata.genres.clear()
                    for g in genList:
                        metadata.genres.add(g.strip())
            except:
                pass
            try:
                artists = find_data(mp4fileTags, 'moov/udta/meta/ilst/artist') #artist
                if len(artists) > 0:
                    artList = artists.split(',')
                    metadata.roles.clear()
                    for a in artList:
                        role = metadata.roles.new()
                        role.actor = a.strip()
            except:
                pass
            try:
                releaseDate = find_data(mp4fileTags, 'moov/udta/meta/ilst/year')
                # Keep only the date part of an ISO timestamp.
                releaseDate = releaseDate.split('T')[0]
                parsedDate = Datetime.ParseDate(releaseDate)
                metadata.year = parsedDate.year
                metadata.originally_available_at = parsedDate.date() #release date
            except:
                pass
def find_data(atom, name):
    """Return the raw payload of the 'data' child of the atom found at the
    '/'-separated path *name* under *atom*.

    Raises AttributeError when the path is absent (find_path returns None
    and `.find` is called on it) — callers above rely on this by wrapping
    every call in try/except. Falls through to an implicit None when a
    'data' child exists but carries no 'data' attribute.
    """
    child = atomsearch.find_path(atom, name)
    data_atom = child.find('data')
    if data_atom and 'data' in data_atom.attrs:
        return data_atom.attrs['data']
|
def shift_string(text):
    """Shift every ASCII letter in *text* forward by one place in the
    alphabet ('z' wraps to 'a', 'Z' wraps to 'A'); other characters are
    kept unchanged.

    Extracted from the inline script so the transformation is reusable
    and testable; the original also shadowed its loop variable, noted in
    a (translated) comment about an earlier `i=='Z' or 'z'` bug.
    """
    out = ""
    for ch in text:
        if ch.isalpha():
            if ch == 'Z' or ch == 'z':
                ch = chr(ord(ch) - 25)  # wrap around to the alphabet start
            else:
                ch = chr(ord(ch) + 1)
        out += ch
    return out

if __name__ == "__main__":
    # Original script behavior: read a string, print its shifted version.
    print(shift_string(input("Enter a string:")))
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import time
from typing import Callable, Any # pylint: disable=unused-import
from azure.core.polling import PollingMethod, LROPoller
from ._shared.utils import process_storage_error
from ._generated.models import StorageErrorException
from ._blob_utils import deserialize_blob_properties
logger = logging.getLogger(__name__)
class CopyStatusPoller(LROPoller):
    """Poller for a long-running copy operation.

    Thin wrapper over azure.core's LROPoller that selects between an
    actively-polling method (CopyBlobPolling) and a no-op one (CopyBlob)
    and exposes copy-specific helpers (copy_id, abort).
    """
    def __init__(self, client, copy_id, polling=True, configuration=None, **kwargs):
        # Polling interval comes from the client configuration when
        # available, otherwise defaults to 2 seconds.
        if configuration:
            polling_interval = configuration.blob_settings.copy_polling_interval
        else:
            polling_interval = 2
        polling_method = CopyBlobPolling if polling else CopyBlob
        poller = polling_method(polling_interval, **kwargs)
        super(CopyStatusPoller, self).__init__(client, copy_id, None, poller)
    def copy_id(self):
        # type: () -> str
        """Get the ID of the copy operation.
        :rtype: str
        """
        return self._polling_method.id
    def abort(self):
        # type: () -> None
        """Abort the copy operation.
        This will leave a destination blob with zero length and full metadata.
        This will raise an error if the copy operation has already ended.
        :rtype: None
        """
        return self._polling_method.abort()
    def status(self):  # pylint: disable=useless-super-delegation
        # type: () -> str
        """Returns the current status of the copy operation.
        :rtype: str
        """
        return super(CopyStatusPoller, self).status()
    def result(self, timeout=None):
        # type: (Optional[int]) -> Model
        """Return the BlobProperties after the completion of the copy operation,
        or the properties available after the specified timeout.
        :returns: The destination blob properties.
        :rtype: ~azure.storage.blob.models.BlobProperties
        """
        return super(CopyStatusPoller, self).result(timeout=timeout)
class CopyBlob(PollingMethod):
    """An empty poller that returns the deserialized initial response.

    Performs no background polling: `run` is a no-op and the copy status
    is refreshed on demand by fetching the destination blob's properties.
    """
    def __init__(self, interval, **kwargs):
        self._client = None
        self._status = None
        self._exception = None
        self.id = None              # copy operation ID
        self.etag = None            # destination blob etag at last refresh
        self.last_modified = None   # destination blob last-modified at last refresh
        self.polling_interval = interval
        self.kwargs = kwargs        # passed through to the service calls
        self.blob = None            # last deserialized BlobProperties
    def _update_status(self):
        # Refresh status/etag/last_modified from the destination blob.
        try:
            self.blob = self._client._client.blob.get_properties(  # pylint: disable=protected-access
                cls=deserialize_blob_properties, **self.kwargs)
        except StorageErrorException as error:
            process_storage_error(error)
        self._status = self.blob.copy.status
        self.etag = self.blob.etag
        self.last_modified = self.blob.last_modified
    def initialize(self, client, initial_status, _):  # pylint: disable=arguments-differ
        # type: (Any, Any, Callable) -> None
        """Bind the client and seed state either from a copy ID string
        (triggering one status fetch) or from an initial response dict."""
        self._client = client
        if isinstance(initial_status, str):
            self.id = initial_status
            self._update_status()
        else:
            self._status = initial_status['copy_status']
            self.id = initial_status['copy_id']
            self.etag = initial_status['etag']
            self.last_modified = initial_status['last_modified']
    def run(self):
        # type: () -> None
        """Empty run, no polling."""
    def abort(self):
        # type: () -> None
        """Abort the copy operation on the service."""
        try:
            return self._client._client.blob.abort_copy_from_url(  # pylint: disable=protected-access
                self.id, **self.kwargs)
        except StorageErrorException as error:
            process_storage_error(error)
    def status(self):
        # type: () -> str
        """Fetch and return the current copy status."""
        self._update_status()
        return self._status
    def finished(self):
        # type: () -> bool
        """Is this polling finished?
        :rtype: bool
        """
        return str(self.status()).lower() in ['success', 'aborted', 'failed']
    def resource(self):
        # type: () -> Any
        """Return the (re-fetched) destination blob properties."""
        self._update_status()
        return self.blob
class CopyBlobPolling(CopyBlob):
    """Polling method that blocks in `run` until the copy reaches a
    terminal state, raising for 'aborted' and 'failed' outcomes."""
    def run(self):
        # type: () -> None
        try:
            # Poll at the configured interval until success/aborted/failed.
            while not self.finished():
                self._update_status()
                time.sleep(self.polling_interval)
            if str(self.status()).lower() == 'aborted':
                raise ValueError("Copy operation aborted.")
            if str(self.status()).lower() == 'failed':
                raise ValueError("Copy operation failed: {}".format(self.blob.copy.status_description))
        except Exception as e:
            # Log, then let LROPoller surface the exception to the caller.
            logger.warning(str(e))
            raise
    def status(self):
        # type: () -> str
        """Return the current status as a string.
        :rtype: str
        """
        # _status may be an enum (with .value) or already a plain string.
        try:
            return self._status.value  # type: ignore
        except AttributeError:
            return self._status  # type: ignore
    def resource(self):
        # type: () -> Any
        """Return the destination blob properties, fetching them only if
        not already cached by a previous status update."""
        if not self.blob:
            self._update_status()
        return self.blob
|
# File-extension groups used to classify files.
# FIX: several list entries were missing a trailing comma, so Python's
# implicit string-literal concatenation silently fused neighbours into
# bogus entries (".amr.ape", ".wv.webm" in AUDIO_EXT, ".m4v.mpg" in
# VIDEO_EXT), dropping six real extensions. Also removed a duplicate
# ".odt" from DOC_EXT.
IMAGE_EXT = [".apng", ".avif", ".gif", ".jpg", ".jpeg", ".jfif", ".pjpeg", ".pjp",
             ".png", ".svg", ".webp", ".bmp", ".ico", ".cur", ".tif", ".tiff"]
AUDIO_EXT = [".3gp", ".aa", ".aac", ".aax", ".act", ".aiff", ".alac", ".amr",
             ".ape", ".au", ".awb", ".dss", ".dvf", ".flac", ".gsm", ".iklax",
             ".vs", ".m4a", ".m4p", ".mmf", ".mp3", ".mpc", ".msv", ".nmf",
             ".ogg", ".oga", ".mogg", ".opus", ".org", ".ra", ".rm", ".raw",
             ".rf64", ".sln", ".tta", ".voc", ".vox", ".wav", ".wma", ".wv",
             ".webm", ".8svx", ".cda"]
VIDEO_EXT = [".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".flv", ".f4v",
             ".f4p", ".f4a", ".f4b", ".gifv", ".m4v", ".mkv", ".mng", ".mov",
             ".qt", ".mp4", ".m4p", ".m4v", ".mpg", ".mp2", ".mpeg", ".mpe",
             ".mpv", ".m2v", ".MTS", ".M2TS", ".TS", ".mxf", ".nsv", ".ogv",
             ".rm", ".rmvb", ".roq", ".svi", ".viv", ".vob", ".webm", ".wmv", ".yuv"]
DOC_EXT = [".doc", ".docx", ".pdf", ".odt", ".xls", ".xlsx", ".ods",
           ".epub", ".ppt", ".pptx", ".txt"]
INSTALL_EXT = [".exe", ".msi"]
CODE_EXT = [".java", ".py", ".html", ".css", ".cs", ".js", ".cpp", ".c", ".h", ".rkt"]
USER = "ryanj"
import sys

def solve(n):
    """Return sum of n // (2 * 5**k) for k = 1, 2, ... while the divisor
    does not exceed n; return 0 when n is odd.

    This counts the factors of five contributed by the even numbers
    2, 4, ..., n (e.g. the number of trailing zeros of the double
    factorial n!! for even n).

    Refactored from an inline script that shadowed the builtin `input`
    with sys.stdin.readline and could not be tested in isolation.
    """
    if n % 2 != 0:
        return 0
    total = 0
    divisor = 10  # 2 * 5**1
    while divisor <= n:
        total += n // divisor
        divisor *= 5
    return total

if __name__ == "__main__":
    # Original behavior: read one integer from stdin, print the answer.
    print(solve(int(sys.stdin.readline())))
|
#
# Copyright (C) 2016, Jason S. McMullan <jason.mcmullan@gmail.com>
# All rights reserved.
#
# Licensed under the MIT License:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import sys
import numpy
import fab
from fab import in2mm, mm2in
# Machine geometry and feed rates for the BrundleFab powder-bed printer.
BED_X = 150             # Build bed size along X (mm)
BED_Y = 200             # Build bed size along Y (mm)
BED_Z = 150             # Build height along Z (mm)
X_BIN_FEED=0            # Start of the feed bin
X_BIN_PART=198          # Start of the part bin
X_BIN_WASTE=385         # Start of the waste bin
X_OFFSET_RECOAT=0       # Offset of the recoater blade
X_OFFSET_FUSER=45       # Offset of midpoint of fuser
X_OFFSET_THERM=95       # Offset of midpoint of thermal sensor
X_OFFSET_PEN=195        # Offset of the pen
FEED_RETRACT=2.0        # E retraction, in mm
FEED_SPREAD=3000        # Spread rate while depositing the layer
FEED_POWDER=4500        # Extruder feed rate (mm/minute)
FEED_FUSER_WARM=200     # Fuser pass rate during warm-up (mm/minute)
TIME_FUSER_WARM=6       # Time (in seconds) for fuser to complete its warm-up
FEED_FUSER_HOT=700      # Fuser pass rate during hot (mm/minute)
FEED_PEN=5000           # Pen movement (mm/minute)
X_DPI=96.0              # Ink-head resolution along X (dots per inch)
Y_DPI=96.0              # Ink-head resolution along Y (dots per inch)
Y_DOTS=12               # Rows combined into one toolmask line — presumably
                        # the ink head's nozzle count; confirm with hardware docs
class Fab(fab.Fab):
    """G-code generator for the BrundleFab powder-bed printer.

    Translates sliced SVG layers into the printer's layer cycle: raise the
    feed bin, spread powder across the part bin, ink the layer with the
    pen/ink head, fuse it, then retract and repeat.

    NOTE(review): several divisions below (`x_dots / X_DPI`,
    `int(...)/10` in range()) rely on Python 2 semantics — under
    Python 3 `range(..., int(x)/10)` would raise on the float argument.
    Confirm the target interpreter before porting.
    """
    def size(self):
        """Return the build volume as (X, Y, Z) in mm."""
        return (BED_X, BED_Y, BED_Z)
    def gc(self, comment, code = None):
        """Emit one G-code line (newline-terminated bytes) with an
        optional human-readable comment; comment-only calls are allowed."""
        if code is not None:
            code = code.encode() + b"\n"
        self.send(comment = comment, code = code)
        pass
    def prepare(self, svg = None, name = "Unknown", config = {}):
        """Emit the print preamble: units, positioning mode, tool offsets,
        homing, and the initial manual fill/levelling sequence.

        NOTE(review): `config = {}` is a mutable default argument — safe
        only as long as it is never mutated; confirm.
        """
        super(Fab, self).prepare(svg = svg, name = name, config = config)
        layers = svg.layers()
        max_z_mm = svg.z_mm(layers)
        svg.resolution(dpi = (X_DPI, Y_DPI))
        self.w_dots, self.h_dots = svg.size()
        self.gc("Print %s to the BrundleFab, %dmm, %d layers" % (name, max_z_mm, layers))
        self.gc("Units are mm", "G21")
        self.gc("Absolute positioning", "G90")
        self.gc("Set pen base offset", "G10 L1 P0 X%.3f" % (X_OFFSET_PEN))
        self.gc("Set black tool offset", "G10 L1 P1 X%.3f" % (X_OFFSET_PEN))
        self.gc("Set fuser tool offset", "G10 L1 P20 X%.3f" % (X_OFFSET_FUSER))
        self.gc("Set repowder blade offset", "G10 L1 P21 X%.3f" % (X_OFFSET_RECOAT))
        self.gc("Set thermal monitor tool offset", "G10 L1 P22 X%.3f" % (X_OFFSET_THERM))
        self.gc("Ink spray rate (sprays/dot)", "T1 S%d" % (config['sprays']))
        self.gc("Re-home the ink head", "G28 Y0")
        if config['do_startup']:
            self.gc(None, "M117 Ready to home")
            self.gc("Let the user make sure we're ready to home axes", "M0")
            self.gc("Home print axes", "G28 X0 Y0 E0")
            self.gc("NOTE: Z is _not_ homed, as it may be part of a multi-file print")
            self.gc(None, "M117 Prep Part")
            self.gc("Select the recoater tool", "T21")
            self.gc("Wait for Z prep", "M0")
            self.gc("Move to start of the Waste Bin", "G0 X%.3f" % (X_BIN_WASTE))
            self.gc(None, "M117 Levelling")
            self.gc("Move to start of the Part Bin", "G1 X%.3f F%.3f" % (X_BIN_PART, FEED_SPREAD))
            self.gc(None, "M117 Feed %dmm" % (int(max_z_mm)+5))
            self.gc("Wait for manual fill operation", "M0")
            self.gc("Clear status message", "M117")
        self.gc("Select repowder tool", "T21")
        self.gc("Move to feed start", "G1 X%.3f" % (X_BIN_FEED))
    def finish(self):
        """Drop references held during the print so the object can be reused."""
        self.last_z = None
        self.svg = None
        pass
    def brundle_line(self, x_dots, w_dots, toolmask, weave=True):
        """Emit the ink-head G-code for one print line.

        `toolmask` is a per-dot bitmask (one bit per nozzle row); runs of
        equal mask values are emitted as single T1/G1 segments. Lines that
        are entirely blank are skipped.
        """
        # Find the first inked dot; bail out if the line is empty.
        origin = None
        for i in range(0, w_dots):
            if toolmask[i] != 0:
                origin = i
                break
        if origin == None:
            return
        self.gc(None, "T0")
        self.gc(None, "T1 P0")
        self.gc(None, "G1 X%.3f F%.3f" % (X_BIN_PART + in2mm(x_dots / X_DPI), FEED_PEN))
        self.gc(None, "G1 Y%.3f" % (in2mm(origin / Y_DPI)))
        # Walk the line, emitting a segment each time the mask changes.
        for i in range(origin+1, w_dots):
            if (toolmask[origin] != toolmask[i]) or (i == w_dots - 1):
                if (i == w_dots - 1) and (toolmask[origin] == 0):
                    break
                self.gc(None, "T1 P%d" % (toolmask[origin]))
                self.gc(None, "G1 Y%.3f" % (in2mm((i - 1) / Y_DPI)))
                origin = i
        # Switching to tool 0 will cause a forward flush of the
        # inkbar, and the ink head will end up at the end of the
        # line.
        self.gc(None, "T0")
        # For interweave support, we retract X by a half-dot, and
        # cover the dots inbetween the forward pass on the
        # reverse pass
        if weave:
            self.gc(None, "G1 X%.3f F%.3f" % (X_BIN_PART + in2mm((x_dots - 0.5) / X_DPI), FEED_PEN))
            # Switch back to T1 to ink on the reverse movement of
            # then inkbar
            self.gc(None, "T1 P0")
        self.gc(None, "G0 Y0")
        pass
    def render(self, layer = 0):
        """Emit the full G-code cycle for one layer: powder feed/spread,
        inking (via brundle_line), fusing, and bin repositioning, gated by
        the do_extrude/do_layer/do_fuser config flags."""
        config = self.config
        z_delta_mm = self.svg.height_mm(layer)
        # Extrude a bit more than the layer width
        e_delta_mm = z_delta_mm * 1.1;
        self.gc(None, "M117 Slice %d of %d" % (layer+1, self.layers()))
        self.gc("1. Assume layer head is at feed start")
        self.gc( "Select recoat tool", "T21")
        if config['do_extrude']:
            self.gc("2. Raise Feed Bin by one layer width")
            self.gc( "Relative positioning", "G91")
            self.gc( "Extrude a feed layer", "G1 E%.3f F%d" % (e_delta_mm, FEED_POWDER))
            self.gc( "Absolute positioning", "G90")
        self.gc("3. Advance recoat blade past Waste Bin")
        self.gc( "Advance to waste bin", "G1 X%.3f F%d" % (X_BIN_WASTE+15, FEED_SPREAD))
        self.gc("4. Drop Part Bin by %.3fmm, and Feed Bin by %.3fmm" % (FEED_RETRACT, FEED_RETRACT))
        self.gc( "Relative positioning", "G91")
        self.gc( "Drop bins to get out of the way", "G1 E%.3f Z%.3f F%d" % (-FEED_RETRACT, FEED_RETRACT, FEED_POWDER))
        self.gc( "Absolute positioning", "G90")
        if config['do_layer']:
            self.gc("5. Move pen to start of the part bin")
            self.gc( "Select ink tool", "T1 P0")
            self.gc( "Move pen to end of the part bin", "G0 X%.3f" % (X_BIN_PART))
            self.gc("6. Ink the layer")
            # See brundle_layer()
            # Ink the layer
            w_dots = self.w_dots
            h_dots = self.h_dots
            weave = self.config['do_weave']
            surface = self.svg.surface(layer)
            stride = surface.get_stride()
            image = numpy.frombuffer(surface.get_data(), dtype=numpy.uint8)
            image = numpy.reshape(image, (h_dots, stride))
            image = numpy.greater(image, 0)
            # Accumulate Y_DOTS raster rows into one bitmask line, then emit it.
            for y in range(0, h_dots):
                l = y % Y_DOTS
                if l == 0:
                    toolmask = numpy.zeros((stride))
                    pass
                toolmask = toolmask + image[y]*(1 << l)
                if l == (Y_DOTS-1):
                    self.brundle_line(y, w_dots, toolmask, weave)
                    pass
                pass
            # Flush a final partial group of rows, if any.
            if y % Y_DOTS != 0:
                self.brundle_line(y, w_dots, toolmask, weave)
        # Finish the layer
        if config['do_fuser']:
            self.gc("7. Select fuser, and advance to Waste Bin start")
            self.gc( "Select fuser, but unlit", "T20 P0 Q0")
            x_warm_delta_mm = FEED_FUSER_WARM * TIME_FUSER_WARM / 60
            self.gc( "Advance to Waste Bin start + warm up", "G0 X%.3f" % (X_BIN_WASTE + x_warm_delta_mm+50))
            self.gc("8. The fuser is enabled, and brought up to temp")
            self.gc( "Select fuser and temp", "T20 P%.3f Q%.3f" % (config['fuser_temp']+5, config['fuser_temp']-5))
            self.gc("9. Retract fuser to start of Part Bin")
            self.gc( "Fuser warm-up", "G1 X%.3f F%d" % (X_BIN_WASTE+50, FEED_FUSER_WARM))
            # Fuse in 10mm steps back across the part bin.
            for delta in range(0, int(X_BIN_WASTE - X_BIN_PART)/10):
                self.gc( "Fuse ..", "G1 X%.3f F%d" % (X_BIN_WASTE - delta*10, FEED_FUSER_HOT))
            self.gc( "Fuse ..", "G1 X%.3f F%d" % (X_BIN_PART, FEED_FUSER_HOT))
            self.gc("10. The fuser is disabled", "T20 P0 Q0")
        self.gc("11. Retract recoating blade to start of the Feed Bin")
        self.gc( "Select the recoating tool", "T21")
        self.gc( "Move to start", "G0 X%.3f Y0" % (X_BIN_FEED))
        if config['do_extrude']:
            self.gc("12. The Feed Bin and Part bin raises by %.3fmm" % FEED_RETRACT)
            self.gc( "Relative positioning", "G91")
            self.gc( "Raise the bins", "G1 E%.3f Z%.3f F%d" % (FEED_RETRACT, z_delta_mm - FEED_RETRACT, FEED_POWDER))
            self.gc( "Absolute positioning", "G90")
# vim: set shiftwidth=4 expandtab: #
|
#_*_coding:utf-8 _*_
import datetime
from pymongo import MongoClient
# Connect to the local MongoDB instance holding Ctrip hotel-review data.
client = MongoClient('localhost',27017)
ctrip_comment = client['ctrip_comment']
# Per-review documents: hotel_id, comment_id, comment_dat, comment_text,
# score, sentiment_score.
'''单条点评数据,包括hotel_id,comment_id,comment_dat,comment_text,score, sentiment_score'''
comment_detail = ctrip_comment['comment_detail']
# Per-hotel summary: hotel_id, comment_num, available_comment_num, score,
# recommend_rate, sentiment_score, deadline.
'''一个酒店点评数据的概要信息,包括hotel_id,comment_num,available_comment_num,score,recommend_rate,sentiment_score,deadline'''
comment_basic = ctrip_comment['comment_basic']
# Crawl-batch records.
'''批次记录'''
comment_batch = ctrip_comment['comment_batch']
# comment_batch = client['ctrip_0811']['orderlist']
# Daily log file plus bookkeeping files of hotel ids.
log_file = 'log/log_{}.txt'.format(str(datetime.date.today()))
ids_total_file = 'hotel_ids/ids_total.txt' # 全部id (all ids)
ids_got_file = 'hotel_ids/ids_got.txt' # 已爬取成功的id (ids crawled successfully)
ids_empty_file = 'hotel_ids/ids_empty.txt' # 没有点评数据的id (ids with no review data)
|
import sys
sys.path.append('../doubly_linked_list')
from doubly_linked_list import DoublyLinkedList
# A queue is first in, first out (FIFO):
# if there's a line of people waiting for something, a new person
# enters at the back of the line and doesn't get served until the
# people ahead of them have been served first.
# A stack, by contrast, is first in, last out (LIFO).
''' Python lists not allowed for this weeks assignments '''
class Queue:
    """First-in, first-out queue backed by a doubly linked list.

    New items enter at the tail; items are served from the head, so
    elements leave in the order they arrived.
    """

    def __init__(self):
        # Track the count ourselves so len() is O(1); the DLL provides
        # O(1) insertion at the tail and O(1) removal at the head.
        self.size = 0
        self.storage = DoublyLinkedList()

    def enqueue(self, value):
        """Add an item at the back of the queue."""
        self.storage.add_to_tail(value)
        self.size += 1

    def dequeue(self):
        """Remove and return the front item, or None when the queue is empty."""
        if self.size == 0:
            return None
        self.size -= 1
        return self.storage.remove_from_head()

    def len(self):
        """Return the number of items currently in the queue."""
        return self.size
from intcode import Intcode
def print_map():
    """Render the explored maze around the origin (x, y in [-25, 25]).

    'D' marks the droid's current position; walls print as '█', floor as
    '░', the oxygen system as 'O', and unexplored tiles as spaces. Rows
    are printed top (y=26) to bottom (y=-24), followed by a blank-ish line.
    """
    glyphs = {0: '█', 1: '░', 2: 'O'}
    for y in range(26, -25, -1):
        cells = []
        for x in range(-25, 26):
            if (x, y) == loc:
                cells.append('D')
            else:
                tile = tile_map.get((x, y))
                cells.append(' ' if tile is None else glyphs.get(tile, ''))
        print(''.join(cells))
    print(' ')
# Load the Intcode program for the repair droid.
with open('repair.ic') as f:
    inputs = f.read()
memory = [int(value) for value in inputs.split(',')]
# Pre-seeded movement commands (1=N, 2=S, 3=W, 4=E); once exhausted the
# loop falls back to interactive keyboard input.
ic = Intcode(memory, inputs=[1, 2, 4, 3, 1, 2, 3, 1, 2, 3, 1, 2])
#ic = Intcode(memory)
ic.io = False
# Explored tiles: status 0 = wall, 1 = floor, 2 = oxygen system.
tile_map = {(0, 0): 1}
last_loc = (0, 0)
loc = (0, 0)
stop = False
while True:
    if len(ic.inputs) < 1:
        # No queued moves left: prompt the user for the next direction.
        direction = input('1-N, 2-S, 3-W, 4-E')
        while direction not in ['1', '2', '3', '4']:
            direction = input('1-N, 2-S, 3-W, 4-E')
        direction = int(direction)
        ic.inputs.append(direction)
        # Optimistically update the position; undone below on a wall hit.
        if direction == 1:
            loc = (loc[0], loc[1]+1)
        elif direction == 2:
            loc = (loc[0], loc[1]-1)
        elif direction == 3:
            loc = (loc[0]-1, loc[1])
        elif direction == 4:
            loc = (loc[0]+1, loc[1])
    else:
        # A queued move is about to be consumed by the program.
        direction = ic.inputs[0]
        if direction == 1:
            loc = (loc[0], loc[1]+1)
        elif direction == 2:
            loc = (loc[0], loc[1]-1)
        elif direction == 3:
            loc = (loc[0]-1, loc[1])
        elif direction == 4:
            loc = (loc[0]+1, loc[1])
    # Step the Intcode machine until it emits a status code (or halts).
    while len(ic.outputs) < 1:
        if ic.step():
            stop = True
            break
    if stop:
        break
    status_code = ic.outputs.pop()
    print(loc, status_code)
    tile_map[loc] = status_code
    if status_code == 0:
        # Hit a wall: the droid did not actually move.
        loc = last_loc
    else:
        last_loc = loc
    print_map()
|
#Ayra Tusneem
#CPSC 353-02
#Assignment 2: Twitter Sentiment Analysis
import twitter
import json
import sys
import codecs
# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
# for these credentials, which you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
print('Example 1')
print('Establish Authentication Credentials')
CONSUMER_KEY = 'ftyzJKRgJpic0c77u7GMvwsxS'
CONSUMER_SECRET = '6uBZu8ab0AlSkamQprQ0P80kBLg1dCkmbTK3jlVf85l75m7sej'
OAUTH_TOKEN = '970370538-UuwT1VLbd9c0Wln5HRqqfN3huBcEvyFJ1HxDpSl6'
OAUTH_TOKEN_SECRET = 'W5AlWTEwwCSfQA9HzRVGYv2IfDu5HMhd4vVDNqxle39v8'
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
print("Nothing to see by displaying twitter_api")
print(" except that it's now a defined variable")
print()
print(twitter_api)
# Import unquote to prevent url encoding errors in next_results
# XXX: Set this variable to a trending topic,
# or anything else for that matter. The example query below
# was a trending topic when this content was being developed
# and is used throughout the remainder of this chapter.
# q = '#MentionSomeoneImportantForYou'
# finds tweets with sentiment term and returns the score of the sentiment analysis
def sentiment(term):
    """Search Twitter for *term* (up to ~5 pages of results) and return
    the summed AFINN-111 sentiment score over all words in the tweets.

    :param term: query string passed to the Twitter search API.
    :returns: total sentiment score as a float (0.0 when no scored
        words appear).

    BUG FIX: the original searched the module-level global `q` instead of
    the `term` argument, so every call scored the first query; the AFINN
    loop also rebound `term` and the lexicon file was never closed.
    """
    count = 1000
    # See https://dev.twitter.com/docs/api/1.1/get/search/tweets
    search_results = twitter_api.search.tweets(q=term, count=count)
    statuses = search_results['statuses']
    # Follow up to 5 pages of paginated results.
    for _ in range(5):
        print("Length of statuses", len(statuses))
        try:
            next_results = search_results['search_metadata']['next_results']
        except KeyError:  # No more results when next_results doesn't exist
            break
        # next_results looks like "?max_id=...&q=...&..." — parse into kwargs.
        kwargs = dict([kv.split('=') for kv in next_results[1:].split("&")])
        search_results = twitter_api.search.tweets(**kwargs)
        statuses += search_results['statuses']
    print(json.dumps(statuses[0], indent=1))
    status_texts = [status['text'] for status in statuses]
    # Flatten all tweet texts into a word list.
    words = [w for t in status_texts for w in t.split()]
    print(json.dumps(words[0:5], indent=1))
    print()
    # Load the AFINN-111 lexicon: one tab-delimited "term<TAB>score" per line.
    scores = {}
    with open('AFINN-111.txt') as sent_file:
        for line in sent_file:
            lex_term, lex_score = line.split("\t")
            scores[lex_term] = int(lex_score)
    total = 0
    for word in words:
        if word in scores:
            total += scores[word]
    return float(total)
# The user enters two search terms, receives each sentiment score, and is
# told which term has the higher sentiment.
q = input('Enter a search term: ')
senOne = sentiment(q)
print("Sentiment for " + q)
print(senOne)
p = input('Enter a second search term: ')
senTwo = sentiment(p)
print("Sentiment for " + p)
print(senTwo)
if senOne > senTwo:
    # FIX: original printed q + "has ..." with no separating space.
    print(q + " has a higher sentiment")
elif senOne == senTwo:
    print("The sentiment scores are equal")
else:
    print(p + " has a higher sentiment")
from matplotlib import pyplot as plt
import numpy as np
import cv2
# Rolling caches of the last two fitted left/right lane curves, used by
# draw_lane() to average the fit over three consecutive frames.
lpnts = []
rpnts = []
def show(image):
    """Display *image* in a window titled 'img'; block until any key is
    pressed, then close every OpenCV window."""
    cv2.imshow('img', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def get_perspective(img):
    """Warp the forward camera view of the road to a top-down
    (bird's-eye) 1280x720 view.

    The source quad (fractions of the input size) frames the lane area
    ahead of the car; the destination quad is the full output frame.

    FIX: removed the unused `h, w = np.shape(img)` unpacking — besides
    being dead code, it raised ValueError for 3-channel (color) input,
    where np.shape returns a 3-tuple.
    """
    dst_size = (1280, 720)
    dst = np.float32([(0, 0), (1, 0), (0, 1), (1, 1)])
    src = np.float32([(0.43, 0.65), (0.58, 0.65), (0.1, 1), (1, 1)])
    img_size = np.float32([(img.shape[1], img.shape[0])])
    pts2 = dst * np.float32(dst_size)
    pts1 = src * img_size
    M = cv2.getPerspectiveTransform(pts1, pts2)
    warped = cv2.warpPerspective(img, M, dst_size)
    return warped
def inv_perspective_warp(img,
                         dst_size=(1280,720),
                         src=np.float32([(0,0), (1, 0), (0,1), (1,1)]),
                         dst=np.float32([(0.43,0.65),(0.58,0.65),(0.1,1),(1,1)])):
    """Warp a bird's-eye-view image back into the camera perspective.

    `src` and `dst` are corner quads given as fractions: `src` is scaled
    by the input image size and `dst` by `dst_size` before computing the
    perspective transform.
    """
    scale = np.float32([(img.shape[1], img.shape[0])])
    # Given the scaled src and dst quads, compute the perspective matrix
    # and apply it in a single OpenCV warp.
    transform = cv2.getPerspectiveTransform(src * scale, dst * np.float32(dst_size))
    return cv2.warpPerspective(img, transform, dst_size)
def draw_lane(img, lane):
    """Detect the two lane lines in the warped binary image `img` with a
    sliding-window search, fit a quadratic to each, and return `lane`
    with the detected lane area blended on top.

    Uses the module-level lpnts/rpnts caches to average each fitted curve
    over the last three frames for temporal smoothing.

    FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `int` is the documented replacement. Also dropped the
    never-used left_a/left_b/left_c and right_a/right_b/right_c lists.
    """
    # Column histogram locates the starting x of each lane line.
    histogram = np.sum(img, axis=0)
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9
    window_height = int(img.shape[0]/nwindows)  # was np.int(...)
    # Identify the x and y positions of all nonzero pixels in the image.
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as the search walks up the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    draw_windows = True
    margin = 150  # half-width of each search window
    minpix = 1    # minimum pixels needed to recenter a window
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        win_y_low = img.shape[0] - (window+1)*window_height
        win_y_high = img.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        if draw_windows == True:
            cv2.rectangle(img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
                          (100,255,255), 3)
            cv2.rectangle(img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
                          (100,255,255), 3)
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))   # was np.int
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds])) # was np.int
    # show(img)
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each lane line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    color_img = np.zeros_like(lane)
    left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    # Rolling three-frame average of the curves (current + two cached).
    if len(rpnts) < 2:
        rpnts.append(right)
        lpnts.append(left)
    if len(rpnts) == 2:
        right = (right+rpnts[0]+rpnts[1])/3
        left = (left+lpnts[0]+lpnts[1])/3
        rpnts[0] = rpnts[1]
        rpnts[1] = right
        lpnts[0] = lpnts[1]
        lpnts[1] = left
    points = np.hstack((left, right))
    # Mark the fitted curves and fill the lane polygon between them.
    for pt in left[0]:
        cv2.circle(color_img ,(int(pt[0]),int(pt[1])), 7, (10,200,10), -1)
    for pt in right[0]:
        cv2.circle(color_img ,(int(pt[0]),int(pt[1])), 7, (10,200,10), -1)
    cv2.fillPoly(color_img, np.int_(points), (255,10,10))
    # Warp the overlay back to camera perspective and blend with the frame.
    h, w, c = np.shape(lane)
    inv = inv_perspective_warp(color_img, dst_size=(w, h))
    img = cv2.addWeighted(inv, 0.5, lane, 1, 0)
    return (img)
#----------------------------------------------
def load(lane):
    """Per-frame pipeline: resize to 1280x720, edge-detect the HLS
    lightness channel with a Sobel filter, warp to a top-down view,
    threshold to binary, then delegate to draw_lane() for detection and
    overlay. Returns the annotated frame.

    FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24 —
    the builtin `float` (same float64 dtype) replaces it. Also removed
    dead locals (an unused frame copy, an unused blur, the unused
    saturation channel and an unused shape unpacking).
    """
    lane = cv2.resize(lane, (1280,720), cv2.INTER_NEAREST)
    hls = cv2.cvtColor(lane, cv2.COLOR_RGB2HLS).astype(float)  # was np.float
    l_channel = hls[:,:,1]
    binary = l_channel
    # Diagonal Sobel gradient highlights lane-line edges.
    sobelx = cv2.Sobel(binary, cv2.CV_64F, 1, 1, ksize=1)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    perspect = get_perspective(scaled_sobel)
    # cv2.threshold returns (retval, image); keep the image.
    binary = cv2.threshold(perspect, 15, 255, cv2.THRESH_BINARY)
    binary = np.array(binary[1])
    # show(binary)
    img = binary
    return (draw_lane(img, lane))
#------------------------------------------------------------------------------------#
# Process the challenge video frame by frame, overlaying the detected lane.
cap = cv2.VideoCapture("challenge.mp4")
cnt = 0
while(1):
    ret, frame = cap.read()
    if ret == 0:
        # End of stream (or read failure).
        break
    if frame.max()<20:
        # Skip nearly-black frames, which would break the detection.
        continue
    img = load(frame)
    cv2.imshow("frame", img)
    if cv2.waitKey(1) == 27: ## 27 - ASCII for escape key
        break
######
cap.release()
cv2.destroyAllWindows()
#set adequate flag for Theano on lxplus
import theano
# Direct Theano's g++ invocations to target the lxplus CPUs.
theano.config.gcc.cxxflags = '-march=corei7'
#load needed things
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from keras.models import Sequential, Model
from keras.optimizers import SGD
from keras.layers import Input, Activation, Dense, Convolution2D, MaxPooling2D, Dropout, Flatten
from keras.utils import np_utils
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import EarlyStopping
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as pyp
import ROOT
import itertools
import math
# fix random seed for reproducibility of numpy-based initialisations/splits
seed = 7
np.random.seed(seed)
#boost particles towards Higgs referencial
#boost particles towards Higgs referencial
def boostToHiggs(p4Origin):
    """Boost every four-vector in p4Origin into the rest frame of the
    first entry (the Higgs candidate) and return them as a list.

    NOTE(review): ROOT's TLorentzVector.Boost modifies the vector in
    place, so the objects inside p4Origin themselves are altered — the
    returned list holds the same (now boosted) objects, not copies.
    Confirm callers do not need the lab-frame values afterwards.
    """
    HiggsBoostVector = p4Origin[0].BoostVector()
    p4Boosted = []
    for ip in range(len(p4Origin)):
        p4O = p4Origin[ip]
        p4O.Boost(-HiggsBoostVector)
        p4Boosted.append( p4O )
    return p4Boosted
#format the inputs from TTree
def formatInputs(files):
    """Read the HZZ4LeptonsAnalysisReduced tree of each file and build a
    flat per-event record for events in the VBF region
    (>= 2 passing jets, 118 <= m4l <= 130 GeV).

    Record layout (list indices): [0] label (1 = signal, 0 = background),
    [1..4] lepton (pt, eta, phi, pdgid), [5..7] jet (pt, eta, phi),
    [8] CMS VBF discriminant, [9] MELA discriminant, [10] event weight,
    [11] run, [12] lumi, [13] event number.

    NOTE(review): only the file at index `sigcode` (11, signal) and at
    index 9 (background) contribute — the bare `else: break` abandons
    any other file after its first in-region event. At most 201 events
    are kept per file (the `nacepted > 200` break). Confirm both are
    intended rather than leftovers of the commented-out `else` branch.
    """
    formated_inputs = []
    sigcode = 11  # position of the signal (VBF) sample in `files`
    for ifile in range(len(files)):
        tfile = ROOT.TFile.Open(files[ifile])
        tree = tfile.Get('HZZ4LeptonsAnalysisReduced')
        nacepted = 0
        for ievt, evt in enumerate(tree):
            if(evt.f_njets_pass >= 2 and evt.f_mass4l >= 118 and evt.f_mass4l <= 130): #VBF region
                event = []
                if(ifile == sigcode):
                    event.append(1)
                elif(ifile == 9):
                    event.append(0)
                #else:
                #  event.append(0)
                else:
                    break
                nacepted += 1
                # Four leptons: (pt, eta, phi, pdgid) each.
                lep = []
                lep.append( evt.f_lept1_pt )
                lep.append( evt.f_lept1_eta )
                lep.append( evt.f_lept1_phi )
                lep.append( evt.f_lept1_pdgid )
                #lep.append( evt.f_lept1_charge )
                event.append(lep)
                lep = []
                lep.append( evt.f_lept2_pt )
                lep.append( evt.f_lept2_eta )
                lep.append( evt.f_lept2_phi )
                lep.append( evt.f_lept2_pdgid )
                #lep.append( evt.f_lept2_charge )
                event.append(lep)
                lep = []
                lep.append( evt.f_lept3_pt )
                lep.append( evt.f_lept3_eta )
                lep.append( evt.f_lept3_phi )
                lep.append( evt.f_lept3_pdgid )
                #lep.append( evt.f_lept3_charge )
                event.append(lep)
                lep = []
                lep.append( evt.f_lept4_pt )
                lep.append( evt.f_lept4_eta )
                lep.append( evt.f_lept4_phi )
                lep.append( evt.f_lept4_pdgid )
                #lep.append( evt.f_lept4_charge )
                event.append(lep)
                # Up to three highest-pt jets: (pt, eta, phi) each.
                jet = []
                jet.append( evt.f_jet1_highpt_pt )
                jet.append( evt.f_jet1_highpt_eta )
                jet.append( evt.f_jet1_highpt_phi )
                #jet.append( evt.f_jet1_highpt_e*(ROOT.TMath.CosH(evt.f_jet1_highpt_eta)) )
                event.append(jet)
                jet = []
                jet.append( evt.f_jet2_highpt_pt )
                jet.append( evt.f_jet2_highpt_eta )
                jet.append( evt.f_jet2_highpt_phi )
                #jet.append( evt.f_jet2_highpt_e*(ROOT.TMath.CosH(evt.f_jet2_highpt_eta)) )
                event.append(jet)
                jet = []
                jet.append( evt.f_jet3_highpt_pt )
                jet.append( evt.f_jet3_highpt_eta )
                jet.append( evt.f_jet3_highpt_phi )
                #jet.append( evt.f_jet3_highpt_e*(ROOT.TMath.CosH(evt.f_jet3_highpt_eta)) )
                event.append(jet)
                event.append(evt.f_D_jet) #CMS VBF discriminant
                event.append(evt.f_Djet_VAJHU) #MELA
                event.append(evt.f_weight) #event weight
                event.append(evt.f_run)
                event.append(evt.f_lumi)
                event.append(evt.f_event)
                formated_inputs.append(event)
                if(nacepted > 200):
                    break
        print ('Processed (%i)' % nacepted), tfile.GetName()
        if(ifile == sigcode):
            print '>>> SIGNAL FILE <<<'
    return formated_inputs
#loads input data
# Load the per-channel ntuple file lists (one filename per line) and prefix
# each entry with its channel directory to get full paths.
# NOTE(review): the three list files are never closed; a `with` block (or
# explicit close) would be cleaner.
file_names2e2mu = open('/afs/cern.ch/work/m/mmelodea/private/Higgs_Ntuples/files2016/histos2e2mu_25ns/filelist_2e2mu_2016_Spring16_AN_Bari_MC.txt','r')
file_names4e = open('/afs/cern.ch/work/m/mmelodea/private/Higgs_Ntuples/files2016/histos4e_25ns/filelist_4e_2016_Spring16_AN_Bari_MC.txt','r')
file_names4mu = open('/afs/cern.ch/work/m/mmelodea/private/Higgs_Ntuples/files2016/histos4mu_25ns/filelist_4mu_2016_Spring16_AN_Bari_MC.txt','r')
path = '/afs/cern.ch/work/m/mmelodea/private/Higgs_Ntuples/files2016/'
files2e2mu = [path+'histos2e2mu_25ns/'+i.rstrip() for i in file_names2e2mu.readlines()]
files4e = [path+'histos4e_25ns/'+i.rstrip() for i in file_names4e.readlines()]
files4mu = [path+'histos4mu_25ns/'+i.rstrip() for i in file_names4mu.readlines()]
# Build the flat event records per final-state channel.
events2e2mu = formatInputs(files2e2mu)
events4e = formatInputs(files4e)
events4mu = formatInputs(files4mu)
print 'events2e2mu: %i' % len(events2e2mu)
print 'events4e: %i' % len(events4e)
print 'events4mu: %i' % len(events4mu)
def include_subjets(events, subjets_files):
nevents = len(events)
for ie in range(nevents):
print 'Remaining %i' % (nevents-ie)
run = events[ie][11]
event = events[ie][12]
lumi = events[ie][13]
for i in range(len(subjets_files)):
tfile = ROOT.TFile.Open(subjets_files[i])
tree = tfile.Get('JetImage')
for ievt, evt in enumerate(tree):
Run = evt.Run
Event = evt.Event
Lumi = evt.Lumi
if(Run == run and Event == event and Lumi == lumi):
subjets = []
for sbj in range(len(evt.SubJetPt)):
eta = [ieta for ieta in evt.SubJetEta[sbj]]
phi = [iphi for iphi in evt.SubJetPhi[sbj]]
pt = [ipt for ipt in evt.SubJetPt[sbj]]
subjets.append( [eta, phi, pt] )
events[ie].append( subjets )
break
return events
#subjets
# Sub-jet image files for the two signal-like processes (VBF and ggH).
file_vbf = '/afs/cern.ch/work/m/mmelodea/private/MonoHiggs/CMSSW_9_0_0/src/JetImageFiles/VBF_HToZZTo4L_M125_13TeV_powheg2_JHUgenV6_pythia8.root'
file_ggh = '/afs/cern.ch/work/m/mmelodea/private/MonoHiggs/CMSSW_9_0_0/src/JetImageFiles/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUgenV6_pythia8.root'
subjets_files = [file_vbf, file_ggh]
# Merge the three final-state samples into one flat list before
# attaching sub-jet information.
events = []
for iv in events2e2mu:
    events.append(iv)
for iv in events4e:
    events.append(iv)
for iv in events4mu:
    events.append(iv)
events = include_subjets(events, subjets_files)
print events[0]
#reconstruct Z's and Higgs and organize things
# NOTE(review): this rebinds `events` to the per-channel lists, discarding
# the merged list (with sub-jets attached) built just above -- confirm the
# sub-jet step is really meant to be thrown away here.
events = [events2e2mu, events4e, events4mu]
full_event = {}
full_event['signal'] = []
full_event['background'] = []
# cap on the number of events kept per channel and per class
max_per_ch = 1000000
for ich in range(len(events)):
    snch = 0
    bnch = 0
    for iev in range(len(events[ich])):
        # Build the four lepton 4-vectors from (pt, eta, phi) plus a mass
        # hypothesis: 0.106 GeV for muons (|pdgId| == 13), 0 otherwise
        # (electrons treated as massless).
        lp4 = [ROOT.TLorentzVector() for i in range(4)]
        for il in range(4):
            mass = 0.
            if(abs(events[ich][iev][il+1][3]) == 13):
                mass = 0.106
            lp4[il].SetPtEtaPhiM(events[ich][iev][il+1][0],events[ich][iev][il+1][1],events[ich][iev][il+1][2],mass)
        # Z candidates from lepton pairs, Higgs candidate from the two Zs.
        z1 = lp4[0] + lp4[1]
        z2 = lp4[2] + lp4[3]
        h = z1 + z2
        # events[ich][iev][0] is the signal flag set by formatInputs.
        if(events[ich][iev][0] == 1):
            snch += 1
            if(snch > max_per_ch):
                continue
            full_event['signal'].append([
              [h.Pt(),h.Eta(),h.Phi(),h.E()],                      #Higgs p4
              [z1.Pt(),z1.Eta(),z1.Phi(),z1.E()],                  #Z1 p4
              [z2.Pt(),z2.Eta(),z2.Phi(),z2.E()],                  #Z2 p4
              [lp4[0].Pt(),lp4[0].Eta(),lp4[0].Phi(),lp4[0].E()],  #l1 p4
              [lp4[1].Pt(),lp4[1].Eta(),lp4[1].Phi(),lp4[1].E()],  #l2 p4
              [lp4[2].Pt(),lp4[2].Eta(),lp4[2].Phi(),lp4[2].E()],  #l3 p4
              [lp4[3].Pt(),lp4[3].Eta(),lp4[3].Phi(),lp4[3].E()],  #l4 p4
              events[ich][iev][5],                                 #j1 p4
              events[ich][iev][6],                                 #j2 p4
              events[ich][iev][7],                                 #j3 p4
              events[ich][iev][8],                                 #Djet
              events[ich][iev][9]                                  #MELA
            ])
        else:
            bnch += 1
            if(bnch > max_per_ch):
                continue
            full_event['background'].append([
              [h.Pt(),h.Eta(),h.Phi(),h.E()],                      #Higgs p4
              [z1.Pt(),z1.Eta(),z1.Phi(),z1.E()],                  #Z1 p4
              [z2.Pt(),z2.Eta(),z2.Phi(),z2.E()],                  #Z2 p4
              [lp4[0].Pt(),lp4[0].Eta(),lp4[0].Phi(),lp4[0].E()],  #l1 p4
              [lp4[1].Pt(),lp4[1].Eta(),lp4[1].Phi(),lp4[1].E()],  #l2 p4
              [lp4[2].Pt(),lp4[2].Eta(),lp4[2].Phi(),lp4[2].E()],  #l3 p4
              [lp4[3].Pt(),lp4[3].Eta(),lp4[3].Phi(),lp4[3].E()],  #l4 p4
              events[ich][iev][5],                                 #j1 p4
              events[ich][iev][6],                                 #j2 p4
              events[ich][iev][7],                                 #j3 p4
              events[ich][iev][8],                                 #Djet
              events[ich][iev][9]                                  #MELA
            ])
print '# Sig Events: %i' % len(full_event['signal'])
print '# Bkg Events: %i' % len(full_event['background'])
#print full_event['signal'][0]
# Run classifier with cross-validation and plot ROC curves
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from scipy import interp
#esY = np.ones(len(full_event['signal']))
#ebY = np.zeros(len(full_event['background']))
#Yexp = np.concatenate([esY, ebY])
#encoder = LabelEncoder()
#encoder.fit(Yexp)
#encoded_Y = encoder.transform(Yexp)
#print encoded_Y
#osY = [full_event['signal'][i][8] for i in range(len(full_event['signal']))]
#obY = [full_event['background'][i][8] for i in range(len(full_event['background']))]
#Yobs = np.concatenate([osY,obY])
#fpr, tpr, thresholds = roc_curve(encoded_Y, Yobs)
#roc_auc = auc(fpr, tpr)
#print '----->> Djet ROC area (all events): %.2f' % roc_auc
#osY = [full_event['signal'][i][9] for i in range(len(full_event['signal']))]
#obY = [full_event['background'][i][9] for i in range(len(full_event['background']))]
#Yobs = np.concatenate([osY,obY])
#fpr, tpr, thresholds = roc_curve(encoded_Y, Yobs)
#roc_auc = auc(fpr, tpr)
#print '----->> MELA ROC area (all events): %.2f' % roc_auc
#shuffle the channels
# In-place shuffles so the per-channel ordering doesn't bias the split.
np.random.shuffle(full_event['signal'])
np.random.shuffle(full_event['background'])
from keras.layers import LSTM, Bidirectional, Masking
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.optimizers import SGD, Adam
# Bidirectional LSTM Model
def build_bilstm_model(n_cand_per_jet, features_per_jet):
    """Build and compile a bidirectional-LSTM binary classifier.

    The network reads a (n_cand_per_jet, features_per_jet) sequence,
    masks 0-padded timesteps, runs a Bidirectional LSTM(50) over it and
    ends in a single sigmoid unit trained with binary cross-entropy.
    """
    main_input = Input(shape=(n_cand_per_jet, features_per_jet,), name='main_input')
    # The masking layer keeps the LSTM from reading the 0-padded values.
    hidden = Masking()(main_input)
    # The Bidirectional wrapper makes the LSTM read the sequence both
    # start-to-end and end-to-start, summarising it into one vector.
    hidden = Bidirectional(LSTM(50))(hidden)
    prediction = Dense(1, activation='sigmoid', name='aux_output')(hidden)
    # Old-style Keras keywords (input=/output=) kept to match the Keras
    # version this script was written against.
    model = Model(input=[main_input], output=[prediction])
    model.compile(loss='binary_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
    return model
njets = 61000
n_cand_per_jet = 10
# Kinematic components fed to the LSTM per object (others disabled).
features = {
  'pt' : None,
  'eta': None
  #'phi': None,
  #'e'  : None
}
features_per_jet = len(features)
model = build_bilstm_model(n_cand_per_jet, features_per_jet)
print model.summary()
momentum_input = {}
momentum_input['signal'] = np.zeros((njets, n_cand_per_jet, features_per_jet))  # 3 momentum
momentum_input['background'] = np.zeros((njets, n_cand_per_jet, features_per_jet))  # 3 momentum
# Copy the first njets events into fixed-size arrays; -999. is the
# padding sentinel, and `break` stops at the first padded feature.
# NOTE(review): assumes full_event['signal'] and ['background'] each
# contain at least njets entries -- confirm upstream counts.
for i in range(njets):
    for j in range(n_cand_per_jet):
        for iprop in range(features_per_jet):
            if(full_event['signal'][i][j][iprop] != -999.):
                momentum_input['signal'][i][j][iprop] = full_event['signal'][i][j][iprop]
            else:
                break
        for iprop in range(features_per_jet):
            if(full_event['background'][i][j][iprop] != -999.):
                momentum_input['background'][i][j][iprop] = full_event['background'][i][j][iprop]
            else:
                break
#train the network
# NOTE(review): the Y_TT/Y_QCD names look inherited from a jet-tagging
# example; here they are simply signal (1) / background (0) labels.
X = np.concatenate([momentum_input['signal'], momentum_input['background']])
Y_TT = np.ones(momentum_input['signal'].shape[0])
Y_QCD = np.zeros(momentum_input['background'].shape[0])
Y = np.concatenate([Y_TT, Y_QCD])
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# 2-fold stratified cross-validation with early stopping on val_loss.
kfold = StratifiedKFold(n_splits=2, shuffle=True, random_state=seed)
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange', 'red', 'black', 'green', 'brown'])
lw = 2
i = 0
histories = []
for i,((train, test), color) in enumerate(zip(kfold.split(X, encoded_Y), colors)):
    print "\t\tFold",i
    # A fresh model per fold so folds don't share weights.
    bilstm_model = build_bilstm_model(n_cand_per_jet, features_per_jet)
    history = bilstm_model.fit(X[train], encoded_Y[train],
                        validation_data=(X[test], encoded_Y[test]),
                        epochs=100, batch_size=128,
                        verbose=2, callbacks=[early_stopping])
    Y_score = bilstm_model.predict(X[test])
    histories.append(history)
    # Compute ROC curve and area the curve
    fpr, tpr, thresholds = roc_curve(encoded_Y[test], Y_score)
    # Interpolate onto a common FPR grid so fold TPRs can be averaged.
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    roc_auc = auc(fpr, tpr)
    pyp.plot(fpr, tpr, lw=lw, color=color, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
pyp.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k', label='Luck')
mean_tpr /= kfold.get_n_splits(X, encoded_Y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
print 'ROC AUC: %.2f' % mean_auc
#pyp.plot(mean_fpr, mean_tpr, color='g', linestyle='--',label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
#pyp.xlim([0, 1.0])
#pyp.ylim([0, 1.0])
#pyp.xlabel('False Positive Rate')
#pyp.ylabel('True Positive Rate')
#pyp.title('Receiver operating characteristic example')
#pyp.legend(loc="lower right")
#pyp.show()
#over all events
# Rebuild the padded arrays over ALL events (not just the first njets)
# and score them with the model from the LAST cross-validation fold.
# NOTE(review): evaluating the last fold's model on data it trained on
# inflates this final AUC -- treat it as a sanity check only.
momentum_input['signal'] = np.zeros((len(full_event['signal']), n_cand_per_jet, features_per_jet))  # 3 momentum
momentum_input['background'] = np.zeros((len(full_event['background']), n_cand_per_jet, features_per_jet))  # 3 momentum
for i in range(len(full_event['signal'])):
    for j in range(n_cand_per_jet):
        for iprop in range(features_per_jet):
            if(full_event['signal'][i][j][iprop] != -999.):
                momentum_input['signal'][i][j][iprop] = full_event['signal'][i][j][iprop]
            else:
                break
for i in range(len(full_event['background'])):
    for j in range(n_cand_per_jet):
        for iprop in range(features_per_jet):
            if(full_event['background'][i][j][iprop] != -999.):
                momentum_input['background'][i][j][iprop] = full_event['background'][i][j][iprop]
            else:
                break
X = np.concatenate([momentum_input['signal'], momentum_input['background']])
Y_TT = np.ones(momentum_input['signal'].shape[0])
Y_QCD = np.zeros(momentum_input['background'].shape[0])
Y = np.concatenate([Y_TT, Y_QCD])
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
Y_score = bilstm_model.predict(X)
fpr, tpr, thresholds = roc_curve(encoded_Y, Y_score)
roc_auc = auc(fpr, tpr)
print 'ROC AUC: %.2f' % roc_auc
|
import sys

from number_processing.parse_file_lines import parse_file_lines

if __name__ == '__main__':
    # Check argv length explicitly: the previous try/except IndexError
    # wrapped the whole body, so an IndexError raised *inside*
    # parse_file_lines was silently misreported as 'Missing file name'.
    if len(sys.argv) < 2:
        print('Missing file name')
    else:
        # Print each parsed sentence on its own line.
        for sentence in parse_file_lines(sys.argv[1]):
            print(sentence)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Base Class for Discriminator CNN
class Discriminator(nn.Module):
    """DCGAN-style discriminator CNN.

    Five stride-2 convolutions, each halving the spatial resolution,
    followed by a linear layer mapping the flattened features to a
    single real/fake probability.

    NOTE: the flattened size ``conv_dim*4*4`` equals ``conv_dim*16``
    channels times a 1x1 spatial map, which holds for 32x32 RGB input
    -- confirm the expected input resolution with callers.
    """

    def __init__(self, conv_dim):
        """conv_dim: number of output channels of the first conv layer."""
        super(Discriminator, self).__init__()
        self.conv_dim = conv_dim
        # 3 x H x W -> conv_dim x H/2 x W/2, channels doubling per layer.
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(conv_dim)
        self.conv2 = nn.Conv2d(conv_dim, conv_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(conv_dim*2)
        self.conv3 = nn.Conv2d(conv_dim*2, conv_dim*4, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm3 = nn.BatchNorm2d(conv_dim*4)
        self.conv4 = nn.Conv2d(conv_dim*4, conv_dim*8, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm4 = nn.BatchNorm2d(conv_dim*8)
        self.conv5 = nn.Conv2d(conv_dim*8, conv_dim*16, kernel_size=4, stride=2, padding=1, bias=False)
        self.fc = nn.Linear(conv_dim*4*4, 1)

    def forward(self, x):
        """Return per-sample real/fake probabilities of shape (N, 1)."""
        x = F.leaky_relu(self.batch_norm1(self.conv1(x)), 0.2)
        x = F.leaky_relu(self.batch_norm2(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.batch_norm3(self.conv3(x)), 0.2)
        x = F.leaky_relu(self.batch_norm4(self.conv4(x)), 0.2)
        # no batch norm on the last conv, per DCGAN convention
        x = self.conv5(x)
        x = x.view(-1, self.conv_dim*4*4)
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        x = torch.sigmoid(self.fc(x))
        return x
|
import sys

# memo[k] = number of ordered ways to write k as a sum of 1s, 2s and 3s.
# Shared across test cases so the table is extended at most once per
# maximum n (the original rebuilt it from scratch for every test case).
_memo = [0, 1, 2, 4]


def count_123_sums(n):
    """Return how many ordered sums of 1, 2 and 3 total n (n >= 1).

    Recurrence: D(n) = D(n-1) + D(n-2) + D(n-3) for n >= 4, with base
    cases D(1)=1, D(2)=2, D(3)=4.
    """
    while len(_memo) <= n:
        _memo.append(_memo[-1] + _memo[-2] + _memo[-3])
    return _memo[n]


if __name__ == '__main__':
    # First line: number of test cases; then one n per line.
    tc = int(sys.stdin.readline())
    for _ in range(tc):
        print(count_123_sums(int(sys.stdin.readline())))

'''
1 2 3 더하기 문제
어떤 수가 주어지면 1과 2와 3을 가지고 더해서 그 수를 만들 수 있는 방법이 몇가지 인지 찾아내면 된다.
기본적으로 1은 한가지, 2는 두가지, 3은 4가지를 가지고 있다.
이것들을 초기값으로 정해주고
각 입력숫자 별로 경우의 수가 몇가지 인지 나열해보면
1 : 1
2 : 2
3 : 4
4 : 7
이런식으로 나오고 실제 각각의 경우의 수가 어떤 식으로 구현되는지 (1+1+2 등)
확인해보면 4부터는 이전의 값들을 더한 값이 나오는 걸 확인할 수 있다.
따라서 점화식은 다음과 같이 나온다.
D(n) = D(n-1) + D(n-2) + D(n-3) //// n 이 4 이상일때
'''
from django.conf.urls import patterns, include, url

# URL routes for leo_app, using string view references.
# NOTE(review): patterns() and string view arguments were deprecated in
# Django 1.8 and removed in 1.10 -- this module targets an older Django.
# `include` is imported but unused here.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'leo_app.views.index'), # root
    url(r'^login$', 'leo_app.views.login_view'), # login
    url(r'^logout$', 'leo_app.views.logout_view'), # logout
    url(r'^signup$', 'leo_app.views.signup'), # signup
    url(r'^footprint/$', 'leo_app.views.footprint'), # road safety foot print
    url(r'^submit$', 'leo_app.views.submit'), # submit new ribbit
    url(r'^users/$', 'leo_app.views.users'),
    url(r'^users/(?P<username>\w{0,30})/$', 'leo_app.views.users'),
    url(r'^user_profile/(?P<username>\w{0,30})/$', 'leo_app.views.user_profile'),
    url(r'^user_profile/$', 'leo_app.views.user_profile'),
    url(r'^vehicle_owner/$', 'leo_app.views.vehicle_owner'),
    url(r'^vehicle_lov/$', 'leo_app.views.vehicle_lov'),
    url(r'^infridgement_lov/$', 'leo_app.views.infridgement_lov'),
)
# Serve static files through Django itself (development use only).
urlpatterns += patterns('django.contrib.staticfiles.views',
    url(r'^static/(?P<path>.*)$', 'serve'),
)
# pos_year_1 = SubjectReferral.objects.filter(hiv_result='POS',subject_visit__appointment__visit_definition__code='T0', subject_visit__household_member__household_structure__household__plot__community__in=['digawana', 'ranaka', 'molapowabojang', 'otse'])
# pos_year_1 = pos_year_1.order_by('subject_visit__household_member__household_structure__household__plot__community')
# new_pos_year_2=[]
# count = 0
# for ref in pos_year_1:
# try:
# subject_ref = SubjectReferral.objects.get(subject_identifier=ref.subject_identifier, new_pos=True, subject_visit__appointment__visit_definition__code='T1')
# new_pos_year_2.append(subject_ref)
# except:
# pass
# count = count + 1
# print count
#
# f = open('/home/django/incorrect_new_pos.txt','w')
#
# for entry in new_pos_year_2:
# f.write('{},{},{},{},{},\n'.format(entry.subject_identifier, entry.hiv_result, entry.referral_code, entry.new_pos, entry.subject_visit.household_member.household_structure.household.plot.community))
|
class AudiCar:
    """Catalogue of the Audi models known to the dealership."""

    def __init__(self):
        # Hard-coded model line-up ('343' kept as-is from the source data).
        self.models = ['q7', 'a8', 'a3', '343']

    def out_models(self):
        """Print each known model on its own line and return ''."""
        print("Existing models are: ")
        print("\n".join(self.models))
        return ""
class AudiSubDealers:
    """Catalogue of the Audi sub-dealer identifiers."""

    def __init__(self):
        self.dealers = ['1', '2', '3', '4']

    def out_dealers(self):
        """Print every dealer id, prefixed with 'Dealer'; returns None."""
        print("Existing dealers are: ")
        for ident in self.dealers:
            print("Dealer" + ident)
# -*- coding: utf-8 -*-
from BasicTest import BasicTest
from pages.DirectoryPage import DirectoryPage
from pages.main_page.letters.LetterSelector import LetterSelector
from pages.main_page.menu.navigation.NavigationManager import NavigationManager
from pages.MainPage import MainPage
from config import config
class DirectoryTest(BasicTest):
    """Selenium UI tests for mailbox directory actions (archive, social,
    newsletters, importance flags, drafts and send-error handling).

    Each test records the subject it created in ``self.letter_subject``
    so tearDown can clean the letter up.
    """

    def setUp(self):
        """Open the directory page, authenticate and grab the letter manager."""
        super(DirectoryTest, self).setUp()
        self.directory_page = DirectoryPage(self.driver)
        self.directory_page.open()
        self.auth()
        self.main_page = MainPage(self.driver)
        self.main_page.hide_app_loader()
        self.manager = self.main_page.letter_manager

    def test_move_to_archive(self):
        """A letter moved to the archive is findable in the archive folder."""
        self.letter_subject = self.add_random_number('Mail for archive ')
        LETTER_TEXT = 'Lorem text for archive'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.manager.letter_selector.select_letter(self.letter_subject)
        self.directory_page.move_to_archive()
        self.directory_page.click_nav_archive_button()
        letter_selector = LetterSelector(self.driver)
        letter_selector.find_letter_subject_real(self.letter_subject)

    def test_move_to_inbox_from_archive(self):
        """An archived letter restored to the inbox is findable there."""
        self.letter_subject = self.add_random_number('$$$ Archive ')
        LETTER_TEXT = 'Lorem text for archive'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.manager.letter_selector.select_letter(self.letter_subject)
        self.directory_page.move_to_archive()
        self.directory_page.click_nav_archive_button()
        self.manager.restore_letter(self.letter_subject)
        navigation_manager = NavigationManager(self.driver)
        navigation_manager.go_to_inbox()
        letter_selector = LetterSelector(self.driver)
        letter_selector.find_letter_subject_real(self.letter_subject)

    def test_set_important_letter(self):
        """Setting the importance flag marks the letter as important."""
        self.letter_subject = self.add_random_number('The IMPORTANT letter ')
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.directory_page.set_check_flag()
        self.assertTrue(self.directory_page.get_important_status())

    def test_unset_important_letter(self):
        """Clearing the importance flag removes the important mark."""
        self.letter_subject = self.add_random_number('The UNimportant letter ')
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.directory_page.set_check_flag()
        self.directory_page.unset_check_flag()
        self.assertFalse(self.directory_page.get_important_status())

    def test_move_to_social(self):
        """A letter moved to the social folder is findable there."""
        self.letter_subject = self.add_random_number('The SOCIAL letter ')
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.manager.letter_selector.select_letter(self.letter_subject)
        self.directory_page.move_to_social()
        self.directory_page.go_to_social()
        letter_selector = LetterSelector(self.driver)
        letter_selector.find_letter_subject_real(self.letter_subject)

    def test_move_to_inbox_from_social(self):
        """A social-folder letter restored to the inbox is findable there."""
        self.letter_subject = self.add_random_number('not SOCIAL letter ')
        LETTER_TEXT = 'Lorem text for archive'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.manager.letter_selector.select_letter(self.letter_subject)
        self.directory_page.move_to_social()
        self.directory_page.go_to_social()
        self.manager.restore_letter(self.letter_subject)
        navigation_manager = NavigationManager(self.driver)
        navigation_manager.go_to_inbox()
        letter_selector = LetterSelector(self.driver)
        letter_selector.find_letter_subject_real(self.letter_subject)

    def test_move_to_newsletters(self):
        """A letter can be moved to, and the user navigated to, newsletters."""
        self.letter_subject = self.add_random_number('The NewsLetter letter ')
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.write_letter(config.DEFAULT_MAIL, self.letter_subject, LETTER_TEXT)
        self.manager.letter_selector.select_letter(self.letter_subject)
        self.directory_page.move_to_newsletters()
        self.directory_page.go_to_newsletters()

    def test_send_empty_letter(self):
        """Sending with no recipient shows the 'no recipient' error."""
        self.manager.letter_writer.click_write_letter_button()
        self.manager.letter_writer.click_send_letter_button()
        self.letter_subject = ''
        # Russian UI message: "Recipient address not specified".
        EXPECTED_MESSAGE = u'Не указан адрес получателя'
        self.assertEqual(EXPECTED_MESSAGE, self.directory_page.error_message())

    def test_send_letter_without_subject(self):
        """A letter with no subject is delivered under the default subject."""
        self.letter_subject = ''
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.letter_writer.click_write_letter_button()
        self.manager.letter_writer.enter_email_receiver(config.DEFAULT_MAIL)
        self.manager.letter_writer.enter_textbox(LETTER_TEXT)
        self.manager.letter_writer.click_send_letter_button()
        self.manager.letter_writer.close_sent_window()
        letter_selector = LetterSelector(self.driver)
        # the UI substitutes its placeholder subject for empty subjects
        self.letter_subject = self.directory_page.empty_subject_text
        actual_text = letter_selector.get_letter_text(self.letter_subject)
        self.assertEqual(LETTER_TEXT, actual_text)

    def test_send_letter_without_receiver(self):
        """Sending with a subject but no recipient shows the error banner."""
        self.letter_subject = 'Subject letter'
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.letter_writer.click_write_letter_button()
        self.manager.letter_writer.enter_subject(self.letter_subject)
        self.manager.letter_writer.enter_textbox(LETTER_TEXT)
        self.manager.letter_writer.click_send_letter_button()
        self.letter_subject = ''
        EXPECTED_MESSAGE = u'Не указан адрес получателя'
        self.assertEqual(EXPECTED_MESSAGE, self.directory_page.error_message())

    def test_save_draft_letter(self):
        """A saved draft keeps its body text in the drafts folder."""
        self.letter_subject = self.add_random_number('Draft letter ')
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.letter_writer.click_write_letter_button()
        self.manager.letter_writer.enter_subject(self.letter_subject)
        self.manager.letter_writer.enter_textbox(LETTER_TEXT)
        self.directory_page.click_save_mail()
        self.directory_page.close_writer_window()
        self.directory_page.go_to_drafts()
        letter_selector = LetterSelector(self.driver)
        actual_text = letter_selector.get_mini_letter_text(self.letter_subject)
        self.directory_page.close_writer_window()
        self.assertEqual(LETTER_TEXT, actual_text)

    def test_send_draft_letter(self):
        """A saved draft can be reopened and sent."""
        self.letter_subject = self.add_random_number('Send draft letter ')
        LETTER_TEXT = 'Lorem text lorem lorem lorem'
        self.manager.letter_writer.click_write_letter_button()
        self.manager.letter_writer.enter_email_receiver(config.DEFAULT_MAIL)
        self.manager.letter_writer.enter_subject(self.letter_subject)
        self.manager.letter_writer.enter_textbox(LETTER_TEXT)
        self.directory_page.click_save_mail()
        self.directory_page.close_writer_window()
        self.directory_page.go_to_drafts()
        self.directory_page.open_draft()
        self.manager.letter_writer.click_send_letter_button()
        self.manager.letter_writer.close_sent_window()

    def tearDown(self):
        """Remove the created letter from the inbox unless the test left it
        in another folder (archive/social/newsletters/drafts), then quit."""
        if self.letter_subject != '':
            cond1 = 'Mail for archive' not in self.letter_subject
            cond2 = '$$$ Archive' not in self.letter_subject
            cond3 = 'The SOCIAL letter' not in self.letter_subject
            cond4 = 'not SOCIAL letter' not in self.letter_subject
            cond5 = 'The NewsLetter letter' not in self.letter_subject
            cond6 = 'Draft letter ' not in self.letter_subject
            if cond1 and cond2 and cond3 and cond4 and cond5 and cond6:
                self.main_page.navigation_manager.go_to_inbox()
                self.manager.remove_letter(self.letter_subject)
        self.driver.quit()
|
from django.shortcuts import render, get_object_or_404
from .models import Page, Contact
from .forms import ContactForm
from musicae_web import settings
from django.core.mail import EmailMessage
from django.template.loader import get_template
from django.utils.translation import gettext_lazy as _
from operator import __add__, __sub__
def PageView(request, link):
    """Render a generic CMS page looked up by its link slug (404 if absent)."""
    page = get_object_or_404(Page, link=link)
    return render(request, 'musicae_base/genericPage.html', {
        "content": page.content,
        "title": f"{page.link.text} - Fundamenta Musicae",
        "description": page.description,
        "keywords": page.keywords,
    })
# Bulgarian captcha instruction templates, keyed by arithmetic operator:
# "Add {number} to every digit" / "Subtract {number} from every digit".
ops_text = {
    __add__: _("Добавете {number} към всяка цифра"),
    __sub__: _("Извадете {number} от всяка цифра"),
}
# Bulgarian number words used to fill {number} in the templates above.
mods_text = {
    1: _("едно"),
    2: _("две"),
    3: _("три"),
}
def contact(request):
    """Contact-form view: validates the form, records a Contact row and
    e-mails the submission to settings.CONTACT_EMAILS.

    Renders musicae_base/contact.html with CSS classes that toggle the
    success/failure banners and a captcha instruction string built from
    settings.challenge.
    """
    # https://hellowebbooks.com/news/tutorial-setting-up-a-contact-form-with-django/
    form = ContactForm(request.POST or None)
    failClass = "w3-hide"
    successClass = "w3-hide"
    # NOTE(review): debug print left in the production code path.
    print(f"view: {settings.challenge.mod} {settings.challenge.op}")
    if request.method == 'POST':
        if form.is_valid():
            contact_name = request.POST.get('contact_name', '')
            contact_email = request.POST.get('contact_email', '')
            form_content = request.POST.get('content', '')
            # NOTE(review): the Contact row is saved without any of the
            # submitted fields -- presumably it only supplies the pk used
            # in the e-mail subject; confirm this is intended.
            c = Contact()
            c.save(force_insert=True)
            template = get_template('musicae_base/contact_email.txt')
            mail_context = {
                'contact_name': contact_name,
                'contact_email': contact_email,
                'form_content': form_content,
            }
            content = template.render(mail_context)
            email = EmailMessage(
                f"contact form submission #{c.pk}", #
                content,
                #"Fundamenta Musicae",
                to=settings.CONTACT_EMAILS,
                headers={'Reply-To': contact_email}
            )
            # print(email)
            email.send()
            # fresh unbound form + success banner after sending
            form = ContactForm()
            successClass = "w3-show"
        else:
            # re-binds the form (it is already bound above); kept as-is
            form = ContactForm(request.POST)
            failClass = "w3-show"
    else:
        form = ContactForm()
    context = {
        'form': form,
        'capInstruction': ops_text[settings.challenge.op].format(number=mods_text[settings.challenge.mod]),
        'successClass': successClass,
        'failClass': failClass,
    }
    return render(request, 'musicae_base/contact.html', context)
|
import types
# Python 2 type tuples (types.IntType etc. were removed in Python 3;
# this module is Python 2 only).
IntTypes = (types.IntType, types.LongType)
NumberTypes = (types.IntType, types.LongType,
               types.FloatType, types.ComplexType)
def isNumber(maybe_num):
    """Return True when *maybe_num* looks like a number.

    Accepts real numeric types, digit-only strings, and anything that
    float() can parse (e.g. "1.5", "-2e3"); None is never a number.
    """
    if maybe_num is not None and (
            isinstance(maybe_num, NumberTypes) or
            (isinstance(maybe_num, basestring) and maybe_num.isdigit())):
        return True
    # fall back to float() parsing for values like "1.5" or "-2e3"
    try:
        float(maybe_num)
    except (ValueError, TypeError):
        return False
    return True
def isInteger(maybe_num):
    """Return True when *maybe_num* is an integer, or a string spelling one.

    Accepts int/long values (including 0, which the previous version
    wrongly rejected because it tested truthiness instead of None-ness)
    and strings such as "42", " -7 ", "+3".
    """
    if maybe_num is None:
        return False
    if isinstance(maybe_num, IntTypes):
        return True
    if isinstance(maybe_num, basestring):
        stripped = maybe_num.strip()
        if not stripped:
            # whitespace-only input used to raise IndexError below
            return False
        return stripped.isdigit() or \
            (stripped[0] in "+-" and stripped[1:].isdigit())
    return False
|
# Generated by Django 2.2.5 on 2019-12-02 16:31
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration joining the two divergent 0066 branches.

    Performs no schema changes itself (empty operations).
    """

    dependencies = [
        ('openbook_posts', '0066_auto_20191129_1630'),
        ('openbook_posts', '0066_trendingpost'),
    ]

    operations = [
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-28 21:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Seed a fallback 'divers' category and attach recettes to it.

    NOTE(review): the new FK defaults to pk=1, which is assumed to be
    the row inserted by the RunSQL above -- confirm on databases where
    pk 1 might already be taken.
    """

    dependencies = [
        ('recettes', '0008_categorie'),
    ]

    operations = [
        migrations.RunSQL(
            [("INSERT INTO recettes_categorie (code, description) VALUES (%s,%s);", ['divers', 'catégorie non définie'])]
        ),
        migrations.AddField(
            model_name='recette',
            name='categorie',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='recettes.Categorie'),
        ),
    ]
|
import h5py
import numpy as np
from scipy.signal import convolve
class BG():
    """Loader/accessor for basal-ganglia stop/go-task recordings.

    Parses an HDF5 file into a list of Trial objects and exposes
    helpers for binned spikes, smoothed firing rates, and trial subsets
    selected by experimental condition.
    """

    def __init__(self, data_path):
        """Processes and provides design/response matrices for the basal
        ganglia recordings during stop/go tasks from the Berkes Lab."""
        self.data_path = data_path
        # populate list of trials
        self.trials, self.bad_trials, self.good_units = \
            self.populate_trials(self.data_path)
        self.n_good_units = self.good_units.size
        self.n_trials = len(self.trials)
        # obtain good trials (indices not flagged bad during parsing)
        self.good_trials = np.setdiff1d(np.arange(self.n_trials),
                                        self.bad_trials)
        self.n_good_trials = self.good_trials.size

    def get_pre_tone_condition_trials(self, condition=True):
        """Get trial indices matching a pre-tone condition.

        Parameters
        ----------
        condition : bool
            If True, the pre-tone successes are returned. If False, the
            pre-tone failures are returned.

        Returns
        -------
        trials : ndarray
            The trial indices satisfying the specified condition.
        """
        trials = np.array([], dtype='int')
        for idx, trial in enumerate(self.trials):
            if condition:
                if trial.is_pretone_success():
                    trials = np.append(trials, idx)
            else:
                if trial.is_pretone_failure():
                    trials = np.append(trials, idx)
        return trials

    def get_binned_spikes(self, trial, unit, sampling_rate=500, bounds=None):
        """Bin spike sequences for a given trial and unit.

        Parameters
        ----------
        trial : int
            The trial index.
        unit : int
            The unit index.
        sampling_rate : int
            The sampling rate of the binning (used only when `bounds`
            is given).
        bounds : tuple
            The endpoints, in seconds, to perform the binning; when
            None, the trial's own timestamps are used as bin edges.

        Returns
        -------
        binned_spikes : ndarray
            The binned spike counts.
        bins : ndarray
            The bin edges used for the histogram.
        """
        spike_times = self.trials[trial].spike_times[unit]
        if bounds is None:
            bins = self.trials[trial].timestamps
        else:
            bins = np.arange(bounds[0], bounds[1], 1 / sampling_rate)
        binned_spikes, _ = np.histogram(spike_times, bins=bins)
        return binned_spikes, bins

    def get_successful_left_trials(self):
        """Get the trial indices for which the rat successfully moved to the
        left port."""
        indices = np.array([
            idx for idx, trial in enumerate(self.trials)
            if trial.events['go'] == 1
        ])
        return indices

    def get_successful_right_trials(self):
        """Get the trial indices for which the rat successfully moved to the
        right port."""
        indices = np.array([
            idx for idx, trial in enumerate(self.trials)
            if trial.events['go'] == 2
        ])
        return indices

    def get_successful_go_trials(self):
        """Get the trial indices for which the rat successfully completed a
        GO."""
        indices = np.array([
            idx for idx, trial in enumerate(self.trials)
            if trial.is_successful_go()
        ])
        return indices

    def get_successful_stop_trials(self):
        """Get the trial indices for which the rat successfully completed a
        STOP."""
        indices = np.array([
            idx for idx, trial in enumerate(self.trials)
            if trial.is_successful_stop()
        ])
        return indices

    def get_firing_rate(
        self, trial, unit, sampling_rate=500, sigma=0.03, bounds=None,
        kernel_extent=(-2, 2)
    ):
        """Obtain a firing rate estimate using a Gaussian kernel.

        Parameters
        ----------
        trial : int
            The trial index.
        unit : int
            The unit index.
        sampling_rate : int
            The sampling rate of the binning and kernel.
        sigma : float
            The width of the Gaussian kernel, in seconds.
        bounds : tuple
            The endpoints, in seconds, to extract the firing rate.
        kernel_extent : tuple
            The extent of the Gaussian kernel around the mean.

        Returns
        -------
        firing_rate : ndarray
            The firing rate within the bounds.
        bins : ndarray
            The timestamps for each point in the firing rate.
        """
        spike_times = self.trials[trial].spike_times[unit]
        if bounds is None:
            bins = self.trials[trial].timestamps
        else:
            bins = np.arange(bounds[0], bounds[1], 1 / sampling_rate)
        binned_spikes, _ = np.histogram(spike_times, bins=bins)
        # normalized Gaussian kernel sampled at the binning rate
        x = np.arange(kernel_extent[0], kernel_extent[1], 1 / sampling_rate)
        kernel = np.exp(-x**2 / (2 * sigma**2)) / np.sqrt(2 * np.pi * sigma**2)
        firing_rate = convolve(binned_spikes, kernel, mode='same')
        # bins[:-1]: histogram yields one fewer count than edges
        return firing_rate, bins[:-1]

    @staticmethod
    def populate_trials(data_path):
        """Populate a list of Trial objects given a data path.

        Parameters
        ----------
        data_path : string
            The path to the HDF5 dataset.

        Returns
        -------
        trials : list of Trial
            One Trial per entry in the file's 'Trials' group.
        bad_trials : ndarray
            Indices of trials with no unit data.
        good_units : ndarray
            Zero-based indices of units marked good (all units if the
            file has no 'GoodUnits' entry).

        NOTE(review): np.asscalar was removed in NumPy 1.23; `.item()`
        is the modern replacement if this module is updated.
        """
        data = h5py.File(data_path, 'r')
        trials = []
        bad_trials = np.array([])
        # extract trial data
        trial_data = data['Trials']
        n_trials = trial_data['time'].shape[0]
        for idx in range(n_trials):
            trial = Trial()
            # important times in the trial #
            # time of trial start
            trial.t_trial_start = np.asscalar(
                data[trial_data['time'][idx, 0]][:]
            )
            # time of center cue
            trial.t_center_cue = np.asscalar(
                data[trial_data['CenterCueEvent'][idx, 0]][:]
            )
            # time that rat enters center port
            trial.t_center_in = np.asscalar(
                data[trial_data['CenterInEvent'][idx, 0]][:]
            )
            # time of cue to move left or right
            t_side_cue = data[trial_data['SideCueEvent'][idx, 0]][:]
            # check if there was a side cue event
            if t_side_cue.size == 1:
                trial.t_side_cue = np.asscalar(t_side_cue)
            else:
                trial.t_side_cue = None
            # time that rat leaves center port
            trial.t_center_out = np.asscalar(
                data[trial_data['CenterOutEvent'][idx, 0]][:]
            )
            # time that rat enters the side port
            t_side_in = data[trial_data['SideInEvent'][idx, 0]][:]
            if t_side_in.size == 1:
                trial.t_side_in = np.asscalar(t_side_in)
            else:
                trial.t_side_in = None
            # important experimental setup features #
            # center port
            trial.center = np.asscalar(
                data[trial_data['center'][idx, 0]][:]
            )
            # target port
            trial.target = np.asscalar(
                data[trial_data['target'][idx, 0]][:]
            )
            # event codes (see decode_event_condition for meanings) #
            evt = data[trial_data['Evt'][idx, 0]]['Cond']
            events = {}
            events['pre_tone'] = np.asscalar(data[evt[0, 0]][:])
            events['proactive_inhibition'] = np.asscalar(data[evt[1, 0]][:])
            events['go_cue'] = np.asscalar(data[evt[2, 0]][:])
            events['go_LHMH'] = np.asscalar(data[evt[3, 0]][:])
            events['go'] = np.asscalar(data[evt[4, 0]][:])
            events['go_vs_stop'] = np.asscalar(data[evt[5, 0]][:])
            events['stop'] = np.asscalar(data[evt[6, 0]][:])
            trial.events = events
            # timestamps
            unit_data = data[trial_data['Units'][idx, 0]]
            # sift out bad trials (a plain Dataset means no unit group)
            if isinstance(unit_data, h5py.Dataset):
                bad_trials = np.append(bad_trials, idx)
                trial.timestamps = None
                trial.spike_times = None
            else:
                # extract time stamps
                trial.timestamps = data[unit_data['times'][0, 0]][:].ravel()
                # extract spike times
                spike_time_data = unit_data['spkTimes']
                n_units = spike_time_data.size
                spike_times = []
                # iterate over units
                for unit in range(n_units):
                    spike_times.append(
                        data[spike_time_data[unit, 0]][:].ravel()
                    )
                trial.spike_times = spike_times
            trials.append(trial)
        # get good units
        if 'GoodUnits' in data:
            good_units = data['GoodUnits'][:].ravel().astype('int') - 1
        else:
            # assume all units are good
            # NOTE(review): n_units is only bound if at least one trial
            # had unit data; all-bad files would raise NameError here.
            good_units = np.arange(n_units)
        data.close()
        return trials, bad_trials, good_units

    @staticmethod
    def decode_event_condition(event, code):
        """Provides the experimental setting given an event and code.

        Parameters
        ----------
        event : string
            The event category.
        code : int
            The event code.

        Returns
        -------
        definition : string
            A string clarifying the event condition.

        Raises
        ------
        ValueError
            If the event or code is not recognized.
        """
        if event == 'pre_tone':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return 'pre_tone_violation'
            elif code == 2:
                return 'pre_tone_success'
            else:
                raise ValueError('Incorrect pre-tone code.')
        elif event == 'proactive_inhibition':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return '50_right_stop'
            elif code == 2:
                return '0_stop'
            elif code == 3:
                return '50_left_stop'
            else:
                raise ValueError('Incorrect proactive inhibition code.')
        elif event == 'go_cue':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return 'left'
            elif code == 2:
                return 'right'
            else:
                raise ValueError('Incorrect go cue code.')
        elif event == 'go_LHMH':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return 'left'
            elif code == 2:
                return 'left_LHMH'
            elif code == 3:
                return 'right'
            elif code == 4:
                return 'right_LHMH'
            else:
                raise ValueError('Incorrect LHMH go code.')
        elif event == 'go':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return 'left'
            elif code == 2:
                return 'right'
            else:
                raise ValueError('Incorrect go code.')
        elif event == 'go_vs_stop':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return 'stop_success'
            elif code == 2:
                return 'stop_fail'
            elif code == 3:
                return 'go_success'
            else:
                raise ValueError('Incorrect go vs. stop code.')
        elif event == 'stop':
            if code == 0:
                return 'exclude'
            elif code == 1:
                return 'stop_success'
            elif code == 2:
                return 'stop_fail'
            else:
                raise ValueError('Incorrect stop code.')
        else:
            raise ValueError('Incorrect event code.')
class Trial():
    """Struct-like record of a single behavioral trial.

    Attributes
    ----------
    t_trial_start : int
        The time, in seconds, when the trial started.
    t_center_cue : int
        The time, in seconds, when the center cue turned on.
    t_center_in : int
        The time, in seconds, when the rat entered the center port.
    t_side_cue : int
        The time, in seconds, when the tone cued to move to the side.
        If the rat failed pre-tone, this attribute is None.
    t_center_out : int
        The time, in seconds, when the rat left the center port.
    t_side_in : int
        The time, in seconds, when the rat entered the side port.
        If the rat did not enter the side port, this attribute is None.
    t_trial_end : int
        The time, in seconds, when the trial ended.
    events : dict
        Contains the event codes detailing the experimental conditions of
        the trial.
    spike_times : list of ndarrays
        List of arrays containing the times, in seconds, that each unit
        spiked.
    """

    def __init__(self, **kwargs):
        self.t_trial_start = kwargs.get('t_trial_start', None)
        self.t_center_cue = kwargs.get('t_center_cue', None)
        self.t_center_in = kwargs.get('t_center_in', None)
        self.t_side_cue = kwargs.get('t_side_cue', None)
        self.t_center_out = kwargs.get('t_center_out', None)
        self.t_side_in = kwargs.get('t_side_in', None)
        self.t_trial_end = kwargs.get('t_trial_end', None)
        self.events = kwargs.get('events', None)
        self.spike_times = kwargs.get('spike_times', None)

    def _events_or_raise(self):
        """Return self.events, raising AttributeError when unset.

        Centralizes the guard every predicate below used to repeat.
        """
        if self.events is None:
            raise AttributeError('Trial has no events attribute.')
        return self.events

    def is_valid_trial(self):
        """Checks whether this Trial object was a valid trial."""
        return self._events_or_raise()['pre_tone'] != 0

    def is_pretone_success(self):
        """Checks whether this Trial object resulted in a pre-tone success."""
        return self._events_or_raise()['pre_tone'] == 2

    def is_pretone_failure(self):
        """Checks whether this Trial object resulted in a pre-tone failure."""
        return self._events_or_raise()['pre_tone'] == 1

    def is_successful_left(self):
        """Checks whether this trial resulted in a successful left to side
        port."""
        return self._events_or_raise()['go'] == 1

    def is_successful_right(self):
        """Checks whether this trial resulted in a successful right to side
        port."""
        return self._events_or_raise()['go'] == 2

    def is_successful_stop(self):
        """Checks whether the rat successfully stopped on this trial."""
        return self._events_or_raise()['go_vs_stop'] == 1

    def is_successful_go(self):
        """Checks whether the rat successfully completed a go trial."""
        return self._events_or_raise()['go_vs_stop'] == 3
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse
from django.template.defaultfilters import register
import vyos
from perms import is_authenticated
import perms
import vycontrol_vyos_api as vapi
from config.models import Instance
import pprint
@register.filter
def get_item(dictionary, key):
    """Template filter: look up *key* in *dictionary*.

    Returns None when the key is absent; Django templates cannot
    subscript a dict with a variable key, hence this helper.
    """
    return dictionary.get(key)
@is_authenticated
def index(request):
    """List all interfaces and handle IPv4 firewall assignment posts.

    GET renders the interface list with each interface's current in/out
    firewall bindings and addresses.  POST fields named
    ``firewall-ipv4-in.<type>.<name>[.<vif>]`` (or ``...-out...``) attach
    a firewall or, with the special value ``--remove--``, detach it; any
    change redirects back to the interface list.
    """
    hostname_default = vyos.get_hostname_prefered(request)
    all_instances = vyos.instance_getall()
    firewall_all = vyos.get_firewall_all(hostname_default)
    interfaces = vyos.get_interfaces(hostname_default)
    is_superuser = perms.get_is_superuser(request.user)
    interfaces_all_names = vyos.get_interfaces_all_names(hostname_default)
    interface_firewall_in = {}
    interface_firewall_out = {}
    interface_address = {}
    firewall_names = []

    # set interface_alias in format eth0 if it has no vif, eth0.vlan otherwise
    for iname in interfaces_all_names:
        if 'vif' in iname:
            iname['interface_alias'] = "{interface_name}.{vif}".format(interface_name=iname['interface_name'], vif=iname['vif'])
        else:
            iname['interface_alias'] = iname['interface_name']

    # collect firewall in/out bindings and addresses per interface.
    # Narrowed from bare ``except``: an interface without firewall/address
    # config surfaces as KeyError (or TypeError when a level is not a dict).
    for interface_type in interfaces:
        for interface_name in interfaces[interface_type]:
            try:
                interface_firewall_in[interface_name] = interfaces[interface_type][interface_name]['firewall']['in']['name']
            except (KeyError, TypeError):
                pass
            try:
                interface_firewall_out[interface_name] = interfaces[interface_type][interface_name]['firewall']['out']['name']
            except (KeyError, TypeError):
                pass
            if interface_name not in interface_address:
                interface_address[interface_name] = []
            try:
                interface_address[interface_name].append(interfaces[interface_type][interface_name]['address'])
            except (KeyError, TypeError):
                pass
            if 'vif' in interfaces[interface_type][interface_name]:
                # Repeat the collection for every VLAN sub-interface.
                for vif in interfaces[interface_type][interface_name]['vif']:
                    interface_name_full = "{interface_name}.{vif}".format(interface_name=interface_name, vif=vif)
                    try:
                        interface_firewall_in[interface_name_full] = interfaces[interface_type][interface_name]['vif'][vif]['firewall']['in']['name']
                    except (KeyError, TypeError):
                        pass
                    try:
                        interface_firewall_out[interface_name_full] = interfaces[interface_type][interface_name]['vif'][vif]['firewall']['out']['name']
                    except (KeyError, TypeError):
                        pass
                    if interface_name_full not in interface_address:
                        interface_address[interface_name_full] = []
                    try:
                        interface_address[interface_name_full].append(interfaces[interface_type][interface_name]['vif'][vif]['address'])
                    except (KeyError, TypeError):
                        pass

    # put all information in a single var: interfaces_all_names
    for iname in interfaces_all_names:
        if 'vif' in iname:
            ialias = "{interface_name}.{vif}".format(interface_name=iname['interface_name'], vif=iname['vif'])
        else:
            ialias = iname['interface_name']
        if ialias in interface_firewall_out:
            iname['firewall_out'] = interface_firewall_out[ialias]
        if ialias in interface_firewall_in:
            iname['firewall_in'] = interface_firewall_in[ialias]
        if ialias in interface_address:
            iname['address'] = interface_address[ialias]

    if 'name' in firewall_all:
        for fname in firewall_all['name']:
            firewall_names.append(fname)

    # index the merged records by alias for quick lookup while handling POST
    interfaces_all_names_dict = {}
    for iname in interfaces_all_names:
        if 'vif' in iname:
            ialias = "{interface_name}.{vif}".format(interface_name=iname['interface_name'], vif=iname['vif'])
        else:
            ialias = iname['interface_name']
        interfaces_all_names_dict[ialias] = iname

    fw_changed = False
    for el in request.POST:
        interface_vif = None
        if el.startswith('firewall-ipv4-in'):
            # Field name format: firewall-ipv4-in.<type>.<name>[.<vif>]
            pos = el.split(".")
            interface_type = pos[1]
            interface_name = pos[2]
            if len(pos) >= 4:
                interface_vif = pos[3]
                ialias = "{interface_name}.{vif}".format(interface_name=interface_name, vif=interface_vif)
            else:
                ialias = interface_name
            firewall_name = request.POST[el]
            if firewall_name == "--remove--":
                # Only issue a delete when a firewall is actually attached.
                if 'firewall_in' in interfaces_all_names_dict[ialias]:
                    v = vapi.delete_interface_firewall_ipv4(hostname_default, interface_type, interface_name, "in", interface_vif)
            else:
                # Only issue a set when the binding actually changes.
                if 'firewall_in' not in interfaces_all_names_dict[ialias] or interfaces_all_names_dict[ialias]['firewall_in'] != firewall_name:
                    v = vapi.set_interface_firewall_ipv4(hostname_default, interface_type, interface_name, "in", firewall_name, interface_vif)
            fw_changed = True
        elif el.startswith('firewall-ipv4-out'):
            pos = el.split(".")
            interface_type = pos[1]
            interface_name = pos[2]
            if len(pos) >= 4:
                interface_vif = pos[3]
                ialias = "{interface_name}.{vif}".format(interface_name=interface_name, vif=interface_vif)
            else:
                ialias = interface_name
            firewall_name = request.POST[el]
            if firewall_name == "--remove--":
                if 'firewall_out' in interfaces_all_names_dict[ialias]:
                    v = vapi.delete_interface_firewall_ipv4(hostname_default, interface_type, interface_name, "out", interface_vif)
            else:
                if 'firewall_out' not in interfaces_all_names_dict[ialias] or interfaces_all_names_dict[ialias]['firewall_out'] != firewall_name:
                    v = vapi.set_interface_firewall_ipv4(hostname_default, interface_type, interface_name, "out", firewall_name, interface_vif)
            fw_changed = True

    if fw_changed:
        return redirect('interface:interface-list')

    template = loader.get_template('interface/index.html')
    context = {
        'interfaces': interfaces,
        'interfaces_pretty': pprint.pformat(interfaces, indent=4, width=120),
        'interfaces_all_names': interfaces_all_names,
        'interfaces_all_names_pretty': pprint.pformat(interfaces_all_names, indent=4, width=120),
        'instances': all_instances,
        'hostname_default': hostname_default,
        'firewall_all' : firewall_all,
        'firewall_names' : firewall_names,
        'interface_firewall_in' : interface_firewall_in,
        'interface_firewall_out' : interface_firewall_out,
        'interface_firewall_in_pretty' : pprint.pformat(interface_firewall_in, indent=4, width=120),
        'interface_firewall_out_pretty' : pprint.pformat(interface_firewall_out, indent=4, width=120),
        'username': request.user,
        'is_superuser' : is_superuser,
    }
    return HttpResponse(template.render(context, request))
@is_authenticated
def interfaceshow(request, interface_type, interface_name):
    """Render the detail page for one interface."""
    instances = vyos.instance_getall()
    hostname = vyos.get_hostname_prefered(request)
    firewalls = vyos.get_firewall_all(hostname)
    iface = vyos.get_interface(interface_type, interface_name, hostname=hostname)
    superuser = perms.get_is_superuser(request.user)
    context = {
        'interface': iface,
        'instances': instances,
        'interface_type' : interface_type,
        'interface_name' : interface_name,
        'hostname_default': hostname,
        'firewall_all' : firewalls,
        'username': request.user,
        'is_superuser' : superuser,
    }
    return HttpResponse(loader.get_template('interface/show.html').render(context, request))
@is_authenticated
def interfacefirewall(request, interface_type, interface_name):
    """Render the firewall view for one interface (uses the show template)."""
    instances = vyos.instance_getall()
    superuser = perms.get_is_superuser(request.user)
    hostname = vyos.get_hostname_prefered(request)
    iface = vyos.get_interface(interface_type, interface_name, hostname=hostname)
    context = {
        'interface': iface,
        'instances': instances,
        'hostname_default': hostname,
        'interface_type' : interface_type,
        'interface_name' : interface_name,
        'username': request.user,
        'is_superuser' : superuser,
    }
    return HttpResponse(loader.get_template('interface/show.html').render(context, request))
|
#!/usr/bin/env python3
"""
Module to execute functions
"""
import redis
import uuid
from typing import Union, Optional, Callable
from functools import wraps
def count_calls(method: Callable) -> Callable:
    """Decorator counting how many times *method* is invoked.

    The count lives in Redis under the method's qualified name and is
    incremented after each successful call.
    """
    key = method.__qualname__

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        # Bug fix: the original wrapper dropped keyword arguments.
        value = method(self, *args, **kwargs)
        self._redis.incr(key)
        return value
    return wrapper
def call_history(method: Callable) -> Callable:
    """Decorator storing the history of inputs and outputs for *method*.

    Inputs are rpush'ed to ``<qualname>:inputs`` and outputs to
    ``<qualname>:outputs``.
    """
    input_list = method.__qualname__ + ":inputs"
    output_list = method.__qualname__ + ":outputs"

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        # Bug fix: keyword arguments are now forwarded.  Only positional
        # args are recorded, keeping the stored format backward compatible.
        self._redis.rpush(input_list, str(args))
        value = method(self, *args, **kwargs)
        self._redis.rpush(output_list, str(value))
        return value
    return wrapper
def replay(method: Callable) -> None:
    """Display the history of calls of a particular function.

    Reads the call count stored by ``count_calls`` and the input/output
    lists stored by ``call_history`` from the bound method's Redis
    client, then prints one line per recorded call.  Bug fix: the
    original was an unimplemented ``pass`` stub.
    """
    redis_client = method.__self__._redis
    name = method.__qualname__
    raw_count = redis_client.get(name)
    count = int(raw_count) if raw_count else 0
    print("{} was called {} times:".format(name, count))
    inputs = redis_client.lrange(name + ":inputs", 0, -1)
    outputs = redis_client.lrange(name + ":outputs", 0, -1)
    for args, result in zip(inputs, outputs):
        # Redis returns bytes; decode for display.
        if isinstance(args, bytes):
            args = args.decode("utf-8")
        if isinstance(result, bytes):
            result = result.decode("utf-8")
        print("{}(*{}) -> {}".format(name, args, result))
class Cache:
    """Redis-backed cache with call counting and call history."""

    def __init__(self):
        """Store an instance of the Redis client as a private variable
        and flush the database."""
        self._redis = redis.Redis()
        self._redis.flushdb()

    @call_history
    @count_calls
    def store(self, data: Union[str, bytes, int, float]) -> str:
        """Store *data* under a freshly generated UUID key and return
        the key."""
        key = str(uuid.uuid4())  # renamed from ``id`` (shadowed a builtin)
        self._redis.mset({key: data})
        return key

    def get(self, key: str,
            fn: Optional[Callable] = None) -> Union[str, bytes, int, float]:
        """Return the value stored at *key*, converted by *fn* when
        one is given."""
        data = self._redis.get(key)
        if fn:
            return fn(data)
        return data

    def get_str(self, data: str) -> str:
        """Return the value stored at key *data* decoded as UTF-8.

        Bug fix: the original computed the decoded string but never
        returned it (always returned None).
        """
        return self._redis.get(data).decode('utf-8')

    def get_int(self, data: str) -> int:
        """Return the value stored at key *data* as an integer."""
        return int(self._redis.get(data))
|
from test import test_launch_time
from common import dev_sn
from common import kill_process
import os
# Measure the Music app's launch time on the device identified by dev_sn.
os.system("adb -s %s root"%dev_sn)
# Kill native_agent first so it cannot interfere with the measurement.
kill_process(dev_sn,"native_agent")
# [app label, <middle field>, package name]
# NOTE(review): the middle field is empty here and its meaning is defined
# by test_launch_time — confirm against that class before reusing.
np_music = ["Music","","com.android.music"]
music_launch_time_test = test_launch_time(dev_sn,np_music)
music_launch_time_test.exec_test()
print("result of music_launch_time_test is %sms"%music_launch_time_test.result)
|
#!~/Documents/tensorflowVE3/bin/python3
'''
Introduction to Python
REF : Jump to Python / https://docs.python.org/3/tutorial/index.html
'''
# Demonstrates value vs. identity comparison, boolean and membership
# operators, while/for loops, and continue/break.

money = 1
print ("money = ", end=" ")
print(money, end=" ")
print(type(money))
if money == 1:
    print ("money == 1 (int)")
else:
    pass
# NOTE(review): ``is`` tests identity, not value; it only "works" here
# because CPython interns small integers.  Use ``==`` for value tests
# (``x is 1`` raises a SyntaxWarning on Python 3.8+).
if money is 1:
    print ("money is 1 (int)")
else:
    pass
print("\n")
money = 12345
print ("money = ", end=" ")
print(money, end=" ")
print(type(money))
# NOTE(review): relies on the compiler folding equal constants in the
# same code object — implementation-defined behavior, not a value test.
if money is 12345:
    print ("money is 12345 (int)")
else:
    pass
print("\n")
money = "1"
print ("money = ", end=" ")
print(money, end=" ")
print(type(money))
# int 1 and str "1" are not equal: the elif branch is taken.
if money == 1:
    print ("money = 1 (int)")
elif money == "1":
    print ("money = '1' (str)")
else:
    print ("none")
print("\n")
money = 1000000
print ("money = ", end=" ")
print(money, end=" ")
print(type(money))
# compare operator : > < >= <= == !=
if money > 10000:
    print ("money > 10000")
# and or not
if money > 10000 and money < 10e8:
    print ("10e4 < money < 10e8")
print("\n")
str_a = "python is easy...?"
print ("str_a = ", end=" ")
print(str_a, end=" ")
print(type(str_a))
# substring membership tests with ``in`` / ``not in``
if "python" in str_a:
    print ("python is in str_a")
if "cpp" not in str_a:
    print ("cpp is not in str_a")
print("\n")
# while loop: count tree chops until the tree falls at 10
treeHit = 0
while treeHit < 10:
    treeHit = treeHit +1
    print("나무를 %d번 찍었습니다." % treeHit)
    if treeHit == 10:
        print("나무 넘어갑니다.\n")
num = 0
# ``continue`` skips even numbers, printing only the odd ones
while num < 10:
    num = num +1
    if num % 2 == 0:
        continue
    print("num : %d" % (num))
print("\n")
coffee = 10
money = 300
# Potentially infinite loop: money never changes, so the condition stays
# truthy; only the ``break`` on running out of coffee exits.
while money:
    print("돈을 받았으니 커피를 줍니다.")
    coffee = coffee -1
    print("남은 커피의 양은 %d개입니다." % coffee)
    if not coffee:
        print("커피가 다 떨어졌습니다. 판매를 중지합니다.\n")
        break
marks = [90, 25, 67, 45, 80]
number = 0
# same pass/fail report three ways: manual counter, range(len(...)),
# and the idiomatic enumerate
for mark in marks:
    number = number +1
    if mark < 60: continue
    print("%d번 학생 %d 점으로 축하합니다. 합격입니다. " % (number, mark))
print("\n")
for number in range(len(marks)):
    if marks[number] < 60: continue
    print("%d번 학생 %d 점으로 축하합니다. 합격입니다." % (number+1, marks[number]))
print("\n")
for number, element in enumerate(marks):
    if element < 60: continue
    print("%d번 학생 %d 점으로 축하합니다. 합격입니다." % (number+1, element))
print("\n")
'''
money = 1 <class 'int'>
money == 1 (int)
money is 1 (int)
money = 12345 <class 'int'>
money is 12345 (int)
money = 1 <class 'str'>
money = '1' (str)
money = 1000000 <class 'int'>
money > 10000
10e4 < money < 10e8
str_a = python is easy...? <class 'str'>
python is in str_a
cpp is not in str_a
나무를 1번 찍었습니다.
나무를 2번 찍었습니다.
나무를 3번 찍었습니다.
나무를 4번 찍었습니다.
나무를 5번 찍었습니다.
나무를 6번 찍었습니다.
나무를 7번 찍었습니다.
나무를 8번 찍었습니다.
나무를 9번 찍었습니다.
나무를 10번 찍었습니다.
나무 넘어갑니다.
num : 1
num : 3
num : 5
num : 7
num : 9
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 9개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 8개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 7개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 6개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 5개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 4개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 3개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 2개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 1개입니다.
돈을 받았으니 커피를 줍니다.
남은 커피의 양은 0개입니다.
커피가 다 떨어졌습니다. 판매를 중지합니다.
1번 학생 90 점으로 축하합니다. 합격입니다.
3번 학생 67 점으로 축하합니다. 합격입니다.
5번 학생 80 점으로 축하합니다. 합격입니다.
1번 학생 90 점으로 축하합니다. 합격입니다.
3번 학생 67 점으로 축하합니다. 합격입니다.
5번 학생 80 점으로 축하합니다. 합격입니다.
1번 학생 90 점으로 축하합니다. 합격입니다.
3번 학생 67 점으로 축하합니다. 합격입니다.
5번 학생 80 점으로 축하합니다. 합격입니다.
'''
|
# Copyright 2019 Matthew Egan Odendahl
# SPDX-License-Identifier: Apache-2.0
import ast
import builtins
import os
import re
from contextlib import contextmanager, nullcontext
from functools import reduce
from importlib import import_module, resources
from itertools import chain
from pathlib import Path, PurePath
from pprint import pprint
from types import ModuleType
from typing import Any, Iterable, Iterator, NewType, Tuple, Union
from unittest.mock import ANY
from hissp.compiler import Compiler, readerless
from hissp.munger import munge
TOKENS = re.compile(
r"""(?x)
(?P<open>\()
|(?P<close>\))
|(?P<string>
" # Open quote.
(?:|[^"\\] # Any non-magic character.
|\\(?:.|\n) # Backslash only if paired, including with newline.
)* # Zero or more times.
" # Close quote.
)
|(?P<comment>;.*)
|(?P<whitespace>[\n ]+) # Tabs are not allowed outside of strings.
|(?P<macro>
,@
|['`,]
# Ends in ``#``, but not bytes, dict, set, list, str.
|(?:[Bb](?!')
|[^ \n"(){}[\]#Bb]
)[^ \n"(){}[\]#]*[#])
|(?P<symbol>[^ \n"()]+)
"""
)
Token = NewType("Token", Tuple[str, str])
DROP = object()
def lex(code: str, file: str = "<?>") -> Iterator[Token]:
    """Tokenize Lissp *code*, yielding (kind, text) Token pairs.

    Raises SyntaxError with a (file, line, column, code) tuple when no
    TOKENS alternative matches at the current position.
    """
    pos = 0
    while pos < len(code):
        match = TOKENS.match(code, pos)
        if match is None:
            # Compute 1-based line and column for the error report.
            good = code[0:pos].split("\n")
            line = len(good)
            column = len(good[-1])
            raise SyntaxError("Unexpected token", (file, line, column, code))
        # The lexer must always advance, or this loop would never end.
        assert match.end() > pos, match.groups()
        pos = match.end()
        yield Token((match.lastgroup, match.group()))
class _Unquote(tuple):
    # Marker tuple subclass distinguishing ``,form`` / ``,@form`` results
    # inside templates from ordinary tuples.
    def __repr__(self):
        return f"_Unquote{super().__repr__()}"
def gensym_counter(count=[0]):
    """Return the next gensym serial number.

    The mutable default argument is deliberate here: it persists across
    calls and acts as module-level counter state.
    """
    count[0] += 1
    return count[0]
class Parser:
    """Reads Lissp token streams into Hissp forms and compiles them."""

    def __init__(
        self, qualname="_repl", ns=None, verbose=False, evaluate=False, filename="<?>"
    ):
        self.qualname = qualname
        self.ns = ns or {"__name__": "<compiler>"}
        self.compiler = Compiler(self.qualname, self.ns, evaluate)
        self.verbose = verbose
        self.filename = filename
        self.reinit()

    def reinit(self):
        """Reset reader state (gensym scopes, paren depth) between reads."""
        self.gensym_stack = []
        self.depth = 0

    def parse(self, tokens: Iterator[Token]) -> Iterator:
        """Yield parsed forms, discarding ones a reader macro DROPped."""
        return (form for form in self._parse(tokens) if form is not DROP)

    def _parse(self, tokens: Iterator[Token]) -> Iterator:
        """Core recursive-descent parse over the token stream."""
        for k, v in tokens:
            if k == "open":
                # Recurse for the nested form; the matching "close" token
                # decrements depth back to its value before the recursion.
                depth = self.depth
                self.depth += 1
                yield (*self.parse(tokens),)
                if self.depth != depth:
                    raise SyntaxError("Unclosed '('.")
            elif k == "close":
                self.depth -= 1
                if self.depth < 0:
                    raise SyntaxError("Unopened ')'.")
                return
            elif k == "string":
                # Strings become quoted forms tagged {":str": True}.
                yield "quote", ast.literal_eval(
                    v.replace("\\\n", "").replace("\n", r"\n")
                ), {":str": True}
            elif k in {"comment", "whitespace"}:
                continue
            elif k == "macro":
                # Template/unquote macros open a gensym scope around the
                # read of their operand form.
                with {
                    "`": self.gensym_context,
                    ",": self.unquote_context,
                    ",@": self.unquote_context,
                }.get(v, nullcontext)():
                    form = next(self.parse(tokens))
                    yield self.parse_macro(v, form)
            elif k == "symbol":
                # Numbers and other literals parse via literal_eval;
                # anything else is munged into a Python identifier.
                try:
                    yield ast.literal_eval(v)
                except (ValueError, SyntaxError):
                    yield munge(v)
            else:
                assert False, "unknown token: " + repr(k)
        if self.depth:
            # Bug fix: the exception was constructed but never raised.
            raise SyntaxError("Ran out of tokens before completing form.")

    def parse_macro(self, tag: str, form):
        """Expand a reader macro *tag* applied to *form*."""
        if tag == "'":
            return "quote", form
        if tag == "`":
            return self.template(form)
        if tag == ",":
            return _Unquote([":?", form])
        if tag == ",@":
            return _Unquote([":*", form])
        assert tag.endswith("#")
        tag = tag[:-1]
        if tag == "_":
            return DROP
        if tag == "$":
            return self.gensym(form)
        if tag == ".":
            # Inject: evaluate the form at read time.
            return eval(readerless(form), {})
        if ".." in tag and not tag.startswith(".."):
            # Qualified reader macro: module..function#form
            module, function = tag.split("..", 1)
            function = munge(function)
            if is_string(form):
                form = form[1]
            return reduce(getattr, function.split("."), import_module(module))(form)
        raise ValueError(f"Unknown reader macro {tag}")

    def template(self, form):
        """Expand a backquote template for *form*."""
        case = type(form)
        if case is tuple and form:
            if is_string(form):
                return "quote", form
            return (
                ("lambda", (":", ":*", "xAUTO0_"), "xAUTO0_"),
                ":",
                *chain(*self._template(form)),
            )
        if case is str and not form.startswith(":"):
            return "quote", self.qualify(form)
        if case is _Unquote and form[0] == ":?":
            return form[1]
        return form

    def _template(self, forms: Iterable) -> Iterable[Tuple[str, Any]]:
        """Yield (splice-kind, form) pairs for the elements of a template."""
        for form in forms:
            case = type(form)
            if case is str and not form.startswith(":"):
                yield ":?", ("quote", self.qualify(form))
            elif case is _Unquote:
                yield form
            elif case is tuple:
                yield ":?", self.template(form)
            else:
                yield ":?", form

    def qualify(self, symbol: str) -> str:
        """Fully qualify *symbol* against builtins, macros, or the module."""
        if symbol in {e for e in dir(builtins) if not e.startswith("_")}:
            return f"builtins..{symbol}"
        if re.search(r"\.\.|^\.|^quote$|^lambda$|xAUTO\d+_$", symbol):
            # Already qualified, special form, or auto-gensym: leave as is.
            return symbol
        if symbol in vars(self.ns.get("_macro_", lambda: ())):
            return f"{self.qualname}.._macro_.{symbol}"
        return f"{self.qualname}..{symbol}"

    def reads(self, code: str) -> Iterable:
        """Read *code* into forms, optionally pretty-printing them."""
        res = self.parse(lex(code, self.filename))
        self.reinit()
        if self.verbose:
            res = list(res)
            pprint(res)
        return res

    def compile(self, code: str) -> str:
        """Read and compile *code* to Python source."""
        hissp = self.reads(code)
        return self.compiler.compile(hissp)

    def gensym(self, form: str):
        """Make a gensym name unique to the current template scope."""
        return f"_{munge(form)}xAUTO{self.gensym_stack[-1]}_"

    @contextmanager
    def gensym_context(self):
        """Open a fresh gensym scope for a backquote template."""
        self.gensym_stack.append(gensym_counter())
        try:
            yield
        finally:
            self.gensym_stack.pop()

    @contextmanager
    def unquote_context(self):
        """Temporarily step out of the innermost gensym scope."""
        gensym_number = self.gensym_stack.pop()
        try:
            yield
        finally:
            self.gensym_stack.append(gensym_number)
def is_string(form):
    # A reader string parses to ('quote', <str>, {':str': True}).  ANY
    # (unittest.mock) equals anything, so the == checks the 3-tuple shape
    # and 'quote' head before form[2] is inspected for the str flag.
    return form == ("quote", ANY, ANY) and form[2].get(":str")
def transpile(package: resources.Package, *modules: Union[str, PurePath]):
    """Transpile each named ``.lissp`` module in *package* to a ``.py`` file."""
    for module in modules:
        transpile_module(package, module + ".lissp")
def transpile_module(
    package: resources.Package,
    resource: Union[str, PurePath],
    out: Union[None, str, bytes, Path] = None,
):
    """Compile one ``.lissp`` resource in *package* and write the Python
    output.

    When *out* is None, the output path is the source path with a ``.py``
    suffix.
    """
    code = resources.read_text(package, resource)
    path: Path
    with resources.path(package, resource) as path:
        out = out or path.with_suffix(".py")
        # Normalize *package* to a dotted package name for the qualname.
        if isinstance(package, ModuleType):
            package = package.__package__
        if isinstance(package, os.PathLike):
            # NOTE(review): this branch assumes *resource* is a PurePath
            # (has ``.stem``) when *package* is path-like — confirm callers.
            resource = resource.stem
        qualname = f"{package}.{resource.split('.')[0]}"
        with open(out, "w") as f:
            print("writing to", out)
            f.write(Parser(qualname, evaluate=True, filename=str(out)).compile(code))
|
from mytest.myproject.register.util_reg import DriverReg
class BasePage():
    """Base class for page objects: holds the shared driver and lookups."""
    def __init__(self):
        # The driver comes from the DriverReg registry, so all pages share
        # the same session.
        self.driver = DriverReg().get_driver()
    def emt_fd(self,location):
        """Find an element; *location* is a locator tuple unpacked into
        ``find_element``."""
        return self.driver.find_element(*location)
class BaseHandle():
    """Base class for handle objects wrapping raw widget interaction."""
    def input_text(self,inputtext, username):
        """Type *username* into the *inputtext* element."""
        inputtext.send_keys(username)
#!/usr/bin/python
#
# test.py -- interactive test script
#
import json
import os
import signal
import subprocess
import sys
import types
import urllib2
# Node address list (filled by get_nodes() in __main__) and pid -> Popen map.
nodes = []
procs = {}
# populate list of nodes
def get_nodes():
    """Fetch the host:port node list from the course URL.

    Falls back to five localhost nodes when the fetch fails.
    """
    try:
        resp = urllib2.urlopen('http://cs.ucsb.edu/~dkudrow/cs271/nodes')
        raw = resp.read()
        return raw.splitlines()
    except:
        return [
            '127.0.0.1:6001',
            '127.0.0.1:6002',
            '127.0.0.1:6003',
            '127.0.0.1:6004',
            '127.0.0.1:6005' ]
vim_indentation_sucks = True  # NOTE(review): appears unused in this file; likely an editor workaround — confirm before removing
# decorator to run command for list of pids
def RunForPids(func):
    """Decorator letting *func* accept one pid or a list of pid strings.

    A single argument is coerced to int and the result returned directly;
    a list yields a {pid: result} dict, skipping unparsable entries.
    """
    def run_for_pids(args, **kwargs):
        if type(args) != types.ListType:
            # Single pid (int or numeric string).
            result = False
            try:
                pid = int(args)
                result = func(pid, **kwargs)
            except:
                print 'invalid argument: ', type(args), args
        else:
            # List of pids: collect per-pid results.
            result = {}
            for arg in args:
                try:
                    pid = int(arg)
                    result[pid] = func(pid, **kwargs)
                except:
                    print 'invalid argument: ', type(arg), arg
                    continue
        return result
    return run_for_pids
# hit a route on a node
@RunForPids
def send(pid, route='/'):
    """GET *route* on node *pid*; return the body, or a bad-status JSON
    string when the request fails."""
    host = nodes[pid]
    url = 'http://' + host + route
    try:
        return urllib2.urlopen(url).read()
    except:
        return json.dumps({'status':'bad'})
# start a node
@RunForPids
def start(pid):
    """Spawn server.py for node *pid*; return False if it already pings."""
    if ping(pid):
        return False
    path = os.getcwd() + '/server.py'
    args = str(6001 + pid)
    # setsid puts the child in its own process group so stop() can killpg it.
    procs[pid] = subprocess.Popen([path, args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid)
    return True
# ping a node
@RunForPids
def ping(pid):
    """Return True when node *pid* answers '/' with status 'ok'."""
    resp = json.loads(send(pid, route='/'))
    if resp['status'] != 'ok':
        return False
    return True
# stop a node, write output to logfile
@RunForPids
def stop(pid):
    """SIGTERM node *pid*'s process group and dump its stdout to a log."""
    try:
        os.killpg(procs[pid].pid, signal.SIGTERM)
    except:
        return False
    if procs[pid].poll() == None:
        # Not yet reaped: capture the buffered output before it is lost.
        logfile = open('node_%d.log' % pid, 'w')
        for l in procs[pid].stdout.readlines():
            logfile.write(l)
        return True
    return False
# kill all nodes and write output to log files
def cleanup():
    """Stop every spawned node that is still running."""
    for pid in iter(procs):
        pid = int(pid)
        if procs[pid].poll() == None:
            stop(pid)
# interactive loop
def loop():
    """Read commands from stdin and dispatch them until 'exit'."""
    cont = True
    while (cont):
        args = raw_input('> ').split()
        if len(args) < 1: continue
        # 'all' expands to every node id.
        if args[-1] == 'all': args = args[:-1] + ['0', '1', '2', '3', '4']
        if args[0] == 'help':
            print '''
help -- this message
start <pid pid ... | all> -- start nodes
ping <pid pid ... | all> -- ping nodes
stop <pid pid ... | all> -- terminate nodes
send <route> <pid pid ... | all> -- hit <route> on nodes
exit -- kill all nodes, write logs and exit
'''
            continue
        if args[0] == 'exit':
            print 'Writing logs, good bye.'
            cleanup()
            cont = False
            continue
        if args[0] == 'start':
            result = start(args[1:])
            for pid in iter(result):
                if result[pid]:
                    print 'started node %d' % pid
                else:
                    print 'could not start node %d' % pid
            continue
        if args[0] == 'ping':
            result = ping(args[1:])
            for pid in iter(result):
                if result[pid]:
                    print 'node %d is up' % pid
                else:
                    print 'node %d is down' % pid
            continue
        if args[0] == 'stop':
            result = stop(args[1:])
            for pid in iter(result):
                if result[pid]:
                    print 'stopped node %d' % pid
                else:
                    print 'could not stop node %d' % pid
            continue
        if args[0] == 'send':
            # args[1] is the route, the rest are pids.
            result = send(args[2:], route='/'+args[1])
            for pid in iter(result):
                print '%d: %s' % (pid, result[pid])
            continue
        print 'invalid command'
# catch ^C interrupts
def sigint_handler(signal, frame):
    """SIGINT handler: flush node logs and exit cleanly."""
    print 'Writing logs, good bye.'
    cleanup()
    sys.exit(0)
if __name__ == '__main__':
    # Resolve the node list, install the ^C handler, then run the REPL.
    nodes = get_nodes()
    signal.signal(signal.SIGINT, sigint_handler)
    loop()
|
import os
import time
from cognite.client import ClientConfig
from cognite.client.credentials import OAuthClientCredentials
from cognite.client.utils._logging import _configure_logger_for_debug_mode
from pytest import fixture, mark
from cognite.experimental import CogniteClient
from cognite.experimental.data_classes.simulators import SimulationRun, SimulationRunFilter
@fixture(scope="class")
def cognite_client() -> CogniteClient:
    """Class-scoped client built from the COGNITE_* environment variables."""
    _configure_logger_for_debug_mode()
    creds = OAuthClientCredentials(
        token_url=os.environ["COGNITE_TOKEN_URL"],
        client_id=os.environ["COGNITE_CLIENT_ID"],
        client_secret=os.environ["COGNITE_CLIENT_SECRET"],
        scopes=[os.environ["COGNITE_TOKEN_SCOPES"]],
    )
    return CogniteClient(
        config=ClientConfig(
            base_url=os.environ["COGNITE_BASE_URL"],
            client_name="experimental",
            project="charts-azuredev",
            # Request the alpha API version required by the simulators API.
            headers={"cdf-version": "alpha"},
            credentials=creds,
        )
    )
@mark.skipif(
    # Idiom fix: compare to None with ``is`` (PEP 8), not ``==``.
    os.environ.get("ENABLE_SIMULATORS_TESTS") is None, reason="Skipping simulators API tests due to service immaturity"
)
class TestSimulatorsIntegration:
    """Integration tests for the simulators API (opt-in via env var)."""

    def test_run_single_simulation(self, cognite_client: CogniteClient):
        """Running a simulation echoes the request and assigns id/created_time."""
        now = int(round(time.time() * 1000))
        test_run = SimulationRun(
            simulator_name="DWSIM",
            model_name="ShowerMixerIntegrationTest",
            routine_name="ShowerMixerCalculation",
            validation_end_time=now,
            queue=False,
        )
        res = cognite_client.simulators.run(test_run)
        assert isinstance(res, SimulationRun)
        assert res.simulator_name == test_run.simulator_name
        assert res.model_name == test_run.model_name
        assert res.routine_name == test_run.routine_name
        assert res.validation_end_time == test_run.validation_end_time
        assert res.queue is None
        assert res.id is not None
        assert res.created_time is not None

    def test_list_simulation_runs(self, cognite_client: CogniteClient):
        """Listing with filters returns matching, ready runs."""
        res = cognite_client.simulators.list_runs(
            simulator_name="DWSIM",
            model_name="ShowerMixerIntegrationTest",
            routine_name="ShowerMixerCalculation",
            status="ready",
        )
        assert len(res) > 0
        assert res[0].simulator_name == "DWSIM"
        assert res[0].status == "ready"
        assert res[0].created_time > 0
|
#!/usr/bin/env python3
"""
Programmer: Chris Blanks
Last Edited: 1/11/2019
Project: Automated Self-Serving System
Purpose: This script defines the AppWindow Class, which
the Employee and Customer windows inherit from.
"""
from tkinter import messagebox
from tkinter import ttk
import tkinter as tk
from PIL import Image
from PIL import ImageTk
import math
class AppWindow():
background_color = "LightCyan3"
bg_color_other = "mint cream"
var = 10
def __init__(self,main_app):
"""Provides basic functionality to each window of the main application."""
self.main_app_instance = main_app
pass
    def displayDrinkOptionsInGUI(self):
        """Displays each drink button/image/label in a scrollable canvas grid.

        NOTE(review): relies on ``self.frame``, ``self.main_app.screen_width``
        etc. being set up by the subclass — confirm in Employee/Customer windows.
        """
        num_of_drinks = len(self.main_app.active_drink_objects)
        necessary_rows = math.ceil(num_of_drinks/5) #5 drinks fill up a row in the window
        width_of_bar = 25 #scrollbar width
        canvas_width = self.main_app.screen_width - width_of_bar
        canvas_height = self.main_app.screen_height
        scroll_width = self.main_app.screen_width #fills up window
        # 2 rows fill up most of the screen, so the canvas height will be multiples of the screen height
        scroll_height = math.ceil(necessary_rows/2) * self.main_app.screen_height
        self.canvas = tk.Canvas(self.frame,width=canvas_width,height=canvas_height,bg = self.background_color,
                                scrollregion=(0,0,scroll_width,scroll_height))
        self.scrollbar = tk.Scrollbar(self.frame,width=width_of_bar,orient= tk.VERTICAL)
        self.scrollbar.pack(side=tk.LEFT,fill=tk.Y)
        self.canvas.config(yscrollcommand = self.scrollbar.set)
        self.canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
        self.scrollbar.config(command=self.canvas.yview)
        self.canvas_frame = tk.Frame(self.canvas,bg = self.background_color)
        #for widgets to be scrollable, a window(or frame in this case) must be made for the canvas
        self.canvas.create_window((0,0),window=self.canvas_frame,anchor="nw")
        drink_num = 0
        column_position = 0
        row_position = 0
        self.drink_option_references = []
        for drink in self.main_app.active_drink_objects:
            if column_position > 4:
                row_position = row_position + 2 #goes to next set of rows
                column_position = 0 #resets column position to fit all buttons
            drink_img = Image.open(drink.pic_location)
            drink_img = drink_img.resize((200,200),Image.ANTIALIAS)
            drink_photo = ImageTk.PhotoImage(drink_img)
            # The lambda's default argument binds the current drink at
            # creation time (avoids the late-binding closure pitfall).
            self.drink_button = ttk.Button(self.canvas_frame,image=drink_photo
                                           ,command=lambda drink_op= self.main_app.active_drink_objects[drink_num]: self.setupDrinkEvent(drink_op))
            # Keep an image reference on the widget so Tk does not lose it
            # to garbage collection.
            self.drink_button.img_ref = drink_photo
            self.drink_button.grid(row =row_position,column=column_position, padx = 25
                                   ,pady = 15)
            drink.name =(drink.name).replace("_"," ")
            self.drink_label = ttk.Label(self.canvas_frame,text=(drink.name).title(),
                                         font=("Georgia","15","bold"))
            self.drink_label.grid(row=row_position+1,column=column_position)
            self.drink_option_references.append( (self.drink_button,self.drink_label) )
            column_position = column_position + 1
            drink_num = drink_num + 1
    def setupDrinkEvent(self,drink_option):
        """Changes current drink before initiating the drink event."""
        self.current_drink = drink_option
        self.initiateDrinkEvent()
    def initiateDrinkEvent(self):
        """Logs the selection, clears the option grid, and shows the profile."""
        print("Drink #",int(self.current_drink.id_number)+1,": ",(self.current_drink.name).replace("_"," "))
        self.clearDrinkOptionsFromGUI()
        self.setupDrinkProfileInGUI()
    def clearDrinkOptionsFromGUI(self):
        """Clears drink option widgets to make room for the next window."""
        for item in self.drink_option_references:
            item[0].grid_forget()  # button
            item[1].grid_forget()  # label
        #must be discarded, so that grid widgets can be put in self.frame
        self.canvas.pack_forget()
        self.scrollbar.pack_forget()
    def setupDrinkProfileInGUI(self):
        """Creates a drink profile for the current drink.

        Shows the drink's picture, name and ingredient list, plus either a
        Buy button (customer mode) or quantity buttons 1-5 (employee mode).
        All created widgets are collected in self.drink_profile_elements so
        resetDrinkOptions() can remove them later.
        """
        self.drink_profile_elements = []
        img = Image.open(self.current_drink.pic_location)
        img = img.resize((500,400),Image.ANTIALIAS)
        tk_photo = ImageTk.PhotoImage(img)
        self.img_item_reference = tk_photo #keeping a reference allows photo to display
        img_item = ttk.Label(self.frame,image=tk_photo)
        img_item.grid(row=0,column=0)
        name_of_drink = ttk.Label(self.frame,text=(self.current_drink.name).title())
        name_of_drink.grid(row=1,column=0)
        # Join ingredients, then turn the separating spaces into commas and
        # underscores (multi-word ingredients) back into spaces.
        text_builder =" ".join(self.current_drink.ingredients).replace(' ',', ').replace('_',' ')
        ingredient_text = ttk.Label(self.frame,text="Ingredients: " + text_builder)
        ingredient_text.grid(row=0,column = 1,columnspan=10,sticky="n")
        if self.main_app.isEmployeeMode == False:
            # Customer mode: show price and a single Buy button.
            drink_price_str = "Price: $"+str(self.current_drink.price)
            drink_price = tk.Label(self.frame,text= drink_price_str)
            drink_price.grid(row=1,column=2)
            buy_button = ttk.Button(self.frame,text="Buy?",command=self.startBuyEvent)
            buy_button.grid(row=2,column=2,sticky="nsew")
            self.drink_profile_elements.extend((buy_button,drink_price))
        else:
            # Employee mode: buttons for ordering 1-5 drinks at once.
            quantity_label = ttk.Label(self.frame,text="Order Quantity:")
            quantity_label.grid(row=2,column=1,sticky="n")
            self.drink_profile_elements.append(quantity_label)
            for i in range(5):
                # x=i+1 default binds the quantity at lambda creation time.
                quantity_btn = ttk.Button(self.frame,text=str(i+1),
                            command= lambda x = i+1: self.startEmployeeOrderEvent(x) )
                quantity_btn.configure(width=2)
                quantity_btn.grid(row= 2,column=i+2,padx=6,sticky="w")
                self.drink_profile_elements.append(quantity_btn)
        back_button = ttk.Button(self.frame, text="Back",command=self.resetDrinkOptions)
        back_button.grid(row=3,column=0)
        self.drink_profile_elements.extend((img_item,name_of_drink,ingredient_text,back_button))
def startBuyEvent(self):
"""Starts the buying process for the customer mode."""
self.isOrdered = self.displayConfirmationMessageBox()
if self.isOrdered:
pass
def startEmployeeOrderEvent(self,num_of_drinks):
"""Starts the ordering process for the employee mode."""
self.isOrdered = self.displayConfirmationMessageBox("Employee",num_of_drinks)
if self.isOrdered:
pass
    def displayConfirmationMessageBox(self,mode="Customer",num_of_drinks=1):
        """Asks the user if they are sure about their drink selection.

        mode: "Customer" confirms a single drink; anything else is treated
        as an employee bulk order of num_of_drinks.
        Returns True when the user confirmed (order is then logged),
        False when they cancelled.

        NOTE(review): this method logs via self.main_app_instance while the
        sibling methods use self.main_app — confirm both attributes exist.
        """
        if mode == "Customer":
            if messagebox.askokcancel("Confirmation","Are you sure that you want a "+self.current_drink.name+"?",
                                                parent=self.master):
                print("Order is confirmed.")
                print("One order of "+self.current_drink.name +" on the way.")
                msg = "1 "+ self.current_drink.name + " was ordered."
                self.main_app_instance.writeToDrinkSalesLog(msg)
                return True
            else:
                return False
        else:
            if messagebox.askokcancel("Confirmation",
                "Are you sure that you want "+str(num_of_drinks)+" "+self.current_drink.name.title().replace("_"," ")+"(s) ?",
                                                parent=self.master):
                print("Order is confirmed.")
                print( str(num_of_drinks)+" order(s) of "+self.current_drink.name +" on the way.")
                # Singular vs plural log message (callers always pass >= 1).
                if num_of_drinks == 1:
                    msg = str(num_of_drinks)+" "+ self.current_drink.name + " was ordered."
                elif num_of_drinks > 1:
                    msg = str(num_of_drinks)+" "+ self.current_drink.name + "s were ordered."
                self.main_app_instance.writeToDrinkSalesLog(msg)
                return True
            else:
                return False
def resetDrinkOptions(self):
for element in self.drink_profile_elements:
element.grid_forget()
self.displayDrinkOptionsInGUI()
def createHelpMenu(self,menu_name=""):
"""Defines a menu that offers information about the machine."""
help_menu = tk.Menu(self.parent_menu,tearoff=0)
self.parent_menu.add_cascade(label=menu_name, menu= help_menu)
help_menu.add_separator()
help_menu.add_command(label="", command= self.secret) #allows exit out of customer window
help_menu.add_separator()
#for employee window
if menu_name != "":
help_menu.add_separator()
help_menu.add_command(label="How to operate", command= self.showOperationInstructions)
help_menu.add_separator()
help_menu.add_command(label="Info About Contributors",command=self.showContributors)
help_menu.add_separator()
def secret(self):
"""Does a secret action."""
self.var = self.var - 1
if self.var == 0:
self.master.destroy()
self.main_app.master.deiconify()
    def showContributors(self):
        """Lists contributors of the project in a top level window's message box."""
        top = tk.Toplevel()
        # Reuse the main application's icon on the popup.
        top.tk.call("wm","iconphoto",top._w,self.main_app.icon_img)
        top.attributes('-topmost','true')
        top.title("Contributors:")
        top.geometry("350x280")
        self.contributors_msg = tk.Message(top)
        msg= """Nathan Bane:\nEmbedded Systems Design\n\nChris Blanks:\nSoftware Design
        \nRyan Valente:\nMechanical Design\n\n University of Maryland Eastern Shore\n\nContact >>> Cablanks@umes.edu
        \nIcon Creator: RoundIcons\nhttps://www.flaticon.com/authors/roundicons"""
        self.contributors_msg.config(text=msg,font= ("Arial",12,""))
        self.contributors_msg.grid()
    def showOperationInstructions(self):
        """Instructs the user on how to order from the GUI.

        Reads the text at self.operation_instructions_file_path and shows it
        in a scrollable, always-on-top Toplevel window.
        """
        with open(self.operation_instructions_file_path,'r') as file:
            lines = file.readlines()
            msg = " ".join(lines)
        top = tk.Toplevel()
        top.tk.call("wm","iconphoto",top._w,self.main_app.icon_img)
        top.attributes('-topmost','true')
        top.title("How to Operate:")
        top.geometry("600x230")
        scroll = tk.Scrollbar(top,orient= tk.VERTICAL)
        scroll.grid(row=0,column=1,sticky="ns")
        canvas = tk.Canvas(top,width=350,
                               height=230,
                               scrollregion=(0,0,2000,2000))
        canvas.grid(row=0,column=0,sticky="nsew")
        # Cross-wire scrollbar and canvas.
        scroll.config(command=canvas.yview)
        canvas.config(yscrollcommand = scroll.set)
        canvas.create_text((0,0),text=msg,anchor="nw") #top left and anchored to the right
        # Let the canvas cell absorb window resizes.
        top.rowconfigure(0,weight=1)
        top.columnconfigure(0,weight=1)
|
from typing import List
from enum import Enum
from pydantic import BaseModel, Field
class ConstraintType(str, Enum):
    """Constraint types recognised by this model (currently only uniqueness)."""
    UNIQUENESS = 'UNIQUENESS'
class EntityType(str, Enum):
    """Kind of graph entity a constraint applies to: a node or a relationship."""
    NODE = 'NODE'
    RELATIONSHIP = 'RELATIONSHIP'
class Neo4jConstraintQueryRecord(BaseModel):
    """One record of a Neo4j constraint query.

    The ``alias`` values map the record's camelCase keys (e.g. 'ownedIndexId')
    onto snake_case attributes, so instances are built from the raw record
    dict using the aliased names.
    """
    id_: int = Field(..., alias='id')  # trailing underscore avoids shadowing builtin id()
    owned_index_id: int = Field(..., alias='ownedIndexId')
    entity_type: EntityType = Field(..., alias='entityType')
    labels_or_types: List[str] = Field(..., alias='labelsOrTypes')
    type_: ConstraintType = Field(..., alias='type')  # trailing underscore avoids shadowing builtin type()
    name: str
    properties: List[str]
|
import numpy as np
import math
import cv2.ximgproc
from matplotlib import pylab as plt
import scipy as sp
from scipy import ndimage
from scipy.optimize import curve_fit
from scipy import asarray as ar,exp
from scipy.signal import find_peaks_cwt
import matplotlib.cm as cm
# ---- Analysis configuration flags ----
plotIt=False
GBP=False #gain bad pix correction (preprocessing pass 1)
BLF=False #bilateral filter and sobel operations (preprocessing pass 2)
SOB=False
# MU field strengths of the acquisitions to analyse.
Fields=np.array([8,10,20,25,30,40,50])
# Raw detector geometry (frames are 4096x4096 before 4x4 binning).
pixels_x=4096
pixels_y=4096
pixels=4096*4096
# Per-pixel flat-field gain map.
gains=np.fromfile("FlatFieldGains.raw", dtype='float64', sep="")
gains=gains.reshape([1,4096])
# Pedestal (dark) frame for subtraction.
dark=np.fromfile("Pedestal/ped_10_27_LE.raw", dtype='<i2', sep="") # little endian (<) int 2 (i2) bytes = 16 bit signed int
dark=dark.reshape(1,pixels)
distanceCalibration=1/0.6072
colors = iter(cm.rainbow(np.linspace(0, 1, 6)))
# Three figures: trajectories, absolute speeds, normalised speeds.
fig,ax=plt.subplots()
fig2,ax2=plt.subplots()
fig3,ax3=plt.subplots()
leaf_of_choice=10
# Flat per-pixel intensity error assumed for the binned 1024x1024 frames.
std_file=np.ones(1024*1024)
std_file=std_file*20
std_file=std_file.reshape(1024,1024)
errorOnPixIntensity=15
# leaf_gradients_dict={}
def line(x, m, c):
    """Straight line y = m*x + c (model function for curve_fit)."""
    return c + x * m
def gaus(x, a, x0, sigma):
    """Gaussian a*exp(-(x-x0)^2 / (2*sigma^2)) (model function for curve_fit).

    Uses np.exp instead of the `exp` re-exported by
    `from scipy import asarray as ar,exp` at the top of the file: SciPy's
    numpy-alias re-exports (scipy.exp, scipy.asarray, ...) were deprecated
    and removed upstream, while np.exp is equivalent and array-safe.
    """
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
def lognorm(x, a, mu, sigma):
    """Scaled cumulative curve a * (0.5 + 0.5*erf((ln x - mu)/sqrt(2*sigma^2))).

    Scalar-only: uses math.log / math.erf.
    """
    z = (math.log(x) - mu) / math.sqrt(2 * sigma ** 2)
    return a * (0.5 + 0.5 * math.erf(z))
def gainsBadPix(origIm, gainIm, pedIm, frames):
    """Pedestal-subtract, repair known bad rows, gain-correct and 4x4-bin
    a stack of raw detector frames.

    origIm : flat array holding `frames` raw 4096x4096 frames
    gainIm : per-pixel gain map (broadcastable over a frame row, e.g. (1, 4096))
    pedIm  : pedestal (dark) frame, flattened to (1, pixels)
    frames : number of frames contained in origIm
    Returns the corrected stack binned down to (frames, 1024, 1024).

    Changes vs. original: prints are parenthesized (the py2-only print
    statements fail to parse on py3, the parenthesized single-argument
    form behaves identically on both); the 31 copy-pasted "firmware line"
    fixes are collapsed into a loop; and the gain division now uses the
    gainIm parameter (previously the tiled gainIm was ignored and the
    module-level global `gains` used instead — identical values at the
    existing call site, which passes `gains`).
    """
    origIm = origIm.reshape([frames, pixels])
    # pedestal subtraction
    origIm = origIm - pedIm
    print("ped subtraction")
    origIm = origIm.reshape([frames, pixels_y, pixels_x])
    # Known bad row groups: rebuild each row from its nearest good neighbours.
    origIm[:, 1022, :] = (origIm[:, 1021, :] + origIm[:, 1024, :]) / 2
    origIm[:, 1023, :] = (origIm[:, 1022, :] + origIm[:, 1024, :]) / 2
    origIm[:, 138, :] = (origIm[:, 137, :] + origIm[:, 141, :]) / 2
    origIm[:, 139, :] = (origIm[:, 138, :] + origIm[:, 141, :]) / 2
    origIm[:, 140, :] = (origIm[:, 139, :] + origIm[:, 141, :]) / 2
    origIm[:, 2599, :] = (origIm[:, 2598, :] + origIm[:, 2602, :]) / 2
    origIm[:, 2600, :] = (origIm[:, 2599, :] + origIm[:, 2602, :]) / 2
    origIm[:, 2601, :] = (origIm[:, 2600, :] + origIm[:, 2602, :]) / 2
    print("bad cols corrected")
    # "Firmware lines": the last row plus every 128th row (3967, 3839, ...,
    # 127, descending as in the original); interpolate from the neighbours.
    origIm[:, 4095, :] = origIm[:, 4094, :]
    for row in range(3967, 126, -128):
        origIm[:, row, :] = (origIm[:, row - 1, :] + origIm[:, row + 1, :]) / 2
    print("firmware lines corrected")
    gainMean = gainIm.mean()
    gainIm = np.tile([gainIm], (frames, 1, 1))
    # BUG FIX: divide by the tiled gain map passed in, not the global `gains`.
    origIm = origIm / gainIm * gainMean
    # 4x4 binning down to 1024x1024 per frame.
    origIm = origIm.reshape(frames, 1024, 4, 1024, 4).mean((2, 4))
    print("gain corrected")
    return origIm
def BLFilt(im, frames):
    """Bilaterally filter each 1024x1024 frame of a flat image stack.

    Runs two bilateral-filter passes per frame (denoising while preserving
    edges), writes the result back into `im` in place, and returns `im`.

    Fix: the Python-2-only print statements are parenthesized so the file
    parses on Python 3 as well (the single-argument form is identical in 2).
    """
    print(im.shape)
    for frame in range(0, frames):
        if (frame % 20 == 0):
            print("frame = " + str(frame))
        startFrame = 0 + (1024 * 1024) * frame
        endFrame = 0 + (1024 * 1024) * (frame + 1)
        singleFrameIm = im[startFrame:endFrame]
        singleFrameIm = singleFrameIm.reshape([1024, 1024])
        singleFrameIm = np.array(singleFrameIm, dtype=np.float32)
        # bilateral filter to remove noise while preserving edges
        singleFrameIm = cv2.bilateralFilter(singleFrameIm, 50, 3500, 2000)
        singleFrameIm = cv2.bilateralFilter(singleFrameIm, 10, 1000, 2000)
        im[startFrame:endFrame] = singleFrameIm.ravel()
    print(im.shape)
    return im
def Sob_op(im, frames):
    """Apply a horizontal-Sobel edge filter (absolute response) plus a
    bilateral smoothing pass to each 1024x1024 frame of a flat stack.

    Modifies `im` in place and returns it.

    Fixes: Python-2-only print statements parenthesized (identical output,
    also valid on Python 3); the constant Sobel kernel is hoisted out of
    the per-frame loop.
    """
    print(im.shape)
    # Loop-invariant kernel: build once instead of once per frame.
    sobel_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    for frame in range(0, frames):
        if (frame % 20 == 0):
            print("frame = " + str(frame))
        startFrame = 0 + (1024 * 1024) * frame
        endFrame = 0 + (1024 * 1024) * (frame + 1)
        singleFrameIm = im[startFrame:endFrame]
        singleFrameIm = singleFrameIm.reshape([1024, 1024])
        sobelSigned = ndimage.convolve(singleFrameIm, sobel_kernel)
        sobelAbs = np.absolute(sobelSigned)
        sobelAbs2 = np.array(sobelAbs, dtype=np.float32)
        sobelAbs2 = cv2.bilateralFilter(sobelAbs2, 24, 5000, 5000)
        im[startFrame:endFrame] = sobelAbs2.ravel()
    print(im.shape)
    return im
# Main analysis loop over MU field strengths: load (or build and cache) the
# corrected image stack for each field, locate MLC leaf edges in every frame
# via a Sobel filter + Gaussian peak fits, accumulate per-leaf positions over
# time, then fit straight lines to get per-leaf speeds and plot everything.
for field in range(1,7):#0,len(Fields)
    colorChoice=next(colors)
    # if (Fields[field]==8 or Fields[field]==20 or Fields[field]==30 or Fields[field]==40):
    #     continue
    print ("field ",Fields[field])
    frames=0
    if (GBP):
        # Preprocessing pass 1: gain/bad-pixel correct the raw stack, cache to disk.
        im=np.fromfile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"_Sub_LE.raw", dtype='<i2', sep="") # little endian (<) int 2 (i2) bytes = 16 bit signed int
        frames=len(im)/pixels
        im=gainsBadPix(im,gains,dark,frames)
        print "gain and bad pix corrected"
        print ("dtype",im.dtype)
        im.tofile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorr"+"_Sub_LE.raw")
        continue
    #iterate over frames 22 - 41 for 8MU
    if BLF:
        # Preprocessing pass 2: bilateral-filter the corrected stack, cache to disk.
        im=np.fromfile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorr"+"_Sub_LE.raw", dtype='float64', sep="") # little endian (<) int 2 (i2) bytes = 16 bit signed int
        frames=len(im)/1024/1024
        im=BLFilt(im, frames)
        im.tofile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBL"+"_Sub_LE.raw")
        continue
    else:
        # Normal analysis path: load the fully preprocessed stack.
        im=np.fromfile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBL"+"_Sub_LE.raw", dtype='float64', sep="")
        print "loaded Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBL"+"_Sub_LE.raw"
        frames=len(im)/1024/1024
    # if SOB:
    #     im=np.fromfile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBL"+"_Sub_LE.raw", dtype='float64', sep="") # little endian (<) int 2 (i2) bytes = 16 bit signed int
    #     frames=len(im)/1024/1024
    #     im=Sob_op(im, frames)
    #     im.tofile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBLSOB"+"_Sub_LE.raw")
    #     continue
    # else:
    #     im=np.fromfile("Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBLSOB"+"_Sub_LE.raw", dtype='float64', sep="")
    #     print "loaded Z_plan_vmat_E_6_MU_"+str(Fields[field])+"GBPcorrBLSOB"+"_Sub_LE.raw"
    #     frames=len(im)/1024/1024
    nLeaves=18
    # Per-leaf accumulators: fitted edge positions (cm), their fit errors,
    # and the timestamps (s) at which they were measured.
    leaf_pos_dict={}
    leaf_pos_error_dict={}
    leaf_n_points_dict={}
    for leaf in range(0,nLeaves):
        leaf_pos_dict[leaf]=np.array([])
        leaf_pos_error_dict[leaf]=np.array([])
        leaf_n_points_dict[leaf]=np.array([])
    for frame in range(0,frames):
        if (frame%15==0):
            print "frame = "+str(frame)
        startFrame=0+(1024*1024)*frame
        endFrame=0+(1024*1024)*(frame+1)
        singleFrameIm=im[startFrame:endFrame]
        singleFrameIm=singleFrameIm.reshape([1024,1024])
        # Horizontal Sobel kernel -> signed edge response; the smoothed
        # absolute response is what the peak finder runs on.
        sobel_kernel=np.array([[1,2,1],[0,0,0],[-1,-2,-1]])
        sobelSigned = ndimage.convolve(singleFrameIm, sobel_kernel)
        sobelAbs=np.absolute(sobelSigned)
        sobelAbs2 = np.array(sobelAbs, dtype = np.float32)
        sobelAbs2=cv2.bilateralFilter(sobelAbs2,24,5000,5000)
        #iterate over each leaf in a frame
        start=54 #196/4
        leafWidth=53
        nSlicesUsed=25
        for leaf in range(0,nLeaves):
            #choose y slices to use for each leaf
            sliceStart=start+leaf*leafWidth
            sliceEnd=sliceStart+nSlicesUsed
            # print "start="+str(sliceStart*4)+" end="+str(sliceEnd*4)
            sobelSlice=sobelAbs2[...,sliceStart:sliceEnd]
            # NOTE(review): sobelSlice2 is computed identically to sobelSlice
            # and is only used for the debug overlay plot below.
            sobelSlice2=sobelAbs2[...,sliceStart:sliceEnd]
            singleFrameImSlice=singleFrameIm[...,sliceStart:sliceEnd]
            sobelSlice=sobelSlice.mean(axis=1,keepdims=True)
            sobelSlice2=sobelSlice2.mean(axis=1,keepdims=True)
            singleFrameImSlice=singleFrameImSlice.mean(axis=1,keepdims=True)
            std_Slice=std_file[...,sliceStart:sliceEnd]
            # print std_Slice.mean()
            # std_Slice=std_Slice*std_Slice
            # std_Slice=std_Slice.sum(axis=1,keepdims=True)
            # std_Slice=np.sqrt(std_Slice)
            # Error of the mean over the averaged slices.
            std_Slice=std_file[...,10:11]*np.sqrt(nSlicesUsed)/nSlicesUsed
            # print std_Slice.mean()
            # raw_input("pause")
            sobelSliceSigned=sobelSigned[...,sliceStart:sliceEnd]
            sobelSliceSigned=sobelSliceSigned.mean(axis=1,keepdims=True)
            #turn back to true 1D array
            sobelSlice=sobelSlice.ravel()
            sobelSlice2=sobelSlice2.ravel()
            sobelSliceSigned=sobelSliceSigned.ravel()
            std_Slice=std_Slice.ravel()
            singleFrameImSlice=singleFrameImSlice.ravel()
            xDat=np.arange(sobelSlice.size)
            #peak finder
            indices = find_peaks_cwt(sobelSlice, np.arange(50,80), noise_perc=10) #np.arange(50,80) range of widths of peaks
            # print("indices",indices)
            if indices.size==0:
                print "continuing"
                continue
            # print "here"
            if (plotIt and leaf==10):
                h1=plt.subplot(2, 1, 1)
                plt.errorbar(xDat,sobelSlice,yerr=std_Slice, color='g')
                plt.errorbar(xDat,sobelSlice2,yerr=std_Slice, color='m')
                # h1=plt.errorbar(xDat,singleFrameImSlice,yerr=std_Slice, color='k')
                # plt.errorbar(xDat,singleFrameImSlice,yerr=std_Slice)
                plt.plot(indices, sobelSlice[indices],'r^')
                plt.pause(0.001)
                raw_input(leaf)
            for ind in range(0,indices.size):
                mean=indices[ind]
                # Sum the signed response around the peak to decide which
                # leaf bank this edge belongs to.
                pos_or_neg=sobelSliceSigned[int(np.floor(mean))]
                rangePoN=5
                if (mean>(1024-rangePoN)or mean<rangePoN):
                    pos_or_neg=0
                else:
                    for pon in range(0,5):
                        pos_or_neg+=sobelSliceSigned[int(np.floor(mean))+pon]+sobelSliceSigned[int(np.floor(mean))-pon]
                # Asymmetric fit window depending on the leaf bank.
                if (pos_or_neg>0): #left leaf
                    lowerWindow=28
                    upperWindow=20
                else:
                    lowerWindow=14
                    upperWindow=28
                # Clamp the fit window to the image bounds.
                if (mean-lowerWindow<0):
                    lower=0
                else:
                    lower=mean-lowerWindow
                if (mean+upperWindow>1024):
                    upper=1024
                else:
                    upper=mean+upperWindow
                xDatSub=xDat[lower:upper]
                sobelSliceSub=sobelSlice[lower:upper]
                sigmaParam=5
                windowSize=len(xDatSub)
                sigmaArray=np.ones(windowSize)*15
                try :
                    popt,pcov = curve_fit(gaus,xDatSub,sobelSliceSub,p0=[700,mean,sigmaParam],sigma=sigmaArray)
                except:
                    popt=[0,0,0]
                    pcov=0
                # print "index="+str(ind)
                # print "mean="+str(mean)
                # print ('popt',popt)
                # Accept only fits with plausible peak width and amplitude.
                if(abs(popt[2])<70 and abs(popt[2])>10 and abs(popt[0])>200): #
                    # print ("popt",popt[1])
                    if(plotIt and leaf==10 ):
                        plt.plot(xDatSub,gaus(xDatSub,*popt),'bo:',label='fit')
                        plt.pause(0.001)
                        raw_input(str(frame)+" fit")
                    # print sobelSliceSigned[int(np.floor(popt[1]))]
                    # print sobelSliceSigned[int(np.floor(popt[1]))+1]
                    # print sobelSliceSigned[int(np.floor(popt[1]))-1]
                    # Re-evaluate the bank sign at the fitted peak centre.
                    pos_or_neg=sobelSliceSigned[int(np.floor(popt[1]))]
                    for pon in range(0,10):
                        pos_or_neg+=sobelSliceSigned[int(np.floor(popt[1]))+pon]+sobelSliceSigned[int(np.floor(popt[1]))-pon]
                    # print "frame"+str(frame)+" pos "+str(0.0145*4*popt[1]*distanceCalibration/10)
                    if (pos_or_neg>0): #for z plan leaves
                        # if leaf==1:
                        #     print popt[1]
                        #     print 0.0145*4*popt[1]
                        # leaf_pos_dict[leaf]=np.append(leaf_pos_dict[leaf],0.0145*4*popt[1]*distanceCalibration/10.0) #*0.0145 for mm
                        # leaf_pos_error_dict[leaf]=np.append(leaf_pos_error_dict[leaf],0.0145*4*pcov[1,1]*distanceCalibration/10)
                        # leaf_n_points_dict[leaf]=np.append(leaf_n_points_dict[leaf],frame*0.07692) #s *0.023 for seconds
                        # 0.0145 mm/pixel * 4 (binning) * calibration, /10 -> cm.
                        leaf_pos_dict[leaf]=np.append(leaf_pos_dict[leaf],(0.0145*4*popt[1]*distanceCalibration/10.0)) #*0.0145 for mm
                        leaf_pos_error_dict[leaf]=np.append(leaf_pos_error_dict[leaf],0.0145*4*pcov[1,1]*distanceCalibration/10)
                        if (field==1):
                            # Fields[1] (10 MU) gets a 0.12 s timestamp offset.
                            leaf_n_points_dict[leaf]=np.append(leaf_n_points_dict[leaf],(frame*0.07692)+0.12) #s *0.023 for seconds
                        else:
                            leaf_n_points_dict[leaf]=np.append(leaf_n_points_dict[leaf],frame*0.07692) #s *0.023 for seconds
                    # print ("frame:",frame,"leaf:",leaf,"error on mean",pcov[1,1])
    # h1.cla()
    # Plot the trajectory of one representative leaf for this field.
    leaf_of_choice=9
    label_str=str(Fields[field])+"MU"
    # print leaf_pos_dict[1]
    # ax.plot(leaf_n_points_dict[leaf_of_choice], leaf_pos_dict[leaf_of_choice], marker='.', color=next(colors),label=label_str,linestyle="None")
    ax.errorbar(leaf_n_points_dict[leaf_of_choice], leaf_pos_dict[leaf_of_choice], marker='.', color=colorChoice,label=label_str,linestyle="None",yerr=leaf_pos_error_dict[leaf_of_choice])
    ax.legend(loc='lower right',numpoints=1)
    plt.xlabel("Time (s)")
    plt.ylabel("Position (cm)")
    plt.pause(0.01)
    # plt.savefig("LeafTrajectoriesNo9WithFitLines.png")
    # raw_input("continue")
    plotSpeeds=False
    plotTrajectories=True
    speeds_array=np.array([])
    leaves_array = np.array([])
    speeds_array_sigma=np.array([])
    if plotTrajectories:
        # Fit a straight line to each leaf's trajectory; the slope is the
        # leaf speed, pcov2[0,0] its variance.
        for leaf in range(0,nLeaves):
            grad=1
            cept=0
            try :
                popt2,pcov2 = curve_fit(line,leaf_n_points_dict[leaf],leaf_pos_dict[leaf],sigma=leaf_pos_error_dict[leaf],p0=[grad, cept])
            except:
                popt2=[0,0]
                pcov2=[[0,0],[0,0]]
            # leaf_gradients_dict[leaf]=popt2[0]
            leaves_array=np.append(leaves_array,leaf+1)
            speeds_array=np.append(speeds_array,popt2[0])
            speeds_array_sigma=np.append(speeds_array_sigma,pcov2[0,0])
            # Per-field x ranges for drawing the fitted line.
            if Fields[field]==10:
                xDat2=np.arange(frames+2)
            elif Fields[field]==40:
                xDat2=np.arange(math.ceil(frames*1.43))
            else:
                xDat2=np.arange(frames)
            # xDat2=np.arange(int(leaf_n_points_dict[nLeaves-1])+1)
            xDat2=xDat2*0.07692
            if leaf==leaf_of_choice:
                fig1=plt.figure(1)
                plot1=plt.plot(xDat2,line(xDat2,*popt2),color=colorChoice)#,label=label_str+'fit'
                plt.pause(0.01)
            # plt.plot(leaf_n_points_dict[leaf],leaf_pos_dict[leaf],'go')
        speeds_array_STD=np.sqrt(speeds_array_sigma)
        # print leaves_array.size
        # print speeds_array.size
        # print speeds_array_STD.size
        # plt.plot(leaves_array,speeds_array,'go')
        # plt.figure(1)
        # plt.errorbar(leaves_array, speeds_array,fmt='b.',yerr=speeds_array_STD)
        # plt.xlabel("leaf number")
        # plt.ylabel("leaf speed (mm$s^-1$)")
        # plt.pause(0.001)
        # plt.savefig(str(Fields[field])+"MU_leafVelocities.png")
        # plt.close()
        fig2=plt.figure(2)
        er2=ax2.errorbar(leaves_array, speeds_array,fmt='b.',yerr=speeds_array_STD,label=label_str,color=colorChoice)
        ax2.legend(loc='upper left',numpoints=1)
        ax2.set_xlim(0,18.9)
        ax2.set_ylim(0,4.1)
        plt.xlabel("Leaf number")
        plt.ylabel("Leaf speed (cms$^{-1}$)")
        plt.pause(0.01)
        # Normalise to the average speed of the outer leaves (1-3 and 16-18).
        avSpeedLineBank=(speeds_array[0]+speeds_array[1]+speeds_array[2]+speeds_array[15]+speeds_array[16]+speeds_array[17])/6
        speeds_array=speeds_array/avSpeedLineBank
        speeds_array_STD=speeds_array_STD/avSpeedLineBank
        fig3=plt.figure(3)
        er2=ax3.errorbar(leaves_array, speeds_array,fmt='b.',yerr=speeds_array_STD,label=label_str,color=colorChoice)
        ax3.legend(loc='upper left',numpoints=1)
        ax3.set_xlim(0,18.9)
        ax3.set_ylim(0,1.8)
        plt.ylabel("Normalised leaf speed (cms$^{-1}$)")
        plt.pause(0.01)
        # raw_input('hi')
# plt.figure(2)
# Persist the three summary figures, then hold the plots open until Enter.
fig3.savefig("All_leafVelocities_norm.png")
fig2.savefig("All_leafVelocities.png")
fig1.savefig("LeafTrajectoriesNo9WithFitLinesAfter.png")
# # singleFrameIm.tofile("InputIm4096.raw", sep="")
# # outputFilt1.tofile("Filter1.raw", sep="")
# # outputFilt2.tofile("Filter2.raw", sep="")
# # # sobel.tofile("FilterSobel1.raw", sep="")
# # # sobel2.tofile("FilterSobel2.raw", sep="")
# # sobelAbs.tofile("FilterSobelAbs.raw", sep="")
raw_input("Press enter to continue")
|
import pyglet
from pyglet import resource
# Point pyglet's resource loader at the bundled art assets.
pyglet.resource.path = ['resources']
pyglet.resource.reindex()
# Load all textures up front so later draws never hit the disk.
backgroundImage = pyglet.resource.image('background.png')
cursorImage = pyglet.resource.image('cursor.png')
cursorAltImage = pyglet.resource.image('cursor_alt.png')
pauseImage = pyglet.resource.image('pause.png')
resumeImage = pyglet.resource.image('resume.png')
moonImage = pyglet.resource.image('moon.png')
planetImage = pyglet.resource.image('planet.png')
starImage = pyglet.resource.image('star.png')
holeImage = pyglet.resource.image('hole.png')
sizeImage = pyglet.resource.image('size.png')
noImage = pyglet.resource.image('noImage.png')
# "Selected" variants used to highlight the active object type.
moonSelectedImage = pyglet.resource.image('moon_selected.png')
planetSelectedImage = pyglet.resource.image('planet_selected.png')
starSelectedImage = pyglet.resource.image('star_selected.png')
holeSelectedImage = pyglet.resource.image('hole_selected.png')
def CenterImage(image):
    """Move the image's anchor point to its centre (modifies in place)."""
    half_width = image.width // 2
    half_height = image.height // 2
    image.anchor_x = half_width
    image.anchor_y = half_height
# Centre the anchors of the object sprites.
CenterImage(moonImage)
CenterImage(planetImage)
CenterImage(starImage)
CenterImage(holeImage) |
"""
Program name: red_color_segment_1 .py
Objective: Draw the primary red wedge. Darkest at center and getting
lighter as the wedge widens.
Keywords: canvas, red wedge, color
============================================================================79
Explanation: Establish the geometry for making color wheel segments.
Author: Mike Ohlson de Fine
"""
# red_color_segment_1.py
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
from Tkinter import *
root = Tk()
root.title("Red color wedge")
cw = 240 # canvas width
ch = 220 # canvas height
chart_1 = Canvas(root, width=cw, height=ch, background="white")
chart_1.grid(row=0, column=0)
theta_deg = 0.0
# Wedge geometry: apex at (x_orig, y_orig), opening upwards.
x_orig = 100
y_orig = 200
x_width = 80
y_hite = 180
xy0 = [x_orig, y_orig]
xy1 = [x_orig - x_width, y_orig - y_hite]
xy2 = [x_orig + x_width, y_orig - y_hite ]
wedge =[ xy0, xy1 , xy2 ]
width= 40 #standard disk diameter
hite = 80 # median wedge height.
hFac = [0.25, 0.45, 0.75, 1.2, 1.63, 1.87, 2.05] # Radial factors
wFac = [ 0.2, 0.36, 0.6, 1.0, 0.5, 0.3, 0.25] # disk diameter factors
# Color list. Elements increasing in darkness.
kulaRed = ["#000000","#6e0000","#a00000","#ff0000",\
"#ff5050", "#ff8c8c", "#ffc8c8", "#440000" ]
kula = kulaRed
# Outer (black background) wedge.
wedge =[ xy0, xy1 , xy2 ]
chart_1.create_polygon(wedge,fill=kula[0])
# Narrower dark red wedge drawn on top of the black one.
x_width = 40
y_hite = 160
xy1 = [x_orig - x_width, y_orig - y_hite]
xy2 = [x_orig + x_width, y_orig - y_hite ]
wedge =[ xy0, xy1 , xy2 ]
chart_1.create_polygon(wedge,fill=kula[1])
# Seven red disks along the wedge axis, growing lighter and positioned by
# the radial/diameter factor tables above.
for i in range(0, 7): # red disks
    x0_disk = xy0[0] - width * wFac[i]/2 # bottom left
    y0_disk = xy0[1] - hite * hFac[i] + width * wFac[i]/2
    xya = [x0_disk, y0_disk] # BOTTOM LEFT
    x1_disk = xy0[0] + width * wFac[i]/2 # top right
    y1_disk = xy0[1] - hite * hFac[i] - width * wFac[i]/2
    xyb = [x1_disk, y1_disk] #TOP RIGHT
    chart_1.create_oval(xya ,xyb , fill=kula[i], outline=kula[i])
root.mainloop()
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
import vstruct
from vstruct.primitives import *
class Elf32(vstruct.VStruct):
    """ELF32 file header (Elf32_Ehdr) as a vstruct definition.

    Field order and widths mirror the on-disk header layout, so parsing a
    file's first bytes into this struct yields the header fields directly.
    """
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.e_ident = v_bytes(16)      # magic + class/data/version identification
        self.e_type = v_uint16()        # object file type
        self.e_machine = v_uint16()     # target architecture
        self.e_version = v_uint32()
        self.e_entry = v_uint32()       # entry point virtual address
        self.e_phoff = v_uint32()       # program header table file offset
        self.e_shoff = v_uint32()       # section header table file offset
        self.e_flags = v_uint32()
        self.e_ehsize = v_uint16()      # size of this header
        self.e_phentsize = v_uint16()   # size of one program header entry
        self.e_phnum = v_uint16()       # number of program header entries
        self.e_shentsize = v_uint16()   # size of one section header entry
        self.e_shnum = v_uint16()       # number of section header entries
        self.e_shstrndx = v_uint16()    # index of the section-name string table
class Elf32Dynamic(vstruct.VStruct):
    """ELF32 dynamic-section entry (Elf32_Dyn): a tag/value pair."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.d_tag = v_uint32()     # entry type (DT_* tag)
        self.d_value = v_uint32()   # value or address, meaning depends on d_tag
class Elf32Reloc(vstruct.VStruct):
    """ELF32 relocation entry (Elf32_Rel): offset plus packed symbol/type info.

    NOTE(review): r_offset is declared v_ptr() while sibling structs use
    v_uint32 — confirm v_ptr resolves to 32 bits in this context.
    """
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.r_offset = v_ptr()
        self.r_info = v_uint32()    # packed symbol index + relocation type
class Elf32Symbol(vstruct.VStruct):
    """ELF32 symbol table entry (Elf32_Sym)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.st_name = v_uint32()   # offset into the string table
        self.st_value = v_uint32()
        self.st_size = v_uint32()
        self.st_info = v_uint8()    # packed binding + type
        self.st_other = v_uint8()
        self.st_shndx = v_uint16()  # index of the section the symbol belongs to
# def Elf64Symbol
|
from __future__ import print_function, unicode_literals
import re, time, collections
from twisted.python import log
from twisted.internet import protocol
from twisted.application import service
# Time (seconds) and size (bytes) units used throughout the relay.
SECONDS = 1.0
MINUTE = 60*SECONDS
HOUR = 60*MINUTE
DAY = 24*HOUR
MB = 1000*1000
def round_to(size, coarseness):
    """Round `size` up to the next multiple of `coarseness`."""
    buckets = 1 + int((size - 1) / coarseness)
    return int(coarseness * buckets)

def blur_size(size):
    """Blur a byte count for privacy-preserving usage records.

    0 stays 0; sizes under 1MB round up to 10kB granularity, under 1GB
    to 1MB, and everything else to 100MB.
    """
    if size == 0:
        return 0
    for threshold, coarseness in ((1e6, 10e3), (1e9, 1e6)):
        if size < threshold:
            return round_to(size, coarseness)
    return round_to(size, 100e6)
class TransitConnection(protocol.Protocol):
    """One client connection to the transit relay.

    Buffers the handshake ("please relay TOKEN [for side SIDE]\\n"), hands
    the token to the factory for matchmaking, and once a buddy connection
    is attached, blindly pipes bytes between the two transports.
    """
    def __init__(self):
        self._got_token = False     # token bytes once a handshake parsed, else False
        self._got_side = False      # side bytes (new handshake) or None/False
        self._token_buffer = b""    # pre-handshake accumulated bytes
        self._sent_ok = False       # True after b"ok\n" went out; we are relaying
        self._buddy = None          # the matched TransitConnection, if any
        self._had_buddy = False     # stays True even after the buddy disconnects
        self._total_sent = 0        # bytes relayed from us to the buddy

    def describeToken(self):
        """Return a short log-friendly "token-side" description string."""
        d = "-"
        if self._got_token:
            d = self._got_token[:16].decode("ascii")
            if self._got_side:
                d += "-" + self._got_side.decode("ascii")
            else:
                d += "-<unsided>"
        return d

    def connectionMade(self):
        # Record start time for usage accounting; cache the logging flag.
        self._started = time.time()
        self._log_requests = self.factory._log_requests

    def dataReceived(self, data):
        """Relay data if paired; otherwise accumulate and parse the handshake."""
        if self._sent_ok:
            # We are an IPushProducer to our buddy's IConsumer, so they'll
            # throttle us (by calling pauseProducing()) when their outbound
            # buffer is full (e.g. when their downstream pipe is full). In
            # practice, this buffers about 10MB per connection, after which
            # point the sender will only transmit data as fast as the
            # receiver can handle it.
            self._total_sent += len(data)
            self._buddy.transport.write(data)
            return
        if self._got_token: # but not yet sent_ok
            self.transport.write(b"impatient\n")
            if self._log_requests:
                log.msg("transit impatience failure")
            return self.disconnect() # impatience yields failure
        # else this should be (part of) the token
        self._token_buffer += data
        buf = self._token_buffer
        # old: "please relay {64}\n"
        # new: "please relay {64} for side {16}\n"
        (old, handshake_len, token) = self._check_old_handshake(buf)
        assert old in ("yes", "waiting", "no")
        if old == "yes":
            # remember they aren't supposed to send anything past their
            # handshake until we've said go
            if len(buf) > handshake_len:
                self.transport.write(b"impatient\n")
                if self._log_requests:
                    log.msg("transit impatience failure")
                return self.disconnect() # impatience yields failure
            return self._got_handshake(token, None)
        (new, handshake_len, token, side) = self._check_new_handshake(buf)
        assert new in ("yes", "waiting", "no")
        if new == "yes":
            if len(buf) > handshake_len:
                self.transport.write(b"impatient\n")
                if self._log_requests:
                    log.msg("transit impatience failure")
                return self.disconnect() # impatience yields failure
            return self._got_handshake(token, side)
        if (old == "no" and new == "no"):
            self.transport.write(b"bad handshake\n")
            if self._log_requests:
                log.msg("transit handshake failure")
            return self.disconnect() # incorrectness yields failure
        # else we'll keep waiting

    def _check_old_handshake(self, buf):
        # old: "please relay {64}\n"
        # return ("yes", handshake, token) if buf contains an old-style handshake
        # return ("waiting", None, None) if it might eventually contain one
        # return ("no", None, None) if it could never contain one
        wanted = len("please relay \n")+32*2
        if len(buf) < wanted-1 and b"\n" in buf:
            return ("no", None, None)
        if len(buf) < wanted:
            return ("waiting", None, None)
        mo = re.search(br"^please relay (\w{64})\n", buf, re.M)
        if mo:
            token = mo.group(1)
            return ("yes", wanted, token)
        return ("no", None, None)

    def _check_new_handshake(self, buf):
        # new: "please relay {64} for side {16}\n"
        # same ("yes"/"waiting"/"no", ...) contract as _check_old_handshake,
        # plus the extracted side on success.
        wanted = len("please relay  for side \n")+32*2+8*2
        if len(buf) < wanted-1 and b"\n" in buf:
            return ("no", None, None, None)
        if len(buf) < wanted:
            return ("waiting", None, None, None)
        mo = re.search(br"^please relay (\w{64}) for side (\w{16})\n", buf, re.M)
        if mo:
            token = mo.group(1)
            side = mo.group(2)
            return ("yes", wanted, token, side)
        return ("no", None, None, None)

    def _got_handshake(self, token, side):
        # Hand ourselves to the factory for matchmaking with the other side.
        self._got_token = token
        self._got_side = side
        self.factory.connection_got_token(token, side, self)

    def buddy_connected(self, them):
        """Called by the factory when our matching connection appears."""
        self._buddy = them
        self._had_buddy = True
        self.transport.write(b"ok\n")
        self._sent_ok = True
        # Connect the two as a producer/consumer pair. We use streaming=True,
        # so this expects the IPushProducer interface, and uses
        # pauseProducing() to throttle, and resumeProducing() to unthrottle.
        self._buddy.transport.registerProducer(self.transport, True)
        # The Transit object calls buddy_connected() on both protocols, so
        # there will be two producer/consumer pairs.

    def buddy_disconnected(self):
        """Called by our buddy's connectionLost: drop the pairing and close."""
        if self._log_requests:
            log.msg("buddy_disconnected %s" % self.describeToken())
        self._buddy = None
        self.transport.loseConnection()

    def connectionLost(self, reason):
        if self._buddy:
            self._buddy.buddy_disconnected()
        self.factory.transitFinished(self, self._got_token, self._got_side,
                                     self.describeToken())
        # Record usage. There are four cases:
        # * 1: we connected, never had a buddy
        # * 2: we connected first, we disconnect before the buddy
        # * 3: we connected first, buddy disconnects first
        # * 4: buddy connected first, we disconnect before buddy
        # * 5: buddy connected first, buddy disconnects first
        # whoever disconnects first gets to write the usage record (1,2,4)
        finished = time.time()
        if not self._had_buddy: # 1
            total_time = finished - self._started
            self.factory.recordUsage(self._started, "lonely", 0,
                                     total_time, None)
        if self._had_buddy and self._buddy: # 2,4
            total_bytes = self._total_sent + self._buddy._total_sent
            starts = [self._started, self._buddy._started]
            total_time = finished - min(starts)
            waiting_time = max(starts) - min(starts)
            self.factory.recordUsage(self._started, "happy", total_bytes,
                                     total_time, waiting_time)

    def disconnect(self):
        """Abort the connection before pairing and log it as an error."""
        self.transport.loseConnection()
        self.factory.transitFailed(self)
        finished = time.time()
        total_time = finished - self._started
        self.factory.recordUsage(self._started, "errory", 0,
                                 total_time, None)
class Transit(protocol.ServerFactory, service.MultiService):
    # I manage pairs of simultaneous connections to a secondary TCP port,
    # both forwarded to the other. Clients must begin each connection with
    # "please relay TOKEN for SIDE\n" (or a legacy form without the "for
    # SIDE"). Two connections match if they use the same TOKEN and have
    # different SIDEs (the redundant connections are dropped when a match is
    # made). Legacy connections match any with the same TOKEN, ignoring SIDE
    # (so two legacy connections will match each other).
    # I will send "ok\n" when the matching connection is established, or
    # disconnect if no matching connection is made within MAX_WAIT_TIME
    # seconds. I will disconnect if you send data before the "ok\n". All data
    # you get after the "ok\n" will be from the other side. You will not
    # receive "ok\n" until the other side has also connected and submitted a
    # matching token (and differing SIDE).
    # In addition, the connections will be dropped after MAXLENGTH bytes have
    # been sent by either side, or MAXTIME seconds have elapsed after the
    # matching connections were established. A future API will reveal these
    # limits to clients instead of causing mysterious spontaneous failures.
    # These relay connections are not half-closeable (unlike full TCP
    # connections, applications will not receive any data after half-closing
    # their outgoing side). Applications must negotiate shutdown with their
    # peer and not close the connection until all data has finished
    # transferring in both directions. Applications which only need to send
    # data in one direction can use close() as usual.
    MAX_WAIT_TIME = 30*SECONDS
    MAXLENGTH = 10*MB
    MAXTIME = 60*SECONDS
    protocol = TransitConnection
    def __init__(self, db, blur_usage):
        """Create the relay factory/service.

        db: DB-API-style connection used to record per-connection usage rows.
        blur_usage: rounding interval (seconds) applied to usage timestamps,
            or None to record exact values. When blurring is enabled,
            per-connection logging is also disabled (privacy).
        """
        service.MultiService.__init__(self)
        self._db = db
        self._blur_usage = blur_usage
        # Blurred usage implies no per-request logging.
        self._log_requests = blur_usage is None
        self._pending_requests = {} # token -> set((side, TransitConnection))
        self._active_connections = set() # TransitConnection
        self._counts = collections.defaultdict(int)
        self._count_bytes = 0
    def connection_got_token(self, token, new_side, new_tc):
        """Register a handshaking connection; pair it with a buddy if possible.

        A side of None is the legacy protocol and matches any other side.
        On a match, the two connections are glued together and every other
        pending connection for this token is disconnected and forgotten.
        """
        if token not in self._pending_requests:
            self._pending_requests[token] = set()
        potentials = self._pending_requests[token]
        for old in potentials:
            (old_side, old_tc) = old
            if ((old_side is None)
                or (new_side is None)
                or (old_side != new_side)):
                # we found a match
                if self._log_requests:
                    log.msg("transit relay 2: %s" % new_tc.describeToken())
                # drop and stop tracking the rest
                potentials.remove(old)
                for (_, leftover_tc) in potentials:
                    leftover_tc.disconnect() # TODO: not "errory"?
                self._pending_requests.pop(token)
                # glue the two ends together
                self._active_connections.add(new_tc)
                self._active_connections.add(old_tc)
                new_tc.buddy_connected(old_tc)
                old_tc.buddy_connected(new_tc)
                return
        if self._log_requests:
            log.msg("transit relay 1: %s" % new_tc.describeToken())
        potentials.add((new_side, new_tc))
        # TODO: timer
    def recordUsage(self, started, result, total_bytes,
                    total_time, waiting_time):
        """Persist one connection's usage row and update in-memory tallies.

        When blurring is enabled, `started` is rounded down to the blur
        interval and `total_bytes` is bucketed via blur_size().
        """
        if self._log_requests:
            log.msg("Transit.recordUsage (%dB)" % total_bytes)
        if self._blur_usage:
            started = self._blur_usage * (started // self._blur_usage)
            total_bytes = blur_size(total_bytes)
        self._db.execute("INSERT INTO `transit_usage`"
                         " (`started`, `total_time`, `waiting_time`,"
                         " `total_bytes`, `result`)"
                         " VALUES (?,?,?, ?,?)",
                         (started, total_time, waiting_time,
                          total_bytes, result))
        self._db.commit()
        self._counts[result] += 1
        self._count_bytes += total_bytes
    def transitFinished(self, tc, token, side, description):
        """Forget a finished connection, whether pending or active."""
        if token in self._pending_requests:
            side_tc = (side, tc)
            if side_tc in self._pending_requests[token]:
                self._pending_requests[token].remove(side_tc)
            if not self._pending_requests[token]: # set is now empty
                del self._pending_requests[token]
        if self._log_requests:
            log.msg("transitFinished %s" % (description,))
        self._active_connections.discard(tc)
    def transitFailed(self, p):
        """Log a failed connection; bookkeeping happens in transitFinished."""
        if self._log_requests:
            log.msg("transitFailed %r" % p)
        pass
    def get_stats(self):
        """Return nested dicts of active / since-reboot / all-time usage."""
        stats = {}
        def q(query, values=()):
            # Fetch the single scalar produced by an aggregate query.
            row = self._db.execute(query, values).fetchone()
            return list(row.values())[0]
        # current status: expected to be zero most of the time
        c = stats["active"] = {}
        c["connected"] = len(self._active_connections) / 2
        c["waiting"] = len(self._pending_requests)
        # usage since last reboot
        rb = stats["since_reboot"] = {}
        rb["bytes"] = self._count_bytes
        rb["total"] = sum(self._counts.values(), 0)
        rbm = rb["moods"] = {}
        for result, count in self._counts.items():
            rbm[result] = count
        # historical usage (all-time)
        u = stats["all_time"] = {}
        u["total"] = q("SELECT COUNT() FROM `transit_usage`")
        u["bytes"] = q("SELECT SUM(`total_bytes`) FROM `transit_usage`") or 0
        um = u["moods"] = {}
        um["happy"] = q("SELECT COUNT() FROM `transit_usage`"
                        " WHERE `result`='happy'")
        um["lonely"] = q("SELECT COUNT() FROM `transit_usage`"
                         " WHERE `result`='lonely'")
        um["errory"] = q("SELECT COUNT() FROM `transit_usage`"
                         " WHERE `result`='errory'")
        return stats
|
# Write an algorithm that calculates a 5% discount on a product's price.
n1 = float(input('Qual o valor original do produto ? '))
# n1 is already a float, so the extra float() wrappers the original placed
# around the arithmetic below were redundant and have been removed.
pc = n1 * 0.05  # discount amount (5% of the original price)
rs = n1 - pc    # final price after the discount
print('O valor do produto com desconto seria de R${:.2f}'.format(rs))
# It could also be done in the following way:
# preço = float(input('Qual o valor original do produto ? '))
# novo = preço - (preço * 5 / 100)
|
from __future__ import absolute_import
from __future__ import print_function
import tensorflow.python.platform
import collections
import math
import numpy as np
import os
import random
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
import zipfile
import urllib2
from wikipedia import get_all_wiki_files_as_list
from datagetter import *
import csv
import scipy.stats as stats
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # MODEL PARAMETERS # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DEFINES THE RUN ID, HOW VARIABLES WILL BE CREATED
# MEMOIZED, AND STORED. MUST BE UNIQUE BETWEEN RUNS
# WHERE DATA OVERLAP IS NOT 100%. ALLOWS US TO DO
# REPEATED EVALUATION OVER PRECOMPUTED DATA.
run_id_label = 'friday-6pm'
# ASSIGNS WHETHER WE ARE IN TEST MODE OR TRAINING MODE
# IN ORDER TO DO THE CORRECT IO.
TESTING_FLAG = False
# VARIABLES THAT WERE OPTIMIZED FOR THEIR VALUES,
# THESE WERE THE ONES THAT WERE USED FOR THE FINAL
# RESULT, BUT THEY WERE CONSTANTLY MANIPULATED.
vocabulary_size = 8000 # Number of words to create embeddings for
num_steps = 6000001 # Number of steps to train over
batch_size = 128 # Size of single step batch
embedding_size = 128 # Embedding Vector Dimension
skip_window = 2 # Size of the Skip-Gram Window
num_skips = 2 # Input Reuse for a Label
# Path values that are determined by the run_id_label,
# which may be used to store intermediate results.
# NOTE(review): hard-coded absolute path; must exist on the host machine.
embedding_root = '/home/u/fall12/gward/Desktop/AI/running/embeddings/'
embedding_path = embedding_root + run_id_label + ".npy"
dictionary_path = embedding_root + run_id_label + "-dict.csv"
result_path = embedding_root + run_id_label + "-TRAIN.tsv"
if TESTING_FLAG:
    result_path = embedding_root + run_id_label + "-TEST.tsv"
# Validation set size for training, was not modified.
valid_size = 30 # number of words sampled for qualitative nearest-neighbour checks
valid_window = 100 # validation ids are drawn from [0, valid_window), i.e. the most frequent words
# Allows the scope of these variables to be flexible, allowing us to
# generate them if they do not yet exist, or to go off of a memoized
# version if they do exist.
final_embeddings = None
dictionary = dict()
reverse_dictionary = dict()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # DICTIONARY GENERATION # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Checks to see if we have run this computation beforehand, and
# uses the precomputed value if we have. Note that this is contingent
# upon run_id_label being the same across multiple runs of the python file.
if os.path.isfile(dictionary_path):
    # Reload the memoized word -> id mapping. csv.reader yields strings,
    # so the id must be cast back to int: downstream code (embed()) uses
    # these values as numpy array indices. The original stored the raw
    # string, which would break indexing, and also leaked the file handle
    # and printed the reader object (debug leftover).
    with open(dictionary_path) as dict_file:
        for key, val in csv.reader(dict_file):
            dictionary[key] = int(val)
    # NOTE(review): this branch does not rebuild `data` or
    # `reverse_dictionary`; training below would fail if the dictionary is
    # memoized but the embeddings are not. TODO confirm intended workflow.
else:
    # Defines the corpus texts that were generated through the training set.
    # Then gets them as a cohesive, monolithic list of words. Very Large.
    words = get_all_wiki_files_as_list()
    # Constructs a dictionary out of the wiki articles, which maps every word
    # in its size to an integer which is the key for the embedding. The dict
    # ionary generated through this step will be written to disk to allow us
    # to skip this step in the future.
    def build_dataset(words):
        """Translate the corpus into integer ids.

        Keeps the `vocabulary_size - 1` most frequent words; everything
        else maps to id 0 ('UNK'). Returns
        (data, count, dictionary, reverse_dictionary).
        """
        count = [['UNK', -1]]
        # Counts the words in the list and makes a dictionary of the most common.
        count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
        dictionary = dict()
        for word, _ in count:
            dictionary[word] = len(dictionary)
        # Goes through corpus text and replaces words with numerical translations
        data = list()
        unk_count = 0
        for word in words:
            if word in dictionary:
                index = dictionary[word]
            else:
                index = 0  # word not frequent enough to be in the vocabulary
                unk_count = unk_count + 1
            data.append(index)
        count[0][1] = unk_count
        # Creates a translation between word number and the word it stands for.
        reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
        return data, count, dictionary, reverse_dictionary
    # Builds our dataset.
    data, count, dictionary, reverse_dictionary = build_dataset(words)
    # Writes the CSV for the dictionary to disk so that we don't have to
    # rebuild if we run again (with-block ensures the file is flushed/closed).
    with open(dictionary_path, "w") as dict_out:
        writer = csv.writer(dict_out)
        for key, val in dictionary.items():
            writer.writerow([key, val])
# Current read position in `data`, advanced by the batch generator.
data_index = 0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # EMBEDDING TRAINING # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# if the embeddings already exist, load them using numpy instead of re-training them.
if os.path.isfile(embedding_path):
    final_embeddings = np.load(embedding_path)
else:
    # NOTE(review): this branch reads `data` and `reverse_dictionary`, which
    # are only defined when the dictionary was rebuilt above; a memoized
    # dictionary with no saved embeddings would fail here. TODO confirm.
    # Utilize a training model directly out of the Tensor Flow Examples section.
    def generate_batch(batch_size, num_skips, skip_window):
        # Produce one skip-gram training batch from the global `data`
        # stream, advancing the global `data_index` cursor.
        global data_index
        assert batch_size % num_skips == 0
        assert num_skips <= 2 * skip_window
        batch = np.ndarray(shape=(batch_size), dtype=np.int32)
        labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
        span = 2 * skip_window + 1 # [ skip_window target skip_window ]
        buffer = collections.deque(maxlen=span)
        # Prime the sliding window with `span` consecutive word ids.
        for _ in range(span):
            buffer.append(data[data_index])
            data_index = (data_index + 1) % len(data)
        for i in range(batch_size // num_skips):
            target = skip_window # target label at the center of the buffer
            targets_to_avoid = [ skip_window ]
            for j in range(num_skips):
                # Pick a random context word not yet used for this center.
                while target in targets_to_avoid:
                    target = random.randint(0, span - 1)
                targets_to_avoid.append(target)
                batch[i * num_skips + j] = buffer[skip_window]
                labels[i * num_skips + j, 0] = buffer[target]
            buffer.append(data[data_index])
            data_index = (data_index + 1) % len(data)
        return batch, labels
    # Generates a batch to train on.
    batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
    # Building and training a skip-gram model
    valid_examples = np.array(random.sample(np.arange(valid_window), valid_size))
    num_sampled = 64 # Number of negative examples to sample during each training phase.
    # Defines the Tensorflow skip-gram Model.
    graph = tf.Graph()
    with graph.as_default():
        # Input data.
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
        # Construct the variables.
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
        # Look up embeddings for inputs.
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels each
        # time we evaluate the loss.
        loss = tf.reduce_mean(tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,num_sampled, vocabulary_size))
        # Construct the SGD optimizer using a learning rate of 1.0.
        optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # Compute the cosine similarity between minibatch examples and all embeddings.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
    # Trains the tensor flow model.
    with tf.Session(graph=graph) as session:
        # We must initialize all variables before we use them.
        # (tf.initialize_all_variables is the legacy, pre-1.0 TF API.)
        tf.initialize_all_variables().run()
        print("Initialized")
        average_loss = 0
        for step in xrange(num_steps):
            batch_inputs, batch_labels = generate_batch(
                batch_size, num_skips, skip_window)
            feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
            # We perform one update step by evaluating the optimizer op (including it
            # in the list of returned values for session.run()
            _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
            average_loss += loss_val
            if step % 1000 == 0:
                if step > 0:
                    average_loss = average_loss / 1000
                # The average loss is an estimate of the loss over the last 2000 batches.
                print("Average loss at step ", step, "/",num_steps,": ", average_loss)
                average_loss = 0
            # Updates the user on the progress of the training model with qualative examples
            if step % 10000 == 0:
                # Checkpoint the current embeddings so a crash loses little work.
                np.save(embedding_path, normalized_embeddings.eval())
                sim = similarity.eval()
                for i in xrange(valid_size):
                    valid_word = reverse_dictionary[valid_examples[i]]
                    top_k = 8 # number of nearest neighbors
                    nearest = (-sim[i, :]).argsort()[1:top_k+1]
                    log_str = "Nearest to %s:" % valid_word
                    for k in xrange(top_k):
                        close_word = reverse_dictionary[nearest[k]]
                        log_str = "%s %s," % (log_str, close_word)
                    print(log_str)
        final_embeddings = normalized_embeddings.eval()
    # Saves the embeddings so that we do not have to recalculate them so frequently.
    np.save(embedding_path, final_embeddings)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # EMBEDDING # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Embeds a piece of text using the embeddings that were calculated
def embed(text):
    """Return the bag-of-words embedding of `text` as a 1 x embedding_size
    row vector.

    Splits on single spaces, counts occurrences of each in-vocabulary word
    and multiplies the resulting count vector by the embedding matrix.
    Words missing from `dictionary` are ignored.
    """
    words = text.split(" ")
    # One-row count vector over the vocabulary; np.zeros replaces the
    # original manual fill loop.
    encoding = np.zeros((1, vocabulary_size), dtype=np.int32)
    for word in words:
        index = dictionary.get(word)
        if index is not None:
            encoding[0, index] += 1
    # Weighted sum of word embeddings: (1, vocab) . (vocab, dim) -> (1, dim)
    return np.dot(encoding, final_embeddings)
# Flattens an 1xN vector into a N vector.
def flatten(a):
    """Return the first row of the 1xN array `a` as a new float64 vector
    of shape (N,). Replaces the original element-by-element copy loop."""
    return np.array(a[0], dtype=np.float64)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # PREDICTION # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# A list to store all of our confidence values so that we can easily normalize them
# on a scale of 1 to 100.
all_confidences = list()
# An ad-hoc function to estimate a raw confidence value given a list of scores.
def raw_confidence(scores):
    """Ad-hoc raw confidence: 2 ** (lowest score / denom), where denom is
    a quarter of the summed squares of the next three scores (plus a small
    epsilon to avoid division by zero). Every value produced is recorded
    in `all_confidences` for later percentile normalization."""
    ordered = sorted(scores)
    denom = (ordered[1] * ordered[1] + ordered[2] * ordered[2] + ordered[3] * ordered[3] + .01) / 4
    value = 2 ** (ordered[0] / denom)
    all_confidences.append(value)
    return value
# Utilizes all of the confidence values computed to compute normalized confidence.
def scaled_confidence(score):
    """Convert a raw confidence into a 0-100 percentile rank against every
    value recorded in `all_confidences`.

    Bug fix: the original fell off the end of the loop and implicitly
    returned None for the highest raw score (no recorded value is strictly
    greater than it); that case now returns 100.0.
    """
    sorted_conf = sorted(all_confidences)
    for i in range(0, len(all_confidences)):
        if sorted_conf[i] > score:
            return (i * 100.0) / len(all_confidences)
    # No recorded confidence exceeds this score: it is the maximum.
    return 100.0
# Embed questions and answers, and compute the dot-product between them.
def predict(question, answers):
    """Return (best_answer_key, raw_confidence) for one question.

    Each answer text is embedded and scored by its dot product with the
    question embedding; the key with the highest score wins (first one on
    ties). The raw confidence is derived from the full score list.
    """
    question_vec = flatten(embed(question))
    # Embed every candidate answer up front.
    answer_vecs = {key: flatten(embed(answers[key])) for key in answers.keys()}
    # Track the best dot-product score over all candidates.
    best_score = -10000000
    best_ans = 0
    scores = list()
    for key in answers.keys():
        score = np.dot(question_vec, answer_vecs[key])
        scores.append(score)
        if score > best_score:
            best_score = score
            best_ans = key
    raw_conf = raw_confidence(sorted(scores))
    return (best_ans, raw_conf)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # EVALUATION # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
n_correct = 0
n_incorrect = 0
all_results = list()
# Gets one of the datasets to evaluate against.
lines = get_training_lines()
if TESTING_FLAG:
    lines = get_testing_lines()
# Predicts each one of the values, calculates a confidence value and prints it out, along with whether or not
# it was right.
for tsv_line in lines:
    # Periodic progress report with running accuracy (the == 1 guard also
    # prevents a zero division on the very first line).
    if (n_correct + n_incorrect) % 50 == 1:
        print("Making Progress! - ", (n_correct + n_incorrect),' - ' , (n_correct * 100.0)/(n_correct + n_incorrect))
    result = dict()
    question_id = get_id(tsv_line)
    result["id"] = question_id
    question = get_question(tsv_line)
    if TESTING_FLAG:
        answers = get_answers_t(tsv_line)
    else:
        answers = get_answers(tsv_line)
    # NOTE(review): get_correct_answer is called even in test mode —
    # confirm the test TSV actually carries an answer column.
    correct_answer = get_correct_answer(tsv_line)
    (prediction, conf) = predict(question, answers)
    result["predict"] = prediction
    result["raw"] = conf
    all_results.append(result)
    if correct_answer == prediction:
        n_correct = n_correct + 1
    else:
        n_incorrect = n_incorrect + 1
# Final accuracy percentage (computed but never printed or written).
score = (n_correct * 100.0) / (n_correct + n_incorrect)
print(all_results)
# Calculates the scaled confidence.
for result in all_results:
    result["conf"] = scaled_confidence(result["raw"])
# Saves the results of the evaluation.
w = open(result_path, 'w')
for result in all_results:
    s = ""+result["id"]+'\t'+result["predict"]+'\t'+str(result["conf"])+"\n"
    w.write(s)
w.close()
|
import os
from elasticsearch import Elasticsearch
from cpath import data_path
from crawl import parse_comment
from misc_lib import get_dir_files
def load_all_comments(dir_path):
    """Yield the parsed comment data of every comment file under `dir_path`."""
    yield from (parse_comment.parse_comments(path) for path in get_dir_files(dir_path))
def load_guardian_uk_comments():
    """Yield parsed comments for the Guardian opinion section.

    Note: the original also computed unused `topic`/`topic_save_dir`
    locals; they have been removed.
    """
    save_dir = os.path.join(data_path, "guardian", "opinion")
    comments_dir = os.path.join(save_dir, "comments")
    return load_all_comments(comments_dir)
def insert_uk_comments():
    """Index every Guardian UK comment document into Elasticsearch."""
    client = Elasticsearch("gosford.cs.umass.edu")
    for document in load_guardian_uk_comments():
        print(document)
        client.index(index="guardian_comment", body=document)
def insert_comment_piece():
    """Index each individual comment (thread head and its replies) as its
    own Elasticsearch document on localhost.

    Bug fix: the head document used the misspelled key 'dicsussion_id'
    while reply documents used 'discussion_id'; both now use
    'discussion_id' so the field is queryable consistently. Also removed
    the confusing reuse of `r` for both the comment list and the index
    responses.
    """
    es = Elasticsearch("localhost")
    data = load_guardian_uk_comments()
    for comment in data:
        threads = comment['comments']
        short_id = comment['short_id']
        for head, tail in threads:
            doc = {'id': head[0], 'text': head[1], 'discussion_id': short_id}
            es.index(index="guardian_comment_piece", body=doc)
            for reply in tail:
                doc = {'id': reply[0], 'text': reply[1], 'discussion_id': short_id}
                es.index(index="guardian_comment_piece", body=doc)
        print()
|
# Copyright 2018 Adam Robinson
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import numpy.ctypeslib as ctl
import ctypes
# Load the native Axon library from the working directory and declare the
# C function signatures so ctypes marshals arguments correctly.
libname = 'axonlib.so'
libdir = './'
lib = ctl.load_library(libname, libdir)
# createControllerInstance(name: char*) -> opaque handle
createControllerInstance = lib.createControllerInstance
createControllerInstance.argtypes = [ctypes.c_char_p]
createControllerInstance.restype = ctypes.c_void_p
# createChildInstance(name: char*) -> opaque handle
createChildInstance = lib.createChildInstance
createChildInstance.argtypes = [ctypes.c_char_p]
createChildInstance.restype = ctypes.c_void_p
# destroyInstance(handle) -> void
destroyInstance = lib.destroyInstance
destroyInstance.argtypes = [ctypes.c_void_p]
destroyInstance.restype = None
# sendMessage(handle, payload: char*, code: int, length: int, kind: int) -> void
sendMessage = lib.sendMessage
sendMessage.argtypes = [
    ctypes.c_void_p,
    ctypes.c_char_p,
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_int
]
sendMessage.restype = None
# recvMessage(handle, out code*, out length*, out kind*) -> char* payload
# NOTE(review): a c_char_p restype truncates the result at the first NUL
# byte — confirm payloads are NUL-free or switch to POINTER(c_char).
recvMessage = lib.recvMessage
recvMessage.argtypes = [
    ctypes.c_void_p,
    ctypes.POINTER(ctypes.c_int),
    ctypes.POINTER(ctypes.c_int),
    ctypes.POINTER(ctypes.c_int)
]
recvMessage.restype = ctypes.c_char_p
class Axon:
    """Thin Python wrapper over the native axonlib messaging endpoints.

    A controller instance owns the underlying native object and destroys
    it on garbage collection; a child instance does not.
    """
    def __init__(self, name, is_child=False):
        self.child = is_child
        self.name = name
        if self.child:
            self.c_Axon = createChildInstance(self.name.encode())
        else:
            self.c_Axon = createControllerInstance(self.name.encode())
    def SendMessage(self, _bytes, code, kind):
        """Send a raw bytes payload with the given code and kind.

        Bug fix: the original called ctypes.from_buffer_copy(_bytes),
        which does not exist at module level (from_buffer_copy is a
        classmethod on ctypes array types) and raised AttributeError.
        The c_char_p argtype accepts a bytes object directly.
        """
        sendMessage(self.c_Axon, _bytes, code, len(_bytes), kind)
    def RecvMessage(self):
        """Receive one message; returns (payload, code, kind).

        code and kind are returned as ctypes.c_int objects (read .value),
        preserving the original interface. The native length out-param is
        received but unused because the c_char_p restype already yields a
        NUL-terminated bytes object.
        """
        code = ctypes.c_int()
        length = ctypes.c_int()
        kind = ctypes.c_int()
        _bytes = recvMessage(
            self.c_Axon,
            ctypes.byref(code),
            ctypes.byref(length),
            ctypes.byref(kind)
        )
        return _bytes, code, kind
    def __del__(self):
        # Only the controller owns (and must free) the native instance.
        if not self.child:
            destroyInstance(self.c_Axon)
|
import trimesh
from trimesh import Trimesh, visual
import numpy as np
import time
import os
class Mesh():
    """
    Wrapper class for trimesh with custom functions and bookeeping of global mesh
    Mostly helper functions, not heavily used at the moment
    """
    def __init__(self, mesh):
        self.mesh = mesh #the Trimesh object
    @property
    def z_off(self):
        # NOTE(review): self._z_off is never assigned anywhere in this
        # class, so accessing z_off raises AttributeError unless a caller
        # sets it externally — confirm intended usage.
        return self._z_off
    def visualize_mesh(self, faces_to_colour, vector_origins = [], vector_normals=[], scale=2.0):
        """ Debugging plot for visualizing intersecting faces and vectors
        Parameters
        ----------
        faces_to_colour : (n,1) array
            array of face indexes that need to be coloured differently
        vector_origins : (n,3) np.array
            set of vector origins to plot
        vector_normals : (n,3) np.array
            List of normal vectors corresponding to vector_origins
        scale: float, optional
            Amount to scale the vector normal plot by
        """
        mesh = self.mesh.copy()
        # unmerge so viewer doesn't smooth
        mesh.unmerge_vertices()
        # make base_mesh white- ish
        mesh.visual.face_colors = [105,105,105,105]
        # highlight the requested faces in red
        mesh.visual.face_colors[faces_to_colour] = [255, 0, 0, 255]
        # NOTE(review): comparing numpy arrays to [] with != is fragile;
        # callers should pass plain lists or rely on the defaults.
        if vector_origins != [] and vector_normals != []:
            # stack rays into line segments for visualization as Path3D
            ray_visualize = trimesh.load_path(np.hstack((vector_origins, vector_origins + vector_normals*scale)).reshape(-1, 2, 3))
            ray_visualize.merge_vertices()
            scene = trimesh.Scene([mesh, ray_visualize])
        else:
            scene = trimesh.Scene([mesh])
        scene.show()
def tow_mesh(tow):
    """Creates mesh from tow coordinates to use for z offset projection
    Iterates through points and forms segments from outer points
    Parameters
    ----------
    tow : Tow object
        Tow object to be converted to mesh
    Returns
    -------
    Trimesh
        mesh representation of all points in tow
    """
    outer_mesh = Trimesh()
    # inner_mesh = Trimesh()
    # Five longitudinal lines of points spanning the tow width.
    [L1, L2, L3, L4, L5] = tow.new_pts
    for i in range(len(tow.new_pts[0]) - 1):
        # Build one triangulated strip segment between cross-sections i
        # and i+1, using all five width-wise lines.
        v1 = L5[i] # vertices has to be anticlockwise
        v2 = L5[i + 1]
        v3 = L4[i + 1]
        v4 = L4[i]
        v5 = L3[i + 1]
        v6 = L3[i]
        v7 = L2[i + 1]
        v8 = L2[i]
        v9 = L1[i + 1]
        v10 = L1[i]
        outer_mesh_segment = Trimesh(vertices=[v1, v2, v3, v4,v5,v6,v7,v8,v9,v10], faces=[[0,1,2],[2,3,0],
                                                                                          [3,2,4],[3,4,5],
                                                                                          [5,4,6],[6,7,5],
                                                                                          [7,6,8],[8,9,7]])
        # inner_mesh_segment = Trimesh(vertices=[v5, v6, v7, v8], faces=[[0, 1, 2, 3]])
        if i == 0:
            outer_mesh = outer_mesh_segment
        else:
            outer_mesh = outer_mesh.__add__(outer_mesh_segment)
    # Collapse duplicated vertices shared between adjacent segments.
    outer_mesh.merge_vertices()
    return outer_mesh
def detect_tow_drop(tow, base_mesh, hash_table):
    """
    Overall function for determining z offsets. Predominantly
    calls a number of sub functions. Operates by first checking if there
    is any intersections of the inner points, extracting those bodies into a
    separate mesh and then checking all intersections with this new mesh.
    (A stray debug print of the intersecting body set has been removed.)
    Parameters
    ----------
    tow : Tow object
        Incoming tow to be laid down
    base_mesh : Trimesh
        Existing mesh containing all currently laid tows
    hash_table: (n,1) integer array
        Lookup table of all faces in base mesh, associated to a particular mesh body
    """
    # Determine if the inner points of the tow intersect (remove edge tolerance)
    tri_index = partial_project_tow(base_mesh, tow)
    # If no intersections, then the tows are adjacent or not in contact, so edge overlap is ignored
    if len(tri_index) == 0:
        return
    # If not, determine which tows it intersects with
    bodies = identify_tow_bodies(hash_table, tri_index.astype('int32'))
    # Create a new tow mesh to compare
    intersect_mesh = gen_intersecting_mesh(base_mesh, bodies)
    # Check if inner + outerpoints intersect with relevant tows to account for tow drops
    full_project_tow(intersect_mesh, tow)
"""
"""
def gen_intersecting_mesh(base_mesh, bodies):
    """Generates mesh of tows that are intersecting with ray offset
    Parameters
    ----------
    base_mesh : Trimesh
        Mesh of exisiting tows already laid down
    bodies: set(int)
        Indexes of bodies intersecting with the new tow
    Returns
    -------
    Trimesh
        Subset of base_mesh, containing only the tows from bodies
    """
    # Nothing intersects: return an empty mesh without paying for a split.
    # (Fixes the original `len(bodies) is 0`, an identity comparison that
    # only worked via CPython small-int caching, and hoists the check
    # ahead of the expensive split. The unused body_count local is gone.)
    if len(bodies) == 0:
        return Trimesh()
    # Split mesh bodies into individual tow meshes; do not merge vertices
    # so the body count stays equivalent to the number of tows.
    mesh_bodies = base_mesh.split(only_watertight=False)
    # Combine only the intersecting bodies into one mesh.
    # NOTE(review): body indexes appear to be 1-based — confirm the
    # hash_table convention.
    intersecting = Trimesh()
    for body_index in bodies:
        if intersecting.is_empty:
            intersecting = mesh_bodies[body_index - 1]
        else:
            intersecting = intersecting + mesh_bodies[body_index - 1]
    return intersecting
"""
"""
def identify_tow_bodies(hash_table, tri_index):
    """Map intersecting face indexes to their owning tow bodies.

    Each face index is looked up in the body table; returning a set
    collapses duplicate hits on the same body (constant-time lookups).
    Parameters
    ----------
    hash_table : list(int)
        lookup table of all faces in the base mesh and their corresponding bodies
    tri_index : array(int)
        face indexes that intersected the projected tow
    Returns
    -------
    Set(int)
        Set of intersecting bodies indexes (no duplicates)
    """
    return set(hash_table[tri_index])
"""
"""
def partial_project_tow(base_mesh, tow):
    """Projects just the inner points (exluding edge points)
    These values are normally disregarded in edge-edge contact
    Parameters
    ----------
    base_mesh: Trimesh
        mesh of exisitng tows that have already been laid down
    tow : Tow object
        incoming tow to be projected down
    Returns
    -------
    np.array(n,1)
        array of all intersecting face indexes
    """
    # Inner points only: strip the first/last row and column.
    tow_normals = tow.new_normals[1:-1,1:-1]
    project_origins = tow.projection_origins()[1:-1,1:-1]
    # Project opposite to the tow normals (down onto the base mesh).
    project_normals = tow_normals * -1
    # Cumulative index of triangles with ray intersections. Duplicates allowed.
    all_tri_index = np.array([], dtype='int32')
    # Iterate through to find intersecting triangles. Other data not necessary.
    for i in range(len(project_origins)):
        if len(project_origins[i]) == 0:
            continue
        try:
            tri_index, vec_index = base_mesh.ray.intersects_id(project_origins[i,:], project_normals[i,:], multiple_hits=False)
        except Exception:
            # Ray query failed: fall back to the previous tow points and
            # retry the whole projection. (Was a bare `except:`, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            # NOTE(review): if the fallback fails the same way this
            # recurses indefinitely — confirm it is guaranteed to converge.
            tow.new_pts = np.array(tow.prev_pts)
            tow.get_new_normals()
            return partial_project_tow(base_mesh, tow)
        all_tri_index = np.append(all_tri_index,tri_index)
    return all_tri_index
"""
"""
def full_project_tow(base_mesh, tow):
    """Projects all 5 rows of points against the relevant intersecting mesh
    With edge tows removed now, the edge values can be included
    Parameters
    ----------
    base_mesh: Trimesh
        mesh of exisitng tows that have already been laid down
    tow : Tow object
        incoming tow to be projected down
    Returns
    -------
    np.array(n,5,3)
        array of all offset vector for each tow point (redundant return)
    """
    tow_normals = tow.new_normals
    # Generate tow points above to project down. Inner=Fallse --> all points returned
    project_origins = tow.projection_origins()
    project_normals = tow_normals * -1
    # Create array to track offsets of each tow point
    tow_z_array = np.zeros_like(project_origins)
    for i in range(len(project_origins)):
        if len(project_origins[i]) == 0:
            continue
        locations, vec_index, tri_index = base_mesh.ray.intersects_location(project_origins[i,:], project_normals[i,:], multiple_hits=False)
        # No hits for this row: abandon the whole projection.
        if(len(vec_index) == 0):
            return None
        # Offset each intersection point by one tow thickness along the normal.
        offsets = tow_normals[i][vec_index]*tow.t
        new_locations = locations + offsets #location of pts after offset
        offset_dist = new_locations - tow.new_pts[i][vec_index] # Overall distance to offset from starting pt
        # Check offset distance against distance it was projected to check for
        # outlier intersections (i.e a cylinder projecting against its inner surface)
        # NOTE(review): check_offset_distance mutates offset_dist in place and
        # returns the same array — `error_pts` is not the outlier index list
        # its name suggests, and is otherwise unused.
        error_pts = check_offset_distance(offset_dist, tow.proj_dist)
        tow_z_array[i][vec_index] = offset_dist
    # Smooth the offsets so neighbouring points step up together.
    adjusted_z_array = offset_rule(tow_z_array)
    for i in range(len(adjusted_z_array)):
        adjusted_off_dist = np.linalg.norm(adjusted_z_array[i], axis=1) #distance of offsets
        adjust_pts = np.where(adjusted_off_dist > tow.t/2)[0] #Only adjust pts with non-zero offset
        offsets = adjusted_z_array[i][adjust_pts]
        tow.new_pts[i][adjust_pts] = tow.new_pts[i][adjust_pts] + offsets #Update tow data with offsets
    return tow_z_array
def trim_boundary(tow, boundary_mesh):
    """Given a boundary mesh (must be watertight volume), removes any tow
    points that lie outside of this volumes.
    Breaks the points into start, middle and end. Where the middle points are rows
    that contain all 5 points within the boundary, start and end or the sets of points on
    either side. This is so as much of the mesh can be made uniform.
    (A stray debug print of the point count has been removed.)
    Parameters
    ----------
    tow : Tow object
        incoming tow (already adjusted points)
    boundary_mesh : Trimesh
        Mesh of boundary volume. Must be watertight for contains function to work
    """
    start = []
    middle = []
    end = []
    # Classify each cross-section: rows fully inside go to middle; rows
    # with any point outside go to start (before the first inside row)
    # or end (after it).
    for i in range(len(tow.new_pts[0])):
        in_bounds = boundary_mesh.contains(tow.new_pts[:,i])
        if any(in_bounds == False):
            if len(middle) == 0:
                start.append(i)
            else:
                end.append(i)
        else:
            middle.append(i)
    # Fewer than two rows fully inside: nothing worth trimming.
    if len(middle) <= 1:
        return
    tow.trimmed_pts["middle"] = tow.new_pts[:,middle].tolist()
    # Add inside points to start an end so there is at least one point inside
    if len(start) > 0:
        start.append(max(start) + 1)
        tow.trimmed_pts["start"] = boundary_intersect(tow.new_pts[:,start], boundary_mesh, start_flag=True)
    if len(end) > 0:
        end.insert(0,min(end) -1)
        tow.trimmed_pts["end"] = boundary_intersect(tow.new_pts[:,end], boundary_mesh, start_flag=False)
def boundary_intersect(trim_array, boundary_mesh, start_flag=True):
    """Finds location of points interesecting with the boundary mesh,
    such that the trimming goes right up to the boundary.
    Parameters
    ----------
    trim_array : np.array((5,n,3))
        Point array of section with partial points outside of the boundary mesh
    boundary_mesh : Trimesh
        Boundary mesh volume
    start_flag : boolean
        Determines if the section is at the start or end of the tow to determine the
        order to insert new points
    Returns
    -------
    np.array(5,n,3)
        Updated array of points including boundary intersecting points
    """
    trimmed_array = [[],[],[],[],[]]
    origins = []
    rays = []
    start = []
    end = []
    indexes = []
    for i in range(len(trim_array)):
        trim = []  # NOTE(review): unused local
        # Partition this line's points into inside/outside the boundary.
        in_bound = np.where(boundary_mesh.contains(trim_array[i,:].tolist()) == True)[0]
        out_bound = np.where(boundary_mesh.contains(trim_array[i,:].tolist()) == False)[0]
        # Lines entirely inside or entirely outside need no boundary point.
        if len(out_bound) == 0 or len(in_bound) == 0:
            continue
        # Determine whether to insert new points at the start or end of the array.
        # Trim array contains one point on either side of the boundary, so create vector between points
        if start_flag is True:
            start.append(trim_array[i,min(in_bound)])
            end.append(trim_array[i,max(out_bound)])
        else:
            start.append(trim_array[i,max(in_bound)])
            end.append(trim_array[i,min(out_bound)])
        # Keep only the in-bound points for this line.
        trimmed_array[i] = trim_array[i][in_bound].tolist()
        indexes.append(i)
    if len(start) == 0:
        return trimmed_array
    origins = np.array(start)
    rays = (np.array(end)-np.array(start)).tolist()
    # Determine intersection locations - project direcetion vector of end points
    location, vec, tri = boundary_mesh.ray.intersects_location(origins, rays, multiple_hits=False)
    for j in range(len(vec)):
        loc = location[j]
        i = vec[j]
        # Prepend at the tow start, append at the tow end, preserving order.
        if start_flag is True:
            trimmed_array[i].insert(0,loc.tolist())
        else:
            trimmed_array[i].append(loc.tolist())
    return trimmed_array
"""
"""
def check_offset_distance(row, dist):
    """Sanity check for revolute cases that may project into themselves.

    Any offset vector whose length exceeds `dist` is treated as a bogus
    intersection and zeroed out. The input array is modified in place
    and also returned.
    Parameters
    ----------
    row : np.array(5,3)
        Single row of points
    dist : float
        Distance tolerance
    Returns
    -------
    np.array(5,3)
        adjusted row points
    """
    lengths = np.linalg.norm(row, axis=1)
    too_far = np.where(lengths > dist)
    row[too_far] = np.array([0, 0, 0])
    return row
"""
"""
def outliers_rule(z_array):
    """Suppress random outliers on the outer rows of the Z array.

    For each column, if the z-components of rows 1, 2 and 3 agree to
    within a small numerical tolerance, the outer rows (0 and 4) are
    snapped to the middle row's z-component. Currently loops through;
    a more efficient solution may come later.

    Parameters
    ----------
    z_array : np.array(5,n,3)
        Array of point offsets

    Returns
    -------
    np.array(5,n,3)
        Adjusted copy of z_array
    """
    tol = 0.01
    smoothed = z_array.copy()
    for col in range(len(z_array[0])):
        inner = z_array[1][col][2]
        mid = z_array[2][col][2]
        if abs(inner - mid) >= tol or abs(inner - z_array[3][col][2]) >= tol:
            continue
        # Rows 1-3 agree here, so force the outer rows onto the middle value.
        smoothed[0][col][2] = mid
        smoothed[4][col][2] = mid
    return smoothed
def offset_rule(z_values):
    """Adjusts points to avoid sharp tow drop-offs.

    Each point's offset is replaced by the largest-magnitude vector among a
    small neighbourhood (see max_z). Corner points are handled explicitly;
    the sweep then covers the top/bottom edges (which include the point
    itself as a candidate), the left/right edges (which do not — NOTE(review):
    possibly intentional, preserved as-is), and interior points. All reads
    come from the input array and all writes go to a copy, so the result is
    independent of update order.

    Parameters
    ----------
    z_values : np.array(5,n,3)
        Array of tow point offset vectors

    Returns
    -------
    np.array(5,n,3)
        Adjusted z array
    """
    result = z_values.copy()
    n = len(z_values[0])
    last = n - 1
    # Corner points: compare against the two in-grid neighbours.
    result[0, 0] = max_z((z_values[0, 0], z_values[1, 0], z_values[0, 1]))
    result[0, last] = max_z((z_values[0, last], z_values[1, last], z_values[0, last - 1]))
    result[4, 0] = max_z((z_values[4, 1], z_values[3, 0], z_values[4, 0]))
    result[4, last] = max_z((z_values[4, last], z_values[3, last], z_values[4, last - 1]))
    corners = ([0, 0], [0, last], [4, 0], [4, last])
    for r in range(5):
        for c in range(n):
            if [r, c] in corners:
                continue  # already handled above
            if r == 0:
                # Top edge: left, right, below and the point itself.
                candidates = (z_values[0, c - 1], z_values[0, c + 1],
                              z_values[1, c], z_values[r, c])
            elif c == 0:
                # Left edge: above, right, below (current point excluded).
                candidates = (z_values[r - 1, 0], z_values[r, 1], z_values[r + 1, 0])
            elif c == last:
                # Right edge: above, below, left (current point excluded).
                candidates = (z_values[r - 1, c], z_values[r + 1, c], z_values[r, c - 1])
            elif r == 4:
                # Bottom edge: left, right, above and the point itself.
                candidates = (z_values[4, c - 1], z_values[4, c + 1],
                              z_values[3, c], z_values[r, c])
            else:
                # Interior: the four orthogonal neighbours.
                candidates = (z_values[r - 1, c], z_values[r + 1, c],
                              z_values[r, c - 1], z_values[r, c + 1])
            # Candidate order matches the original so max_z tie-breaking
            # (first maximum wins) is unchanged.
            result[r, c] = max_z(candidates)
    return result
def max_z(vector_lists):
    """Return the vector of greatest Euclidean norm from a list of vectors.

    Ties resolve to the earliest entry, matching list.index on the first
    maximum magnitude.

    Parameters
    ----------
    vector_lists : np.array(n,3)
        List of neighbouring offset vectors

    Returns
    -------
    np.array(1,3)
        The maximum-magnitude offset vector
    """
    return max(vector_lists, key=np.linalg.norm)
"""
"""
def adjacent(mesh, face):
    """Find the indices of faces adjacent to *face*.

    Possible REMOVE

    Parameters
    ----------
    mesh : Trimesh
        Mesh object to query
    face : int
        Index of the face whose neighbours are wanted

    Returns
    -------
    np.array(n) : int
        Indices of the faces sharing an edge with *face*
    """
    # face_adjacency is a list of index pairs; for every pair containing
    # *face*, keep the other member of the pair.
    neighbours = [pair[pair != face][0] for pair in mesh.face_adjacency if face in pair]
    return np.array(neighbours)
def load_stl(stl_file, dir="stl_files"):
    """Load a mesh object from the named *.stl file.

    Parameters
    ----------
    stl_file : string
        Name of the stl file
    dir : string, optional
        Directory containing the stl files (relative to the Import.py directory)

    Returns
    -------
    Trimesh
        The imported mesh object
    """
    return trimesh.load_mesh(os.path.join(dir, stl_file))
def transverse_adjust(tow, mesh):
    """If a base mesh is included, projects original points down onto that
    mesh to determine new origins. Essential for double curvature surfaces.

    Each row of tow points is ray-cast along the negated point normals onto
    *mesh*; points that hit the mesh are moved to the intersection location,
    except hits whose move distance exceeds 3x the tow thickness, which are
    treated as outliers and left unchanged. Modifies tow.new_pts in place.

    Parameters
    ----------
    tow : Tow
        Tow object to be projected down
    mesh : Trimesh
        Original mesh imported from the STL file
    """
    normals = tow.new_normals
    project_normals = normals * -1  # cast rays back towards the base mesh
    project_origins = tow.projection_origins()
    mesh.merge_vertices()
    for i in range(len(tow.new_pts)):
        # Wrap intersection in try/except as revolute surfaces can make the
        # ray query raise, in which case revert to the uninterpolated points
        # and restart. NOTE(review): if the retry raises again this recurses
        # indefinitely — confirm prev_pts is always safe for the ray query.
        try:
            locations, vec_index, tri_index = mesh.ray.intersects_location(
                project_origins[i, :], project_normals[i, :], multiple_hits=False)
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            tow.new_pts = np.array(tow.prev_pts)
            tow.get_new_normals()
            return transverse_adjust(tow, mesh)
        if len(tri_index) == 0:
            print('error: stl file and real tow data are not compatible')
        else:
            next_pts = np.copy(tow.new_pts[i])
            next_pts[vec_index] = locations
            # Discard hits that would move a point more than 3 tow thicknesses.
            off_dist = np.linalg.norm(next_pts - tow.new_pts[i], axis=1)
            outliers = np.where(off_dist > 3 * tow.t)
            next_pts[outliers] = tow.new_pts[i][outliers]
            tow.new_pts[i] = next_pts
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.