index (int64, 0–1,000k) | blob_id (string, length 40) | code (string, 7–10.4M chars) |
|---|---|---|
8,500 | e19529dce407da0f1e21f6a3696efcefac9ed040 | import pandas as pd

def load_covid():
    # Daily COVID-19 data maintained by Our World in Data.
    covid = pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
    target = 'new_cases'
    date = 'date'
    # Keep only the worldwide aggregate and the two columns of interest.
    dataset = covid[covid['location'] == 'World'].copy()[[target, date]]
    dataset[date] = pd.to_datetime(dataset[date])
    dataset.index = dataset[date]
    dataset['month'] = dataset['date'].dt.month
    dataset = dataset.drop(columns=['date'])
    return {
        'target': target,
        'dataset': dataset,
    }
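
# A minimal usage sketch (assumes network access to the OWID URL above):
# data = load_covid()
# series = data['dataset'][data['target']]
# print(series.tail())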
|
8,501 | d3952306679d5a4dc6765a7afa19ce671ff4c0b4 | """
The :mod:`sklearn.experimental` module provides importable modules that enable
the use of experimental features or estimators.
The features and estimators that are experimental aren't subject to
deprecation cycles. Use them at your own risk!
"""
|
8,502 | bf45349a9fdfcef7392c477e089c5e3916cb4c8e | #!/usr/bin/python
# -*- coding: utf-8 -*-
import base64
import json
import os
import re
import subprocess
import time
import traceback
import zipfile
from datetime import datetime

import requests
from flask import request, current_app

from library.oss import oss_upload_monkey_package_picture
from public_config import TCLOUD_FILE_TEMP_PATH


class ToolBusiness(object):

    @classmethod
    def get_tool_ip(cls):
        # Resolve a client IP to an address via the Baidu Map IP-location API.
        ip = request.args.get('ip')
        url = 'http://api.map.baidu.com/location/ip'
        params = {"ip": ip, "ak": 'kqCYLKt8Uz9VnvHBXA7uOI51FIrei0OM'}
        ret = requests.get(url=url, params=params)
        ret = json.loads(ret.content)
        if ret and 'status' in ret and ret['status'] == 0 and 'content' in ret and 'address' in ret:
            return ret['status'], ret['content'], ret['address'], 'ok'
        return 101, '', '', 'lookup failed'

    @classmethod
    def apk_analysis(cls, apk_download_url, type=1):
        try:
            # type 1: do not save; 2: save to db
            target_path = "/tmp/packages/"
            if not os.path.exists(target_path):
                os.mkdir(target_path)
            date_time_now = datetime.now().strftime('%Y%m%d-%H.%M.%S')
            target_name = '{}.apk'.format(date_time_now)
            download_apk_name = os.path.join(target_path, target_name)
            current_app.logger.info('Downloading {} to {}'.format(apk_download_url, download_apk_name))
            response = requests.get(url=apk_download_url, verify=False)
            with open(download_apk_name, 'wb') as f:
                f.write(response.content)
            time.sleep(0.5)
            # Download failed
            if not os.path.exists(download_apk_name):
                current_app.logger.error('{} download failed!'.format(apk_download_url))
                return 102, "download failed"
            current_app.logger.info('Download succeeded, saved to {}'.format(download_apk_name))
            current_app.logger.info('Starting analysis')
            package_info_re = re.compile(r"package: name='(.*)' versionCode='(.*)' versionName='(.*?)'.*", re.I)
            label_icon_re = re.compile(r"application: label='(.+)'.*icon='(.+)'", re.I)
            launchable_activity_re = re.compile(r"launchable-activity: name='(.+)'.*label.*", re.I)
            apk_info = {}
            # Parse the `aapt dump badging` output for package metadata.
            cmd = '/usr/local/bin/aapt dump badging {}'.format(download_apk_name)
            command_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            infos = command_process.stdout.readlines()
            for info in infos:
                info = info.decode('utf-8')
                if info.startswith('package:'):
                    temp = package_info_re.search(info)
                    apk_info['package_name'] = temp.group(1)
                    apk_info['version_code'] = temp.group(2) or 0
                    apk_info['version_name'] = temp.group(3)
                elif info.startswith('application:'):
                    temp = label_icon_re.search(info)
                    apk_info['label'] = temp.group(1)
                    apk_info['icon'] = temp.group(2)
                elif info.startswith('launchable-activity:'):
                    temp = launchable_activity_re.search(info)
                    apk_info['default_activity'] = temp.group(1)
            try:
                size = round(os.path.getsize(download_apk_name) / float(1024 * 1024), 2)
                apk_info['size'] = str(size)
                # Extract the launcher icon from the APK and upload it to OSS.
                apk_zip = zipfile.ZipFile(download_apk_name)
                icon_binary = apk_zip.read(apk_info['icon'])
                time_now = datetime.now().strftime('%Y%m%d.%H%M%S')
                picture = f'monkey-{time_now}.png'
                dir_path = f'{TCLOUD_FILE_TEMP_PATH}/monkey'
                if not os.path.exists(TCLOUD_FILE_TEMP_PATH):
                    os.mkdir(TCLOUD_FILE_TEMP_PATH)
                if not os.path.exists(dir_path):
                    os.mkdir(dir_path)
                with open(f'{dir_path}/{picture}', 'wb') as f:
                    f.write(icon_binary)
                apk_info['icon'] = oss_upload_monkey_package_picture(dir_path, picture)
            except Exception as e:
                current_app.logger.warning(e)
                current_app.logger.warning(traceback.format_exc())
            current_app.logger.info(apk_info)
            if type == 1:
                pass
            elif type == 2:
                pass
            return apk_info
        except Exception as e:
            current_app.logger.error(e)
            current_app.logger.error(traceback.format_exc())
            return {}
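
# A minimal usage sketch (hypothetical `app` and URL; requires aapt at
# /usr/local/bin/aapt and a Flask application context):
# with app.app_context():
#     info = ToolBusiness.apk_analysis('https://example.com/build.apk')
#     print(info.get('package_name'), info.get('version_name'))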
|
8,503 | 84d9400dc4ee0bebce3f5f7da0bd77a280bb54a9 | # Generated by Django 3.1.3 on 2020-11-27 02:17
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('foodBookApp', '0027_remove_post_total_comments'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='likes',
            field=models.ManyToManyField(blank=True, related_name='like_post', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='post',
            name='privacy',
            field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),
        ),
        migrations.AlterField(
            model_name='profile',
            name='privacy',
            field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),
        ),
    ]
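
# Usage note (added): apply with `python manage.py migrate foodBookApp`.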
|
8,504 | b5c68211cfa255e47ee316dc5b0627719eacae78 | # -*- coding: utf-8 -*-
from rest_framework import serializers
from django.contrib.auth.models import User

from core.models import Detalhe, Viagem, Hospital, Equipamento, Caixa


class UserSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = User
        fields = '__all__'


class CaixaSerializer(serializers.ModelSerializer):
    class Meta:
        model = Caixa
        fields = '__all__'


class HospitalSerializer(serializers.ModelSerializer):
    class Meta:
        model = Hospital
        fields = '__all__'


class DetalheSerializer(serializers.ModelSerializer):
    imeiEquipamento = serializers.CharField(max_length=22)

    class Meta:
        model = Detalhe
        fields = '__all__'


class ViagemSerializer(serializers.ModelSerializer):
    detalhes = DetalheSerializer(many=True, read_only=True)
    caixa = CaixaSerializer(read_only=True)
    localPartida = HospitalSerializer(read_only=True)
    localChegada = HospitalSerializer(read_only=True)

    class Meta:
        model = Viagem
        fields = '__all__' |
8,505 | 1eb5df463bbd39002c5dbc3f88459e2f26d4b465 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-11 03:58
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('produksi', '0055_auto_20190409_1316'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='transisi',
            name='status_perpindahan',
        ),
    ]
|
8,506 | 89078ddd7dad3a2727b66566457b9ac173abe607 | from django.conf.urls import url, include

from . import views

explore_patterns = [
    url(r'^$', views.explore),
    url(r'^(?P<model_type>\w+)/$', views.get_by_model_type),
    url(r'^(?P<model_type>\w+)/(?P<id>\w+)/$', views.get_by_model_id),
    url(r'^(?P<model_type>\w+)/(?P<id>\w+)/download$', views.download_media_file),
]

export_patterns = [
    url(r'^$', views.download),
    url(r'^(?P<model_type>\w+)/(?P<id>\w+)/(?P<format>\w+)/$', views.export_by_model_id),
]

urlpatterns = [
    url(r'^about/', views.about),
    url(r'^help/', views.help),
    url(r'^search/', views.search, name='search'),
    url(r'^explore$', views.explore),
    url(r'^explore/', include(explore_patterns)),
    url(r'^export$', views.download),
    url(r'^export/', include(export_patterns)),
    url(r'^$', views.home),
]
# url(r'^logout$', views.logout, name='logout'),
|
8,507 | dc41c64d09e5fdd0e234f516eeec0cbd2433876c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 13:34:19 2020

@author: ShihaoYang
"""
from pyltp import SentenceSplitter
from pyltp import Segmentor
from pyltp import Postagger
from pyltp import Parser
from pyltp import NamedEntityRecognizer
import os
import jieba
import re

os.getcwd()
os.chdir('/Users/emilywang/shihao yang')
os.getcwd()

LTP_DATA_DIR = '/Users/emilywang/shihao yang/ltp_data_v3.4.0/'
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')
segmentor = Segmentor()
segmentor.load(cws_model_path)
pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')  # POS-tagging model path; the model file is `pos.model`
postagger = Postagger()  # Initialize instance
postagger.load(pos_model_path)  # Load model
par_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')  # Dependency-parser model path; the model file is `parser.model`
parser = Parser()  # Initialize instance
parser.load(par_model_path)  # Load model


class Sentence(object):
    def __init__(self, text):
        self.text = text
        self.data = dict()

    def SentS(self):
        sents = SentenceSplitter.split(self.text)  # Split into sentences
        return sents

    def getLTPAnalysis(self, sentence):
        words = segmentor.segment(sentence)
        # print('\t'.join(words))
        postags = postagger.postag(words)  # POS tagging
        # print('\t'.join(postags))
        arcs = parser.parse(words, postags)  # Dependency parsing
        rely_id = [arc.head for arc in arcs]  # Extract dependency head ids
        relation = [arc.relation for arc in arcs]  # Extract dependency relations
        # heads = ['Root' if id == 0 else words[id - 1] for id in rely_id]  # Map head ids to head words
        # for i in range(len(words)):
        #     print(relation[i] + '(' + words[i] + ', ' + heads[i] + ')')
        return words, postags, rely_id, relation

    def getWord(self, words, postags, rely_id, relation, _id, wType):
        sbv = None
        for i in range(len(words)):
            if relation[i] == wType and rely_id[i] == _id + 1:
                return i
        return sbv

    def getpron(self, words, postags, rely_id, relation, _id):
        flag = None
        for i in range(len(words)):
            if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i] == _id + 1:
                flag = i
                break
        if flag is None:
            return None
        pob = None
        vob_of_pob = None
        pob = self.getWord(words, postags, rely_id, relation, flag, 'POB')
        if pob:
            vob_of_pob = self.getWord(words, postags, rely_id, relation, pob, 'VOB')
            if vob_of_pob:
                return vob_of_pob
            else:
                return pob
        return None

    def getatt_of_sbv(self, words, postags, rely_id, relation, _id):
        for i in range(len(words)):
            if relation[i] == 'ATT' and rely_id[i] == _id + 1 and (postags[i] == 'a' or postags[i] == 'n'):
                return i
        return None

    def getFirstNotNone(self, array):
        for word in array:
            if word is not None:
                return word
        return None

    def getMainsent(self, realsbv, sentence):
        res = ''
        words, postags, rely_id, relation = self.getLTPAnalysis(sentence)
        # hed = self.getHED(array)
        if 0 not in rely_id:
            return None, None
        hed = rely_id.index(0)
        sbv = self.getWord(words, postags, rely_id, relation, hed, 'SBV')  # subject
        vob = self.getWord(words, postags, rely_id, relation, hed, 'VOB')  # object
        fob = self.getWord(words, postags, rely_id, relation, hed, 'FOB')  # fronted object
        ###############
        if sbv is None:
            reals = realsbv
        elif postags[sbv] == 'r' and realsbv is not None:
            reals = realsbv
        else:
            reals = words[sbv]
        if reals is None:
            return None, None
        if sbv is not None and postags[sbv] == 'n':
            temp = self.getatt_of_sbv(words, postags, rely_id, relation, sbv)
            if temp is not None:
                if words[sbv] not in self.data.keys():
                    self.data[words[sbv]] = [words[temp]]
                else:
                    self.data[words[sbv]].append(words[temp])
        if sbv is not None:
            sbvcoo = self.getWord(words, postags, rely_id, relation, sbv, 'COO')
            if sbvcoo is not None:
                reals += words[sbvcoo]
        ###############
        if postags[hed] == 'a':
            temp = self.getpron(words, postags, rely_id, relation, hed)
            if temp is not None:
                res = '{} {} {}'.format(reals, words[hed], words[temp])
            elif sbv is not None:
                temp = self.getatt_of_sbv(words, postags, rely_id, relation, sbv)
                if temp is not None:
                    res = '{} {} {}'.format(words[temp] + reals, words[hed], '')
                else:
                    res = '{} {} {}'.format(reals, words[hed], '')
            return reals, res
        finalvob = self.getFirstNotNone([vob, fob])
        if finalvob is not None:
            temp = self.getWord(words, postags, rely_id, relation, finalvob, 'VOB')
            if temp is not None:
                res = '{} {} {}'.format(reals, words[hed], words[finalvob] + words[temp])
            else:
                res = '{} {} {}'.format(reals, words[hed], words[finalvob])
        else:
            res = '{} {} {}'.format(reals, words[hed], '')
        return reals, res

    def getMain(self, sentence):
        sentence = re.sub(' ', '。', sentence)
        sentence = re.sub(',', '。', sentence)
        sents = SentenceSplitter.split(sentence)
        reals = None
        for s in sents:
            reals, res = self.getMainsent(reals, s)
            if res is not None:
                print(res)

    def gettextmain(self):
        sents = self.SentS()
        for s in sents:
            self.getMain(s)


s = Sentence('陈欣婕今天真好看。她今天中午吃炸鸡')  # "Chen Xinjie looks really pretty today. She is having fried chicken at noon."
s.gettextmain()


def readfile():
    fn = open('/users/emilywang/shihao yang/beef.txt')  # Open file
    string_data = fn.read()  # Read the whole file
    fn.close()  # Close file
    # Removing square brackets and extra spaces in texts
    string_data = re.sub(r'\[[0-9]*\]', ' ', string_data)
    string_data = re.sub(r'\s+', ' ', string_data)
    string_data = re.sub('-', '', string_data)
    return string_data


string = readfile()
s = Sentence(readfile())
s.gettextmain()
s.data
string = readfile()
s = Sentence('苹果和香蕉都是水果')  # "Apples and bananas are both fruits."
s.gettextmain()
s.data |
8,508 | e103e7a215614e1a7923838b775f49bba2792036 | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser
class MethodMessageParserTestCase(unittest.TestCase):
    def setUp(self):
        self.parser = MethodMessageParser()

    def test_parse(self):
        id = 'id'
        method = 'method'
        params = [True, 1.0]
        message = self.parser.parse({'msg': 'method', 'id': id,
                                     'method': method, 'params': params})
        self.assertEqual(message, MethodMessage(id, method, params))
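
if __name__ == '__main__':
    # Small addition so the test module can be run directly.
    unittest.main()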
|
8,509 | 0b141ecca501c21df50e76d0841dd5651274f0da | from django import forms

from myapp.models import Student
from myapp.models import Employee


class EmpForm(forms.ModelForm):
    class Meta:
        model = Student
        fields = "__all__"


class StudentForm(forms.Form):
    firstname = forms.CharField(label="Enter first name:", max_length=50)
    lastname = forms.CharField(label="Enter last name:", max_length=100)
    email = forms.EmailField(label="Enter Email")
    file = forms.FileField()


class EmployeeForm(forms.ModelForm):
    class Meta:
        model = Employee
        fields = "__all__"
|
8,510 | 534aaf8371707089522af014a93f3ff6c4f913ff | from django.contrib import admin
from pages.blog.models import Blog
admin.site.register(Blog)
|
8,511 | 21cfe1ca606d18763fbfb8ff6862c382b3321adc | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
log = logging.getLogger('ucs')


def firmware_pack_create(handle, org_name, name, rack_bundle_version,
                         blade_bundle_version, descr="", mode="staged",
                         org_parent="org-root"):
    """
    This method creates a Host Firmware pack.

    Args:
        handle (UcsHandle)
        org_name (string): Name of the organization
        name (string): Name of the firmware pack.
        rack_bundle_version (string): Rack bundle version
        blade_bundle_version (string): Blade bundle version
        mode (string): "one-shot" or "staged"
        descr (string): Basic description.
        org_parent (string): Parent of Org

    Returns:
        None

    Example:
        firmware_pack_create(handle, org_name="sample_org",
                             name="sample_fp",
                             rack_bundle_version="",
                             blade_bundle_version="")
    """
    org_dn = org_parent + "/org-" + org_name
    p_mo = handle.query_dn(org_dn)
    if not p_mo:
        log.info("Sub-Org <%s> not found!" % org_name)
    else:
        from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\
            FirmwareComputeHostPack
        mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,
                                     name=name,
                                     descr=descr,
                                     rack_bundle_version=rack_bundle_version,
                                     mode=mode,
                                     blade_bundle_version=blade_bundle_version)
        handle.add_mo(mo)
        handle.commit()


def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,
                         blade_bundle_version=None, descr=None, mode=None,
                         org_parent="org-root"):
    """
    This method modifies a Host Firmware pack.

    Args:
        handle (UcsHandle)
        org_name (string): Name of the organization
        name (string): Name of the firmware pack.
        rack_bundle_version (string): Rack bundle version
        blade_bundle_version (string): Blade bundle version
        mode (string): "one-shot" or "staged"
        descr (string): Basic description.
        org_parent (string): Parent of Org

    Returns:
        None

    Example:
        firmware_pack_modify(handle, org_name="sample_org",
                             name="sample_fp",
                             rack_bundle_version="",
                             blade_bundle_version="")
    """
    org_dn = org_parent + "/org-" + org_name
    fw_dn = org_dn + "/fw-host-pack-" + name
    mo = handle.query_dn(fw_dn)
    if mo is not None:
        if rack_bundle_version is not None:
            mo.rack_bundle_version = rack_bundle_version
        if blade_bundle_version is not None:
            mo.blade_bundle_version = blade_bundle_version
        if mode is not None:
            mo.mode = mode
        if descr is not None:
            mo.descr = descr
        handle.set_mo(mo)
        handle.commit()
    else:
        log.info("Firmware host pack <%s> not found." % name)


def firmware_pack_remove(handle, org_name, name, org_parent="org-root"):
    """
    This method removes a Host Firmware pack.

    Args:
        handle (UcsHandle)
        org_name (string): Name of the organization
        name (string): Name of the firmware pack.
        org_parent (string): Parent of Org.

    Returns:
        None

    Example:
        firmware_pack_remove(handle, org_name="sample_org",
                             name="sample_fp")
    """
    org_dn = org_parent + "/org-" + org_name
    p_mo = handle.query_dn(org_dn)
    if not p_mo:
        log.info("Sub-Org <%s> not found!" % org_name)
    else:
        fw_dn = org_dn + "/fw-host-pack-" + name
        mo = handle.query_dn(fw_dn)
        if not mo:
            log.info("Firmware host pack <%s> not found. Nothing to remove" % name)
        else:
            handle.remove_mo(mo)
            handle.commit()
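
# A minimal end-to-end sketch (assumed IP/credentials; UcsHandle is the
# standard ucsmsdk entry point):
# from ucsmsdk.ucshandle import UcsHandle
# handle = UcsHandle("192.168.1.1", "admin", "password")
# handle.login()
# firmware_pack_create(handle, org_name="sample_org", name="sample_fp",
#                      rack_bundle_version="", blade_bundle_version="")
# handle.logout()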
|
8,512 | 69d3a39dc024929eaf6fb77e38a7a818d2886cf7 | '''
selection review

very similar to quicksort in terms of set up.
no need to sort to find the kth element in a list;
instead it can be done in O(n) on average.
quicksort can be O(n log n) if we choose the median
as the pivot instead of a random element.

tips:
raise ValueError for a bad index not satisfying 0 <= k < n
base case of n <= 1 --> return arr[0]
partition into L, E, G (less than, equal to, greater than the pivot):
    if k < len(L):
        return select(L, k)
    elif k < len(L) + len(E):
        return pivot
    else:
        return select(G, k - len(L) - len(E))

O(n) runtime on average:
n + n/2 + n/4 + n/8 + n/16 + ... = n (1 + 1/2 + 1/4 + 1/8 + ...)
= 2n on average
worst case is O(n^2), like quicksort, if you pick the worst pivot each
time
'''
import random


def select(arr, k):
    n = len(arr)
    if not 0 <= k < n:
        raise ValueError('not valid index in array')
    if n <= 1:
        return arr[0]
    pivot = random.choice(arr)
    # Partition into elements less than, equal to, and greater than the pivot.
    L, E, G = [], [], []
    for data in arr:
        if data < pivot:
            L.append(data)
        elif data == pivot:
            E.append(pivot)
        else:
            G.append(data)
    if k < len(L):
        return select(L, k)                      # kth element lies in L
    elif k < (len(L) + len(E)):
        return pivot                             # kth element equals the pivot
    else:
        return select(G, k - (len(L) + len(E)))  # shift k past L and E


x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(select(x, 3))
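# Sanity check (added): quickselect agrees with full sorting.
assert select(x, 3) == sorted(x)[3]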
|
8,513 | 57de9a46dfbf33b117c2dfbb534a5020e019d520 | # -*- coding: utf-8 -*-
"""
@Author: xiezizhe
@Date: 5/7/2020 8:52 PM
"""
from typing import List
class KMP:
    def partial(self, pattern):
        """Calculate partial match (failure) table: String -> [Int]"""
        ret = [0]
        for i in range(1, len(pattern)):
            j = ret[i - 1]
            while j > 0 and pattern[j] != pattern[i]:
                j = ret[j - 1]
            ret.append(j + 1 if pattern[j] == pattern[i] else j)
        return ret

    def search(self, T, P):
        """
        KMP search main algorithm: String -> String -> Int
        Return the first matching position of pattern string P in T,
        or -1 if P does not occur in T.
        """
        partial, j = self.partial(P), 0
        for i in range(len(T)):
            while j > 0 and T[i] != P[j]:
                j = partial[j - 1]
            if T[i] == P[j]:
                j += 1
            if j == len(P):
                return i - (j - 1)
        return -1
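
# A quick usage sketch of the KMP helper above:
# kmp = KMP()
# assert kmp.search("hello world", "world") == 6
# assert kmp.search("hello world", "planet") == -1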
class Trie:
    def __init__(self):
        self.dicts = dict()

    def add(self, word):
        node = self.dicts
        for w in word:
            if w not in node:
                node[w] = dict()
            node = node[w]

    def search(self, word):
        # True iff `word` is a prefix of some previously added word.
        node = self.dicts
        for w in word:
            if w not in node:
                return False
            node = node[w]
        return True
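
# Prefix semantics (added illustration): after add("time"), search("tim")
# is True, so with reversed words a True hit means the query word is a
# suffix of some already-added word.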
class Solution:
    # def minimumLengthEncoding(self, words: List[str]) -> int:
    #     kmp = KMP()
    #     ret = 0
    #     texts = ''
    #     words.sort(key=lambda w: len(w), reverse=True)
    #     for word in words:
    #         idx = kmp.search(texts, word)
    #         if idx == -1:
    #             ret += len(word)
    #             if len(texts) == 0:
    #                 texts = word + "#"
    #             else:
    #                 texts = texts + word + '#'
    #             ret += 1
    #
    #     # print(texts)
    #     for word in words:
    #         if word not in texts:
    #             print(word)
    #     return len(texts)

    def minimumLengthEncoding(self, words: List[str]) -> int:
        trie = Trie()
        ret = 0
        # Longest words first: any word that is a suffix of a longer one is
        # already covered once the longer word's reversal is in the trie.
        words.sort(key=lambda w: len(w), reverse=True)
        for word in words:
            if trie.search(word[::-1]):
                continue
            trie.add(word[::-1])
            ret += len(word) + 1  # +1 for the trailing '#'
        return ret
if __name__ == "__main__":
    s = Solution()
    assert s.minimumLengthEncoding(["time", "me", "bell"]) == 10
    assert s.minimumLengthEncoding(
assert s.minimumLengthEncoding(
["ojtnj", "uuydcho", "dgsyp", "dwxycpx", "dpmvc", "dvfhmb", "flrxjjx", "fwhdhvn", "rgsakp", "aiconf", "nzacpk",
"sbxnaj", "shway", "rgrmz", "rysudo", "bzkioce", "mqxkzvu", "wyebk", "tymoaz", "mlmbg", "djbmek", "qfnme",
"khkiyae", "tjdaxry", "sqtcwz", "ehnsai", "jhncvrm", "cxkzgrx", "pummt", "hzrpfcn", "lkyqit", "phpqdxw",
"vangm", "wcjdgw", "pxesvtn", "mnqory", "bdrzvh", "brtzmo", "chqgf", "bipyxm", "meoikg", "ysyckk", "ojayeiq",
"zrfbsb", "yhuotea", "crfbhq", "tllycn", "qxnzihf", "avyawpz", "bwsjym", "myjozc", "lbdksm", "mctlt",
"dszowuw", "syshm", "xrvhhkn", "kgrcwfv", "dwlajlf", "yviuk", "xegjj", "spiczl", "vfvomi", "mgcujy", "dqmzb",
"isrisgt", "vdrtuah", "vsyth", "eoclef", "poccek", "cgafrlu", "crbhpgk", "sromv", "xmvbca", "gobra", "ygvlq",
"pjvhe", "tfweiso", "cskuohg", "eyalone", "pobkak", "nzpxn", "lbcrws", "uhtfe", "eorth", "showvu", "hxsmb",
"jrggose", "izifkb", "oqwyf", "mozmzj", "ijwle", "ggtqqqv", "geevzj", "meota", "ifsse", "kdtofm", "swydhvf",
"tzjhqap", "wqwwd", "jlinnov", "lmxkgeg", "stbot", "xrsfn", "etoyctk", "rygagm", "vcnrf", "zkdge", "emqtscp",
"newqcyy", "nnuus", "exwsxbd", "zstvl", "lbkko", "kygkyqq", "oggji", "xytbjo", "mfbahk", "ggoks", "lmqewkl",
"qexhyqe", "ogaogio", "nzvbav", "mdole", "qvyks", "gkupfu", "dgmpn", "ngrdrj", "iitqvk", "ipuiqb", "ugxfea",
"ialkmv", "hmgnx", "aoyoj", "fvzhjil", "butrbp", "dwhxnes", "etkdwg", "cjkghz", "tovkq", "mmxhv", "jgcsn",
"hmictal", "zxmnek", "pcoeg", "ntyqmlq", "hfubhtg", "ydjbv", "xnwlqto", "hatgi", "bsaczd", "pokwk", "arxlula",
"zjtqlk", "ocfxup", "nsnqjc", "xdcsopi", "iqxyxp", "xfmtpvm", "bqtgcf", "wboycn", "aoeda", "uowqdgj", "rzzzx",
"liucs", "ejzxz", "qmlehsh", "igrbmon", "dpmkbon", "pmayh", "nujdwdw", "awdgo", "ijgkzk", "inhee", "jzdtv",
"adhauh", "grtmbp", "qndbvw", "zprrw", "mpqieq", "jzmzeuu", "fcvftqs", "qxzxqy", "lidguzz", "eazwd", "zjhfsz",
"zsnzefh", "mnckfg", "zjgtq", "ckyxlif", "fznfo", "jegnof", "lzwyzb", "ozivfio", "igkclsa", "bebzn", "bitsggm",
"lrnwin", "hjnnzr", "idvoirn", "dgile", "vfngh", "xbmur", "rqaftt", "wjwwwxs", "btreou", "gjsycg", "pvsiylz",
"ccxzgdf", "excrrrr", "fiesr", "jdioj", "uzwsc", "odrlcoy", "hcsit", "ptwfprh", "sbqry", "kffvy", "ejeawbp",
"omvcc", "iqgxqlt", "edsuu", "xnbue", "qfbcx", "fzlmbkl", "wrrcueb", "mmqispp", "nknilwd", "dewuhju",
"hmdqlxy", "vjxgg", "lkuexo", "dzvfscm", "voulbs", "uevoqgq", "kmhwu", "oglzllg", "torhihn", "fhuqzc",
"mmcfhb", "woyayma", "uznsvre", "mmxed", "aoskwg", "xrosbm", "hpyrgh", "tghwbwh", "hcwzn", "iepeftj", "judij",
"kudbk", "jonpv", "lywck", "rxelz", "bgifz", "mehbxq", "fmqnz", "sqrmzj", "iqqjzex", "qioliz", "kjizbf",
"lgdcffc", "pfgmcr", "trdabul", "vlqjdnc", "jjvbxe", "fqlayw", "ilbhtyq", "saawulw", "gxysrb", "kighql",
"eceapr", "kztbcww", "jedkoy", "dxpcaga", "ndacphe", "rcoit", "ywgcnxg", "klipfup", "bddws", "jwyof", "lrfwgo",
"bediwuf", "ujakh", "ppima", "xzhwvm", "guzmsqt", "ffbliq", "adjmynm", "akabzn", "inmykju", "vlcjyv",
"orquepg", "tufrk", "vqpjymm", "lvuab", "qzxav", "ekcmu", "uqtuhie", "kfvtgf", "nklwjo", "ujxlfpl", "zobfpq",
"eignijd", "ythctg", "artllm", "wodhh", "tzpwszq", "njdqegg", "hzrqib", "zvoxtfd", "htboem", "axjuix", "bvmvm",
"jbnum", "bxdth", "atejt", "gqsqtnk", "fykrjbp", "ldyhonr", "wcuoj", "upphc", "agydg", "cjmwk", "rhxbqh",
"tpgozdd", "qyqoy", "zjqutw", "qoohqny", "nsiacwz", "xupin", "criuvs", "eswjeft", "pdmevn", "zvogq", "lrrvo",
"qhfqqpw", "ktudfg", "ijvmi", "neyjjdx", "rllpi", "vllvaa", "esebtu", "jyhcrh", "otgmr", "oudvyxj", "pmszy",
"opeed", "gicni", "mnuzn", "mjbfpod", "sqwgxu", "dwniwz", "wmbmmv", "lyafuy", "zmvlz", "kopxzuh", "urcbbiy",
"guhco", "nerjm", "lpdxc", "hxmjzz", "hynagc", "iyxeczi", "bdfxmoz", "yybnpqd", "jvgnb", "oquqem", "fmclmz",
"dmkhf", "zxbjpp", "qpxgcir", "iecvjm", "gtkne", "lgtqrbc", "gilbn", "mcxsg", "ncwbhn", "wkriiq", "zhsir",
"ptkkmw", "jcbpkrm", "vbefo", "vmbcd", "vqffj", "fhqzjt", "nryuh", "vmclav", "cjyggm", "sanev", "rrdocz",
"zqdexbs", "jrxstt", "pyhcesj", "aagghyr", "cyemjrb", "aliohf", "qaslg", "pnyjzxz", "pehnvi", "suhuw",
"twopabr", "sapqoc", "mckrh", "nzlgrxt", "aqpobnu", "pirbjgb", "plzlj", "raylxpu", "gyasfrh", "urjfxux",
"xjbwau", "iupknn", "vhxnc", "dnbjop", "vrxhwmd", "vjsmkh", "rfmqids", "smaiwt", "vkyfo", "bjqyxc", "rbbbp",
"dlkzg", "dwvdwu", "prulzh", "bavge", "ehhrz", "xxjqk", "pxopmp", "okmkmb", "slcznpp", "nvqlb", "jalrk",
"parwlcd", "anbxo", "oqcxyzo", "fjhrdjh", "pgvnwfe", "yfjyvh", "quvszjm", "xyiig", "xtncqv", "svsix", "jvpdnh",
"owuiv", "bsrugtt", "rmvggws", "lmdql", "kvmvd", "xrpmaw", "ssnxyb", "oworq", "rmmpuya", "rijpih", "aelazka",
"kncksqx", "yvtdiy", "epato", "pbbamj", "fejsw", "zgsru", "ekwrre", "zqben", "vugxi", "fvcsdp", "rujcews",
"asqxya", "worjlsd", "xggakg", "kzfpot", "haqon", "ypqxzz", "mmkzwt", "bdhif", "exzhv", "srnklzh", "hlrunb",
"dwfyke", "fvgbtdm", "aeutp", "czhefx", "tegfw", "jkxpsb", "gxkfkw", "exvntd", "gvuti", "jdmly", "owaqhw",
"fopuxzv", "edrvil", "biszwgv", "vgckzd", "fqdxn", "qktdf", "hpgwrk", "gpxiips", "vxnlab", "yylxz", "hsuscch",
"bhivaf", "wzrwtc", "ebplv", "yzxykou", "mxlssom", "evghv", "hksleg", "shybau", "zeyqa", "tljqka", "axfkec",
"fatdj", "janlkcc", "sjorbra", "jplge", "oazzot", "qbgtncn", "ozlil", "stohadq", "rvpuwn", "oqwpl", "byftgi",
"ubuusl", "fkogr", "bybdyhj", "vinyuzs", "ivsqvz", "vmnae", "gckxw", "rozbe", "glvxwj", "rcgicu", "xmvbd",
"itycsry", "llmwrs", "fuqth", "styrrwl", "wsseuln", "xwflcli", "muxgz", "ypmbboh", "rpmvnep", "wjvvnv",
"arjnw", "toauwc", "ltjxqrl", "basffd", "clxozwd", "glmrv", "iejgfj", "cvkoj", "wotjf", "mqucec", "xalgemc",
"hgimkh", "golvfq", "fuqpmak", "mhpcp", "pxoibt", "ledqa", "guzbyr", "ztvbeka", "racdp", "krsngra", "aaiknz",
"bhoobyc", "xibbe", "yohepxk", "eclevs", "ldliwcm", "qatvlk", "eiypbw", "vxvtwa", "nkdwsej", "ftmyvp",
"gpthye", "gazwoi", "zzgipon", "cithg", "wpabujl", "jhezlnb", "vqqaxfg", "kvpbk", "vggjemp", "owylv",
"lgwtfpg", "jjqvfm", "xbhga", "tulvfv", "sefuo", "hbysv", "ozopepd", "awyrifd", "pnudwx", "vreje", "zhpgw",
"qygbf", "tvbrvy", "zzmcw", "cznee", "deuzxt", "qfppjvi", "ilkps", "ydwhg", "krwkxzu", "mnsidg", "rkxyyr",
"ajkqz", "xtmom", "vqocor", "fympcl", "yyleyzy", "jjvzhrn", "kpmxvuz", "txoeqlx", "lhhmn", "chzgpf", "ncnjxle",
"ihxrg", "feqixq", "lkfhcar", "hfnsh", "bifczy", "umknat", "yrhgkh", "mgpcu", "qotukst", "yqlmfq", "ttcdp",
"xnjjzm", "cukbr", "hjhjb", "iikfcsr", "nsqbnnz", "dauygf", "cmydq", "lfnhqnl", "ppqgs", "hscbfug", "ohzisud",
"opspdkv", "aauxbop", "wpkhzo", "sxbsgu", "tajrv", "ololy", "mxmus", "vizvxv", "osaqz", "rxygkn", "mrzqlf",
"zrriyxb", "ufroe", "bajozg", "atpsu", "uhgauzu", "tffdw", "mdjulde", "rbrmy", "jhkqvwl", "gzsultq", "nkbfi",
"xtvwh", "dryzcv", "emaxuk", "zucvutb", "jdduyk", "bjdin", "loicuq", "qhjjb", "rgfjbq", "mphnk", "lxvceyx",
"zeoxb", "fxhnxu", "qpbipe", "ophwp", "wiioer", "quchwj", "pouxunw", "bloxgg", "xbsma", "dtwew", "xstorn",
"qfrfkz", "gxusbsn", "dhnxd", "mhstbs", "hekbtu", "wvrrjw", "yeiwd", "patplsx", "qmyiyi", "mowboj", "iskyd",
"bqhjj", "povppk", "vthpwx", "uuydaw", "rduxvez", "vmcww", "ylruvph", "ymqosp", "wzcvohg", "lhepwta", "bckhc",
"oiyyt", "wqzfv", "uduec", "lkkbtzl", "prvpbo", "jrwstii", "ijztoo", "qwwth", "vqzqiun", "krnjp", "zyanpiw",
"ojhjhvg", "lohmb", "thqtf", "reptzv", "zgkyq", "lhkvy", "cmjwl", "fmilgpw", "jrfawz", "vrtzd", "ezgfl",
"plzng", "zidzso", "civavlg", "vtwopu", "ljhckxo", "nuydt", "qembl", "fiwrre", "gfrgi", "gzegiq", "mltlqo",
"pcett", "snbsc", "msibcqn", "beacrhz", "vsycjt", "gjqji", "smcegol", "zregkp", "smcazoj", "dziqad", "jpuwp",
"hnlztac", "vduitco", "wyencad", "bkdnnqo", "cabzyg", "mgpcwr", "fxgvkxt", "wlkcrdd", "bhmhsy", "gqcctjc",
"atafpt", "vdzhmcg", "ighxj", "gfqpale", "fohbrtj", "mfpsgt", "tarjocf", "gyycb", "qvqfryl", "jpwowwc",
"jcgcg", "gmrjze", "nfptxq", "hmjhxge", "ieelj", "suvkgr", "nwjxe", "tkepqm", "extnpmq", "rxzdvf", "relzaa",
"hfhgaq", "lmihlz", "pacocq", "dclxr", "oknoem", "pbpnnd", "nleerfl", "tvytymc", "aamfnl", "ufdnq", "bxyzvyh",
"vksvout", "lohxhf", "sskgn", "aawbv", "hrvhx", "wvoqf", "vxkvh", "oqany", "bcmyd", "epdddqn", "zrlej",
"bchaf", "hmftii", "mefcrz", "wbxvc", "ewwnldf", "cqecxgh", "cnwvdmk", "vetrw", "zmogwov", "lshlzpe", "lijay",
"tcdqg", "xavqixd", "yjkhtsl", "myjvow", "cgthhd", "taaii", "iuuegk", "lcypmle", "wesrit", "tybco", "nhxysw",
"awkrj", "jcmqa", "porvo", "nrypriu", "vznnevp", "hzklwi", "vapuxh", "wyfkn", "albemu", "ttfdbl", "dbqrjv",
"cxals", "qzitwf", "ysunur", "llsefy", "cghfzji", "jboaa", "emhlkw", "khhmgha", "twlxgjz", "pyujor", "ozcax",
"fetvovo", "mdhrrd", "qdhdne", "fiuvw", "ebyxh", "ldaothh", "vwyjf", "yjyljlu", "ivroqg", "qvpeyec", "eemsdra",
"wavgeqk", "bjejrqg", "mdjimoz", "fgopy", "lgwodr", "cunvszh", "wiver", "ghmog", "jzgfyk", "vxlbx", "kvgbtn",
"cunorte", "mtesdc", "zdzmqu", "pigik", "smruadg", "czjxlt", "kukgaok", "tsldpqq", "luomo", "ezbcvdc",
"tfetwes", "uopzf", "wsvezkw", "wrnlvbx", "bpqungd", "jqnnof", "rqhiomi", "voulqb", "ouspxn", "chngpz",
"fbogfcv", "nqhunxo", "rydbke", "ewduo", "suqqwup", "oxzfxj", "kuwfwm", "euiics", "mvftoau", "vstfbm",
"vnmtoo", "muicf", "bjbskxb", "knbomlf", "enrbtfk", "hnaqe", "vxzsr", "gkqma", "qygmn", "ztkybmb", "injggpk",
"enqrgdk", "rkgoct", "tgaiu", "dnknoxk", "iwuou", "oxanccl", "xestej", "ekrqq", "xbwhz", "jkdvxfh", "oybaay",
"afyhci", "papffjq", "bdppssw", "qwyvjx", "xmnnosl", "kvqzjl", "wcwii", "ygfvt", "tpabbht", "kjmaq", "duschjz",
"gguiof", "wgfhve", "joqmfjq", "smqfd", "ynlovlz", "sgrzum", "bobmux", "dcppi", "isdjrwl", "lbevb", "efqsirq",
"hlgfql", "enmemlb", "dbmfk", "ibfpzm", "rtdnooq", "yicdq", "xadul", "dxibxzi", "yyxnj", "jhsdzxw", "thltbi",
"kwhreyi", "hrocoa", "fnaalbd", "vnwona", "nnonm", "naqaf", "xgzzies", "uhruynk", "kgadfx", "hyohzbd", "hnajx",
"yipzh", "ezdxaet", "xbzppoz", "rwnewxz", "hlcbkmb", "znyhu", "zsqtpkr", "gmyxr", "rphyvo", "bgjuz", "nulpv",
"eejfoso", "xmwcnes", "xxxxnpe", "jezkk", "idfsxrw", "qgzjtf", "arpzpo", "hxsanlt", "emvotcb", "sknzhvg",
"icitca", "ivhdln", "sqilerz", "ndigw", "bcsre", "mibbep", "zsczom", "cgghjbb", "fkylfgt", "bvzofs", "mefsng",
"bispbza", "tsosgy", "xopalrw", "wserf", "jbmlz", "xidxny", "ffmpjos", "vddwxmd", "netnsg", "kgevsp", "pguuv",
"cwisp", "slxiyb", "dmwaguc", "jobwusu", "uytcqrv", "hzhsy", "zrlsdd", "xhxah", "rxzij", "zwdgy", "ygmvkz",
"drkzbo", "qpsal", "tpxvl", "lfmfl", "sayjvlh", "rdamym", "ycuzd", "zkycu", "hdesec", "unequk", "lpkdid",
"vorxls", "admsdop", "rqnvkyg", "krnqqtb", "rxfms", "xfthd", "pxjbk", "gpslrg", "rwziwef", "usxgqvz", "baxxye",
"ocrkkrw", "lrlgsp", "ceyctg", "rniml", "vavug", "jgircl", "jrpnmsa", "rywvlfg", "prxnys", "fkzmknn", "ooelc",
"btvfs", "yqepuvw", "tmmmb", "qmpzexb", "zjckjvd", "aieytbb", "oafqq", "szrcyh", "czrxgae", "ifkte", "hfgajox",
"pwpnkqq", "yqphogn", "xuwthrd", "mpcmy", "qitdoa", "avlzfrh", "ywpip", "dgeki", "fgbnx", "tyofu", "xziqzj",
"qxzvqz", "vtsqk", "ipkld", "yfhim", "ebaegdc", "ubhrh", "ldejv", "mtflwy", "ocpyj", "yopgqs", "fkjxxd",
"njnnwr", "nylkeb", "taymdqv", "ekpznq", "cbzobmg", "bucdds", "qjozu", "uvpghor", "obhnu", "ljkxbg", "uqrxjtf",
"xwbxiw", "oxsmcg", "spchdd", "pcuitj", "faidq", "tybmy", "uygiyp", "qloizj", "cafgmy", "smetd", "kwcwb",
"tdabxf", "fpmrc", "lfjujn", "vvmvex", "mnsgdc", "enjlgsw", "ohwcg", "kxjdaup", "rotjarp", "aovdoq", "oviwq",
"qwaxs", "bmazco", "plcljsv", "yytjhl", "vgwjm", "drnue", "vqjgf", "uqlsfy", "bmqmfp", "lkauwna", "ozmqce",
"heunaxr", "zaffbj", "arbek", "qjnllw", "fdkhlz", "wgmbwh", "yceqag", "ltjjq", "yurggfw", "puaafsl", "tjiqkyt",
"yuzub", "ytmrfq", "ommmu", "ipknn", "iubnuab", "dzthvc", "zjbzpew", "dcooev", "pjydqcf", "zuojlzy", "zwjyfc",
"spmac", "dfkbnz", "fzriie", "asusog", "hdodx", "drjpo", "ddyif", "chabv", "ebvkwrr", "burdjl", "jjddi",
"dljzkye", "samyg", "zwgxcq", "xtratwo", "qfopz", "xvlaw", "laage", "btdium", "vzlnzt", "kmvbzkq", "kctobsx",
"kazbelu", "yxdwrk", "eslvjc", "nhsdmvs", "zuxqcc", "hqtxovn", "zrbdai", "fgjxs", "txecvio", "kjxlq", "dkuxss",
"mkbevn", "pzmdqc", "ihyia", "atsub", "twytus", "nzooxj", "qwuoly", "fdoigo", "zukhlh", "mugeaxt", "qqsfyls",
"qqtql", "wrvphcx", "nzjfhx", "uequtk", "fxuto", "qnast", "nveys", "ltbrcth", "toctdib", "fbpnh", "umxfgn",
"zvjuta", "yeron", "qzvswqk", "gbctr", "ryryz", "zieknd", "zcsna", "jrhak", "zfxqsj", "urlba", "lbozqf",
"yfcjaa", "hazgy", "gmmfzyz", "zjvkyc", "rvfdcf", "daitab", "hcxqgum", "qwakp", "ltbsjwo", "pqqtygx",
"upxcxao", "qylot", "lmxqc", "dwzcd", "tjccm", "mqcpap", "wgxqtr", "ivycvxy", "wdykg", "snvqka", "jxtvtsb",
"jnyowsq", "iwfuoig", "cuoixhu", "fzwalg", "djhrar", "sjmahk", "dyusf", "wrxqvdi", "ftytlor", "jsjbv",
"vjbebg", "agvsn", "vvmpgm", "gsgjopk", "vbqvhy", "afopf", "zybfuz", "aqsgc", "ytrjsvn", "wlhdfr", "vdhvl",
"jrlvr", "cscxwf", "yhgbew", "wupbl", "ssuhyvv", "bhcirzk", "oykwk", "ijbto", "qsnpgw", "otwzage", "ytqzh",
"rgwow", "bvhgkwh", "fvawxie", "fllxw", "gfcqf", "scoqb", "qubrq", "gdxjtp", "ahrpck", "awnlgi", "cmehsyp",
"dwmytpy", "firyeq", "oohwhr", "caelk", "mqemvs", "qflkzi", "tfpibll", "ybhzd", "ctsxri", "yurocj", "dnlnl",
"ydmdva", "xkaotl", "xovax", "ypynrqp", "kwfzw", "fbgsmrc", "tutime", "rcugul", "cvewno", "typhbpa", "wazew",
"flzfs", "wxxbza", "ogjfkl", "vjlebet", "imbubm", "xinyncy", "dqmxfy", "buhagzh", "jjadpos", "gejyz", "gxshqk",
"wkwrs", "dqeriqo", "dmixr", "bysjih", "aoloq", "ddwhsxs", "nteqv", "cqagf", "ditsrn", "wfxgl", "jwjqb",
"rvkxj", "rxapr", "yrlkip", "npquasb", "nvezlr", "gmhchcx", "lodfihi", "dheypxa", "plzjykh", "qopsthg",
"zsnes", "raongg", "zrpnac", "tzmtltj", "jsecdn", "rzudh", "hkcyic", "xsxmw", "reeuwpn", "grkwrag", "gvzzbsq",
"lrfta", "aqyvbkj", "ytgfu", "wcmvd", "olnvfi", "hhgmhb", "kojmepr", "wpohl", "szhgg", "hymiblu", "lkwjr",
"zulqpz", "sdcqjo", "olgsgez", "lxkpqci", "yxcgn", "gmvex", "fskpppe", "utzto", "axncvp", "lcyahba", "ydeae",
"zvzar", "ghfkkqv", "ryrpg", "gucpbq", "reofjz", "cdnoo", "dchhh", "byiwd", "cqbhok", "ksfnoa", "xsmmlr",
"qyvdfqh", "dzshj", "bpifnzh", "uxmoml", "jdxvojf", "ihfll", "vwesfof", "zynnpb", "fwzra", "rxlgww", "vkmjd",
"hcjgzt", "mkapfl", "ffjqlf", "wulaebc", "gurramv", "tufkzai", "bxprqek", "nkohv", "abgfwyl", "slslg",
"wirsnh", "pykvuh", "fdrwk", "gtmgsxe", "dxsaab", "lqiryty", "aoezg", "tzhugcg", "uoarf", "dwhsv", "rjiuoi",
"ycgcdnf", "rtfmwz", "amkjc", "woogtdi", "deprx", "ucknu", "womfm", "xdeev", "qapxpuu", "ngulnk", "fgtxyf",
"hnyabid", "cilmy", "wrsewtf", "luvtmo", "wftuh", "ifoeeqp", "dtfdhhl", "rwnburg", "fohkkul", "frqqi",
"gsrcyc", "teuync", "dvpvak", "daqjki", "kksscp", "somsde", "tyfvck", "ftfekl", "ahncv", "yvosm", "qgllvg",
"ylfwv", "jenqns", "lqovrnm", "iyger", "nfvtsv", "bknxmqj", "pfzybdr", "hqjol", "chlpk", "etgrtqa", "msuxdx",
"vnoatf", "ypdzomn", "vsshmg", "rfkipq", "jvpbiz", "vbskd", "edsoixj", "uowim", "hqtsj", "inbsxal", "ookrv",
"ipotdnk", "kmazqd", "jpfghb", "gvmnnpv", "juvwa", "xtkvzw", "ejqcl", "ebgcnt", "ztuyu", "dlzthw", "zzipe",
"iaxwdxy", "htynwkc", "lefbq", "pizfr", "vttrsv", "oagak", "eqlrom", "vttefg", "dsrmk", "oekbe", "cvugzk",
"diwvz", "gxmfob", "vjowzm", "mjpop", "uznhz", "kqvjwug", "wjqvxfg", "jbpwezu", "wsckdx", "slqfomn", "omuxk",
"zlgblso", "kvitoq", "dmafq", "djxmzk", "pjqfegq", "yjrttas", "siakcx", "iutiqk", "nwfdj", "gbgtazk", "cpqtf",
"panmlr", "aqubhsg", "iwdim", "nqetym", "mwazh", "thyhy", "ydtxan", "xfoin", "lsosc", "esznfa", "xgdisi",
"flvbzh", "mpltx", "iwjpsqp", "udfycf", "rntmc", "ltflwu", "wkgbaw", "bcuzt", "hejxuhb", "lguohe", "klnhb",
"mjump", "avcwrol", "yrcqlc", "ihxul", "avajh", "gtpauet", "iemzk", "rfdub", "gqnbk", "cfcmg", "iobyh",
"iruuapf", "tyifwt", "sbdtp", "mngcpmb", "oaqpolm", "mmimmh", "gxknadi", "bmxhuu", "ulyoa", "keidy", "vsnfk",
"cnnnfty", "pkajm", "ddgeecb", "prxidqd", "wmenvhd", "akjcqo", "tnekfef", "ipvsi", "pzjwq", "wmmct", "erdjnuf",
"vgeaqs", "nlbdx", "dpvbe", "dgeqz", "aiguzh", "akawppx", "tykrjcs", "gvavo", "hkyle", "yhedx", "xzqcg",
"gzdxt", "csssbk", "tmekrmv", "lfsgo", "iizahz", "aszfd", "aybqnsl", "vadwxsl", "ulmiii", "xaxdugp", "sfnnsbg",
"dkyruh", "qhpqu", "amesjd", "evjuki", "vtqjw", "aoabp", "qnsuhe", "bplbx", "fdqok", "ozkhgib", "cggwzys",
"nbknjay", "ooambw", "evmvegf", "htdlxik", "kahcume", "bojpn", "bhipie", "hdyjslw", "pbkkq", "qwszl",
"fgkbzsd", "hejdx", "vmcfhgx", "puzlmmm", "meffil", "boakbiz", "eczot", "fvkkit", "jebfx", "umvkjg", "uikgs",
"rycgpf", "rfmfgmy", "nveho", "bgywqen", "gepfma", "vquyq", "wcercbw", "wbpjkxc", "rqloeda", "omclokx",
"hvotwp", "tvqfxxu", "qrtghk", "hggme", "arnmfnt", "cxprj", "rspdt", "hlgfq", "dmqel", "pcerxk", "ptqjc",
"wzreko", "kahks", "xjnzo", "xzzye", "xbdeu", "koiwkv", "jlwkkjr", "xzdixoc", "xeedvrm", "mrtnhqi", "jaeann",
"mvubp", "olklqf", "retbgcj", "qxxlhh", "cqyyoy", "ngwikg", "qijte", "sjzck", "zkmkx", "ongtzf", "tanow",
"smgntvq", "urfgt", "xwcroa", "kadcpd", "cxhgo", "walku", "kvvcsyt", "elwmuxk", "bfphtm", "vzeumuq", "sknvev",
"vbsnfd", "grmbg", "vjahwt", "dmcbmn", "smubz", "jobbfcv", "ujlkm", "lcthh", "bauuqdu", "kjgzgtq", "gicjz",
"nugbax", "kbnjfiu", "sqfpein", "obbgfww", "ykggxjx", "irnmog", "xniuv", "rqiwycq", "hzlgyu", "yjtrttv",
"satym", "dgqhlkk", "rghal", "tbekx", "kkwmo", "eahwhks", "bpvmbur", "sqtgkj", "khboz", "enefr", "vkzqvt",
"wfruavu", "ninomu", "ypktaoa", "mlpmoit", "fxyhjfp", "fgnpp", "txieja", "dprnj", "bgyrp", "zsqwqrw", "stqzki",
"kwiayb", "ulbsn", "aetje", "vwzbb", "tedwyqs", "cymiruy", "jigpoqx", "ypuqsc", "weletu", "gvibea", "chhuldm",
"baylv", "wdhovo", "imfqu", "meodnsk", "jhlckqw", "jolyfh", "jsfkrhr", "tnbfzvs", "egcfht", "qnzmyr", "owtrqu",
"oqaqu", "xftys", "goxfftm", "sgbnp", "bhfvaz", "gospa", "jwzlvwk", "lqncoqd", "xxizglc", "bwffm", "mhpggzr",
"kdaoewx", "anviou", "mqiij", "wkskpn", "enougdh", "vldnn", "gbfgz", "ejmbh", "qsdrvsx", "mrvbz", "cqlufpf",
"kbgjlu", "njgna", "admrmk", "pwwsc", "gxkot", "pdjwh", "ejwxt", "bpaxufv", "iwjzs", "xxfsg", "vuhgh",
"srytgb", "yesvlux", "tggnch", "cgnbb", "fbzbx", "aomoqf", "zkrvrjg", "ueaoz", "dppacnl", "ewovhxz", "kbvee",
"ixeeb", "gwgoqm", "hlwlxe", "fpmkrk", "wzjsr", "ispwe", "garofu", "jcmpec", "tggeo", "yzdeo", "axpmln",
"zhnlhck", "duyqcn", "tpqwqi", "jvmaj", "bisgoy", "mpwmurb", "olqla", "ecapwan", "kcpxn", "xcapin", "ooctk",
"sgqql", "vcyyjxf", "ejyom", "jsgtha", "logxnjg", "nypadhj", "dprmk", "cqkuzb", "gratv", "tgkjgu", "fttcafm",
"tpryi", "ubbhw", "uwcuyn", "zkgohs", "snfesz", "ifrex", "tkbfz", "fvvkp", "otjiq", "lgomjjv", "ertracf",
"bregu", "kkbizb", "hyhvn", "zjcnxfl", "mceskuj", "lmupdq", "zdzqzgo", "yorppew", "fpwtjd", "dxvyzt", "bbnnu",
"pkycae", "ucvapn", "dijmkb", "nvwwpr", "bufkw", "zhono", "vayxf", "hlfwkev", "klkvkj", "yzgpwg", "lcbqr",
"tkkfi", "pcgljx", "bhduxu", "rgfipts", "hkjbrr", "fobvy", "wqmqhxo", "yjgvypg", "ehgoizl", "ipiibzh",
"aqxbxtx", "lrtin", "fyyuypr", "pyrocgm", "kwqbg", "ukccw", "wgsbpvx", "pcoivrv", "okhxaba", "bbuaibf",
"ccvfm", "phpst", "yxtqiz", "cdfbo", "sijfljn", "gdlhn", "bqmbced", "tiejf", "aurqer", "olmyd", "prctay",
"lwflhi", "bbehvta", "oxoda", "lklyc", "rzedhp", "kairil", "envan", "wdcwfk", "xoroddb", "womrlr", "ruxebe",
"jnpywrd", "wrifvz", "zkewcd", "vllfrn", "uvdvjh", "bglpya", "vzokkbw", "apaoqt", "xpjizn", "xoajmd", "xapjwc",
"jcknwg", "bjpreep", "ffkua", "ukcbah", "bugvkrf", "cbmmfs", "cwaczhl", "nsqaj", "sjeikg", "fayqif", "slowoh",
"xjpvkpa", "ynunjle", "bqavt", "nkpqudr", "neikvd", "yuqlzg", "pdxbtrb", "cashlog", "iqiqy", "smjmxv",
"zbtpbr", "zzamzcv", "jmakg", "txfswc", "pkaym", "swlde", "utann", "mqgpjne", "pslfvek", "nbiqhb", "bzsianu",
"wnxgbi", "ahkeeiz", "dqdfjg", "bptdg", "pwita", "uqyflq", "txabjn", "yznjmve", "mukcqqf", "cxonbf", "ixuewjm",
"pzlcat", "eikeeo", "scwsoa", "uaeyw", "oeorff", "gbqgd", "qboqiv", "hiulpb", "dbbdm", "qvdxx", "aypxbcn",
"ykjwdbg", "pvfxn", "shrqyz", "zaxtu", "pfefgww", "jwifrw", "zxuud", "kpkwhlj", "lwptgd", "zpdmvsw", "takeb",
"ynehl", "kixtod", "fyrgm", "qirzmr", "shyvec", "xjgzt", "bwfvht", "wyehh", "renzc", "nnibax", "slhfng",
"yjtecc", "lghvbzf", "qroxvun", "mlsed", "rrudho", "cyffhh", "tjlxahp", "xmaepzk", "jvdzh", "bbvegrw", "cebcz",
"odjpeam", "guerph", "tgmphgo", "ohtkqq", "jcxojz", "haeheae", "erydxni", "hatjxx", "kwmgkjw", "wmezvy",
"hsuuvfi", "ineek", "grkxmhb", "alxkt", "rmspxdg"]) == 13956
    assert s.minimumLengthEncoding(["me", "time"]) == 5
    assert s.minimumLengthEncoding(
["yiyqbv", "njqvawn", "wnlovvp", "vogum", "jpolc", "zleec", "sxdrww", "rbowr", "xsjorra", "kwjsx", "vornum",
"echku", "kuizegn", "rhuvv", "eemkh", "yshht", "pbixoa", "cmbxvtr", "iupia", "nmcbq", "mgrjsx", "ejvniwt",
"svhsel", "kazenhf", "fevpm", "xcwqfgw", "ozikzc", "mywnmqt", "taorwjm", "gcshacq", "fgtasq", "qexygw",
"ljmbari", "zfjudos", "rgxuzy", "kmzryaf", "exjfd", "mcqnebz", "ptoim", "zglfi", "fhneaz", "rexgc", "lhplwyr",
"dthdp", "jizetec", "obyzg", "rqupa", "yphttge", "wdcdn", "wdomtr", "hchbd", "ytyra", "upytftl", "swbbi",
"qpcybv", "dcoxspd", "dftkf", "nwjfmj", "ojbwy", "zofuy", "adqkt", "kpcply", "aeukw", "fqblb", "xurrbpo",
"veioa", "puzvl", "bnzvlax", "tjzsdcw", "jarqr", "orxjbg", "ilrqdri", "syjuoyi", "htoqdco", "gwslw", "dpqyf",
"jnkhv", "fpqhpr", "baewnvc", "caunsf", "qhbpe", "wlckl", "lmoroqe", "ddlak", "qipwbfp", "cefqs", "surczp",
"jtmfuro", "ezhqau", "dlsco", "hywoqh", "lnifq", "hvfmu", "cqjdkok", "tggdact", "rwuowdk", "attnl", "lwhyq",
"mqtsc", "bmwajiy", "nyohug", "vvfpt", "lbyazu", "sarwago", "iccztck", "ugsxcw", "rpwza", "yofmlll", "ulhdzhg",
"lbaqk", "bwxxwc", "dmsbawg", "tjloy", "imbrkul", "xguke", "shlkuq", "lizjcdu", "kmvykl", "ilqxxjm", "rtbvvqt",
"qisec", "zobzr", "thwntt", "afpifh", "uwiiovy", "hgsyecl", "pdgnm", "mqyesch", "suexztu", "msguuwu", "yrykkv",
"xtoommc", "muteu", "bamml", "kkhlb", "jfrnx", "wpytor", "zzogpt", "yryxxt", "hzqofjd", "ehtildc", "ptclf",
"nyltvd", "nrret", "qqqqt", "uuxunf", "jajxt", "lzdvlc", "gpdtjug", "hjsso", "jairua", "qarxuey", "rpwwjwv",
"cjqypep", "tuzgcs", "oytqxb", "rgfmud", "stnwn", "tzzaop", "jpuopzg", "qeywd", "spnstrg", "dfwgntg", "yjyqk",
"ioowc", "duqfg", "gmqxe", "xhlbby", "liurjk", "vdujfm", "xxyyn", "omapgc", "koemzbz", "ziiyako", "pjmhfrv",
"bshtfgj", "ihjvt", "pnipuw", "fajiuj", "rdvcqzd", "mgknns", "ouwkm", "ejnklwc", "osepl", "gplpyvs", "paxrddg",
"gsjlpd", "lgnmgl", "yifeeer", "hhnwlol", "fcmxs", "ilinwgm", "udhfdtq", "ceefc", "xweqx", "jfelwod",
"rtywfjo", "kzwrgqx", "fcjriov", "fzytqv", "zcpcddo", "scpyzow", "kbzegu", "gclwr", "gmiwlp", "rtpka",
"yiywuyy", "qceot", "dtrgn", "ntwbu", "fxobd", "zmxwza", "qcksyz", "wgbtmm", "pzorve", "hztydc", "jqlay",
"ijdkbk", "uzjrps", "gfzibk", "gsxqj", "kgjrkdd", "smdeuk", "iwizewp", "owjie", "kcdccu", "ifltqr", "zrdfbm",
"pznbcsk", "mtkpi", "cpasir", "flrxrm", "uxcxnv", "htlfcp", "ltukxfr", "ftbbha", "jhgjgyz", "qjreroc",
"vcvtbid", "nrhlq", "gtkpot", "gyplqqg", "lnorig", "fixhufv", "ugcug", "ndfug", "wuorhe", "owocnkw", "rcnbf",
"ioiiiui", "kakwtne", "svxtt", "wdrxogm", "ibrxs", "bddqi", "jeguac", "hlftdw", "nutgfjw", "krrzvf", "amxuloc",
"deozdoe", "ovsvk", "sfqsl", "slgiw", "jbjujag", "mhiru", "uqksech", "davosw", "nlueljv", "rhtvdu", "ivdpdqa",
"qnbenpq", "dtapqq", "hwwfpxl", "oyrfosn", "goxgmgo", "tbvutl", "cbbbcm", "iiugpk", "hinkem", "vvaitk",
"pskyf", "hdnekg", "nqhfn", "dqbozx", "zcwpko", "kafyu", "jfegubk", "nofqzsk", "ujmxxg", "akwzemu", "yvhxb",
"qqlwofi", "hmoecj", "qwgtlc", "jepvygq", "uzggm", "fztiews", "lvndvf", "vulax", "znqudh", "whgqi", "noguo",
"vewkx", "uruvgf", "ubohmba", "aulzi", "flvfdlq", "yspfie", "wugif", "qndyiwa", "keihmct", "rggvn", "ojjmuoh",
"sbbcl", "cdivmoz", "vkusmp", "mfddp", "kgohwvp", "rjbbxw", "vsgptj", "hbyjoz", "gufrv", "orxiv", "fxcqfw",
"okppik", "qlouw", "lkryigo", "qccvc", "ixcnodg", "wlfilts", "ahqtevp", "kkbuha", "oehaez", "rzczib", "vxobk",
"wmetvjs", "xfjgeq", "eadzl", "aeqdvch", "czojfq", "hxshidl", "ofswsj", "iwbqcmg", "schhwtt", "ltyth", "wiccu",
"akill", "zaaji", "qepvfa", "mpvrkeu", "dcpenm", "wdhlk", "llqbby", "lronwkr", "rwtguo", "ofnvs", "lxdnwzf",
"dctmilf", "zhckjd", "hajsuac", "wpylhy", "zhipvm", "ihikr", "zzwjgvr", "gdglrn", "skhow", "tlqtjl", "uypli",
"evdva", "civide", "iroihm", "lvuzid", "vexat", "ngmvrz", "szdhbt", "ggrbz", "bsmovlt", "kguomvl", "onzvx",
"nobgxw", "tqxemc", "vbiyx", "fpzpf", "ogtvf", "yuthri", "xszbn", "xcuhj", "nosnpbp", "mowsxg", "tfalyy",
"kxombgm", "cukrz", "krmseq", "velzh", "kmufxj", "nvxlkq", "ualvras", "wytoucy", "qicqyym", "pbeujtv",
"haojnbm", "xnfffpe", "wvoiald", "rlyvf", "sxamoxw", "ztqnmp", "biiavx", "lnjnzs", "arqdjdy", "pkrgokc",
"qxswouj", "dgqah", "mnhzo", "ggilb", "qscrd", "ggvkimw", "qlxjys", "wximi", "aqlhio", "iavtvy", "grkqf",
"dwrtut", "uozutfc", "fogxpdb", "ydtntlq", "vnmpmwp", "gtxhwq", "mlpihx", "yfpjlz", "hdvcquq", "nunny",
"wklasgp", "wxduo", "topsqf", "tngcpzc", "mcrut", "pdnsmt", "kavaok", "seiqsqa", "bhgkiyt", "mawvhtp",
"domcnrm", "fgusghc", "wdaufwz", "tzpuks", "kisndyz", "fwyieu", "wtdum", "ytxhl", "yhzkmuv", "nppnqe", "ccvhj",
"dautnyq", "hkaliab", "kngan", "ebmhiop", "vsdkcef", "nmpcnd", "vxvnl", "cwcgu", "zsuneh", "qjgcmd", "awvba",
"rzbisxo", "oilqrj", "neiazlm", "hlyrl", "tmiht", "lwqxxv", "gyblrw", "gnnjkb", "lrxiln", "xlwlseh", "npfwcvp",
"yjcdhw", "rzndd", "orlhmip", "gatuojh", "osotgvv", "owksz", "kcocizf", "izlev", "smigns", "wtxfwo", "knwizte",
"mqjojzp", "lkezye", "xqldbu", "cvbpyl", "aoipbz", "asrupt", "bdwkesh", "jpaykm", "pksbg", "gdbsibd", "lfxpwk",
"rmnfph", "yzxwke", "xjwyusv", "yetar", "sytdz", "pnystzi", "yntcqo", "egoorl", "aydxu", "rfdrfhe", "flzkos",
"mmjgev", "fbjwmvi", "jeouc", "lcmkri", "aggsb", "aaeazai", "amyxpey", "onxqpg", "qrjpxq", "zanea", "niwsgtv",
"nsqja", "utgskd", "hlcum", "frygtl", "xjmqetz", "upqddd", "vxzdstm", "hcmtera", "ejstou", "xkcguf", "bokigdk",
"vurnv", "zsgrje", "nbxlf", "tpilcx", "lvepux", "xacdtp", "amdgx", "ubbvnx", "xmvznh", "tlprri", "sthkn",
"xhoad", "deotaxo", "pqzppmw", "xlcpx", "qwzrpyp", "lujabeb", "heskwyy", "mzzaaur", "vnestcs", "rryphdl",
"ibdiabi", "eoiyt", "znflx", "clougix", "zzadxw", "lrrgtf", "lsdoakf", "yxfmqx", "qhnrry", "ktcdmv", "veygqu",
"btjlo", "fcspsc", "gozoazm", "xcsqgz", "aazae", "nkuvask", "mzdgjq", "sihqdhy", "zadrwzw", "gzcyuea",
"lpgccic", "fqtfuzw", "bjoqpkc", "oydpkxc", "sugnnu", "hyvygf", "axkxo", "rsmzb", "dlhqmac", "gbqby", "npqkj",
"odbtb", "bdsib", "zyasxv", "ifxqcc", "lmnjwhr", "ibuyu", "uzhle", "ccpwhjr", "vhrojnz", "fkzfz", "fyesm",
"dnvipvm", "jbbqn", "qdkgl", "xkvvgq", "dphugaf", "soxbfun", "rbgokx", "biveiz", "vbaqtn", "qapydgf", "llldu",
"ottjpzu", "fwjuc", "cawio", "gbkwe", "rrnnxer", "luviy", "zsalse", "ckwdeox", "ozhqocm", "vtozfwz", "jztole",
"ydqei", "bfugz", "psawjp", "dzlyrwp", "izuyrne", "rbwcfr", "vdvte", "usjbqs", "zzovkxr", "frfkwk", "mmtmdd",
"sntka", "wachbzo", "rmzvj", "scbngo", "eqiuiwi", "qfakk", "cckcmt", "owhzow", "rejdlw", "iprsqdq", "twwaldw",
"mfilzyk", "jygvx", "iewbo", "irhko", "zpazqhn", "ndqbg", "ayzxqdz", "zvpbh", "maapq", "pzitrfm", "qsgsurv",
"viwcfff", "wpgenms", "tjmvu", "czuemc", "infxoo", "avhbw", "nugkqx", "xubakjp", "ndask", "utaqq", "njhuxq",
"sdvuex", "tfmxqp", "bydovjo", "bizxjsp", "zoozxyv", "jegei", "gkpqobw", "psumbtg", "gkgoh", "sgcbpql",
"xxkhy", "kdorkr", "hcomj", "ulrpyv", "rhplil", "tyyochd", "xhzul", "srdjmns", "kgukye", "yepvs", "xnobsjb",
"umxmtub", "wvqasr", "igftpzw", "exhecn", "rreee", "jpxuvxh", "jriqf", "akexunb", "ekvdsoe", "ytzvj",
"vfrlyae", "pmfai", "biouzle", "xkbce", "clzyi", "xhjoso", "wmxkxb", "dqzzig", "ydtby", "gskwj", "wlkwbz",
"zepvllz", "zsgqp", "blntawk", "eynmil", "bdqyp", "wgtnqbc", "rrgaq", "gtafuzo", "qdiko", "kkcsdo", "zwqhs",
"kugzbmf", "wtvvs", "kqsdx", "mxsuxiz", "pgbgjfe", "vodfr", "qbvwu", "vfwbhgw", "ayojye", "kolzfqg", "xnbecj",
"akbcnf", "uutrn", "upmesa", "marqej", "bbucee", "bazqbau", "qikgsyf", "oeayzn", "uilxnzr", "vpnxknl",
"btgtxgh", "vjaav", "zaxtzah", "msweps", "awduwld", "gzaep", "ngvgc", "qpoqdgn", "kimndg", "qilmmpw",
"oafhlyp", "nyelgvw", "onymk", "feycbc", "dhcrx", "siqpfly", "tyvycmf", "huctqp", "uscjrp", "bbptd", "msdmu",
"xlxhye", "xnyzcox", "kyskda", "injdkmp", "jiwus", "spjylwd", "eqcrnt", "snfiu", "jvwvge", "yfeaw", "mmdnsjj",
"suzdw", "xiupf", "rjwjhng", "tqvasy", "rmibpa", "zuqax", "prpndnp", "efryqe", "pwuqfy", "wpqlfs", "aeswq",
"cxkeiue", "jydxzfi", "tzfvwp", "zzgtw", "mupiusx", "sojavt", "dxmsgq", "migjiyj", "kixjk", "ywwvcpl",
"khzcuo", "oykhx", "fochin", "foxbfkc", "sizjg", "wrjcvr", "ceadd", "tvfqgxq", "whzhche", "dcoeti", "mpilfib",
"cphie", "ucpnjm", "ajltvx", "kpizym", "vevfsrs", "jznrri", "yvhxomr", "cbcnk", "yuwuhu", "jywuzed", "kqakusq",
"jrnzgfo", "mjimzz", "mfjybnd", "ntqyq", "junxxck", "myvqajv", "kvuqs", "obfxw", "jwuba", "vnrvzvy", "aeric",
"vtgda", "nkrocpt", "ahitg", "dzxtr", "zswwc", "yhxap", "fdhiwr", "cpxtqv", "izbmo", "zyioo", "vysnoe",
"ouuyvj", "cumdhzn", "dbsmph", "cktjem", "vbmxy", "utgfyhc", "rqdeorp", "btnlmd", "chxwlt", "nsghoqi",
"egycsm", "wkanat", "lzjyf", "donyx", "cchqsa", "xozzz", "yzmnf", "jfzuh", "dpcpg", "hlahz", "vobopk",
"lssfeli", "ccttzi", "glzgqpv", "oyqzug", "qqhkrr", "euwotv", "hwbmtz", "hiylhly", "bppzne", "yetyyvs",
"cnbwcby", "hzblk", "pfjmxt", "dsxvt", "vvkju", "zjrfr", "gdbhb", "udoad", "nbhpzfm", "iwetbym", "atmly",
"tnxli", "myegb", "hiwqsk", "btrajk", "nhrmwn", "ftmbecv", "xopht", "eiikqy", "qizanwa", "cwxiatf", "jshjva",
"llrtkn", "zhivu", "lmwiu", "oaeaqz", "oxotfub", "jnkafm", "juhrmq", "mqzbtw", "puiaxty", "dnahvoj", "gaxhz",
"xfnay", "iqmlnlq", "xudhcg", "izpkz", "tqttmt", "bwnbs", "fdufd", "vhzyymh", "zhqtxr", "evbcrv", "xvnma",
"dgcwy", "cwxzlbz", "oodiol", "teyim", "kqqfjub", "ftsqzi", "arfztkr", "oqlujx", "rpkkdov", "ptoff", "ivxaxr",
"nxeept", "cacpl", "tehir", "spvggl", "qfzxkn", "bhwkukx", "fkdpuq", "xdrngre", "fnfplq", "dzbrl", "ufgxu",
"sciec", "fgdydvw", "nmpaqxi", "ydsvfv", "natjz", "lruyvzf", "xznznxp", "mhfrh", "kddsk", "uwatn", "uklzs",
"lnuta", "ryizc", "cvwko", "tnzpk", "ywpiv", "vbvcagq", "pzolw", "nmyfhg", "cshkofj", "ksptw", "kqejh",
"zgzjqzo", "mxzrw", "enabosq", "vmubgc", "sfzcj", "hewvk", "ewhrq", "oifnsmi", "izdnvu", "cshgtk", "mqotuhd",
"gnqgj", "rxailbm", "iyhxvtu", "ncjzklq", "zjmnoc", "awqwos", "ugujppc", "spbvfwl", "gntsvo", "euksu",
"qnvneph", "crhmf", "brktmf", "mvgmr", "yzcskrp", "tihawec", "edqmxpn", "fxyymlr", "dzfkucm", "prldz",
"gplrlhz", "bohwr", "bhebbk", "mmecj", "segydd", "ptslsb", "pyhgw", "cwmrq", "mjfhflh", "xhuid", "npxmb",
"izilq", "dczhqh", "tgfnxtb", "zrylvo", "lctxrar", "ylhrbii", "rfxedv", "llvhzjq", "bjocv", "wbnex", "cnohnf",
"xahrl", "rouvwyc", "hbhovgv", "dhucp", "ncmff", "ncsskg", "gsjbyin", "lroxscf", "whfaenl", "vsfultg",
"floxkpy", "captoai", "qwolyex", "ggaypn", "wzunypd", "pjixeu", "gxnjkoc", "pqiqhn", "xakjmgz", "vqizkx",
"gdzcxr", "kyxwdd", "pgxmazn", "qeuwf", "bduknm", "tcrcn", "nehgee", "wktbcgu", "jwqltdt", "wczkai", "drkqs",
"qhdqnn", "oobxirc", "lbunv", "ifscr", "xnfpbrw", "yrrdbax", "fbocs", "tewne", "iobixe", "zgosas", "yhesn",
"xlqwd", "pfcen", "slsjffx", "ilwatrc", "mhsmgp", "iteghl", "aqhufdl", "kxgpqcu", "ryrcgp", "azidf", "smlnl",
"rocxvbt", "iutfc", "loapgbr", "musulp", "dqcnj", "tpgbkfh", "wvskii", "itkfopo", "kytyb", "rzahbu", "aewptd",
"ohergbb", "cadxh", "aphwelj", "huooyzn", "gtttia", "izeyhcr", "cfvxz", "aitaxyp", "vypqost", "ebfnmif",
"kgiucm", "zryyu", "oxgnbpt", "frpwo", "ouqvodl", "pdaazh", "gxwmf", "dozxsjm", "yndpsik", "zcwvu", "mihug",
"jgodklw", "ysklw", "cfxqv", "yqvtz", "rctnp", "xjywa", "kpqyw", "hhtegzt", "rnwbeoi", "uyxqum", "jahcwbe",
"jzjns", "ovwoaz", "oqmsrua", "natbejl", "deffv", "okgbr", "paqhy", "jkafhte", "lifsknp", "afmskh", "oemdro",
"oxuwov", "qtyxa", "hkpfsm", "ulaubn", "tciurw", "myohwlo", "okuiejb", "ormoqsb", "gmipz", "hterzir", "ekxzre",
"xkevge", "ihenf", "nnhzv", "eocjmx", "upzal", "oounfko", "myhbwub", "fwipva", "pkzzvpd", "nrupm", "vluzq",
"fxkoyho", "atzktr", "aomrp", "qwpser", "ejagmb", "cfigelm", "bvanb", "cgcgabo", "hmjvlqt", "hxxocf", "ftqaud",
"htuipy", "bhwmcn", "tgyvaqe", "lvuwh", "yiabzs", "rzzavu", "fiubm", "uuqsb", "riyakuf", "psscffd", "kvckzr",
"fktmnf", "ivzqexi", "nhxzm", "kffjmb", "vdzxv", "esago", "bfikw", "gaiuxmz", "volokcm", "jypcs", "psibvs",
"hxaxklf", "lmqwgy", "spnbimo", "mtihak", "xikoiy", "rmmtv", "phaqgxj", "zcuwkhk", "emodbyb", "ztahsya",
"ieiqm", "lfoquh", "emznnq", "pnhlgut", "pgvads", "cqsjx", "lxnjei", "zpque", "rdjbiyb", "sxedpu", "potnqva",
"iirkn", "rjmnrxd", "ksgcd", "waeymnh", "tizdz", "kproa", "wpttygd", "lvyze", "peewvgm", "fwtyzbw", "zitkk",
"gfgqr", "udgvlz", "swqspo", "ohhvyq", "kgyuau", "hcerp", "pdomlm", "twabkk", "zfsea", "epiwp", "xgycjpt",
"jtkdh", "mxmdm", "rtkzm", "qkacy", "nuvdiq", "agctak", "hypgyh", "ewtjp", "paysolw", "bcutebe", "xelxyb",
"gzdvrth", "vpzfv", "cxrkt", "admiyzi", "lqlmn", "zbjpbg", "tlvdnli", "zetnox", "ylcsobo", "balajod", "igoume",
"sxcgw", "sbkkafk", "fmndnnw", "incsa", "jyupkg", "uhvvc", "rswnbth", "nvprfj", "figqf", "znyidqi", "aijper",
"euidr", "dftxkze", "vnppi", "splwifc", "fprgafl", "ixzaz", "mrhqtne", "dtkjsy", "dsmqrgy", "xfscz", "cymvmpu",
"vptkfdx", "zrgrjq", "mqvwsur", "hdtlw", "ugdpwun", "cvxitc", "vytvqg", "pmtpfz", "nfdtdt", "umvwjuc", "jouxc",
"qpypri", "pdhqp", "lmise", "wlsvcfg", "aqdkzcb", "qlrmrfz", "pbgoyi", "xmsskoh", "jjdye", "xvsdmq", "ymjeipy",
"igjyv", "uiojvmc", "uckoww", "grlnyeg", "hpglp", "omnnyy", "iiliir", "cnucbcx", "pcxvs", "hipad", "xmiltkj",
"oorwi", "qgoxjj", "jnmviqs", "wpleqn", "tudxw", "pcogem", "hgewaf", "niwfexy", "vcttgcb", "anjgovq",
"epgmscd", "mdtru", "xvapv", "rydjik", "kopppcr", "mjbsmu", "unxoakz", "ldpsw", "frksjr", "vyxxg", "yyydri",
"szidq", "qvbtd", "qratl", "xwfov", "bzhqyxl", "fskrtf", "pcpzmnv", "xuxwx", "vzbevnb", "ebaqz", "dbpuek",
"ooqwj", "gaimp", "coelqh", "bwuceq", "oxpfjt", "zrqyc", "rwllk", "pqunv", "ufbnn", "tbnjoz", "kkqmrxu",
"qyyrm", "hislf", "wyuck", "ubpre", "pdioi", "aryhv", "vdcxv", "rkgmaag", "czlzokw", "gtxuduz", "grpijx",
"qzrar", "qhues", "rmznt", "sxxmved", "onjzuwl", "atbjhip", "nrardl", "alrocy", "cfkip", "ihtbf", "pqdgm",
"hmokun", "dpghac", "otwml", "mnbzwa", "ehetlt", "rchvq", "lwjgywn", "lzdmjo", "nvhohdp", "tmshcpc", "gavjv",
"ycnkv", "uynzh", "bvpnfjq", "lfbem", "qberui", "vrmmhx", "wpbqtfq", "jujpx", "dujgkof", "hrpbso", "zhcdt",
"iybngyb", "rgeruza", "nesyxr", "cihgfe", "hjgskb", "zspxeqm", "inzrgyd", "crkjq", "iooshwp", "muvvj", "wakis",
"rowibwa", "qikwypf", "aportho", "pubcgx", "vqoqpfi", "rnpbri", "ussjv", "looor", "xkzvdv", "tstegg",
"zgiiokw", "rwvyaun", "mqqla", "asnqp", "nghuryl", "hlvhn", "ecuotnu", "judvbu", "xgvuw", "oeckn", "hdhttsg",
"hcyhu", "klbyjc", "tnrmqnc", "mjojxhi", "kvdet", "vbmevim", "oglrzs", "afbscdi", "zxrffti", "firzgmz",
"oenim", "wgpua", "asiep", "kyteq", "wpeneca", "qixmeoq", "zaofon", "csxxtr", "cpwmnl", "feylas", "idjuo",
"mrtpvta", "jjvmjy", "mnljocc", "lnvjleq", "oognud", "rbyneq", "rhvomm", "fldrkpk", "znvrp", "myswmz", "jiloe",
"juivjmo", "ylhbyzl", "ndmabkt", "sgdvlq", "pmnddmi", "utpuj", "kfisv", "nxfeell", "mxhgqd", "ccvdsdg",
"emtybo", "zmkylbt", "mmrpi", "dkwlgq", "iwlappb", "uimsrnu", "mkxaxmi", "tcvll", "njggal", "kmqud", "evgzlh",
"oaxizbp", "jiuej", "xknlp", "cyksydh", "gbixmz", "vtouyk", "sxjpkio", "qhubt", "kflvnb", "sjdfggl", "bxozyj",
"xekbh", "wtmcb", "xtapfco", "rnornl", "ursdpki", "waonim", "eibfyed", "zniinaz", "uyfohq", "qcaxlt",
"koyaapa", "pjuvbsi", "ecpdl", "ifaqwm", "yyumzc", "gvfngfp", "lttul", "flyza", "uasdlme", "oklhb", "wulkzzv",
"ziwsxo", "jqcxiu", "qdzrwgm", "zjdwy", "uumns", "emlnp", "irnrqp", "gqkza", "oynpcz", "yxyea", "zpamf",
"gyehxbv", "nplkhcc", "rxeekyo", "kecgp", "gseju", "nkisxqf", "vlyud", "fxxihhm", "yjgtml", "fehwpdi",
"wclnvyy", "lriwrc", "ikparv", "volfh", "ysphh", "szrvrv", "rqlmz", "jyqut", "fyftsj", "uvwfip", "rngwgm",
"mjwaz", "roehjki", "ploxokr", "yjbalp", "fspkq", "yfxrb", "kzulvk", "ordxp", "vdrrt", "wdiojwd", "ridzl",
"niykdvu", "whyycmn", "riwcma", "bkhgkrb", "nsine", "emgtgf", "zoymw", "ljtvhzb", "kfyfdma", "piygxdl",
"onfwgdf", "fwmkm", "vqbljay", "icife", "bxfli", "yeygr", "qenhgm", "mtxuckj", "kdcyx", "kwqhfcn", "ywkfy",
"prbpw", "pheyc", "kmnds", "cacqs", "kvekiqy", "bfvfhdy", "gxulp", "skmcra", "exomt", "lcxue", "mnvvday",
"rsddl", "gooegc", "udght", "doymnin", "ccdap", "wuive", "dyyln", "rynust", "luxabyg", "kdkkyyw", "vawqfsy",
"rmeswm", "rcxzyv", "clpowz", "pdntqm", "tvjkkmz", "iiclw", "nhudzen", "cybhu", "crwtw", "enypnh", "ygekg",
"hrjwqt", "peissge", "wangcy", "rbpoik", "raqulbf", "gyisnsj", "rgbqn", "lgvuzb", "djicf", "epnuu", "nsapc",
"voatgh", "yorfehc", "jxfttat", "wyuivb", "bwopl", "odwdsh", "anchkv", "sepvew", "qoxxmae", "bpvqnj", "sngfo",
"buoazou", "zhijssa", "janng", "uvdbd", "yfvkqo", "lcjii", "mvacvrz", "xztiar", "lpbtrqa", "ukbpdx", "okaqpgr",
"idgqlj", "ewglgo", "ruymhi", "pcidw", "bvuqj", "npzch", "yppyan", "oiguirj", "iijvwqj", "jvbwjys", "yjtunfc",
"iaikra", "oduhdgk", "ivixur", "ibcgai", "djzvcbx", "lmtsul", "lgnwzol", "wursq", "xsxbqwq", "jqvwnc",
"dcwwvtb", "vwybnr", "bughwjl", "rnelxb", "hmacv", "ufgdygl", "aabuat", "oynwask", "gnfjjf", "zipbq", "zxstn",
"jdrbprf", "jmkvny", "rblpql", "vykdj", "qaakyqw", "osbhddb", "avgldyy", "kvpoa", "fnqcliu", "zzlninw",
"drsal", "omswys", "hwqcpct", "ecraq", "fvhsbjq", "raauy", "pfmoz", "vvqvcm", "tbjqjun", "jcfbegq", "otiwup",
"axvvce", "dhpdnx", "pennr", "hvvmvzv", "binezl", "ygdmcuo", "ypwnqn", "aloxdv", "ucieh", "kovbtag", "rgfpaww",
"fpbftg", "spjowfr", "zridoy", "blwbbf", "evwlxi", "itbcz", "hgixuo", "qmoqmjb", "tkeeis", "pjiaq", "rbpje",
"ledoui", "ubecht", "mphdd", "uzswsbb", "ntsybr", "qmnijyp", "pqwawe", "ltytill", "dpnxy", "pkxqcol", "ayrdi",
"mycnd", "knotsn", "zvcrjl", "qwroblg", "vtrktey", "dzilezi", "wzkxg", "varqc", "xlpttyc", "xxqhnl", "jpxywa",
"kjdsh", "hdseebw", "bxqbp", "flazqce", "xrtab", "rupsfq", "asswer", "rhqof", "hjzdv", "addsgax", "cuahzjj",
"xwdilr", "osqgg", "pfhwv", "rqorah", "ggdlnv", "truvaoj", "jzuldwf", "mjddj", "vixtn", "eslxoaj", "cmoypm",
"jvvzs", "oqgxcc", "tptls", "wwgwbj", "tysuhg", "xbnqb", "iogjvg", "fbxdmr", "zdvsmx", "hiuja", "watrt",
"kjawab", "entxk", "jmnkaox", "zznsox", "asmzc", "soblvp", "quyxjw", "udrdc", "hyylvvw", "gzfwxuv", "jjqmjw",
"faegxbl", "lqjcg", "bzmruq", "bykuh", "miwhd", "ykgtwhk", "oyobzwi", "oltwpua", "ctulabr", "dwandd", "vhuhox",
"vtlknw", "ywvln", "qemqdeg", "akezvx", "kjmjpv", "vwuftx", "kreaxnj", "fvfop", "cxabs", "jfacbje", "eecnz",
"cmblit", "gfvpoq", "whywnh", "pghvx", "ohgkmf", "xxtiwd", "nkojni", "dlcicnp", "bwyvyyd", "gifup", "vgjfr",
"hhteifi", "kjhffq", "pawqaxl", "yozro", "slxluvd", "amqcquy", "vnnxkr", "wgdur", "rvawiu", "thcwnc", "cddut",
"vnrtrv", "fnfio", "nhvxe", "rfdqmj", "ucblh", "ccbnt", "lxckaoy", "fnwcbx", "gmdbiwt", "ypvwjy", "cbjazk",
"qmujnm", "nsqot", "lhcqt", "ijxcts", "nujrms", "itxel", "ghukr", "qpwitlr", "gcafqrn", "lcoho", "lfzab",
"vwhgceb", "vgsgy", "jrtgo", "ryxlz", "deoyq", "ybenly", "lyysca", "sodvazo", "hbnnoz", "ovgvda", "elwtjx",
"soydmn", "trdsi", "mwwjwo", "vupwj", "dszpcv", "kkhjdj", "ewmyo", "nmpeq", "oepldcq", "xttrgu", "wbcbxi",
"jakzk", "peukyw", "fvcqv", "xklwuu", "hsmva", "kslmkq", "azllbig", "stnzih", "wfyud", "ihauy", "cfxmj",
"pdyogwv", "dcqdpa", "xhusy", "jfpmpmm", "odeiiw", "ozyaer", "uykzvma", "tuaznxj", "kdnbdki", "syrnsem",
"fdysz", "hhrpo", "fglzfi", "vgcqzqm", "qhsjr", "bvboe", "dpfwpvg", "mvvry", "itnnr", "lgykbe", "pscow",
"mkrgeqv", "czffv", "apteht", "jeqixsx", "ksmbe", "zamivv", "vvmyo", "cwwoce", "sppubxc", "qaich", "nmbxr",
"tfkwfxi", "iakhezl", "fxujis", "fkwffe", "antaylq", "mmfgstq", "zxaacy", "zlswx", "pbqxil", "eupck",
"qzcxpbe", "rjalbzr", "wioagbq", "kreec", "zsdcuft", "rrdzb", "ocdlvq", "oxiroo", "zcxsqh", "wbrsi", "fqike",
"oskzupi", "thvof", "dicbyst", "iojwe", "hyfizq", "yoknhww", "nupiyyn", "ievah", "slcgmxg", "cnecpa", "lcwsoj",
"hnqsc", "ghipbi", "exobr", "nwpnq", "dmhbj", "amdbmwl", "xfbzovs", "puizvu", "yvsus", "ykysqg", "bgqdv",
"zgqbr", "zkjpkej", "crkot", "zciymk", "tleogn", "sayrmz", "elwma", "zugjva", "uifwsmw", "wstrg", "xbotd",
"hinsg", "qpgyoyp", "xzfocdy", "mbvuepb", "dtphufk", "cyapnt", "yyehhad", "ohdrd", "mlibm", "qzdfil",
"rdwszqx", "bzcbmyn", "uarjlg", "mtwpqmx", "nmagl", "cepniel", "tylvaa", "melhd", "jygeneg", "fdglfy",
"xcpciu", "ayrel", "bxceshv", "kspyg", "iclkaz", "ykbzt", "nrnkzo", "kxkto", "fabzszn", "edalls", "nilmh",
"wwawgnn", "gymbtx", "mzipa", "ajevx", "qppisv", "otqhsf", "ippxak", "bixnqd", "uqitwo", "soxcug", "loiscd",
"wqrjk", "rqntoa", "fzpxlp", "tuaob", "pyqqms", "krbzmmj", "aijqpfg", "nstqrbu", "wmtiahz", "joplby", "jyszxq",
"jnxtyhe", "lbvfv"]) == 14011
|
8,514 | 62fc71e26ba3788513e5e52efc5f20453080837d | class Solution:
def projectionArea(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
res=0
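        # projection area = sum of row maxima + sum of column maxima
        # (via zip(*grid)) + one unit for every non-zero cell (the top view)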
for i in grid:
res+=max(i)
for j in i:
if j:
res+=1
for k in zip(*grid):
res+=max(k)
return res
|
8,515 | 324030a976af29dc93fdb637583bfaab93671cc2 | # Python 2.7 Doritobot Vision System
# EECS 498 Purple Team, 2014
# Written by Cody Hyman (hymanc@umich.edu)
# Written against OpenCV 3.0.0-alpha
import sys
import os
import cv2
import numpy as np
from uvcinterface import UVCInterface as uvc
from visionUtil import VisionUtil as vu
from collections import deque
from math import *
# Calibration state 'Enumeration'
class CalState(object):
UNCAL = 1
CAL_PROG = 2
CALIBRATED = 3
### Vision System Class ###
class VisionSystem(object):
# Window names
CAM_FEED_NAME = 'Camera Feed'
CAL_NAME = 'Calibrated Image'
PROC_NAME = 'Vision Processing'
CTL_NAME = 'Filter Controls'
# Constants
G_CENTER = 52
R_CENTER = 0
SMIN = 50
VMIN = 80
#HISTORY_LENGTH = 15
EMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0]
RAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32)
FIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel
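    # FIR_KERNEL is L1-normalized so its taps sum to 1; filterPoints below
    # evaluates norm(kernel * history, 1), which equals the weighted moving
    # average as long as the samples are non-negative (the L1 norm takes
    # absolute values).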
def __init__(self, camera):
### Instance Value initialization ###
self.camera = camera
self.calstate = CalState.UNCAL
self.calpts = []
self.XSIZE = 1000
self.YSIZE = 1000
self.x_est = -1
self.y_est = -1
self.theta_est = -1
# Drawing storage
self.waypointEst = [(300,300)] # Waypoint estimates for UI
self.tagLoc = (10,10) # Tag location estimate
self.fVectorStart = (0,0)
self.fVectorEnd = (0,0)
#self.worldpts = np.float32([
# [0,self.YSIZE/2],
# [0,0],
# [self.XSIZE,0],
# [self.XSIZE,self.YSIZE/2]
# ])
# ===== ***** Calibration points from world *****===== #
'''self.worldpts = np.float32([
[-5, -1. * -105], #22
[90, -1. * -100], #27
[90, -1. * 110], #26
[0, -1. * 107] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
# Swap x-y coordinates (WTF!)
'''self.worldpts = np.float32([
[-105,-5], #22
[-100, 90], #27
[110, 90], #26
[107, 0] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
self.worldpts = np.float32([
[-104,-2], #22
[-104,85], #27
[115,84], #26
[115,3] #25
])
self.worldpts = vu.toImageCoordinates(self.worldpts)
testPts = vu.toWaypointCoordinates(self.worldpts)
print 'TestWorldPts', str(testPts)
# ===== *************** ===== #
### Camera initialization ###
print 'Opening Camera ' + str(camera)
self.vidcap = cv2.VideoCapture(camera)# Open up specified camera
# Check if camera is opened and exit if not
if self.vidcap.isOpened():
print 'Camera ' + str(camera) + ' opened successfully'
else:
print 'ERROR: Camera ' + str(camera) + ' not opened'
return False
# Set camera autoexposure
uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1)
uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0)
### Initialize UI elements ###
# Filter Controls Window
ctlWindow = cv2.namedWindow(self.CTL_NAME)
cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler)
cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler)
# Camera input window
camWindow = cv2.namedWindow(self.CAM_FEED_NAME)
cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged)
cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged)
cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged)
cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration
# Rectified/Calibrated Image window
#calWindow = cv2.namedWindow(self.CAL_NAME)
#cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler)
# Image processing Window 2
procWindow = cv2.namedWindow(self.PROC_NAME)
# History for filter bank
self.xHistory = deque(self.EMPTY_KERNEL)
self.yHistory = deque(self.EMPTY_KERNEL)
self.thetaHistory = deque(self.EMPTY_KERNEL)
# Run vision on a frame
def processFrame(self):
### Main processing loop ###
#while(True):
frameRet, self.camImg = self.vidcap.read()
#Img = self.drawCalMarkers()
cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())
if(self.calstate == CalState.CALIBRATED):
self.remapImage() # Apply perspective warp
bl = cv2.getTrackbarPos('Blue', self.CTL_NAME)
gr = cv2.getTrackbarPos('Green', self.CTL_NAME)
rd = cv2.getTrackbarPos('Red', self.CTL_NAME)
bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)
gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)
rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)
smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME)
bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)
bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)
gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)
rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)
#vu.printCentroids(gCentroid, rCentroid)
if(bgroundFlag):
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)
else:
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)
ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)
if((ctr != None) and (theta != None)):
fctr, ftheta = self.filterPoints(ctr, theta)
self.x_est = ctr[0]
self.y_est = ctr[1]
# print 'Theta IN:', theta
self.theta_est = theta#ftheta
self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location
vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))
if(gCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))
if(rCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))
if(bCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))
wpIndex = 0
for wp in self.waypointEst:
wpIndex = wpIndex + 1
if(wpIndex == 1):
wpcolor = (0,0,255)
else:
wpcolor = (0,255,255)
                vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor)
vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index
if(self.tagLoc[0] != None):
vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))
#vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255))
#cv2.imshow(self.CAL_NAME, self.warpImg)
cv2.imshow(self.PROC_NAME, self.rgbImg)
#if cv2.waitKey(20) & 0xFF == ord('q'):
# break
# Use current perspective transform to remap image
def remapImage(self):
if(self.calstate == CalState.CALIBRATED):
self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE)))
self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1)
self.warpImg = cv2.medianBlur(self.warpImg, 5)
else:
print 'Transform not calibrated'
# Draws calibration markers on the camera image
def drawCalMarkers(self):
markedImg = self.camImg.copy()
for pt in self.calpts:
vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255))
return markedImg
# Finds a marker's central moment
def findMarker(self, image, hueCenter, hueWidth, satMin, valMin):
hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255]))
cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median
markerImg = cv2.dilate(markerImg, cleanElement)
markerImg = cv2.medianBlur(markerImg, 3)
mMoments = cv2.moments(markerImg) # Compute moments
m00 = mMoments['m00']
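        # centroid from raw moments: (m10/m00, m01/m00); a near-zero m00
        # means the mask is empty, so no marker is reported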
if(m00 > 0.1):
return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg
return None, markerImg
# FIR on centers and angles
def filterPoints(self, ctr, theta):
if((ctr != None) and (theta != None)):
if(len(self.xHistory) == len(self.FIR_KERNEL)):
self.xHistory.popleft()
if(len(self.yHistory) == len(self.FIR_KERNEL)):
self.yHistory.popleft()
if(len(self.thetaHistory) == len(self.FIR_KERNEL)):
self.thetaHistory.popleft()
self.xHistory.append(ctr[0])
self.yHistory.append(ctr[1])
self.thetaHistory.append(theta)
xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1)
yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1)
thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1)
#print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta
return (xFilter, yFilter), thetaFilter
# Interface to get current state estimates
def getState(self):
# Give estimated [x,y,theta]
if(self.tagLoc != None):
tx = self.tagLoc[0]
ty = self.tagLoc[1]
else:
tx = None
ty = None
return [self.x_est, self.y_est, self.theta_est, tx, ty]
### Event Handlers ###
# Camera input mouseclick handler
def mouseClickHandler(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
print 'Recalibration requested'
self.calstate = CalState.CAL_PROG
self.calpts = [] # Reset calibration points
if event == cv2.EVENT_LBUTTONDOWN:
print 'Mouse left click event at ' + str(x) + ',' + str(y)
if(self.calstate == CalState.UNCAL):
self.calstate = CalState.CAL_PROG
print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'
self.calpts.append([x,y])
elif(self.calstate == CalState.CAL_PROG):
if(len(self.calpts) < 4):
print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'
self.calpts.append([x,y])
# Finish
if(len(self.calpts) == 4):
print 'Calibrated'
self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts)
print str(self.calpts)
self.calstate = CalState.CALIBRATED
elif(self.calstate == CalState.CALIBRATED):
print 'Already calibrated'
# Color click handler for cal window
def colorClickHandler(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print 'Checking marker 1 color at ', str(x), ',', str(y)
pass # Get color at point
if event == cv2.EVENT_RBUTTONDOWN:
print 'Checking marker 2 color at ', str(x), ',', str(y)
pass # Get color at point
    # Generic do-nothing trackbar change handler
def trackbarChangeHandler(self, x):
pass
# Gain slider handler
def gainChanged(self, gain):
uvc.set(self.camera, uvc.GAIN, gain)
# Saturation slider handler
def saturationChanged(self, sat):
uvc.set(self.camera, uvc.SATURATION, sat)
# Exposure slider handler
def exposureChanged(self, exp):
uvc.set(self.camera, uvc.EXPOSURE_ABS, exp)
# Sets the waypoint list for rendering on overlay
def setWaypoints(self, waypointEst):
self.waypointEst = vu.toImageCoordinates(waypointEst)
# Sets the estimated tag location for rendering on the overlay
def setTagLocation(self, tagEst):
self.tagLoc = (int(tagEst[0]),int(tagEst[1]))
# Stops the vision process
def stop(self):
self.vidcap.release()
cv2.destroyAllWindows()
# Main function to run vision system as standalone
def main():
    print 'Args:' , str(sys.argv)
    ncam = 0
    for x in range(len(sys.argv)):
        if(sys.argv[x] == '-c'):
            ncam = int(sys.argv[x+1])
    vs = VisionSystem(ncam)
    # Run until 'q' is pressed, then release the camera and windows
    while True:
        vs.processFrame()
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    vs.stop()
if __name__ == '__main__':
main()
|
8,516 | 472cdca501890d1d07c7363a48532ed3a184727c | """This is a collection of utilities for httpy and httpy applications.
"""
import cgi
import linecache
import mimetypes
import os
import stat
import sys
from Cookie import SimpleCookie
from StringIO import StringIO
from urllib import unquote
from httpy.Response import Response
def uri_to_fs(config, resource_uri_path, defaults=[], raw=False):
"""Map a requested URI to the filesystem.
Takes a TransactionConfig object, a URI path, and a list of filenames which
should be considered default resources.
The URI path is taken to be rooted literally in the filesystem root (which
could be a site root or an application root). If it points to a directory,
we look for a default resource if any are named. If it points to a file, we
make sure the file exists.
This method can raise the following Responses:
301 Moved Permanently
400 Bad Request
403 Forbidden
404 Not Found
If successful, we return the filesystem path to the particular resource.
"""
# Knit the requested URI onto the application root.
# =================================================
if config.app_fs_root == config.site_root:
_parts = resource_uri_path.lstrip('/').split('/')
else:
uri_below_app = resource_uri_path[len(config.app_uri_root):]
_parts = uri_below_app.lstrip('/').split('/')
_parts.insert(0, config.app_fs_root)
resource_fs_path = os.sep.join(_parts)
resource_fs_path = os.path.realpath(resource_fs_path)
if raw:
return resource_fs_path
if os.path.isdir(resource_fs_path):
# Process the request as a directory.
# ===================================
if not resource_uri_path.endswith('/'):
# redirect directory requests to trailing slash
new_location = '%s/' % resource_uri_path
response = Response(301)
response.headers['Location'] = new_location
log(98, "Redirecting to trailing slash: %s" % resource_uri_path)
raise response
log(98, "Looking for these defaults: %s" % str(defaults))
default = ''
for name in defaults:
_path = os.path.join(resource_fs_path, name)
if os.path.isfile(_path):
default = _path
break
resource_fs_path = default
if not default:
log(95, "No default resource in %s" % resource_uri_path)
raise Response(403)
else:
# Process the request as a file.
# ==============================
if not os.path.exists(resource_fs_path):
log(95, "Did not find %s at %s." % ( resource_uri_path
, resource_fs_path
))
raise Response(404)
return resource_fs_path
# Following are some parsers useful for dynamic applications.
#
# While httpy.Request keeps close to the HTTP layer, any dynamic application
# will need to comprehend application-specific information encoded in the
# Request. The functions below return representations of such information as
# objects from the standard library.
#
# function uses returns
# =========================================================
# parse_query uri['query'] cgi.FieldStorage
# parse_cookie message.get('Cookie') Cookie.SimpleCookie
# parse_post raw_body cgi.FieldStorage
#
#
# These functions are not used in httpy proper and are not unittested yet.
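# A minimal usage sketch (hypothetical handler code; 'request' is assumed to
# be an httpy Request):
#
#   query = parse_query(request)
#   name = query.getfirst('name', 'anonymous')
#   cookie = parse_cookie(request)
#   session = cookie['session'].value if 'session' in cookie else None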
def parse_query(request):
"""Translate request's querystring into a cgi.FieldStorage.
"""
querystring = request.uri['query']
fp = StringIO(querystring)
headers = {}
headers['content-type'] = request.message.get('content-type')
headers['content-length'] = request.message.get('content-length')
environ = {}
environ['REQUEST_METHOD'] = request.method
boundary = request.message.get('boundary')
query = cgi.FieldStorage( fp = fp
, headers = headers
, outerboundary = boundary
, environ = environ
, keep_blank_values = True
, strict_parsing = False
)
return query
def parse_cookie(request):
"""Translate request's cookie into a Cookie.SimpleCookie.
"""
raw_cookie = request.message.get('Cookie','')
return SimpleCookie(raw_cookie)
def parse_post(request):
"""Translate request's body into a cgi.FieldStorage.
"""
fp = StringIO(request.raw_body)
headers = {}
headers['content-type'] = request.message.get('content-type')
headers['content-length'] = request.message.get('content-length')
environ = {}
environ['REQUEST_METHOD'] = request.method
boundary = request.message.get('boundary')
post = cgi.FieldStorage( fp = fp
, headers = headers
, outerboundary = boundary
, environ = environ
, keep_blank_values = True
, strict_parsing = False
)
return post
# Chad's logging util.
def log(verbosity, message):
if int(os.environ.get("HTTPY_VERBOSITY", 0)) >= verbosity:
print "%d %s" % (verbosity, message)
        sys.stdout.flush()
# Steve's logging util.
from StringIO import StringIO
import threading
class dummy_outputer:
def __init__(self): pass
def write(self,*outputs): pass
def writeln(self,*outputs): pass
def __call__(self,*outputs): pass
def dump(self): pass
def pdump(self): pass
class outputer:
"""
This is an initial implementation of an outputer class that acts
like print but adds a couple of features:
1) verbosity
2) buffering
3) output to places other than stdout
Example usage:
>>> out = outputer(1)
>>> out.write('hey')
>>> out.v2('hey','you')
>>> out.v1('hey','you')
>>> out.pdump()
heyhey you
>>> out('ack')
>>> poo = out.dump()
>>> poo
'ack '
"""
def __init__(self,verbosity=0,vlevels=5,parentFirst=None,parentContents=None):
self.parentContents=parentContents
self.first=threading.Event()
self.parentFirst = parentFirst
self.contents=StringIO()
if not self.parentContents:
for i in range(vlevels-1):
v=i+1
if v<=verbosity:
v_outputer = outputer(parentFirst=self.first,parentContents=self.contents)
else:
v_outputer = dummy_outputer()
setattr(self,'v%s'%v,v_outputer)
def write(self,*outputs):
for output in outputs:
if self.parentContents:
self.parentContents.write(str(output))
self.contents.write(str(output))
def writeln(self,*outputs):
if not outputs:
outputs=['']
if not self.first.isSet():
self.first.set()
else:
self.contents.write('\n')
if self.parentContents:
if not self.parentFirst.isSet():
self.parentFirst.set()
else:
self.parentContents.write('\n')
for output in outputs:
self.write(output)
self.write(' ')
def __call__(self,*outputs):
self.writeln(*outputs)
def dump(self):
self.contents.flush()
self.contents.seek(0)
output = self.contents.read()
self.contents=StringIO()
self.first.clear()
return output
def pdump(self):
print self.dump()
|
8,517 | 05d6f15102be41937febeb63ed66a77d3b0a678e | import time
import itertools
import re
from pyspark import SparkContext, SparkConf
from pyspark.rdd import portable_hash
from datetime import datetime
APP_NAME = 'in-shuffle-secondary-sort-compute'
INPUT_FILE = '/data/Taxi_Trips.csv.xsmall'
OUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'
COMMA_DELIMITER = re.compile(''',(?=(?:[^"]*"[^"]*")*[^"]*$)''')
FIRST_KEY = 1
SECOND_KEY = 2
TRIP_END_TIMESTAMP = 3
TIMESTAMP = int(time.time())
def partition_func(key):
return portable_hash(key[0])
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = (entry[FIRST_KEY], entry[SECOND_KEY])
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
def create_pair_rdd(ctx):
rawRDD = ctx.textFile(INPUT_FILE)
headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))
rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))
validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)
pairRDD = validRDD.map(make_pair)
compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[TRIP_END_TIMESTAMP]))
return compressedRDD
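# Illustrative example of the pairing above (field values are made up): a raw
# row [id, 'taxi42', start, end, ...] becomes (('taxi42', start), (start, end)),
# so partition_func (which hashes key[0] only) keeps each taxi's trips in one
# partition while the composite key sorts them by start-timestamp string
# within that partition.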
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp() \
- datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
if __name__ == "__main__":
conf = SparkConf()
ctx = SparkContext(master="local[*]", appName=APP_NAME, conf=conf)
ctx.setLogLevel('INFO')
rdd = create_pair_rdd(ctx)
sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=partition_func,
numPartitions=4,
keyfunc=key_func,
ascending=True)
unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)
groupedRDD = unpairedRDD.mapPartitions(sorted_group, preservesPartitioning=True)
lossRDD = groupedRDD.map(calculate_loss)
lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))
ctx.stop()
|
8,518 | e00b81f73f4f639e008fde1a6b2d4f7937df4207 | #!/usr/bin/env python
host, port = "localhost", 9999
import os
import sys
import signal
import socket
import time
import select
from SocketServer import TCPServer
from SocketServer import StreamRequestHandler
class TimeoutException(Exception):
pass
def read_command(rfile,wfile,prompt):
def timeout_handler(signum, frame):
raise TimeoutException()
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(1)
try:
if prompt:
wfile.write('\n> ')
c = rfile.readline()
except TimeoutException:
c = ''
finally:
signal.alarm(0)
return c.strip()
class Control (StreamRequestHandler):
allow_reuse_address = True
def handle(self):
command = 'go'
prompt = True
while command not in ['quit','exit']:
# reading the command on TCP
# relaying it to exabgp via the socket
command = read_command(self.rfile,self.wfile,prompt)
prompt = False
if command in ['quit','exit']:
continue
if command in ['help','?']:
self.wfile.write('exabgp tcp-control help\n')
self.wfile.write('\n')
self.wfile.write('This program is just a way to manually enter commands using telnet\n')
self.wfile.write('routes and flows syntax are parsed like normal configuration\n')
self.wfile.write('\n')
self.wfile.write('quit (close the telnet connection)\n')
self.wfile.write('exit (close the telnet connection)\n')
self.wfile.write('\n')
self.wfile.write('version (returns the version of exabgp)\n')
self.wfile.write('reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\n')
                self.wfile.write('restart (reload the configuration and bounce all BGP sessions)\n')
                self.wfile.write('shutdown (politely terminate all sessions and exit)\n')
self.wfile.write('\n')
                self.wfile.write('WARNING : The result of the following commands will depend on the route\n')
self.wfile.write('WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\n')
self.wfile.write('\n')
self.wfile.write('The route will be sent to ALL the peers (there is no way to filter the announcement yet)\n')
self.wfile.write('\n')
                self.wfile.write('announce route\n')
self.wfile.write(' The multi-line syntax is currently not supported\n')
self.wfile.write(' example: announce route 1.2.3.4 next-hop 5.6.7.8\n')
self.wfile.write('withdraw route\n')
self.wfile.write(' example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\n')
self.wfile.write('announce flow\n')
self.wfile.write(' exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\n\n')
self.wfile.write(' example: announce flow route {\\n match {\\n source 10.0.0.1/32;\\n destination 1.2.3.4/32;\\n }\\n then {\\n discard;\\n }\\n }\\n\n')
self.wfile.write('withdraw flow\n')
self.wfile.write(' exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\n\n')
self.wfile.write(' example: withdraw flow route {\\n match {\\n source 10.0.0.1/32;\\n destination 1.2.3.4/32;\\n }\\n then {\\n discard;\\n }\\n }\\n\n')
self.wfile.write('\n')
self.wfile.write('SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\n')
self.wfile.write('AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\n')
self.wfile.write('\n')
self.wfile.write('show neighbors\n')
self.wfile.write(' display the neighbor configured\\n\n')
self.wfile.write('show routes\n')
self.wfile.write(' display routes which have been announced\\n\n')
self.wfile.write('\n')
self.wfile.flush()
prompt = True
elif command.startswith('announce '):
sys.stdout.write('%s\n' % command)
sys.stdout.flush()
                self.wfile.write('requested %s announcement\n' % command.split(' ')[1])
self.wfile.flush()
prompt = True
elif command.startswith('withdraw '):
sys.stdout.write('%s\n' % command)
sys.stdout.flush()
                self.wfile.write('requested %s withdrawal\n' % command.split(' ')[1])
self.wfile.flush()
prompt = True
elif command.startswith('neighbor '):
sys.stdout.write('%s\n' % command)
sys.stdout.flush()
self.wfile.write('neighbor %s requested\n' % command.split(' ')[1])
self.wfile.flush()
prompt = True
elif command.startswith('show '):
sys.stdout.write('%s\n' % command)
sys.stdout.flush()
self.wfile.write('%s requested\n' % command.split(' ')[1])
self.wfile.flush()
prompt = True
elif command in ['shutdown','reload','restart','version']:
sys.stdout.write('%s\n' % command)
sys.stdout.flush()
prompt = True
elif command not in ['go','']:
self.wfile.write('unknown command [%s], try: help\n' % command)
self.wfile.flush()
prompt = True
try:
r,_,_ = select.select([sys.stdin], [], [], 1.0)
except select.error:
raise KeyboardInterrupt('SIGNAL received in select')
if r:
self.wfile.write('\n')
while r:
# Can not use readline with select.
# From http://stackoverflow.com/questions/5486717/python-select-doesnt-signal-all-input-from-pipe
# Note that internally file.readlines([size]) loops and invokes the read() syscall more than once, attempting to fill an internal buffer of size. The first call to read() will immediately return, since select() indicated the fd was readable. However the 2nd call will block until data is available, which defeats the purpose of using select. In any case it is tricky to use file.readlines([size]) in an asynchronous app.
response = os.read(sys.stdin.fileno(),4096)
# this should not happen as select informed us of data to read but it seems it does
if not response:
break
self.wfile.write(response)
prompt = True
time.sleep(0.1)
try:
r,_,_ = select.select([sys.stdin], [], [], 1.0)
except select.error:
raise KeyboardInterrupt('SIGNAL received in select')
continue
def timed (message):
now = time.strftime('%a, %d %b %Y %H:%M:%S',time.localtime())
return "%s | %-8s | %-6d | %-13s | %s" % (now,'FORKED',os.getpid(),'tcp-server',message)
def sig (signum, frame):
# outch rude but prevent silly trace on exit if waiting for a read on stdin :p
os.kill(os.getpid(),signal.SIGKILL)
signal.signal(signal.SIGINT, sig)
signal.signal(signal.SIGTERM, sig)
count = 0
connected = False
class Server (TCPServer):
def server_activate (self):
print >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,port))
sys.stderr.flush()
TCPServer.server_activate(self)
while not connected:
try:
server = Server((host, port), Control)
connected = True
except socket.error:
count += 1
        if count % 10 == 0:
print >> sys.stderr, timed('tcp-server still trying to bind to %s:%d' % (host,port))
# we can not connect to the socket, retrying (happens if respawns too quickly)
time.sleep(1)
server.serve_forever()
|
8,519 | 8921c0a17e90f7113d1e0be630a15fc9d74d1780 | from web3.auto.infura import w3
import json
import os
with open("contract_abi.json") as f:
info_json = json.load(f)
abi = info_json
mycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)
myfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)
#myfilter.fromBlock = "16181508"
#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})
print(abi)
print (myfilter) |
8,520 | db33f7386d1eacbfbfd29aa367df310c557ae864 | km=float(input())
cg=float(input())
print(round(km/cg,3),"km/l") |
8,521 | 329451a3d3fa95f5572dc1701d1adbf4aaa72628 | import argparse
from ags_save_parser import saved_game
def report_mismatch(compare_result_list):
report = []
for i in range(len(compare_result_list)):
value = compare_result_list[i]
if value != '_':
report.append((i, value))
return report
def report_mismatch_for_module(
modules_1,
modules_2,
index):
module_1 = modules_1[index]
module_2 = modules_2[index]
if len(module_1) != 2 or len(module_2) != 2:
        raise AssertionError(
            "Module {}: expected length 2, got {} (len {}) and {} (len {})".format(
                index,
                module_1,
                len(module_1),
                module_2,
                len(module_2)))
print('Module {}\n'.format(index))
print(report_mismatch(modules_1[index][1]))
print(report_mismatch(modules_2[index][1]))
print()
def report_mismatch_all_modules(result):
scripts_1 = result[0]['scripts']
scripts_2 = result[1]['scripts']
modules_1 = scripts_1['modules']
modules_2 = scripts_2['modules']
print('Global Data:\n')
print(report_mismatch(scripts_1['global_data']))
print(report_mismatch(scripts_2['global_data']))
print()
for i in range(len(scripts_1['modules'])):
try:
report_mismatch_for_module(
modules_1,
modules_2,
i)
except AssertionError:
print('Module {} was a match'.format(i))
def report_honor(save_game_1, save_game_2):
print("Honor is 1 is {}".format(
save_game_1['scripts']['modules'][1][1][3076]))
print("Honor is 2 is {}".format(
save_game_2['scripts']['modules'][1][1][3076]))
def main():
parser = argparse.ArgumentParser(description='Parse Save File')
parser.add_argument(
'--file1',
dest='file1',
default='/home/krieghan/hq_saves/agssave.000.hqthor')
parser.add_argument(
'--file2',
dest='file2',
default='/home/krieghan/hq_saves/agssave.001.hqthor')
parser.add_argument(
'--full',
dest='full',
action='store_true',
default=False)
parser.add_argument(
'--catch-transition',
default=None)
parser.add_argument(
'--honor',
action='store_true',
default=False)
args = parser.parse_args()
save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)
save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)
from kobold import compare
result = compare.compare(save_game_1, save_game_2, type_compare='full')
modules_1 = result[0]['scripts']['modules']
modules_2 = result[1]['scripts']['modules']
stat_module_1 = modules_1[1][1]
stat_module_2 = modules_2[1][1]
if args.honor:
report_honor(save_game_1, save_game_2)
elif args.catch_transition is not None:
pass
elif args.full:
report_mismatch_all_modules(result)
else:
        report_mismatch_for_module(modules_1, modules_2, 1)
if __name__ == '__main__':
main()
|
8,522 | 89ffb2da456d2edf15fde8adc01615a277c6caa1 | import numpy as np
import matplotlib.pyplot as plt
##########################################
# line plot
#########################################
# If the x values are omitted, the x-axis defaults to 0, 1, 2, 3
"""
plt.plot([1, 4, 9, 16])
plt.show()
"""
# Specifying both x and y values
"""
plt.plot([10, 20, 30, 40], [1, 4, 9, 16])
plt.show()
"""
# Style specification
# Given in the order: color, marker, line
# Colors : blue(b), green(g), red(r), cyan(c), magenta(m), yellow(y), black(k), white(w)
# Markers : point(.), pixel(,), circle(o), triangle_down(v), triangle_up(^),
#        triangle_left(<), triangle_right(>), tri_down(1), tri_up(2), tri_left(3),
#        tri_right(4), square(s), pentagon(p), star(*), hexagon1(h),
#        hexagon2(H), plus(+), x marker(x), diamond(D), thin_diamond(d)
# Lines : solid line(-), dashed line(--), dash-dot line(-.), dotted(:)
"""
plt.plot([1,4,9,16], 'bs:')
plt.show()
"""
# Other style options
# See http://matplotlib.org/1.5.1/api/lines_api.html#matplotlib.lines.Line2D
# color(c) : line color
# linewidth(lw) : line width
# linestyle(ls) : line style
# marker : marker type
# markersize(ms) : marker size
# markeredgecolor(mec) : marker edge color
# markeredgewidth(mew) : marker edge width
# markerfacecolor(mfc) : marker face color
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
mfc="r")
plt.show()
"""
# Setting the plot range
# Give min and max values to xlim and ylim
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
mfc="r")
plt.xlim(-10, 10)
plt.ylim(-10, 30)
plt.show()
"""
# Tick settings
# Tick : a marked position along an axis of a plot or chart
# Tick label : the number or text drawn at a tick
# Set tick labels with xticks and yticks
# Tick label strings may contain LaTeX math expressions between $ signs
"""
X = np.linspace(-np.pi, np.pi, 256)
C = np.cos(X)
plt.plot(X, C)
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, +1])
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\pi$', r'$-\pi/2$',
'0', r'$+\pi/2$', r'$+\pi$'])
plt.yticks([-1, 0, +1], ["Low", "Zero", "High"])
plt.grid(False) # remove the grid
plt.show()
"""
# Drawing several lines at once
# Just pass several x, y, style groups
"""
t = np.arange(0., 5., 0.2)
plt.plot(t, t, 'r--', t, 0.5*t**2, 'bs:', t, 0.2*t**3, 'g^-')
plt.show()
"""
# Applying multiple plot commands to a single figure : hold
# hold(True) : start overlaying
# hold(False) : stop overlaying
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
mfc="r")
plt.hold(True)
plt.plot([9,16,4,1], c="k", lw=3, ls=":", marker="s", ms=10, mec="m", mew=5,
mfc="c")
plt.hold(False)
plt.show()
"""
# Legend
# Add a legend with the legend command
# Set the legend position with the loc argument
# loc : best(0), upper right(1), upper left(2), lower left(3),
# lower right(4), right(5), center left(6), center right(7)
# lower center(8), upper center(9), center(10)
"""
X = np.linspace(-np.pi, np.pi, 256)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, label="cosine")
plt.hold(True)
plt.plot(X, S, label="sine")
plt.legend(loc=5)
plt.show()
"""
# x-axis label, y-axis label, and title
# Set them with xlabel, ylabel, and title
"""
X = np.linspace(-np.pi, np.pi, 256)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, label="cosine")
plt.xlabel("time")
plt.ylabel("amplitude")
plt.title("Cosine Plot")
plt.show()
"""
# Annotations
# The annotate command adds notes, including arrows, inside the figure
"""
X = np.linspace(-np.pi, np.pi, 256)
S = np.sin(X)
plt.plot(X, S, label="sine")
plt.scatter([0], [0], color="r", linewidth=10)
plt.annotate(r'$(0,0)$', xy=(0, 0), xycoords='data', xytext=(-50, 50),
textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", linewidth=3, color="g"))
plt.show()
"""
# The structure is Figure [ Axes [ Axis ] ]
# Figure : used to open several windows or to set the figure size
#          plot creates a Figure automatically, so there is usually no
#          need to create one explicitly
#          use the gcf command to get the current Figure object
"""
f1 = plt.figure(figsize=(100,2))
plt.plot(np.random.randn(100))
plt.show()
"""
"""
f1 = plt.figure(1)
plt.plot([1,2,3,4], 'ro:')
f2= plt.gcf()
print(f1, id(f1))
print(f2, id(f2))
plt.show()
"""
# Axes and Subplot
# When several plots are arranged inside one window (Figure), each plot
# belongs to an object called an Axes
# The subplot command creates an Axes object; plot creates one automatically
# subplot lays Axes objects out on a grid
# Think of the Figure as a matrix and each Axes as one of its elements:
# two plots stacked top and bottom form a 2x1 matrix
# subplot takes 3 arguments: the first 2 define the matrix, the third picks the position
"""
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
ax1 = plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'yo-')
plt.title('A tale of 2 subplots')
plt.ylabel('Damped oscillation')
print(ax1)
ax2 = plt.subplot(2, 1, 2)
plt.plot(x2, y2, 'r.-')
plt.xlabel('time (s)')
plt.ylabel('Undamped')
print(ax2)
plt.show()
"""
# subplot arguments (2,2,1) can be abbreviated to 221
"""
plt.subplot(221); plt.plot([1,2]); plt.title(1)
plt.subplot(222); plt.plot([1,2]); plt.title(2)
plt.subplot(223); plt.plot([1,2]); plt.title(3)
plt.subplot(224); plt.plot([1,2]); plt.title(4)
plt.tight_layout()
plt.show()
"""
# xkcd style
X = np.linspace(-3, 3, 4096)
C = np.cos(X)
with plt.xkcd():
plt.title('XKCD style plot!!!')
plt.plot(X, C, label="cosine")
t = 2 * np.pi / 3
plt.scatter(t, np.cos(t), 50, color='blue')
plt.annotate(r'0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-90,
-50), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", linewidth=3, color="g"))
plt.show()
|
8,523 | 947055d1d6acc50e1722d79ea30e327414cd9c41 | N, D = map(int, input().split())
ans = 0
D2 = D*D
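# Comparing squared distances (x*x + y*y <= D*D) avoids sqrt and keeps the
# test in exact integer arithmetic.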
for i in range(N):
x, y = map(int, input().split())
if (x*x+y*y) <= D2:
ans += 1
print(ans)
|
8,524 | 719a993e1f5c5d1e803b04a5561373f2b9a5a5c2 | def get_perms(string):
toRtn = []
freq_table = count_letters(string)
get_perms_helper(freq_table, "", len(string), toRtn)
return toRtn
def count_letters(string):
freq = {}
for letter in string:
if letter not in freq:
freq[letter] = 0
freq[letter] += 1
return freq
def get_perms_helper(freq_table, prefix, remaining, result):
if remaining == 0:
result.append(prefix)
return
for letter in freq_table:
count = freq_table[letter]
if count > 0:
freq_table[letter] -= 1
get_perms_helper(freq_table, prefix + letter, remaining - 1, result)
freq_table[letter] = count
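            # restore the count (backtracking) so sibling letters in this
            # loop still see the original frequency table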
print get_perms("aaab") |
8,525 | 2a9426653146603d9aa79a59ce181d97aa3c551c | import sys
input = sys.stdin.readline
N = int(input())
A, B, C, D = [], [], [], []
for i in range(N):
a, b, c, d = map(int, input().split())
A.append(a)
B.append(b)
C.append(c)
D.append(d)
AB = []
CD = []
for i in range(N):
for j in range(N):
AB.append(A[i] + B[j])
CD.append(C[i] + D[j])
AB.sort()
CD.sort()
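# Meet-in-the-middle two-pointer sweep: with both N^2 pairwise-sum lists
# sorted, advance from opposite ends; whenever AB[left] + CD[right] == 0,
# count the run of equal values on each side and add left_count * right_count
# matching pairs in one step.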
answer = 0
left, right = 0, len(CD) - 1
while left < len(AB) and right >= 0:
total = AB[left] + CD[right]
if total == 0:
left_count, right_count = 1, 1
left_tmp = left
left += 1
while left < len(AB) and AB[left] + CD[right] == 0:
left_count += 1
left += 1
right -= 1
while right >= 0 and AB[left_tmp] + CD[right] == 0:
right_count += 1
right -= 1
answer += (left_count * right_count)
elif total > 0:
right -= 1
else:
left += 1
print(answer) |
8,526 | 324081eb4e133f6d16e716f3119e4cbc5e045ede | import pytorch_lightning as pl
from matplotlib import pyplot as plt
class Model(pl.LightningModule):
def __init__(self, net):
super(Model, self).__init__()
self.net = net
self.save_hyperparameters()
self.criterion = None
self.optimizer = None
self.batch_loss_collector = []
self.train_losses = []
self.valid_losses = []
def init_training_parameters(self, criterion, optimizer):
self.criterion = criterion
self.optimizer = optimizer
def set_criterion(self, criterion):
self.criterion = criterion
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def forward(self, x):
return self.net(x)
def configure_optimizers(self):
return self.optimizer
def on_train_epoch_start(self) -> None:
self.batch_loss_collector = []
def training_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('train_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def on_train_epoch_end(self, outputs) -> None:
self.train_losses.append(sum(self.batch_loss_collector)/len(self.batch_loss_collector))
def on_validation_epoch_start(self) -> None:
self.batch_loss_collector = []
def validation_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('val_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def validation_epoch_end(self, outputs) -> None:
self.valid_losses.append(sum(self.batch_loss_collector)/len(self.batch_loss_collector))
def plot_losses(self):
plt.figure()
plt.plot(range(len(self.train_losses)), self.train_losses, color='red', label='Training error')
plt.plot(range(len(self.valid_losses)), self.valid_losses, color='blue', label='Validation error')
plt.xlabel('Epoch')
plt.ylabel('Losses')
plt.ylim(0)
plt.legend()
plt.show()
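# A minimal usage sketch (net, criterion, optimizer and the dataloaders are
# assumed to exist and are not defined in this file):
#     model = Model(net)
#     model.init_training_parameters(criterion, optimizer)
#     pl.Trainer(max_epochs=10).fit(model, train_loader, val_loader)
#     model.plot_losses()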
|
8,527 | 7beb9d9e24f4c9a4e1a486048371da79c35d0927 | """
exercise 9-7-9-2
"""
fname = raw_input("Enter file name: ")
filehandle = open(fname)
d = dict()
for line in filehandle:
newline = line.split()
if newline != [] and newline[0] == 'From':
day = newline[2]
if day not in d:
d[day] = 1
else:
d[day] += 1
print d
|
8,528 | e83a9a4675e5beed938860037658d33c4d347b29 | class TestContext:
def test_should_get_variable_from_env(self, monkeypatch, fake_context):
expected = "test"
monkeypatch.setenv("SOURCE_PATH", expected)
actual = fake_context.get("SOURCE_PATH")
assert actual == expected
def test_should_get_variable_from_local_state(self, fake_context):
expected = "test"
fake_context.set({"SOURCE_PATH": expected})
actual = fake_context.get("SOURCE_PATH")
assert actual == expected
def test_should_set_variable_to_local_state(self, fake_context):
expected = "test"
fake_context.set({"test": expected})
actual = fake_context.get("test")
assert actual == expected
|
8,529 | fd0db093b72dad4657d71788405fcca4ba55daff | __doc__ = """
Dataset Module Utilities - mostly for handling files and datasets
"""
import glob
import os
import random
from meshparty import mesh_io
# Datasets -----------------------
SVEN_BASE = "seungmount/research/svenmd"
NICK_BASE = "seungmount/research/Nick/"
BOTH_BASE = "seungmount/research/nick_and_sven"
DATASET_DIRS = {
"orig_full_cells": [f"{SVEN_BASE}/pointnet_axoness_gt_180223/"],
"soma_vs_rest": [f"{SVEN_BASE}/pointnet_soma_masked_180401"],
"orphans": [f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/",
f"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/"],
"orphans2": [f"{NICK_BASE}/pointnet/orphan_dataset/train_val_axons",
f"{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/"],
"orphan_axons": [f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/"],
"orphan_axons_refined": [(f"{SVEN_BASE}"
"/pointnet_orphan_axons_gt_180308_refined/")],
"pinky100_orphan_dends": [(f"{BOTH_BASE}/data/180920_orphan_dends/")],
"orphan_axons_pinky100": [(f"{SVEN_BASE}/InhAnalysis/meshes_put_axon/")],
"fish_refined": [f"{SVEN_BASE}/180831_meshes_ashwin_refined/"],
"full_cells_unrefined": [(f"{SVEN_BASE}"
"/pointnet_full_semantic_labels"
"_masked_180401")],
"full_cells_refined": [(f"{SVEN_BASE}"
"/pointnet_full_semantic_labels"
"_masked_180401_refined/")],
"pinky100_orphan_dend_features": [(f"{BOTH_BASE}"
"/nick_archive/p100_dend_outer"
"/inference/proj32/")],
"pinky100_orphan_dend_features_32": [(f"{BOTH_BASE}"
"/nick_archive/p100_dend_outer_32"
"/inference/")],
"default": [f"{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/",
f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/",
f"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/"]
}
# --------------------------------
def fetch_dset_dirs(dset_name=None):
"""
Finds the global pathname to a list of directories which represent a
dataset by name.
"""
assert (dset_name is None) or (dset_name in DATASET_DIRS), "invalid name"
dset_name = "default" if dset_name is None else dset_name
home = os.path.expanduser("~")
return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])
def files_from_dir(dirname, exts=["obj", "h5"]):
"""
Searches a directory for a set of extensions and returns the files
matching those extensions, sorted by basename
"""
filenames = list()
for ext in exts:
ext_expr = os.path.join(dirname, f"*.{ext}")
filenames.extend(glob.glob(ext_expr))
return sorted(filenames, key=os.path.basename)
def split_files(filenames, train_split=0.8,
val_split=0.1, test_split=0.1, seed=None):
if seed is not None:
random.seed(seed)
# Normalizing splits for arbitrary values
total = train_split + val_split + test_split
train_split = train_split / total
val_split = val_split / total
test_split = test_split / total
n_train = round(train_split * len(filenames))
n_val = round(val_split * len(filenames))
permutation = random.sample(filenames, len(filenames))
train_files = permutation[:n_train]
val_files = permutation[n_train:(n_train+n_val)]
test_files = permutation[(n_train+n_val):]
return train_files, val_files, test_files
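# A minimal usage sketch (the directory path is illustrative):
#     files = files_from_dir("/data/meshes", exts=["h5"])
#     train, val, test = split_files(files, seed=42)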
# Helper functions for testing (e.g. sample.py)
def pull_n_samples(dset, n):
"""Pulls n random samples from a dataset object"""
return list(dset[i] for i in random.sample(range(len(dset)), n))
def save_samples(samples, output_prefix="sample"):
"""Saves a list of samples to ply files (with h5 labels)"""
for (i, vertices) in enumerate(samples):
vertex_fname = "{pref}{i}_vertices.ply".format(pref=output_prefix, i=i)
if os.path.dirname(vertex_fname) == "":
vertex_fname = "./" + vertex_fname
mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
|
8,530 | 47b40e4311f76cd620b7c6ed6b39216d866fa857 | import requests
import json
import hashlib
import os
def pull_from_solr(output_directory):
solr_url = 'http://54.191.81.42:8888/solr/collection1/select?q=*%3A*&wt=json&indent=true'
# TODO: ask about auth for this
req = requests.get(solr_url)
if req.status_code != 200:
        raise Exception('Solr request failed with HTTP status %s' % req.status_code)
new_data = req.json()
for doc in new_data['response']['docs']:
doc_url = doc['url']
doc_sha = hashlib.sha224(doc_url).hexdigest()
doc.update({"sha": doc_sha})
with open(os.path.join(output_directory, '%s.json' % doc_sha), 'w') as f:
f.write(json.dumps(doc, indent=4))
|
8,531 | 1da93e9113089f1a2881d4094180ba524d0d4a86 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Frame filtering
'''
import numpy as np
import cv2
def filter_frames(frames, method=cv2.HISTCMP_CORREL, target_size=(64, 64), threshold=0.65):
"""Filter noisy frames out
Args:
frames (list<numpy.ndarray[H, W, 3]>): video frames
method (int, optional): histogram comparison method
target_size (tuple<int, int>, optional): frame size used for histogram comparison
threshold (float, optional): minimum correlation between histograms to keep frame
Returns:
list<numpy.ndarray[H, W, 3]>: video frames
"""
resized_frames = [cv2.resize(f.copy(), target_size) for f in frames]
histograms = []
for f in resized_frames:
hist = cv2.calcHist([f], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
histograms.append(cv2.normalize(hist, hist).flatten())
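        # each frame is now summarized as a normalized 8*8*8 = 512-bin
        # color histogram descriptor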
# Find a reference histogram (median less sensitive to noise)
med_hist = np.median(histograms, axis=0)
filtered_frames = []
# Compare all histograms to the median one
for idx, hist in enumerate(histograms):
# Only keep frames with relatively high correlation
if cv2.compareHist(med_hist, hist, method) > threshold:
filtered_frames.append(frames[idx])
return filtered_frames
|
8,532 | 5c12ff4f88af991fa275cd08adf3678ee4a678f3 | #!/usr/bin/python
#===============================================================================
#
# Board Data File Analyzer
#
# Copyright (c) 2017 by QUALCOMM Atheros, Incorporated.
# All Rights Reserved
# QUALCOMM Atheros Confidential and Proprietary
#
# Notifications and licenses are retained for attribution purposes only
#===============================================================================
#--------------
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from array import array
import numpy as np
Description = """
[Description]:
Read WLAN board data file and generate graph per chain.
Tag name fields: fullmeas_pwr_<1>_<2>_<3>_<4>
    e.g. fullmeas_pwr_0_G_0_0
         fullmeas_pwr_0_A_0_0
1. Index/Step: an iteration takes 10 steps
2. Band: 'G' is 2.4G and 'A' is 5G.
3. Channel: 14 channels for 2.4G and 32 channels for 5G.
4. Chain: Either chain0 or chain1.
[Input]:
BIN/wlan_proc/wlan/halphy_tools/host/bdfUtil/qca61x0/bdf
[Usage]:
BDFAnalyzer.py input.txt
"""
fullpdadc_val_list = [] # y-axis
fullpwr_val_list = [] # x-axis
fullpwr_tag_list = []
win = pg.GraphicsWindow(title="Chain Analyzer: chain 0 (RED) chain 1 (GREEN)")
win.resize(1000,600)
def backup_calibration(fin):
for index in range(len(fullpwr_tag_list)):
fin.write(fullpwr_tag_list[index])
fin.write(" ")
fin.write(fullpwr_val_list[index])
fin.write(",")
fin.write(fullpdadc_val_list[index])
fin.write("\n")
def plot_render(band, channel):
index_lower = 0
index_upper = 0
X = []
Y = []
if band == "G": # 2.4G
index_lower = channel * 20
index_upper = (channel+1) * 20
elif band == "A": # 5G
index_lower = 280 + channel * 20
index_upper = 280 + (channel+1) * 20
else:
print "Plot render error\n"
for i in range(index_lower, index_upper):
X.append(int(fullpwr_val_list[i], 10))
Y.append(int(fullpdadc_val_list[i], 10))
title_description = "Channel " + str(channel)
pp = win.addPlot(title = title_description)
pp.plot(X[0:10],Y[0:10], title="Chain 0", pen=(255,0,0)) # chain 0 as red line
pp.plot(X[10:20],Y[10:20], title="Chain 1", pen=(0,255,0)) # chain 1 as green line
pp.showGrid(x=True, y=True)
def main():
global fullpwr_tag_list, fullpwr_val_list, fullpdadc_val_list
clpc = open("files/calibration.txt","w")
bdf = open("files/bdwlan30.txt",'r')
# read data
for line in bdf:
if "fullpdadc" in line:
tmp = line.split()
fullpdadc_val_list.append(tmp[1])
if "fullmeas_pwr" in line:
tmp = line.split()
fullpwr_tag_list.append(tmp[0])
fullpwr_val_list.append(tmp[1])
# write calibration backup file
backup_calibration(clpc)
bdf.close()
clpc.close()
# draw plot
plot_render('A', 7)
plot_render('A', 8)
win.nextRow()
plot_render('A', 9)
plot_render('A', 10)
if __name__ == '__main__':
    import sys
    main()
    # start the Qt event loop once the plots have been created
    if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.exec_()
|
8,533 | 315fe68f4adf39ded46fa9ad059fd2e962e46437 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import shuffle
import math
import vis_utils
class FLAGS(object):
image_height = 100
image_width = 100
image_channel = 1
CORRECT_ORIENTATION = True
class PrepareData():
def __init__(self):
return
def sparse_tuple_from_label(self, sequences, dtype=np.int32):
"""Create a sparse representention of x.
Args:
sequences: a list of lists of type dtype where each element is a sequence
Returns:
A tuple with (indices, values, shape)
"""
indices = []
values = []
for n, seq in enumerate(sequences):
indices.extend(zip([n] * len(seq), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)
return indices, values, shape
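    # e.g. sparse_tuple_from_label([[1, 2], [3]]) returns
    #   indices -> [[0, 0], [0, 1], [1, 0]]
    #   values  -> [1, 2, 3]
    #   shape   -> [2, 2]  (batch_size, max_sequence_length)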
def preprocess_samples(self, samples):
batch_inputs = []
batch_labels = []
for sample in samples:
im,label = sample[:FLAGS.image_height * FLAGS.image_width], sample[FLAGS.image_height * FLAGS.image_width:]
label = label.astype(np.int32).tolist()
im = np.reshape(im, [FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel])
batch_inputs.append(im)
batch_labels.append(label)
res = [batch_inputs]
if self.prepare_get_sparselabel:
res.append(self.sparse_tuple_from_label(batch_labels))
if self.parepare_get_denselabel:
res.append(batch_labels)
return res
def __generator(self, samples, batch_size,is_training=True):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
if is_training:
#during traning, shuffle the whole samples at the beginningof the epoch
samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
if is_training and (offset+batch_size > num_samples -1 ):
# this is to make sure all the batch are of same sizes during training
continue
batch_samples = samples[offset:offset+batch_size]
yield self.preprocess_samples(batch_samples)
def get_samples(self, split_name):
mnist_sequence = "./data/mnist_sequence3_sample_8distortions_9x9.npz"
data = np.load(mnist_sequence)
x_train, y_train = data['X_train'].reshape((-1, FLAGS.image_height * FLAGS.image_width)), data['y_train']
x_valid, y_valid = data['X_valid'].reshape((-1, FLAGS.image_height * FLAGS.image_width)), data['y_valid']
x_test, y_test = data['X_test'].reshape((-1, FLAGS.image_height * FLAGS.image_width)), data['y_test']
if split_name == "train":
res = np.concatenate([x_train, y_train], axis=1)
elif split_name == "sample_test":
res = np.concatenate([x_train[:100], y_train[:100]], axis=1)
elif split_name == "eval":
res = np.concatenate([x_valid, y_valid], axis=1)
else:
res = np.concatenate([x_test, y_test], axis=1)
return res
def input_batch_generator(self, split_name, is_training=False, batch_size=32, get_filenames = False, get_sparselabel = True, get_denselabel = True):
samples = self.get_samples(split_name)
self.prepare_get_filenames = get_filenames
self.prepare_get_sparselabel = get_sparselabel
self.parepare_get_denselabel = get_denselabel
gen = self.__generator(samples, batch_size, is_training=is_training)
return gen, len(samples)
def run(self):
batch_size = 32
split_name = 'sample_test'
# split_name = 'train'
# split_name = 'eval'
generator, dataset_size = self.input_batch_generator(split_name, is_training=True, batch_size=batch_size, get_filenames=True,get_sparselabel = True)
num_batches_per_epoch = int(math.ceil(dataset_size / float(batch_size)))
for _ in range(num_batches_per_epoch):
batch_inputs, batch_labels_sparse, batch_labels = next(generator)
batch_inputs = np.array(batch_inputs)
print(batch_labels)
print("batch_size={}".format(len(batch_labels)))
vis = True
if vis:
grid = vis_utils.visualize_grid(batch_inputs[:4])
grid = np.squeeze(grid)
plt.imshow(grid, cmap='gray')
plt.show()
break
return
if __name__ == "__main__":
obj= PrepareData()
obj.run() |
8,534 | c931d1ac5c2d003a8eaac3c6d777ce408df57117 | '''
Author: Jazielinho
'''
import keyboard
from PIL import ImageGrab
import os
import tqdm
import random
from training import config_tr
class DataSet(object):
    ''' class that builds the training dataset '''
saltar = 'saltar'
nada = 'nada'
reglas = [saltar, nada]
formato = 'PNG'
train = 'train'
val = 'val'
def __init__(self, val_split: int = 0.2) -> None:
self.imagenes = []
self.targets = []
self.nombre_maximo = 0
nombres_maximos = []
for regla in DataSet.reglas:
if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla):
os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla)
if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla):
os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)
lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla) + \
os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)
if len(lista_imagenes) == 0:
nombre_maximo = [0]
else:
maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for x in lista_imagenes]
nombre_maximo = maximo_nombre
nombres_maximos = nombres_maximos + nombre_maximo
self.nombre_maximo = max(nombres_maximos)
self.val_split = val_split
def genera_datos(self) -> None:
imagenes = []
targets = []
        # Starts capturing once space is pressed
while True:
if keyboard.is_pressed('space'):
break
while True:
            # The images are in black and white
imagen = ImageGrab.grab()
imagenes.append(imagen)
if keyboard.is_pressed('escape'):
break
if keyboard.is_pressed('space') or keyboard.is_pressed('up'):
targets.append(DataSet.saltar)
else:
targets.append(DataSet.nada)
self.imagenes = imagenes
self.targets = targets
self.guardar_info()
def guardar_info(self) -> None:
        ''' Save the captured images to disk. '''
for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets), total=len(self.imagenes)):
self.nombre_maximo += 1
random_ = random.random()
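            # send roughly (1 - val_split) of the images to train, the rest to val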
if random_ <= 1 - self.val_split:
image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato
else:
image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato
imagen.save(image_PATH, DataSet.formato)
if __name__ == '__main__':
    dataset = DataSet()
    dataset.genera_datos()
|
8,535 | 0b2bc19aea9393562f79df026bc17513e25c6604 | __author__ = 'Chitrang'
from google.appengine.api import memcache
from google.appengine.ext import db
import logging
import os
import jinja2
class User(db.Model):
id = db.StringProperty(required=True)
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
name = db.StringProperty(required=True)
profile_url = db.StringProperty(required=True)
access_token = db.StringProperty(required=True)
email = db.StringProperty(required=False)
penn_id = db.StringProperty(required=False)
email_verified = db.BooleanProperty(required=True)
verification_code = db.StringProperty(required=True)
#posts
chair = db.StringProperty(required=False)
vicechair = db.StringProperty(required=False)
treasurer = db.StringProperty(required=False)
socialchair = db.StringProperty(required=False)
operationschair = db.StringProperty(required=False)
gapsaliason = db.StringProperty(required=False)
communicationschair = db.StringProperty(required=False)
webadmin = db.StringProperty(required=False)
marketingchair = db.StringProperty(required=False)
#counts
chair_count = db.IntegerProperty(required=False, default=0)
vicechair_count = db.IntegerProperty(required=False, default=0)
treasurer_count = db.IntegerProperty(required=False, default=0)
socialchair_count = db.IntegerProperty(required=False, default=0)
operationschair_count = db.IntegerProperty(required=False, default=0)
gapsaliason_count = db.IntegerProperty(required=False, default=0)
communicationschair_count = db.IntegerProperty(required=False, default=0)
webadmin_count = db.IntegerProperty(required=False, default=0)
marketingchair_count = db.IntegerProperty(required=False, default=0)
@classmethod
def all_data(cls):
        all_data = db.GqlQuery("SELECT * "
                               "FROM User")
return list(all_data)
#logging.info("updating cache")
#memcache.set('users', list(all_data))
@classmethod
def set_email(cls, id, email):
user = User.get_by_key_name(id)
penn_id = email.split("@")[0]
user.email = email
user.penn_id = penn_id
user.put()
#User.update_cache()
@classmethod
def is_email_verified(cls, email):
data = User.all_data()
if data is not None:
all_emails = {user.email : user.email_verified for user in data}
logging.info("all email information "+ str(all_emails))
return all_emails.get(email, False)
@classmethod
def is_pennid_verified(cls, email):
penn_id = email.split("@")[0]
all_data = User.all_data()
if all_data is not None:
all_penn_ids = {user.penn_id: user.email_verified for user in all_data}
logging.info("all penn id information" + str(all_penn_ids))
return all_penn_ids.get(penn_id, False)
class Answer(db.Model):
answer = db.TextProperty(required=True)
answered_by = db.StringProperty(required=True)
answerer_name = db.StringProperty(required=True)
upvoted_by = db.ListProperty(str)
def get_votes(self):
return len(self.upvoted_by)
def get_upvote_link(self):
return "/q/question/upvote/%s"%self.key().id()
def render_str(template, **params):
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_environment = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
t = jinja_environment.get_template(template)
return t.render(params)
class Question(db.Model):
question = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
last_modified = db.DateTimeProperty(auto_now = True)
answers = db.ListProperty(item_type=db.Key,required=True)
asked_by = db.StringProperty(required=True)
asker_name = db.StringProperty(required=True)
def render(self):
self._render_text = self.question.replace('\n', '<br>')
return render_str("question.html", q = self)
def as_dict(self):
time_fmt = '%c'
d = {'question': self.question,
'created': self.created.strftime(time_fmt),
'last_modified': self.last_modified.strftime(time_fmt)}
return d
def link(self):
qid = self.key().id()
href_link = "/q/question/%s"%str(qid)
return href_link
|
8,536 | 89499ea8dd02d5e1b2ff635ab5203a65ceee4276 | import os
import codecs
import json
#~ from lxml import etree
import lxml.html
target = "test/index.html"
url = "http://de.wikipedia.org/wiki/Liste_von_Bergen_in_der_Schweiz"
command = "wget %s -O %s" % (url, target)
#~ os.popen(command)
f = open(target)
html = lxml.html.fromstring(f.read())
f.close()
tables = html.xpath("//table")
table = tables[2]
rows = table.xpath("//tr/th")
#~ row = rows[2]
#~ ths = row.xpath("th")
#~ print len(rows)
for cell in rows[:8]:
text = cell.xpath("string()").replace("(km)","").replace("(m)","")
text = text.strip()
    print(text)
#~ f = codecs.open("out.html","w", encoding="utf-8")
f_out = codecs.open("out.json","w", encoding="utf-8")
rows = table.xpath("//tr")
print(len(rows))
#~ liste = {}
liste = []
for i, row in enumerate(rows):
cells = row.xpath("td")
if len(cells)==8:
#~ print cells[1].xpath("string()")
#~ cell = cells[1]
out = []
for cell in cells[1:3]:
links = cell.xpath("a")
if links:
out.append(links[0].xpath("string()"))
else:
out.append(cell.xpath("string()"))
#~ liste.update({"n%s"% (i):{"name":out[0], "hight":out[1]}})
        liste.append({"name": out[0], "height": out[1]})
#~ f.write('<li><a data-icon="info" data-rel="dialog" data-transition="pop" href="#no_info">%s (%s)</a></li>\n' % (out[0], out[1]))
#~ f.close()
f_out.write(json.dumps({"mountains" : {"Switzerland" : liste}}))
f_out.close()
#~ for table in tables:
#~ print len(table)
print(lxml.html.tostring(table)[:100])
|
8,537 | 99048ddb3f42382c8b8b435d832a45011a031cf1 | from .score_funcs import *
from cryptonita.fuzzy_set import FuzzySet
from cryptonita.helpers import are_bytes_or_fail
def scoring(msg, space, score_func, min_score=0.5, **score_func_params):
    ''' Run the score function over the given message and over a parametric
        value x. Return, as a FuzzySet (a guess), all the values x
        whose scores are greater than the minimum score.
        The parametric space <space> can be defined as:
            - a range object
            - or any other iterable of the parametric values x
        For each possible x, score it using <score_func> and
        drop any x with a score of <min_score> or less.
        Extra parameters can be passed to the <score_func> using
        <score_func_params>.
        Return a FuzzySet with the x values.
        '''
assert 0.0 <= min_score <= 1.0
are_bytes_or_fail(msg, 'msg')
params = score_func_params
lengths = FuzzySet(
((x, score_func(msg, x, **params)) for x in space),
pr='tuple',
min_membership=min_score
)
return lengths
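
# A minimal usage sketch (illustrative only: the message, score function and
# threshold below are assumptions, not part of this module):
#
#   guesses = scoring(ciphertext,                  # some bytes-like message
#                     space=range(2, 41),          # candidate values of x
#                     score_func=my_score_func,    # hypothetical score function
#                     min_score=0.05)
#   # 'guesses' is a FuzzySet mapping each surviving x to its score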
|
8,538 | 503726cd2d70286189f4b8e02acaa3d5f6e29e12 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from . import models
class RegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
class ChangeEmail(forms.Form):
    email = forms.EmailField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':"oldPassword"}))
password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label="Password")
password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')
|
8,539 | 773c217f7f76bd82ed3dabf7ae1aba1871f0932f | import requests
import unittest
import time
from common import HTMLTestReport
class Get(unittest.TestCase):
TMPTOKEN = ''
TOKEN = ''
def setUp(self):
pass
    # Get a temporary token (opterTmpToken)
def test_gettmptoken(self):
url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'
params = {'sysID': '5'}
r = requests.get(url=url, params=params)
print(r.text)
opterTmpToken = r.json().get('data')['opterTmpToken']
Get.TMPTOKEN = opterTmpToken
print(opterTmpToken)
    # Get the formal token (opterToken)
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
    # Get teacher qualification info; verify the response message is Success
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken':Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    # Get teacher qualification info; verify the returned qualification details are correct
    def test_getQualificationInfo_data(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
def Run():
suite = unittest.TestSuite()
    # Tests execute in the order they are added to the suite
suite.addTest(Get('test_gettmptoken'))
suite.addTest(Get('test_gettoken'))
    suite.addTest(Get('test_getQualificationInfo'))
    suite.addTest(Get('test_getQualificationInfo_data'))
now = time.strftime("%Y-%m-%d_%H%M", time.localtime())
    filepath = './report/' + now + '.html'  # location where the test report is saved
fp = open(filepath, 'wb')
runner = HTMLTestReport.HTMLTestRunner(
stream=fp,
        title='API Automation Test Report',
tester='白雪'
)
runner.run(suite)
fp.close()
Run() |
8,540 | e26f673dfae38148a56927ce82d5ea7ea2545e12 | x = 'From marquard@uct.ac.za'
print(x[8])
x = 'From marquard@uct.ac.za'
print(x[14:17])
greet = 'Hello Bob'
xa = "aaa"
print(greet.upper())
print(len('banana')*7)
data = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
pos = data.find('.')
print(data[pos:pos+3])
stuff = dict()
print(stuff.get('candy',-1))
|
8,541 | f32b9dc36b2452fea8c8f284fbf800f22608c3ae | import csv
import io
import pickle
import os
import pip
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import cv2
import numpy as np
SCOPES = ['https://www.googleapis.com/auth/drive.metadata',
'https://www.googleapis.com/auth/drive.file',
'https://www.googleapis.com/auth/drive']
def install(package):
if hasattr(pip, 'main'):
pip.main(['install', package])
else:
pip._internal.main(['install', package])
def create_folder(service):
file_metadata = {
'name': 'Test Techm',
'mimeType': 'application/vnd.google-apps.folder'
}
file = service.files().create(body=file_metadata,
fields='id').execute()
print('Folder ID: %s' % file.get('id'))
def get_gdrive_service():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
# return Google Drive API service
return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
service = get_gdrive_service()
request = service.files().get_media(fileId=id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
    while not done:
status, done = downloader.next_chunk()
with io.open("." + "/" + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
def is_duplicate(img1,img2):
response=False
image1 = cv2.imread(img1)
image2 = cv2.imread(img2)
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) #if difference is all zeros it will return False
if result is True:
response=True
#duplicate_image.append(list[i])
#print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1)))
    except Exception:
        # cv2.subtract fails when the images differ in size or channels;
        # treat such pairs as non-duplicates
        pass
return response
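# Note: cv2.subtract requires both images to share the same shape; the
# try/except above treats pairs that differ in size or channels as non-duplicates.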
def check_duplicate_image_new(items):
print("Images is loading to memory..")
#"""given items returned by Google Drive API, prints them in a tabular way"""
map= {}
list=[]
message= set()
duplicate_image=[]
final_result={}
if not items:
print('No files found.')
else:
for item in items:
if item["mimeType"] == "image/jpeg":
list.append(item["name"])
#Creating Map
value=[]
value.append(item["name"])
value.append(item["webViewLink"])
if item["name"] in map:
val=set()
val.add(item["webViewLink"])
map[item["name"]]=item["webViewLink"]
else:
map[item["name"]]=item["webViewLink"]
                # Downloading image
downloadFile(item["id"],item["name"])
match=[]
flag=False
for i in range(len(list)-1):
temp=[]
dp_count=0
flag=False
if list[i] not in match :
flag=True
for j in range(i+1,len(list)):
istrue=is_duplicate(list[i],list[j])
if istrue==True:
dp_count=dp_count+1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match)==0:
match.append(list[i])
match.append(list[j])
if flag==True and dp_count !=0:
#print(list[i]," - ",dp_count)
final_result[list[i]]=temp
m={}
tdct=0
for x, y in final_result.items():
res=y
tdct=tdct+len(res)
s=set()
for i in res:
#s=set()
for item in items:
if item["mimeType"] == "image/jpeg":
if item["name"]==i:
s.add(item["webViewLink"])
m[x]=s
return m,tdct
def duplicate_image_list(imagelist):
#print(len(imagelist))
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count=0
l=[]
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) # if difference is all zeros it will return False
if result is True:
#print(imagelist[i],"Matching with ",imagelist[j])
l.append(imagelist[j])
count=count+1
dup_list.append(imagelist[i])
                except Exception:
                    # images with mismatched shapes can't be compared; skip them
                    pass
return dup_list
csv_map = {}
def check_duplicate_image(items):
# """given items returned by Google Drive API, prints them in a tabular way"""
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item["mimeType"]
if file_type == "image/jpeg":
image_name_list.append(item["name"])
#append url or
# Creating Map
value = []
value.append(item["name"])
value.append(item["webViewLink"])
map[item["id"]] = value
csv_map[item["name"]] = item["webViewLink"]
            # Downloading image
downloadFile(item["id"], item["name"])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service,items, newName):
count=1
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
file = service.files().get(fileId=id).execute()
del file['id']
if "jpeg" in mime_type:
            file['name'] = newName + str(count) + ".jpg"
if "png" in mime_type:
            file['name'] = newName + str(count) + ".png"
updated_file = service.files().update(fileId=id, body=file).execute()
count=count+1
def count_image(id):
imageList = []
service = get_gdrive_service()
results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
items = results.get('files', [])
for item in items:
mime_Type = item["mimeType"]
if mime_Type == "image/jpeg":
imageList.append(item["name"])
if mime_Type == "application/vnd.google-apps.folder":
imageList.extend(count_image(item["id"]))
return imageList
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name=[]
rows = []
overview_map = {}
img_nm=0
for item in items:
name = item["name"]
mime_type = item["mimeType"]
if name == 'Test Techm':
testtechm_id = item['parents'][0]
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
if mime_type == "application/vnd.google-apps.folder":
folder_count = folder_count + 1
if mime_type == "image/jpeg":
# renameFile(item["id"],"rajj_img"+str(image_count))
image_count = image_count + 1
if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
list_all_folder_name.append(item["name"])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item["name"]] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
#duplicate_count = len(check_duplicate_image(items))
lt,duplicate_ct=check_duplicate_image_new(items)
    duplicateImagehtml(folder_count, image_count, duplicate_ct, lt)
# overview chart report page
draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ["Image Name", 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
#print("Device's adding into csv: " + str(count))
csvFile.close()
#print('Device CSV File creation is Done file name is ', fileName)
def duplicateImagehtml(folder_count, image_count, duplicate_ct, map1):
    uri = []
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization.Table(document.getElementById('table_div'));
table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
}
</script>
</head>
<body><h2 style="text-align: center">Google Drive Summary Table</h2>
<div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
<h2 style="text-align: center" >List of Duplicate Image</h2>
<div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
</body></html>"""
fb.write(message)
fb.close()
print("Duplicate image data preparing.. ")
# webbrowser.open_new_tab('helloworld.html')
def draw_chart_create_report(folder_names, image_count, duplicate_ct, folder_map, folder_count_real):
fb = open('gDriveOverview.html', 'w')
    values = list(folder_map.values())
newlist = []
    folder_name = list(folder_map.keys())
total_image_count = []
duplicate_image_count_in_folder = []
for v in values:
newlist.append(duplicate_image_list(v))
total_image_count.append(len(v))
for n in newlist:
duplicate_image_count_in_folder.append(len(n))
# create plot
#print(total_image_count, duplicate_image_count_in_folder, map.keys())
m1 = """<html>
<head>
<h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['bar','corechart','table']});
google.charts.setOnLoadCallback(drawChart);
function drawChart() {
var paiData = google.visualization.arrayToDataTable([
['Drive', 'Drive Data'],
['Total Images', """ + str(image_count) + """],
['Total duplicate Images', """ + str(duplicate_ct) + """],
['Total Folder', """ + str(folder_count_real) + """]
]);
var paiOptions = {
title: 'Google Drive Overview'
};
var chart = new google.visualization.PieChart(document.getElementById('piechart'));
chart.draw(paiData, paiOptions);
var barData = google.visualization.arrayToDataTable("""
fb.write(m1)
barchart_data = []
barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images'])
for i in range(len(values)):
item_list = []
        item_list.append(folder_names[i])
item_list.append(total_image_count[i])
item_list.append(duplicate_image_count_in_folder[i])
barchart_data.append(item_list)
m3 = str(barchart_data) + """);
var barOptions = {
chart: { title: 'Google Drive Folderwise Overview',
subtitle: 'This report is created on '+new Date(),
}};
var chart = new google.charts.Bar(document.getElementById('bar_chart'));
chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
}
</script>
</head>
<body>
<div style="width:100%; margin:0px auto;">
<div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
<div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
</div>
<div>
<h2>
<p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
</h2></div></body></html>
"""
fb.write(m3)
fb.close()
print("Bar and Pie chart creating.... ")
def main():
service = get_gdrive_service()
print("Wait a moment script is running ..!!!")
results = service.files().list(pageSize=1000,
fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)").execute()
items = results.get('files', [])
if not items:
# empty drive
print('No files found.')
else:
# create_folder(service)
print("-----_")
name="g_image_"
renameFile(service,items,name)
print("==============================")
#check_duplicate_image(items)
# createDeviceCSV()
list_files(items, service)
if __name__ == '__main__':
main()
print("Script is done ..!!!")
|
8,542 | 0ca751e050244fd85c8110d02d5e7a79eb449ada | print('Hi, I am Nag')
|
8,543 | 274af2a0b758472ca4116f1dfa47069647babf57 | import seaborn as sns
iris = sns.load_dataset('iris')
sns.violinplot(x='species', y='sepal_length', data=iris, palette='rainbow')
|
8,544 | 7f21ab8d332d169226ef17276abbdd373e3a62c2 | import http.cookies
import json
import os
import itertools
import types
from framework import helpers
from framework import security
class Model:
"""Manages the information received by the client"""
def __init__(self):
"""Puth the os.environ dict into the namespace"""
self.__dict__.update(
itertools.starmap(
                lambda key, value: (
                    key[0].lower() +  # lower-case the first letter and append
                    key.title()  # the title-cased key
                    .replace('_', '')  # with underscores removed
                    [1:]  # minus its first char (already added above)
                    , value
                )  # lambda: e.g. SERVER_NAME -> serverName
,os.environ.items()
) #itertools.starmap
) #update
@property
def form(self):
"""Contains the data send from the client."""
return security.get_field_storage()
@property
def cookie(self):
"""The client cookie"""
return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))
@property
def url(self):
"""The url of request"""
url = os.environ.get('PATH_INFO')\
or os.environ.get('REQUEST_URI')
return url if url else ''
@property
def serverProtocol(self):
"""The server protocol"""
serverProtocol = os.environ.get('SERVER_PROTOCOL')
return serverProtocol if serverProtocol else 'HTTP/1.1'
@property
def protocol(self):
"""Te protocol (HTTP or HTTPS)"""
return helpers.get_protocol()
@property
def ip(self):
"""The ip of the client"""
return os.environ.get('REMOTE_ADDR')
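
# Usage sketch (assumes a CGI-style request where os.environ carries the
# request metadata):
#
#   model = Model()
#   print(model.url, model.ip, model.serverProtocol)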
|
8,545 | 961bda96e433bb66d592ad1e99c92db0a9ab9fe9 | import os
import pandas as pd
import time
import sys
from tqdm import tqdm
sys.path.append(os.path.join(os.environ['HOME'],'Working/interaction/'))
from src.make import exec_gjf
from src.vdw import vdw_R, get_c_vec_vdw
from src.utils import get_E
import argparse
import numpy as np
from scipy import signal
import scipy.spatial.distance as distance
import random
def init_process(args):
auto_dir = args.auto_dir
monomer_name = args.monomer_name
os.makedirs(os.path.join(auto_dir,'gaussian'), exist_ok=True)
os.makedirs(os.path.join(auto_dir,'gaussview'), exist_ok=True)
def get_init_para_csv(auto_dir,monomer_name):
init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')
df = pd.read_csv('/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'.format(monomer_name))
# df = df[(df["A2"]==30)&(df["A1"]<=0)&(df["A1"]>=-10)&(df["theta"]>45)]
df = df[(df["A2"]==32)&(df["A1"]<=0)&(df["A1"]>=-20)&(df["theta"]>45)]
inner_zip = df[['a','b','theta','A1','A2']].values
print(inner_zip)
init_para_list = []
for a,b,theta,A1,A2 in tqdm(inner_zip):
c = get_c_vec_vdw(monomer_name,A1,A2,a,b,theta)
init_para_list.append([np.round(a,1),np.round(b,1),theta,A1,A2,np.round(c[0],1),np.round(c[1],1),np.round(c[2],1),'NotYet'])
df_init_params = pd.DataFrame(np.array(init_para_list),columns = ['a','b','theta','A1','A2','cx','cy','cz','status'])
df_init_params.to_csv(init_params_csv,index=False)
get_init_para_csv(auto_dir,monomer_name)
auto_csv_path = os.path.join(auto_dir,'step3-twist.csv')
if not os.path.exists(auto_csv_path):
df_E = pd.DataFrame(columns = ['a','b','theta','A1','A2','cx','cy','cz','E','E_p','E_t','machine_type','status','file_name'])
else:
df_E = pd.read_csv(auto_csv_path)
df_E = df_E[df_E['status']!='InProgress']
df_E.to_csv(auto_csv_path,index=False)
df_init=pd.read_csv(os.path.join(auto_dir,'step3-twist_init_params.csv'))
df_init['status']='NotYet'
df_init.to_csv(os.path.join(auto_dir,'step3-twist_init_params.csv'),index=False)
def main_process(args):
os.chdir(os.path.join(args.auto_dir,'gaussian'))
isOver = False
while not(isOver):
#check
isOver = listen(args)
time.sleep(1)
def listen(args):
auto_dir = args.auto_dir
monomer_name = args.monomer_name
num_nodes = args.num_nodes
isTest = args.isTest
fixed_param_keys = ['A1','A2']
opt_param_keys = ['a','b','theta','cx','cy','cz']
auto_step2_csv = '/home/koyama/Working/interaction/{}/step2-twist/step2-twist.csv'.format(monomer_name)
df_step2 = pd.read_csv(auto_step2_csv)
auto_csv = os.path.join(auto_dir,'step3-twist.csv')
df_E = pd.read_csv(auto_csv)
df_queue = df_E.loc[df_E['status']=='InProgress',['machine_type','file_name','A1','A2','a','b','theta','cx','cy','cz']]
machine_type_list = df_queue['machine_type'].values.tolist()
len_queue = len(df_queue)
maxnum_machine2 = 3#int(num_nodes/2)
for idx,row in zip(df_queue.index,df_queue.values):
machine_type,file_name,A1,A2,a,b,theta,cx,cy,cz = row
log_filepath = os.path.join(*[auto_dir,'gaussian',file_name])
        if not(os.path.exists(log_filepath)):  # skip if the log file has not been created yet
continue
E_list=get_E(log_filepath)
if len(E_list)!=5:
continue
else:
len_queue-=1;machine_type_list.remove(machine_type)
Ei0,Eip1,Eip2,Eit1,Eit2=map(float,E_list)
Eit3 = Eit2; Eit4 = Eit1
try:
Ep, Et = df_step2[(df_step2['A1']==A1)&(df_step2['A2']==A2)&(df_step2['theta']==theta)&(df_step2['a']==a)&(df_step2['b']==b)][['E_p','E_t']].values[0]
except IndexError:
inner_params_dict = {"A1":A1,"A2":A2,"a":a,"b":b,"theta":theta,'cx':0,'cy':0,'cz':0}
inner_file_name = exec_gjf(auto_dir, monomer_name, inner_params_dict, machine_type,isInterlayer=False,isTest=isTest)
            time.sleep(200)  # one calculation finishes in about 1 min 40 s
is_inner_over = False
while not(is_inner_over):
                time.sleep(30)  # one calculation finishes in about 1 min 40 s
E_inner_list=get_E(inner_file_name)
is_inner_over = len(E_inner_list)==2
Ep, Et=map(float,E_inner_list)
df_newline = pd.Series({**inner_params_dict,'E':2*Ep+4*Et,'E_p':Ep,'E_t':Et,'machine_type':machine_type,'status':'Done','file_name':inner_file_name})
df_step2=df_step2.append(df_newline,ignore_index=True)
df_step2.to_csv(auto_step2_csv,index=False)
E = 4*Et + 2*Ep + 2*(Ei0 + Eip1+ Eip2 + Eit1 + Eit2 + Eit3 + Eit4)
df_E.loc[idx, ['E_p','E_t','E_i0','E_ip1','E_ip2','E_it1','E_it2','E_it3','E_it4','E','status']] = [Ep,Et,Ei0,Eip1,Eip2,Eit1,Eit2,Eit3,Eit4,E,'Done']
df_E.to_csv(auto_csv,index=False)
            break  # stop after one finished job; handling two at once would be unsafe
isAvailable = len_queue < num_nodes
machine2IsFull = machine_type_list.count(2) >= maxnum_machine2
machine_type = 1 if machine2IsFull else 2
if isAvailable:
params_dict = get_params_dict(auto_dir,num_nodes, fixed_param_keys, opt_param_keys, monomer_name)
        if len(params_dict)!=0:  # if the end is not yet in sight
alreadyCalculated = check_calc_status(auto_dir,params_dict)
if not(alreadyCalculated):
file_name = exec_gjf(auto_dir, monomer_name, {**params_dict}, machine_type,isInterlayer=True,isTest=isTest)
df_newline = pd.Series({**params_dict,'E':0.,'E_p':0.,'E_t':0.,'E_i0':0.,'E_ip1':0.,'E_ip2':0.,'E_it1':0.,'E_it2':0.,'E_it3':0.,'E_it4':0.,'machine_type':machine_type,'status':'InProgress','file_name':file_name})
df_E=df_E.append(df_newline,ignore_index=True)
df_E.to_csv(auto_csv,index=False)
init_params_csv=os.path.join(auto_dir, 'step3-twist_init_params.csv')
df_init_params = pd.read_csv(init_params_csv)
df_init_params_done = filter_df(df_init_params,{'status':'Done'})
isOver = True if len(df_init_params_done)==len(df_init_params) else False
return isOver
def check_calc_status(auto_dir,params_dict):
df_E= pd.read_csv(os.path.join(auto_dir,'step3-twist.csv'))
if len(df_E)==0:
return False
df_E_filtered = filter_df(df_E, params_dict)
df_E_filtered = df_E_filtered.reset_index(drop=True)
try:
status = get_values_from_df(df_E_filtered,0,'status')
return status=='Done'
except KeyError:
return False
def get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys, monomer_name):
"""
前提:
step3-twist_init_params.csvとstep3-twist.csvがauto_dirの下にある
"""
init_params_csv=os.path.join(auto_dir, 'step3-twist_init_params.csv')
df_init_params = pd.read_csv(init_params_csv)
df_cur = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))
df_init_params_inprogress = df_init_params[df_init_params['status']=='InProgress']
    # on initial startup
if len(df_init_params_inprogress) < num_nodes:
df_init_params_notyet = df_init_params[df_init_params['status']=='NotYet']
for index in df_init_params_notyet.index:
df_init_params = update_value_in_df(df_init_params,index,'status','InProgress')
df_init_params.to_csv(init_params_csv,index=False)
params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()
return params_dict
for index in df_init_params.index:
df_init_params = pd.read_csv(init_params_csv)
init_params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()
fixed_params_dict = df_init_params.loc[index,fixed_param_keys].to_dict()
isDone, opt_params_dict = get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict, monomer_name)
if isDone:
            # update the status in df_init_params
df_init_params = update_value_in_df(df_init_params,index,'status','Done')
if np.max(df_init_params.index) < index+1:
status = 'Done'
else:
status = get_values_from_df(df_init_params,index+1,'status')
df_init_params.to_csv(init_params_csv,index=False)
if status=='NotYet':
opt_params_dict = get_values_from_df(df_init_params,index+1,opt_param_keys)
df_init_params = update_value_in_df(df_init_params,index+1,'status','InProgress')
df_init_params.to_csv(init_params_csv,index=False)
return {**fixed_params_dict,**opt_params_dict}
else:
continue
else:
df_inprogress = filter_df(df_cur, {**fixed_params_dict,**opt_params_dict,'status':'InProgress'})
if len(df_inprogress)>=1:
continue
return {**fixed_params_dict,**opt_params_dict}
return {}
def get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict, monomer_name):
df_val = filter_df(df_cur, fixed_params_dict)
a_init_prev = init_params_dict['a']; b_init_prev = init_params_dict['b']; theta_init_prev = init_params_dict['theta']
A1 = init_params_dict['A1']; A2 = init_params_dict['A2']
while True:
E_list=[];heri_list=[]
for a in [a_init_prev-0.1,a_init_prev,a_init_prev+0.1]:
for b in [b_init_prev-0.1,b_init_prev,b_init_prev+0.1]:
a = np.round(a,1);b = np.round(b,1)
for theta in [theta_init_prev-0.5,theta_init_prev,theta_init_prev+0.5]:
df_val_ab = df_val[
(df_val['a']==a)&(df_val['b']==b)&(df_val['theta']==theta)&
(df_val['A1']==A1)&(df_val['A2']==A2)&
(df_val['status']=='Done')
]
if len(df_val_ab)==0:
cx, cy, cz = get_c_vec_vdw(monomer_name,A1,A2,a,b,theta)
cx, cy, cz = np.round(cx,1), np.round(cy,1), np.round(cz,1)
return False,{'a':a,'b':b,'theta':theta, "cx":cx, "cy":cy, "cz":cz }
heri_list.append([a,b,theta]);E_list.append(df_val_ab['E'].values[0])
a_init,b_init,theta_init = heri_list[np.argmin(np.array(E_list))]
if a_init==a_init_prev and b_init==b_init_prev and theta_init==theta_init_prev:
cx, cy, cz = get_c_vec_vdw(monomer_name,A1,A2,a_init,b_init,theta_init)
cx, cy, cz = np.round(cx,1), np.round(cy,1), np.round(cz,1)
return True,{'a':a_init,'b':b_init, 'theta':theta_init, "cx":cx, "cy":cy, "cz":cz }
else:
a_init_prev=a_init;b_init_prev=b_init;theta_init_prev=theta_init
def get_values_from_df(df,index,key):
return df.loc[index,key]
def update_value_in_df(df,index,key,value):
df.loc[index,key]=value
return df
def filter_df(df, dict_filter):
query = []
for k, v in dict_filter.items():
if type(v)==str:
query.append('{} == "{}"'.format(k,v))
else:
query.append('{} == {}'.format(k,v))
df_filtered = df.query(' and '.join(query))
return df_filtered
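
# e.g. filter_df(df, {'A1': 0, 'status': 'Done'}) runs the pandas query
# 'A1 == 0 and status == "Done"' and returns only the matching rows.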
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--init',action='store_true')
parser.add_argument('--isTest',action='store_true')
parser.add_argument('--auto-dir',type=str,help='path to dir which includes gaussian, gaussview and csv')
parser.add_argument('--monomer-name',type=str,help='monomer name')
parser.add_argument('--num-nodes',type=int,help='num nodes')
args = parser.parse_args()
if args.init:
print("----initial process----")
init_process(args)
print("----main process----")
main_process(args)
print("----finish process----")
|
8,546 | 11320922d24b27c5cfa714f88eb0a757deef987f | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
from .attack_models import (DriftAttack, AdditiveGaussian, RandomGaussian,
BitFlipAttack, RandomSignFlipAttack)
from typing import Dict
def get_attack(attack_config: Dict):
if attack_config["attack_model"] == 'drift':
return DriftAttack(attack_config=attack_config)
elif attack_config["attack_model"] == 'additive_gaussian':
return AdditiveGaussian(attack_config=attack_config)
elif attack_config["attack_model"] == 'random_gaussian':
return RandomGaussian(attack_config=attack_config)
elif attack_config["attack_model"] == 'bit_flip':
return BitFlipAttack(attack_config=attack_config)
elif attack_config["attack_model"] == 'random_sign_flip':
return RandomSignFlipAttack(attack_config=attack_config)
else:
raise NotImplementedError("Invalid attack model: {}".format(attack_config["attack_model"]))
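
# A minimal config sketch (only "attack_model" is read here; any other keys are
# placeholder assumptions for whatever the chosen attack class expects):
#
#   attack_config = {"attack_model": "drift"}
#   attacker = get_attack(attack_config)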
def launch_attack(attack_mode, mal_nodes):
if attack_mode == 'coordinated':
# Co-ordinated Attack
attacker = mal_nodes[0].attack_model
print('Co-ordinated \'{}\' attack applied to {} clients'.format(mal_nodes[0].attack_model.attack_algorithm,
len(mal_nodes)))
attacker.attack(byz_clients=mal_nodes)
elif attack_mode == 'un_coordinated':
# un_coordinated stand alone attack per client
attacker = mal_nodes[0].attack_model
        print('Un-coordinated \'{}\' attack applied to {} clients'.
              format(mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
for mal_client in mal_nodes:
attacker.attack(byz_clients=[mal_client])
else:
raise NotImplementedError
|
8,547 | d08e4c85890dab7cb421fa994ef1947d8919d58f | # -*- coding: utf-8 -*-
# Item pipelines
import logging
import hashlib
from wsgiref.handlers import format_date_time
import time
import itertools
import psycopg2
from psycopg2.extensions import AsIs
from psycopg2.extras import Json
import requests
from scrapy import signals
from scrapy.pipelines.files import FilesPipeline
from twisted.enterprise import adbapi
from twisted.internet import threads
logger = logging.getLogger(__name__)
class DBStorePipeline(object):
'''
This class save the crawled item to a PostgreSQL table
The db operation is async and managed by the twisted reactor loop.
(References from https://gist.github.com/tzermias/6982723)
'''
@classmethod
def from_crawler(cls, crawler):
instance = cls(crawler.stats, crawler.settings)
crawler.signals.connect(instance.spider_closed, signals.spider_closed)
return instance
def __init__(self, stats, settings):
# Instantiate DB
self.dbpool = adbapi.ConnectionPool('psycopg2', settings['DB_DSN'])
self.stats = stats
def spider_closed(self, spider):
self.dbpool.close()
def process_item(self, item, spider):
table = getattr(item, "db_table", None)
if not table:
return item
query = self.dbpool.runInteraction(self._save_item, table, item)
query.addErrback(self._handle_error)
return item
def _save_item(self, tx, table, item):
skip_fields = getattr(item, "db_skip_fields", [])
cols = [k for k in item if k not in skip_fields]
self._insert_row(tx, table, cols, item)
self.stats.inc_value('database/records_added')
if hasattr(item, "db_helper_table_rows"):
helper_table, helper_rows = item.db_helper_table_rows()
if helper_rows:
self._insert_row(tx, helper_table,
helper_rows[0].keys(), *helper_rows)
self.stats.inc_value(
'database/records_added', len(helper_rows))
return item
def _insert_row(self, tx, table, cols, *rows):
val_fmt = "({})".format(",".join(itertools.repeat("%s", len(cols))))
def mk_row_param(row):
return tuple(row[k] for k in cols)
data_str = ','.join(tx.mogrify(val_fmt, mk_row_param(row)).decode('utf-8')
for row in rows)
q = "INSERT INTO {} ({}) VALUES ".format(table, ",".join(cols))
tx.execute(q + data_str)
def _handle_error(self, e):
logger.error("failed to track item to DB: %s", e)
class UpYunStore(object):
OPERATOR = None
SIGNATURE = None
HEADERS = {
'Cache-Control': 'max-age=172800',
}
def __init__(self, uri):
assert uri.startswith('upyun://')
self.session = requests.Session()
self.bucket, self.prefix = uri[8:].split("/", 1)
def stat_file(self, path, info):
"""
TODO fetch and return file meta info from cloud
"""
return {}
def persist_file(self, path, buf, info, meta=None, headers=None):
"""Upload file to Azure blob storage"""
headers = {
"Authorization": "UPYUN: {}:{}".format(self.OPERATOR, self.SIGNATURE),
"Date": format_date_time(int(time.time())),
}
url = "http://v0.api.upyun.com:5000/{}/{}{}".format(
self.bucket, self.prefix, path)
def upload():
try:
                res = self.session.put(url, headers=headers, data=buf)
if res.status_code != 200:
logger.info(
"failed to upload file %s to upyun, response code: %s, text:\n%s",
path, res.status_code, res.text)
else:
logger.debug("uploaded file %s to upyun", path)
except Exception:
                logger.warning("upload file %s to upyun failed",
                               path, exc_info=True)
return threads.deferToThread(upload)
class MbCrawlImagesPipeline(FilesPipeline):
STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)
STORE_SCHEMES["upyun"] = UpYunStore
@classmethod
def from_settings(cls, settings):
upyunStore = cls.STORE_SCHEMES["upyun"]
upyunStore.OPERATOR = settings["UPYUN_OPERATOR"]
        upyunStore.SIGNATURE = settings["SIGNATURE"]
return super().from_settings(settings)
|
8,548 | 3caaa455cda0567b79ae063c777846157839d64f | from django.shortcuts import redirect, render
from users.models import CustomUser
from .models import Profile
def profile_page_view(request, username):
current_user = request.user
user = CustomUser.objects.get(username=username)
profile = Profile.objects.get(user=user)
    check = current_user in profile.followers.all()
posts = user.post_set.all()
context = {
'profile' : profile,
'posts' : posts,
'check' : check,
}
return render(request, 'profile.html', context)
def follow_user_view(request, user1, user2):
follower = CustomUser.objects.get(username = user1)
to_follow = CustomUser.objects.get(username = user2)
follower_profile = Profile.objects.get(user = follower)
to_follow_profile = Profile.objects.get(user = to_follow)
    if follower not in to_follow_profile.followers.all():
        follower_profile.following.add(to_follow)
        to_follow_profile.followers.add(follower)
        follower_profile.following_count += 1
        to_follow_profile.followers_count += 1
        follower_profile.save()
        to_follow_profile.save()
    return redirect('profile', user2)
|
8,549 | 71ca67948100fb7ad388934740cead1ebe4a2b52 | ANCHO = 600
ALTO = 800
|
8,550 | 6065fae2a11f6b525ef10346e297505ec9d4e9d5 | import unittest
import numpy
import set_solver
class TestSets(unittest.TestCase):
def test_is_set(self):
"""Test set validator (Exercise 3a)."""
cards = numpy.array([[1,1,1,2,0],
[0,1,2,2,2],
[0,1,2,2,2],
[0,1,2,2,2]])
self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))
self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))
self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))
def test_find_sets(self):
"""Test solver (Exercise 3b)."""
cards = numpy.array([[1,1,1,2,0],
[0,1,2,2,2],
[0,1,2,2,2],
[0,1,2,2,2]])
set_indices = set_solver.find_sets(cards)
self.assertEqual(len(set_indices), 2)
self.assertTrue((0, 1, 2) in set_indices)
self.assertTrue((2, 3, 4) in set_indices)
if __name__ == '__main__':
unittest.main()
|
8,551 | 39ffb85fb10882041c2c9a81d796e7ff9df7d930 | # SPDX-FileCopyrightText: 2013 The glucometerutils Authors
#
# SPDX-License-Identifier: Unlicense
|
8,552 | 77b9b111cfb4d0b54e14b2aab81b7b05fd6bbccd | s = 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv'
sa = 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up'
ans = {}
for i in range(len(s)):
    ans[s[i]] = sa[i]
S = set([])
for k in ans:
S.add(k)
#for w in range(26):
# if chr(w+97) not in S:
# print chr(w+97)
# q and z not in input so they must map to each other
ans['q'] = 'z'
ans['z'] = 'q'
f = open('A-small-attempt0.in', 'r')
L = f.readlines()
tc = 0
for i in range(1, len(L)):
s = L[i]
S = ''
for j in range(len(s)):
if s[j] == '\n':
continue
S += ans[s[j]]
tc += 1
print('Case #',tc,': ',S,sep='')
|
8,553 | b4b2307897f64bb30cad2fbaaa1b320ae2aa7456 | # Generated by Django 2.1.5 on 2019-03-12 18:07
from django.db import migrations
def associate_experiments_to_organisms(apps, schema_editor):
"""Creates missing associations between experiments and organisms.
Based off of:
https://simpleisbetterthancomplex.com/tutorial/2017/09/26/how-to-create-django-data-migrations.html
We can't import the Experiment model directly as it may be a newer
version than this migration expects. We use the historical version.
"""
# I don't think this is truly necessary in this particular
# migration, but it seems to be a best practice for Django
# migrations and a lil extra safety never hurts.
Experiment = apps.get_model("data_refinery_common", "Experiment")
ExperimentOrganismAssociation = apps.get_model(
"data_refinery_common", "ExperimentOrganismAssociation"
)
for experiment in Experiment.objects.all():
organisms = experiment.organisms.all()
samples = experiment.samples.distinct("organism").exclude(
organism_id__in=organisms.values("id")
)
for sample in samples:
ExperimentOrganismAssociation.objects.get_or_create(
experiment=experiment, organism=sample.organism
)
# This is the same as experiment.update_organism_names but we
# can't use that method because of the apps.get_model
# weirdness. It seems to be this issue:
# https://stackoverflow.com/questions/44907306/django-unavailable-field-of-model-while-doing-migration
# The method is simple enough that I'd rather duplicate it
# than disregard the warning about newer versions.
experiment.organism_names = list(
set([organism.name for organism in experiment.organisms.all()])
)
experiment.save()
class Migration(migrations.Migration):
dependencies = [
("data_refinery_common", "0015_dataset_email_ccdl_ok"),
]
operations = [
migrations.RunPython(associate_experiments_to_organisms),
]
|
8,554 | 9fa3a7c57b311a47e67de73bf6083f1f151d73f4 | from flask import Flask, render_template, request
import random, requests
app = Flask(__name__)
@app.route('/')
def hello():
# return 'Hello World'
return render_template('index.html')
# Flask looks for the file in the 'templates' folder under the root directory
@app.route('/ace')
def ace():
return '불기둥!'
@app.route('/html')
def html():
return '<h1> 태그 사용할 수 있어요! <h1>'
@app.route('/html_multiline')
def html_multiline():
    return '''
    <h1> HTML can span </h1>
    <p> multiple lines too! </p>
    '''

# dynamic routing
@app.route('/greeting/<string:name>')
def greeting(name):
return render_template('index.html', html_name=name)
# Build a cube page that returns the cube of a number!
# It takes a number from the user and shows the cubed result
@app.route('/cube/<int:number>')
def cube(number):
result = number ** 3
return render_template('cube.html',number=number, result=result)
@app.route('/movies')
def movies():
movie_list = ['82년생김지영', '조커', '엔드게임', '궁예']
return render_template('movies.html', movies=movie_list)
# ping: serves the form page that takes the user's input
@app.route('/ping')
def ping():
return render_template('ping.html')
# pong: receives the user's form data and processes it
@app.route('/pong')
def pong():
user_name = request.args.get('user_name')
return render_template('pong.html', user_name=user_name)
# fake naver, google
@app.route('/naver')
def naver():
return render_template('naver.html')
# Form page that takes a name from the user!
@app.route('/vonvon')
def vonvon():
return render_template('vonvon.html')
# (Response) logic that builds the fun facts to return, based on the submitted name!
@app.route('/godmademe')
def godmademe():
    # 1. Get the data the user entered.
name = request.args.get('user_name')
    # 2. Build lists of fun traits to show the user.
first_list = ['잘생김','못생김','개성','키','몸무게','노안','동안','오징어']
second_list = ['게으름','성실함','근면함','낭비벽','신중함','덜렁거림','귀찮음']
third_list = ['식욕','똘끼','허세','우울함','가벼움']
    # 3. Pick one item at random from each list.
first = random.choice(first_list)
second = random.choice(second_list)
third = random.choice(third_list)
    # 4. Put the processed info into the template and show it to the user.
return render_template('godmademe.html', name=name, first=first, second=second, third=third)
# 1. Take arbitrary text from the user and return it converted to ASCII art.
# 2. The ASCII-art font is picked at random for the conversion.
@app.route('/catch')
def catch():
return render_template('catch.html')
@app.route('/result')
def result():
    # 1. Get the form data the user submitted.
word = request.args.get("word")
    # 2. Send a request to the ARTII API and store the response (the font list).
fonts = requests.get('http://artii.herokuapp.com/fonts_list').text
    # 3. Turn the fetched fonts into a list, split on newlines (\n).
fonts = fonts.split('\n')
    # 4. Pick one font at random.
font = random.choice(fonts)
    # 5. Call the API with the user's word and the randomly chosen font.
result = requests.get(f'http://artii.herokuapp.com/make?text={word}&font={font}').text
    # 6. Return the final result to the user.
return render_template('result.html', result=result)
# Code that must go at the end
# Enable debug mode so the server reloads automatically
if __name__ == '__main__':
app.run(debug=True) |
8,555 | f11ede752df7d9aff672eee4e230b109fcbf987b | # coding: gb18030
from setuptools import setup
setup(
name="qlquery",
version="1.0",
license="MIT",
packages=['qlquery'],
install_requires=[
'my-fake-useragent',
'requests',
'beautifulsoup4'
],
zip_safe=False
) |
8,556 | 5cf73e003b744b438c0db67ab39fb10a3f879f2f | import numpy as np
import math
class KMeans(object):
def __init__(self, data, option):
self.data = data
self.membership = None
self.centroids = None
self.option = option
self.temp_data = None
    def fit(self, K):
        if self.option == 2:
            self.data[:, 2] = np.log(self.data[:, 2])
            self.data[:, 3] = np.log(self.data[:, 3])
elif self.option == 3:
for j in range(self.data.shape[1]):
self.data[:, j] -= np.mean(self.data[:, j])
self.data[:, j] /= np.std(self.data[:, j])
elif self.option == 5:
self.temp_data = self.data
np.random.shuffle(self.data)
self.data = self.data[0:int(self.data.shape[0]*.06), :]
centroids = self.data[np.random.choice(self.data.shape[0], K, replace=False), :]
membership = np.zeros(self.data.shape[0]).astype(int)
centroids_temp = None
while not np.array_equal(centroids_temp, centroids):
centroids_temp = np.copy(centroids)
for i, d in enumerate(self.data):
if self.option == 4:
membership[i] = np.argmin(np.array([np.abs(d - c).sum() for c in centroids]))
else:
membership[i] = np.argmin(np.array([np.sqrt(((d - c) ** 2).sum()) for c in centroids]))
for i in range(centroids.shape[0]):
centroids[i] = self.data[membership == i].mean(axis=0)
self.centroids = np.copy(centroids)
self.membership = np.copy(membership)
if self.option == 5:
self.data = self.temp_data
self.membership = np.zeros(self.data.shape[0]).astype(int)
for i, d in enumerate(self.data):
self.membership[i] = np.argmin(np.array([np.sqrt(((d - c) ** 2).sum()) for c in centroids]))
def predict(self):
error = 0
for i, c in enumerate(self.centroids):
subset = self.data[self.membership == i]
            for d in subset:
error += ((d - c) ** 2).sum()
return error
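
# Usage sketch (synthetic data; 'option' selects the preprocessing/distance
# variant wired into fit()):
#
#   data = np.random.rand(300, 4)
#   km = KMeans(data, option=1)
#   km.fit(K=3)
#   print(km.predict())  # within-cluster sum of squared errors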
|
8,557 | c9df53ac06b8bb106d73825d60fa885c06385e95 | import contextlib
import logging
import os
import pwd
import sys
from typing import Iterable
from sqlalchemy import Table, exists, null, select
from sqlalchemy.engine import Engine
from sqlalchemy.exc import DBAPIError
from sqlalchemy.pool import NullPool
from hades import constants
from hades.common import db
from hades.common.cli import (
ArgumentParser, parser as common_parser, setup_cli_logging,
)
from hades.common.privileges import dropped_privileges
from hades.config.loader import load_config
logger = logging.getLogger(__package__)
def check_database(engine: Engine, user_name: pwd.struct_passwd,
tables: Iterable[Table]):
logger.info("Checking database access as user %s", user_name)
try:
conn = engine.connect()
except DBAPIError as e:
logger.critical("Could not connect to database as %s: %s",
user_name, e)
raise
with contextlib.closing(conn):
for table in tables:
try:
check_table(conn, table)
except DBAPIError as e:
logger.critical("Query check for table %s as user %s failed: "
"%s", table.name, user_name, e)
raise
def check_table(conn, table):
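    # Cheap access probe: runs SELECT EXISTS (SELECT NULL FROM <table>), which
    # exercises the table's permissions without transferring any row data.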
conn.execute(select([exists(select([null()]).select_from(table))])).scalar()
def main():
parser = ArgumentParser(parents=[common_parser])
args = parser.parse_args()
setup_cli_logging(parser.prog, args)
config = load_config(args.config, runtime_checks=True)
try:
engine = db.create_engine(config, poolclass=NullPool)
agent_pwd = pwd.getpwnam(constants.AGENT_USER)
with dropped_privileges(agent_pwd):
check_database(engine, agent_pwd.pw_name,
(db.radacct, db.radpostauth))
portal_pwd = pwd.getpwnam(constants.PORTAL_USER)
with dropped_privileges(portal_pwd):
check_database(engine, portal_pwd.pw_name,
(db.radacct, db.radpostauth, db.radusergroup))
radius_pwd = pwd.getpwnam(constants.RADIUS_USER)
with dropped_privileges(radius_pwd):
check_database(engine, radius_pwd.pw_name,
(db.radacct, db.radgroupcheck, db.radgroupreply,
db.radpostauth, db.radreply, db.radusergroup))
except DBAPIError:
return os.EX_TEMPFAIL
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
|
8,558 | 0a5ea7ad0ee34c8a3f0299908c61fa0a09139d2f | #Diagonal Traverse
#Given a matrix of M x N elements (M rows, N columns), return all elements
#of the matrix in diagonal order as shown in the image below.
#Input:
#[
# [ 1, 2, 3 ],
# [ 4, 5, 6 ],
# [ 7, 8, 9 ]
#]
#Output: [1,2,4,7,5,3,6,8,9]
#Note:
# The total number of elements of the given matrix will not exceed 10,000.
class Solution(object):
def findDiagonalOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
r = []
if(not matrix) : return r
m,n = map(len, [matrix, matrix and matrix[0]])
for d in range(m+n-1) : #0,1,2,3,4
if(d%2 == 1) :
#change direction
for i in range(max(0, d-n+1), min(d+1,m)) :
r += [ matrix[i][d-i] ] #
else :
for i in range(max(0, d-m+1), min(d+1,n)) :
r += [ matrix[d-i][i] ] #
return r
#Deque & Dictionary - O(MN)
#1. Property for the diagonals is that: row + col = constant. This constant varies from 0 to M+N-2.
#2. The direction of the diagonal is top to bottom or bottom to top. The direction depends if constant
#is even or odd.
#3.Iterate the matrix. Maintain a dictionary with key as integer and value as a deque.
#4.The key will be row+col and deque will have all elements which have the same row +col. Depending
#5. whether row+col is even or odd, we will either append or appendleft.
from collections import deque, defaultdict
class Solution(object):
def findDiagonalOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
if (not matrix) :
return []
M, N = map(len, [matrix, matrix[0]])
result = defaultdict(deque)
max_sum = M+N-2
for i in range(M):
for j in range(N):
s = i+j
if s&1:
result[s].append(matrix[i][j])
else:
result[s].appendleft(matrix[i][j])
output = []
for s in range(max_sum+1):
output.extend(result[s])
return output
#Within a diagonal, row+col is the same, so we first sort index pairs by row+col, and within a
#diagonal sort them by row or by column index, depending on whether row+col is odd or even.
def findDiagonalOrder(self, matrix):
l = [(i, j) for i in range(len(matrix)) for j in range(len(matrix[0]))]
l.sort(key=lambda x: sum(x) * 100000 - x[sum(x)%2])
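        # sort key: the diagonal index sum(x) dominates (scaled by 100000); then
        # subtracting the row (even diagonals) or the column (odd diagonals)
        # alternates the traversal direction within each diagonal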
return [matrix[x][y] for x, y in l]
def findDiagonalOrder(self, matrix):
l = [[i,j] for i in range(len(matrix)) for j in range(len(matrix[0]))]
l.sort(key=lambda x: float(x[0]+x[1])-float(x[(x[0]+x[1])%2])*0.00000001 )
return [matrix[x][y] for [x,y] in l]
#annotate the matrix entries with coordinate information so that we can just sort them by that.
def findDiagonalOrder(self, matrix):
entries = [(i+j, (j, i)[(i^j)&1], val)
for i, row in enumerate(matrix)
for j, val in enumerate(row)]
return [e[2] for e in sorted(entries)]
#just walk over the matrix in the desired order. My d is the diagonal number, i.e., i+j. So I can
#compute j as d-i.
def findDiagonalOrder(self, matrix):
m, n = len(matrix), len(matrix and matrix[0])
return [matrix[i][d-i]
for d in range(m+n-1)
for i in range(max(0, d-n+1), min(d+1, m))[::d%2*2-1]]
#Why the range range(max(0, d-n+1), min(d+1, m))? Well I need 0 <= i < m and 0 <= j < n. As said
#above, j is d-i, so I have 0 <= d-i < n. Isolating i gives me i <= d and i > d-n. Since we're
#dealing with integers, they're equivalent to i < d+1 and i >= d-n+1. So my i needs to be in the
#range [0, m) as well as in the range [d-n+1, d+1). And my range is simply the intersection of those
#two ranges.
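#Worked example with the 3x3 matrix above (m = n = 3): for d = 2 the bounds are
#max(0, 2-3+1) = 0 and min(2+1, 3) = 3, so i runs over 0,1,2 and j = d-i visits
#(0,2),(1,1),(2,0). Since d is even, the step d%2*2-1 = -1 reverses this to
#(2,0),(1,1),(0,2), giving 7,5,3 as in the expected output.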
#Simple two step approach:
#1- Group numbers according to diagonals. Sum of row+col in same diagonal is same.
#2- Reverse numbers in odd diagonals before adding numbers to result list.
#
def findDiagonalOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
result = [ ]
        dd = defaultdict(list)
if not matrix: return result
# Step 1: Numbers are grouped by the diagonals.
# Numbers in same diagonal have same value of row+col
for i in range(0, len(matrix)):
for j in range(0, len(matrix[0])):
dd[i+j+1].append(matrix[i][j]) # starting indices from 1, hence i+j+1.
# Step 2: Place diagonals in the result list.
# But remember to reverse numbers in odd diagonals.
        for k in sorted(dd):
            if k % 2 == 1:
                result += dd[k][::-1]
            else:
                result += dd[k]
        return result
|
8,559 | addab37cb23abead2d9f77a65336cd6026c52c68 | #coding=utf-8
'''
find words and count
By @liuxingpuu
'''
import re
fin = open("example", "r")
fout = open("result.txt", "w")
text = fin.read()
reObj = re.compile(r"\b([a-zA-Z]+)\b")
words = reObj.findall(text)
word_dict = {}
for word in words:
    key = word.lower()
    word_dict[key] = word_dict.get(key, 0) + 1
for (word, number) in word_dict.items():
    fout.write(word + ":%d\n" % number)
fin.close()
fout.close()
8,560 | 93418e554893db4eb888396e8d6f60a8364d9ee3 | #coding: utf-8
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^douban/books$', views.BookList.as_view()),
)
|
8,561 | 4c4275b96d3eceb5ff89a746c68d7f8736a1c2a5 | staff = ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА', 'токарь высшего разряда нИКОЛАй', 'директор аэлита']
def employee_name(name):
getting_a_name = name.split()
name_staff = getting_a_name[-1]
name_staff = name_staff.capitalize()
return name_staff
for person in staff:
    name_for_output = employee_name(person)
    print(f'Привет, {name_for_output}!')
8,562 | 631904ae96584bd19756f9335175a419397ac252 | from os import environ
import boto3
from flask import Flask, redirect
from flask_sqlalchemy import SQLAlchemy
from json import load
from pathlib import Path
path = Path(__file__).parent
db = SQLAlchemy()
with open(path / "../schemas.json", "r") as fp:
schemas = load(fp)
with open(path / "../config.json", "r") as fp:
config = load(fp)
app = Flask(__name__, template_folder="templates")
app.config["SECRET_KEY"] = "3205fc85cd004116bfe218f14192e49a"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///app.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SWAGGER_UI_OAUTH_CLIENT_ID"] = "documentation"
domain = app.config.get("SERVER_NAME")
port = environ.get("PORT", config["default_port"])
redirect_uri = environ.get("REDIRECT_URI", config["redirect_uri"])
client_uri = environ.get("CLIENT_URI", config["client_uri"])
client_s3 = boto3.resource("s3")
@app.route("/")
def redirect_to_swagger():
return redirect("/swagger", 302)
|
8,563 | e41b5ee0dff30cca51593e737420889bce8f419f | """
Simple python script to help learn basic socket API
"""
import sys, socket
HOSTNAME = sys.argv[-2]
PORT = sys.argv[-1]
options = ( HOSTNAME, int(PORT) )
print options
print 'creating socket...'
sock = socket.socket()
print 'socket created'
print 'connecting...'
sock.connect(options)
print 'connected'
print 'sending message...'
sock.send('hello')
print 'sent message'
print 'closing...'
sock.close()
print 'closed' |
8,564 | bf764457e6af25d2d9406b18af51f63b36ab823a | import cv2 as cv
import numpy as np
import sys
from meio_tom_lib import *
imgname = sys.argv[1]
imgpath = "img/" + imgname
try:
img = cv.imread(imgpath)
newimg1 = jarvis_judice_ninke_1(img)*255
newimg2 = jarvis_judice_ninke_2(img)*255
cv.imshow("Imagem original",img)
cv.imshow("Jarvis, Judice e Ninke metodo 1",newimg1)
cv.imshow("Jarvis, Judice e Ninke metodo 2",newimg2)
print("")
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname,newimg1)
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname,newimg2)
print("Resultados salvos em:")
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname)
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname)
cv.waitKey(0)
cv.destroyAllWindows()
except:
print("Erro") |
8,565 | db31a69c57f773a79e5eaa8b3443b0366fd74861 | import random
from typing import List
from faker import Faker
from call_center.src.actors.agent import InsuranceAgent
from call_center.src.actors.consumer import Consumer
from call_center.src.common.person import (
AGE,
AVAILABLE,
INSURANCE_OPERATION,
PHONE_NUMBER,
INCOME,
CARS_COUNT,
KIDS_COUNT,
STATE,
RENT,
BUY,
)
from call_center.src.common.singleton_meta import SingletonMeta
CONSUMER_COUNT = 1000
AGENTS_COUNT = 20
FAKE = Faker("en_US")
class ActorsCreator(metaclass=SingletonMeta):
"""
Singleton class which acts as a container for both Agents and Consumers.
In a real-world scenario, we would have a database containing both actors/consumers.
This is a replacement, for the sake of example.
"""
def __init__(self):
self.consumers = ActorsCreator.create_consumers()
self.agents = ActorsCreator.create_agents()
def __del__(self):
self.stop_all_agents()
@staticmethod
def create_consumers() -> List[Consumer]:
"""
Create the consumers. Consumers are created with randomized attributes.
:return: A new list of Consumer.
"""
consumers = []
for consumer in range(CONSUMER_COUNT):
consumers.append(
Consumer(
{
AGE: FAKE.random_int(min=0, max=120),
STATE: FAKE.state(),
KIDS_COUNT: FAKE.random_int(min=0, max=12),
CARS_COUNT: FAKE.random_int(min=0, max=10),
INSURANCE_OPERATION: random.choice((RENT, BUY)),
INCOME: FAKE.random_int(min=0, max=99999999999),
PHONE_NUMBER: FAKE.phone_number(),
AVAILABLE: True,
}
)
)
return consumers
@staticmethod
def create_agents() -> List[InsuranceAgent]:
"""
Create the InsuranceAgents. Consumers are created with randomized attributes.
:return: A new list of InsuranceAgent.
"""
agents = []
for consumer in range(AGENTS_COUNT):
insurance_agent = InsuranceAgent(
personal_info={
AGE: FAKE.random_int(min=0, max=120),
STATE: FAKE.state(),
KIDS_COUNT: FAKE.random_int(min=0, max=12),
CARS_COUNT: FAKE.random_int(min=0, max=10),
INSURANCE_OPERATION: random.choice((RENT, BUY)),
INCOME: FAKE.random_int(min=0, max=1000000),
PHONE_NUMBER: FAKE.phone_number(),
AVAILABLE: True,
},
call_acceptance_criteria=[
{
"person_attribute": AGE,
"comparison_operator": random.choice(("<", ">")),
"value": FAKE.random_int(
min=0,
max=120,
),
},
{
"person_attribute": INCOME,
"comparison_operator": random.choice(("<", ">")),
"value": FAKE.random_int(
min=0,
max=1000000,
),
},
{
"person_attribute": KIDS_COUNT,
"comparison_operator": random.choice(("<", ">")),
"value": FAKE.random_int(
min=0,
max=12,
),
},
{
"person_attribute": CARS_COUNT,
"comparison_operator": random.choice(("<", ">")),
"value": FAKE.random_int(
min=0,
max=12,
),
},
{
"person_attribute": INSURANCE_OPERATION,
"comparison_operator": random.choice(("<", ">")),
"value": random.choice((RENT, BUY)),
},
],
)
agents.append(insurance_agent)
return agents
def stop_all_agents(self):
"""
Gracefully stop all agents threads on self deletion.
To find more on agents' threads, see agent.py
:return:
"""
for agent in self.agents:
if agent.available:
agent.stop_activity()
|
8,566 | 2332783c96b24caa383bf47d82384e1c40a48e94 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DashboardArgs', 'Dashboard']
@pulumi.input_type
class DashboardArgs:
def __init__(__self__, *,
dashboard_definition: pulumi.Input[str],
dashboard_description: pulumi.Input[str],
dashboard_name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]]] = None):
"""
The set of arguments for constructing a Dashboard resource.
:param pulumi.Input[str] dashboard_definition: The dashboard definition specified in a JSON literal.
:param pulumi.Input[str] dashboard_description: A description for the dashboard.
:param pulumi.Input[str] dashboard_name: A friendly name for the dashboard.
:param pulumi.Input[str] project_id: The ID of the project in which to create the dashboard.
:param pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]] tags: A list of key-value pairs that contain metadata for the dashboard.
"""
pulumi.set(__self__, "dashboard_definition", dashboard_definition)
pulumi.set(__self__, "dashboard_description", dashboard_description)
if dashboard_name is not None:
pulumi.set(__self__, "dashboard_name", dashboard_name)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="dashboardDefinition")
def dashboard_definition(self) -> pulumi.Input[str]:
"""
The dashboard definition specified in a JSON literal.
"""
return pulumi.get(self, "dashboard_definition")
@dashboard_definition.setter
def dashboard_definition(self, value: pulumi.Input[str]):
pulumi.set(self, "dashboard_definition", value)
@property
@pulumi.getter(name="dashboardDescription")
def dashboard_description(self) -> pulumi.Input[str]:
"""
A description for the dashboard.
"""
return pulumi.get(self, "dashboard_description")
@dashboard_description.setter
def dashboard_description(self, value: pulumi.Input[str]):
pulumi.set(self, "dashboard_description", value)
@property
@pulumi.getter(name="dashboardName")
def dashboard_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the dashboard.
"""
return pulumi.get(self, "dashboard_name")
@dashboard_name.setter
def dashboard_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dashboard_name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which to create the dashboard.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]]]:
"""
A list of key-value pairs that contain metadata for the dashboard.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]]]):
pulumi.set(self, "tags", value)
class Dashboard(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dashboard_definition: Optional[pulumi.Input[str]] = None,
dashboard_description: Optional[pulumi.Input[str]] = None,
dashboard_name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardTagArgs']]]]] = None,
__props__=None):
"""
Resource schema for AWS::IoTSiteWise::Dashboard
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dashboard_definition: The dashboard definition specified in a JSON literal.
:param pulumi.Input[str] dashboard_description: A description for the dashboard.
:param pulumi.Input[str] dashboard_name: A friendly name for the dashboard.
:param pulumi.Input[str] project_id: The ID of the project in which to create the dashboard.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardTagArgs']]]] tags: A list of key-value pairs that contain metadata for the dashboard.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DashboardArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource schema for AWS::IoTSiteWise::Dashboard
:param str resource_name: The name of the resource.
:param DashboardArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DashboardArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dashboard_definition: Optional[pulumi.Input[str]] = None,
dashboard_description: Optional[pulumi.Input[str]] = None,
dashboard_name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardTagArgs']]]]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DashboardArgs.__new__(DashboardArgs)
if dashboard_definition is None and not opts.urn:
raise TypeError("Missing required property 'dashboard_definition'")
__props__.__dict__["dashboard_definition"] = dashboard_definition
if dashboard_description is None and not opts.urn:
raise TypeError("Missing required property 'dashboard_description'")
__props__.__dict__["dashboard_description"] = dashboard_description
__props__.__dict__["dashboard_name"] = dashboard_name
__props__.__dict__["project_id"] = project_id
__props__.__dict__["tags"] = tags
__props__.__dict__["dashboard_arn"] = None
__props__.__dict__["dashboard_id"] = None
super(Dashboard, __self__).__init__(
'aws-native:iotsitewise:Dashboard',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Dashboard':
"""
Get an existing Dashboard resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DashboardArgs.__new__(DashboardArgs)
__props__.__dict__["dashboard_arn"] = None
__props__.__dict__["dashboard_definition"] = None
__props__.__dict__["dashboard_description"] = None
__props__.__dict__["dashboard_id"] = None
__props__.__dict__["dashboard_name"] = None
__props__.__dict__["project_id"] = None
__props__.__dict__["tags"] = None
return Dashboard(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dashboardArn")
def dashboard_arn(self) -> pulumi.Output[str]:
"""
The ARN of the dashboard.
"""
return pulumi.get(self, "dashboard_arn")
@property
@pulumi.getter(name="dashboardDefinition")
def dashboard_definition(self) -> pulumi.Output[str]:
"""
The dashboard definition specified in a JSON literal.
"""
return pulumi.get(self, "dashboard_definition")
@property
@pulumi.getter(name="dashboardDescription")
def dashboard_description(self) -> pulumi.Output[str]:
"""
A description for the dashboard.
"""
return pulumi.get(self, "dashboard_description")
@property
@pulumi.getter(name="dashboardId")
def dashboard_id(self) -> pulumi.Output[str]:
"""
The ID of the dashboard.
"""
return pulumi.get(self, "dashboard_id")
@property
@pulumi.getter(name="dashboardName")
def dashboard_name(self) -> pulumi.Output[str]:
"""
A friendly name for the dashboard.
"""
return pulumi.get(self, "dashboard_name")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the project in which to create the dashboard.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.DashboardTag']]]:
"""
A list of key-value pairs that contain metadata for the dashboard.
"""
return pulumi.get(self, "tags")
|
8,567 | 2b3f8b1ac4735785683c00f6e6ced85d201de53f | from app01 import models
from rest_framework.views import APIView
# from api.utils.response import BaseResponse
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from api.serializers.course import DegreeCourseSerializer
# Query all degree courses
class DegreeCourseView(APIView):
def get(self,request,*args,**kwargs):
response = {'code':100,'data':None,'error':None}
try:
            # fetch the data from the database
degreecourse_list = models.DegreeCourse.objects.all()
            # pagination
# page = PageNumberPagination()
# course_list = page.paginate_queryset(queryset,request,self)
            # serialize the paginated results
ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)
response['data'] = ser_obj.data
except Exception as e:
response['error'] = '获取数据失败'
return Response(response)
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
response['error'] = '获取数据失败'
return Response(response)
|
8,568 | 065354d2a8fd8a75e16bf85f624b12641377029a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : 河北雪域网络科技有限公司 A.Star
# @contact: astar@snowland.ltd
# @site:
# @file: img_to_sketch.py
# @time: 2018/8/6 1:15
# @Software: PyCharm
from skimage.color import rgb2grey
import numpy as np
def sketch(img, threshold=15):
"""
素描画生成
param img: Image实例
param threshold: 介于0到100
:return:
"""
if threshold < 0:
threshold = 0
if threshold > 100:
threshold = 100
if len(img.shape) == 3:
img = rgb2grey(img)
m, n = img.shape
diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])
img = np.zeros((m - 1, n - 1))
img[diff < threshold/255] = 1
return img
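# Example usage (illustrative; hypothetical file name, skimage.io assumed available):
#   from skimage.io import imread
#   edges = sketch(imread('photo.jpg'), threshold=15)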
|
8,569 | ce8879dae6c7585a727e35f588722bc28045256a | # Ex 1
numbers = [10,20,30, 9,-12]
print("The sum of 'numbers' is:",sum(numbers))
# Ex 2
print("The largest of 'numbers' is:",max(numbers))
# Ex 3
print("The smallest of 'numbers' is:",min(numbers))
# Ex 4
for i in numbers:
if (i % 2 == 0):
print(i,"is even.")
# Ex 5
for i in numbers:
if (i > 0):
print(i,"is positive.")
# Ex 6
posNums = []
for i in numbers:
if (i > 0):
posNums.append(i)
print(posNums)
# Ex 7
times5 = []
for x in numbers:
times5.append(x*5)
print(times5)
# Ex 8
a=[1,3,6]
b=[2,4,6]
ab = []
for i in range(0, len(a)):
ab.append(a[i]*b[i])
print(ab)
# Ex 9 and 10
m=[[1,2],[7,8],[3,4]]
n=[[3,4],[5,6],[3,4]]
m_n = []
for h in range(len(m)):
row = []
for j in range(len(m[h])):
row.append(m[h][j] + n[h][j])
m_n.append(row)
print(m_n)
# Ex 11
dupList = ["x","y","z","y",23,0.5,23]
noDup = []
for z in dupList:
if(z not in noDup):
noDup.append(z)
print(noDup) |
8,570 | 797e7c1b3e8b41a167bfbedfb6a9449e6426ba22 | # -*- coding: utf-8 -*-
{
'name': 'EDC Analytic Entry',
'depends': [
'stock_account',
'purchase_stock',
'account_accountant',
],
"description": """
""",
'author': "Ejaftech",
'data': [
'views/account_move_view.xml',
],
}
|
8,571 | a52edeec62a6849bda7e5a5481fb6e3d7d9a4c6a | """Utilties to access a column and one field of a column if the column is composite."""
from typing import TYPE_CHECKING, Optional
from greenplumpython.db import Database
from greenplumpython.expr import Expr
from greenplumpython.type import DataType
if TYPE_CHECKING:
from greenplumpython.dataframe import DataFrame
class ColumnField(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a field of a :class:`~col.Column` of composite type. This
type allows to access to the fields in a dict-like manner.
"""
def __init__(
self,
column: "Column",
field_name: str,
) -> None:
# noqa
""":meta private:"""
self._field_name = field_name
self._column = column
super().__init__(column._dataframe)
def _serialize(self, db: Optional[Database] = None) -> str:
return (
f'({self._column._serialize(db=db)})."{self._field_name}"'
if self._field_name != "*"
else f"({self._column._serialize(db=db)}).*"
)
class Column(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a Python object :class:`~col.Column`.
"""
def __init__(self, name: str, dataframe: "DataFrame") -> None:
# noqa: D400
""":meta private:"""
super().__init__(dataframe=dataframe)
self._name = name
self._type: Optional[DataType] = None # TODO: Add type inference
def _serialize(self, db: Optional[Database] = None) -> str:
assert self._dataframe is not None
# Quote both dataframe name and column name to avoid SQL injection.
return (
f'{self._dataframe._name}."{self._name}"'
if self._name != "*"
else f"{self._dataframe._name}.*"
)
def __getitem__(self, field_name: str) -> ColumnField:
"""
Get access to a field of the current column.
Args:
field_name: str
Returns:
Field of the column with the specified name.
"""
return ColumnField(self, field_name=field_name)
def _bind(
self,
dataframe: Optional["DataFrame"] = None,
db: Optional[Database] = None,
):
# noqa D400
""":meta private:"""
c = Column(
self._name,
self._dataframe,
)
c._db = db if db is not None else dataframe._db if dataframe is not None else self._db
assert c._db is not None
return c
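# Illustrative usage (hypothetical dataframe/column names, based only on the accessors above):
#   addr = df["address"]     # Column
#   city = addr["city"]      # ColumnField via Column.__getitem__
#   city._serialize(db)      # -> '(t."address")."city"' for a dataframe named t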
|
8,572 | 51a8b963047215bf864eb4a3e62beb5741dfbafe | class Graph():
def __init__(self, nvertices):
self.N = nvertices
self.graph = [[0 for column in range(nvertices)]
for row in range(nvertices)]
self.V = ['0' for column in range(nvertices)]
    def nameVertex(self):
        for i in range(self.N):
            print("What is the label of vertex %i?" % (i))
            self.V[i] = input()
    def setEdge(self, u, v, w):
        self.graph[u][v] = w
        self.graph[v][u] = w
    def loadEdges(self):
        for i in range(self.N):
            for j in range(self.N):
                if i > j:
                    print("What is the weight between %s and %s?" %
                          (self.V[i], self.V[j]))
                    # input() returns a string, and %c breaks on multi-character
                    # labels, so use %s and store the weight as a number
                    self.setEdge(i, j, float(input()))
print('How many vertices?')
n = int(input())
g = Graph(n)
g1 = Graph(n-1)
print(g.graph)
g.nameVertex()
g.loadEdges()
print(g.graph)
|
8,573 | db46fbfb1acd855eebb5c9f557d70038b84e812d | import surname_common as sc
from sklearn.utils import shuffle
import glob
import os
import re
import pprint
import pandas as pd
import unicodedata
import string
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS
)
def load_surnames():
df_surnames = pd.DataFrame()
list_ = []
for filename in glob.glob('data/names/*.txt'):
m = re.match(r'(.*)\/(.*?)\.txt', filename)
category = m.group(2)
df = pd.read_csv(filename,names=['surname'])
df['category'] = category
list_.append(df)
df_surnames = pd.concat(list_)
df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x))
series_categories = df_surnames.groupby(['category'])['category'].count()
df_categories = pd.DataFrame({
'category':series_categories.index,
'freq':series_categories.tolist(),
'index':range(0,len(series_categories))
})
return df_surnames, df_categories
def save_df_surnames_as_pickle():
df_surnames, df_categories = load_surnames()
# train test split
df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)
train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO)
train = df[0:train_cnt]
test = df[train_cnt+1:]
# save as pickle
df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2')
df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2')
train.to_pickle('data/pickles/train.pickle',compression='bz2')
test.to_pickle('data/pickles/test.pickle',compression='bz2')
# train test stat
t1 = train.groupby(['category']).count().drop(['normalized'],axis=1)
t2 = test.groupby(['category']).count().drop(['normalized'],axis=1)
t1.columns = ['surname_train']
t2.columns = ['surname_test']
tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))
tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test'])
tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2')
return tt |
8,574 | c20a414f7f96a96f6e458fc27e5d2c7ac7ab05cf | def ispalindrome(s):
if len(s) <= 1:
return True
elif s[0] != s[-1]:
return False
else:
return ispalindrome(s[1:-1]) |
8,575 | 866ff68744a16158b7917ca6defc35440208ae71 | from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from polls.models import Poll
from .serializers import PollSerializer
# class PollView(APIView):
#
# def get(self, request):
# serializer = PollSerializer(Poll.objects.all(), many=True)
# response = {"polls": serializer.data}
# return Response(response, status=status.HTTP_200_OK)
#
# def post(self, request, format=None):
# data = request.data
# serializer = PollSerializer(data=data)
# if serializer.is_valid():
# poll = Poll(**data)
# poll.save()
# response = serializer.data
# return Response(response, status=status.HTTP_200_OK)
#
#
def index(request):
data = {}
return render(request,"polls/index.html",data)
#
# def show(request):
# data = {}
# p = Poll.objects.all()
# data["polls"] = p
# return render(request, "polls/show.html", data)
def show(request):
# data = {}
# p = Poll.objects.all()
# data["polls"] = p
return render(request, "polls/show.html")
def searchShow(request):
    if 'search' in request.GET:
        search_string = request.GET['search']
        context = {
            "search_string": search_string,
        }
        return render(request, "polls/show.html", context)
    # always return a response; the original fell through and returned None
    return render(request, "polls/show.html") |
8,576 | bdda42665acfefccad45a2b49f5436a186140579 | class people:
def __init__(self, name):
self.name = name
self.purchase_descrip = []
self.purchase_price_descrip = []
self.purchases = []
self.total_spent = 0
self.debt = 0
self.debt_temp = 0
self.pay = []
self.pay_out = []
self.pay_who = []
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
def get_purchase(self):
return self.purchases
def get_description(self):
return self.purchase_descrip
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent+float(items)
def get_total(self):
return self.total_spent
def get_name(self):
return self.name
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append("$"+str(price)+" "+description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent)-cost_per_person
def get_debt(self):
return self.debt
def add_payment(self, payment):
self.pay.append(payment)
def get_pay(self):
return self.pay
def add_pay_who(self, who_to_pay):
self.pay_who.append(who_to_pay)
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
def update_temp_debt(self, payment):
self.debt_temp = self.debt_temp+payment*-1
    def pay_temp_debt(self, payment):
        # the original computed the difference without assigning it back
        self.debt_temp -= payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
def round_purchases(self):
for x in range(0, len(self.purchases)):
self.purchases[x] = round(float(self.purchases[x]), 2)
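# Illustrative usage (hypothetical values):
#   p = people("Alice")
#   p.add_purchase(12.50); p.add_description("pizza")
#   p.set_total()      # total_spent == 12.5
#   p.set_debt(10.0)   # debt == 12.5 - 10.0 == 2.5 (spent more than their share)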
|
8,577 | 1f86fe72c90c8457715a2f400dae8d355a9a97cf | from HiddenLayer import HiddenLayer
from Vector import Vector
import IO
import Loss
import Utils
import Activation
import Backpropagation
import Rate
# As a test, let's simulate the OR-gate with a single perceptron
""" training = []
training.append(Vector(2, arr=[1, 1]))
training.append(Vector(2, arr=[1, 0]))
training.append(Vector(2, arr=[0, 1]))
training.append(Vector(2, arr=[0, 0]))
labels = Vector(4, arr=[1, 1, 1, 0])
left_true= Vector(2, arr=[1, 0])
both_false = Vector(2, arr=[0, 0])
print(tron.predict(both_true))
print(tron.predict(right_true))
print(tron.predict(left_true))
print(tron.predict(both_false)) """
# Testing the reading of data
""" images = Data.read_images('test')
labels = Data.read_labels('test')
UI.draw_image(images[1234], "testi")
print(labels[1234]) """
# Vector multiplication test
""" print(Vector(4, arr=[1, 2, 3, 4]) * Vector(4, arr=[1, 2, 2, 2])) """
# Neuron output test
""" n = Neuron(Utils.rand_array(4), Activation.sigmoid, Activation.sigmoid_d, 3)
x = Vector(4, arr=Utils.rand_array(4))
print(n)
print(x)
print(n.output(x)) """
# rand_array and normalization test
""" arr = Utils.rand_array(10, -5, 15)
print(arr)
print(Utils.normalize(arr, -5, 15)) """
# Testing some hidden layer basic functionality and saving/loading
""" images = IO.read_images('test')
labels = IO.read_labels('test')
weights = [Utils.rand_array(784, -1, 1) for _ in range(10)]
hl_a = HiddenLayer(10, 784, weights, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
#IO.save_layer(hl_a, "test")
hl_b = IO.load_layer("test")
for i in range(9):
img = Vector(Utils.normalize(Utils.flatten_2d(images[i]), 0, 255))
o1 = hl_a.generate_output(img)
o2 = hl_b.generate_output(img)
#print("Picture " + str(i + 1) + ": " + str(o1) + ", " + str(o2) + ", correct answer is " + str(labels[i]))
print(o1)
print(o2) """
# Array flattening testing
""" testarr = [[1, 2, 7, 8], [3, 4, 9, 10], [5, 6, 11, 12]]
testarr = Utils.flatten_2d(testarr)
print(testarr)
testarr = Utils.deflatten_2d(testarr, 4, 3)
print(testarr) """
# Let's test multi-layer nets
""" images = IO.read_images('test')
labels = IO.read_labels('test')
img_test = images[:20]
lab_test = labels[:20]
weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]
weights_b = [Utils.rand_array(10, 0, 1) for _ in range(10)]
hl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
hl_b = HiddenLayer(10, 10, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
LEARNING_RATE = 0.5
for (i, l) in zip(images, labels):
img = Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255))
lab = Utils.onehot_label_arr(l)
o_a = hl_a.generate_output(img)
o_b = hl_b.generate_output(o_a)
grads = Backpropagation.output_layer_grads(hl_b, o_b, lab, hl_a, LEARNING_RATE)
#grad_b =
#print("Picture " + str(i + 1) + ": " + str(o1) + ", " + str(o2) + ", correct answer is " + str(labels[i]))
#print(o_a)
#print(o_b)
#print(lab)
#print()
#print("----")
for n in hl_b.neurons:
print(n.weights) """
# Let's try how well a single one-layer 10-neuron net performs!
# Read images and labels
""" images = IO.read_images('training')
labels = IO.read_labels('training')
test_images = IO.read_images('test')
test_labels = IO.read_labels('test')
print("Images & labels read!")
# Preprocess images and labels
images_flat = []
labels_oh = []
test_images_flat = []
for (i, l) in zip(images, labels):
images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))
labels_oh.append(Utils.onehot_label_arr(l))
for i in test_images:
test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))
print("Images & labels processed!")
# Initialize weights and layer
#weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]
weights_a = [[0] * 784] * 10
hl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
LEARNING_RATE = 0.05
iter = 1
prev_correct = 0
#old_weights = weights_a
while True:
print("Iteration: " + str(iter))
j = 1
for (img, lab) in zip(images_flat, labels_oh):
o_a = hl_a.generate_output(img)
grads = Backpropagation.output_layer_backpropagate(hl_a, o_a, lab, img, LEARNING_RATE)
if j % 1000 == 0:
print(" " + str(j))
j += 1
right_amount = 0
for (img, lab) in zip(test_images_flat, test_labels):
o_a = hl_a.generate_output(img)
pred = Utils.make_prediction(o_a)
if pred == lab:
right_amount += 1
print("Correct predictions: " + str(right_amount))
if (iter > 10):
break
prev_correct = right_amount
iter = iter + 1 """
#IO.save_layer(hl_a, "test1_3")
# Visualize weights!
""" hl_a = IO.load_layer("test1_3")
i = 0
for n in hl_a.neurons:
weights = n.weights
weights = Utils.fit_arr(weights, 0, 255)
#print(weights)
IO.save_image(Utils.deflatten_2d(weights, 28, 28), "w" + str(i))
i += 1 """
# Final boss: a 32-16-10 multi-layer net!
images = IO.read_images('training')
labels = IO.read_labels('training')
test_images = IO.read_images('test')
test_labels = IO.read_labels('test')
print("Images & labels read!")
# Preprocess images and labels
images_flat = []
labels_oh = []
test_images_flat = []
for (i, l) in zip(images, labels):
images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))
labels_oh.append(Utils.onehot_label_arr(l))
for i in test_images:
test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))
print("Images & labels processed!")
# Don't change these two
IMAGE_INPUT_SIZE = 784
OUTPUT_LAYER_SIZE = 10
# These define how many neurons in layers A & B
LAYER_A_SIZE = 32
LAYER_B_SIZE = 16
# Initialize weights and layer
weights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(LAYER_A_SIZE)]
weights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)]
weights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(OUTPUT_LAYER_SIZE)]
hl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)
hl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)
opl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)
# ---- Change these if you want to play around with the program ----
# These decide when the training stops
ITERATION_CAP = 20 # after 20 iterations or
ACCURACY_CAP = 6500 # at 65% accuracy
# These adjust the learning process
INITIAL_LEARNING_RATE = 0.05
LEARNING_DECAY_SCALAR = 0.0025
BATCH_SIZE = 100
# ----------------
learning_rate = INITIAL_LEARNING_RATE
iter = 1
prev_correct = 0
while True:
print("Iteration: " + str(iter))
learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)
print("Learning rate: " + str(learning_rate))
j = 1
batchtracker = 0
img_sum = Vector([0] * IMAGE_INPUT_SIZE)
lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)
oa_sum = Vector([0] * LAYER_A_SIZE)
ob_sum = Vector([0] * LAYER_B_SIZE)
op_sum = Vector([0] * OUTPUT_LAYER_SIZE)
for (img, lab) in zip(images_flat, labels_oh):
o_a = hl_a.generate_output(img)
o_b = hl_b.generate_output(o_a['op'])
output = opl.generate_output(o_b['op'])
img_sum = img_sum + img
lab_sum = lab_sum + Vector(lab)
oa_sum = oa_sum + o_a['op']
ob_sum = ob_sum + o_b['op']
op_sum = op_sum + output['op']
batchtracker = batchtracker + 1
if batchtracker == BATCH_SIZE:
img_sum = img_sum * (1 / BATCH_SIZE)
lab_sum = lab_sum * (1 / BATCH_SIZE)
oa_sum = oa_sum * (1 / BATCH_SIZE)
ob_sum = ob_sum * (1 / BATCH_SIZE)
op_sum = op_sum * (1 / BATCH_SIZE)
#print(opl.loss(lab_sum, op_sum))
            # backpropagate against the batch averages; the original passed the
            # raw `lab`/`img` of the last sample instead of lab_sum/img_sum
            opl_backprop = Backpropagation.output_layer_backpropagate(opl, op_sum, lab_sum, ob_sum, learning_rate)
            hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b, oa_sum, ob_sum, opl_backprop, learning_rate)
            hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a, img_sum, oa_sum, hl_b_backprop, learning_rate)
img_sum = Vector([0] * IMAGE_INPUT_SIZE)
lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)
oa_sum = Vector([0] * LAYER_A_SIZE)
ob_sum = Vector([0] * LAYER_B_SIZE)
op_sum = Vector([0] * OUTPUT_LAYER_SIZE)
batchtracker = 0
if j % 10000 == 0:
print(" " + str(j))
j += 1
print("Iteration " + str(iter) + " done! Now testing accuracy...")
right_amount = 0
for (img_t, lab_t) in zip(test_images_flat, test_labels):
oa = hl_a.generate_output(img_t)['op']
ob = hl_b.generate_output(oa)['op']
op = opl.generate_output(ob)['op']
pred = Utils.make_prediction(op)
if pred == lab_t:
right_amount += 1
print("Correct predictions: " + str(right_amount))
if (iter >= ITERATION_CAP):
break
if (prev_correct >= ACCURACY_CAP):
break
#if (prev_correct > right_amount):
# break
prev_correct = right_amount
iter = iter + 1
IO.save_layer(hl_a, "test_layer_a")
IO.save_layer(hl_b, "test_layer_b")
IO.save_layer(opl, "test_layer_c") |
8,578 | 18789b5106d4be8a02197b165e16a74c08a58c66 | import math
def sexpr_key(s_expr):
return s_expr.strip('(').split(' ')[0]
def expr_key(expr):
return expr.split(' ')[0]
def expr_data(expr):
return expr.split(' ')[1:]
def list_key(_list):
if type(_list) is type(list()):
return _list[0]
else:
return expr_key(_list)
def list_data(_list):
if type(_list) is type(list()):
return _list[1]
else:
temp = expr_data(_list)
if temp:
return temp[0]
else:
return []
def extracted_data(string):
t = string.split(' ')
t.pop(0)
return t
def mean(data):
if data:
return float(sum(data))/len(data)
else:
return 0
def variance(data):
if data:
m_mean = mean(data)
return sum([math.pow((i - m_mean), 2) for i in data])/len(data)
else:
return 0
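if __name__ == '__main__':
    # quick sanity checks (illustrative additions, not in the original module)
    assert mean([1, 2, 3]) == 2.0
    assert abs(variance([1, 2, 3]) - 2.0 / 3) < 1e-12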
|
8,579 | fc20a2bf09d510892a4d144fbbd2cb2012c3ad98 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'FormHello.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FormHello(object):
def setupUi(self, FormHello):
FormHello.setObjectName("FormHello")
FormHello.resize(705, 477)
self.LabelHello = QtWidgets.QLabel(FormHello)
self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.LabelHello.setFont(font)
self.LabelHello.setObjectName("LabelHello")
self.btnClose = QtWidgets.QPushButton(FormHello)
self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))
self.btnClose.setObjectName("btnClose")
self.retranslateUi(FormHello)
QtCore.QMetaObject.connectSlotsByName(FormHello)
def retranslateUi(self, FormHello):
_translate = QtCore.QCoreApplication.translate
FormHello.setWindowTitle(_translate("FormHello", "Demo2_2"))
self.LabelHello.setText(_translate("FormHello", " Hello, by UI Designer"))
self.btnClose.setText(_translate("FormHello", "关闭"))
|
8,580 | 4282303e3e6ee122f1379bea73c619870f983f61 | age = int(input('Please enter your age: '))
subject = input('Please enter your major: ')
college = input('Did you graduate from a key university? (yes/no) ')
if (subject == 'Electronic Information Engineering' and age > 25) or (subject == 'Electronic Information Engineering' and college == 'yes') or (age < 28 and subject == 'Computer Science'):
    print('Congratulations, you have been admitted!')
else:
    print('Sorry, you do not meet the interview requirements')
|
8,581 | 47259844f76f12060f0cf52f1086c05b9f300175 | def firstDuplicate(array):
"""
Time O(n) | Space O(n)
"""
dic = {}
for num in array:
if num in dic:
return num
else:
dic[num] = True
return -1
print(firstDuplicate([2,1,3,5,3])) |
8,582 | f9ea29f882c6491a2ac0007e4d9435c732d0967a | import math
import numpy
import theano
from theano import tensor as T
from utils import shared_dataset
from layer import HiddenLayer, LogisticRegressionLayer
import pickle as pkl
from mlp import MLP, Costs, NeuralActivations
DEBUGGING = False
class PostMLP(MLP):
"""Post training:- Second phase MLP.
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function thanh or the
sigmoid function (defined here by a ``SigmoidalLayer`` class) while the
top layer is a softamx layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self,
input,
n_in=64*11,
n_hiddens=[500, 400],
n_out=1,
normalize_inputs=False,
use_adagrad=True,
activation=NeuralActivations.Rectifier,
exp_id=1,
rng=None,
params_first_phase=None):
"""
Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in which
the labels lie.
"""
        if DEBUGGING:
            theano.config.compute_test_value = 'raise'
            input.tag.test_value = numpy.random.rand(1800, n_in)  # 'input' is the argument; self.input is only set by the superclass __init__ below
super(PostMLP, self).__init__(input,
n_in,
n_hiddens,
n_out,
normalize_inputs,
use_adagrad,
activation,
exp_id,
rng)
self.params_first_phase = params_first_phase
def train(self,
data=None,
labels=None,
**kwargs):
learning_rate = kwargs["learning_rate"]
L1_reg = kwargs["L1_reg"]
L2_reg = kwargs["L2_reg"]
n_epochs = kwargs["nepochs"]
cost_type = kwargs["cost_type"]
save_exp_data = kwargs["save_exp_data"]
batch_size = kwargs["batch_size"]
normalize_weights = kwargs["normalize_weights"]
enable_dropout = kwargs["enable_dropout"]
if data is None:
raise Exception("Post-training can't start without pretraining class membership probabilities.")
if labels is None:
raise Exception("Post-training can not start without posttraining class labels.")
self.state = "train"
self.learning_rate = learning_rate
train_set_x = shared_dataset(data, name="training_set_x")
train_set_y = shared_dataset(labels, name="labels")
train_set_y = T.cast(train_set_y, "int32")
# compute number of minibatches for training
n_examples = data.shape[0]
n_train_batches = int(math.ceil(n_examples / batch_size))
######################
# BUILD ACTUAL MODEL #
######################
        print '...post-training the model'
# allocate symbolic variables for the data
index = T.lscalar('index') # index to a [mini]batch
y = T.ivector('y') # the labels are presented as 1D vector of int32
mode = "FAST_RUN"
#import pudb; pudb.set_trace()
if DEBUGGING:
index.tag.test_value = 0
y.tag.test_value = numpy.ones(n_examples)
mode = "DEBUG_MODE"
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically.
cost = self.get_cost_function(cost_type, y, L1_reg, L2_reg)
updates = self.sgd_updates(cost, learning_rate)
        # compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
# p_y_given_x = self.class_memberships
train_model = theano.function(inputs=[index],
outputs=cost,
updates = updates,
givens = {
self.input: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
},
mode=mode)
if DEBUGGING:
theano.printing.debugprint(train_model)
epoch = 0
costs = []
Ws = []
while (epoch < n_epochs):
print "In da epoch %d" % (epoch)
for minibatch_index in xrange(n_train_batches):
print "Postraining in Minibatch %i " % (minibatch_index)
minibatch_avg_cost = train_model(minibatch_index)
if enable_dropout:
self.dropout()
if normalize_weights:
self.normalize_weights()
costs.append(float(minibatch_avg_cost))
Ws.append(self.params[2])
epoch +=1
if save_exp_data:
self.data_dict['Ws'].append(Ws)
self.data_dict['costs'].append([costs])
self.save_data()
return costs
def test(self,
data=None,
labels=None,
**kwargs):
save_exp_data = kwargs["save_exp_data"]
batch_size = kwargs["batch_size"]
if data is None:
raise Exception("Post-training can't start without pretraining class membership probabilities.")
if labels is None:
raise Exception("Post-training can not start without posttraining class-membership probabilities.")
test_set_x = shared_dataset(data)
test_set_y = shared_dataset(labels)
test_set_y = T.cast(test_set_y, "int32")
self.state = "test"
# compute number of minibatches for training, validation and testing
n_examples = data.shape[0]
n_test_batches = int(math.ceil(n_examples / batch_size))
print '...post-testing the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
mode = "FAST_RUN"
if DEBUGGING:
theano.config.compute_test_value = 'raise'
index.tag.test_value = 0
y.tag.test_value = numpy.ones(n_examples)
mode = "DEBUG_MODE"
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
# compiling a Theano function `test_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
test_model = theano.function(inputs=[index],
outputs=self.errors(y),
givens={
self.input: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]},
mode=mode)
###############
# TEST MODEL #
###############
test_losses = []
for minibatch_index in xrange(n_test_batches):
test_losses.append(float(test_model(minibatch_index)))
test_score = numpy.mean(test_losses)
print("Minibatch %i, mean test error %f" % (minibatch_index, test_score * 100))
if save_exp_data:
self.data_dict['test_scores'].append(test_losses)
self.save_data()
return test_score, test_losses
|
8,583 | 03a1f9f533f7550db32fa25578ef2f7f4c741510 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: sx
def reverse(text):
    """Reverse the string."""
    return text[::-1]
def is_palindrome(text):
    """Check whether the text is a palindrome."""
    # drop punctuation and whitespace, keeping only alphanumeric characters
    m = ''.join(e for e in text if e.isalnum())
    return m == reverse(m)
something = input('Please enter: ')
if is_palindrome(something):
    print('Yes, this is a palindrome')
else:
    print('No, this is not a palindrome')
|
8,584 | cc19ff829cc4a11c3dc873353fa2194ec9a87718 | import os
from PIL import Image
import cv2
import shutil
root = './train'
save_path = './thumbnail'
for r, d, files in os.walk(root):
if files != []:
for i in files:
fp = os.path.join(r, i)
label = i.split('_')[0]
dst = os.path.join(save_path, label)
if not os.path.exists(dst):
os.makedirs(dst)
img = Image.open(fp).convert('RGB')
w, h = img.size
if max(w, h) > 256:
img.thumbnail((256, 256), Image.ANTIALIAS)
img.save(os.path.join(dst, i), quality=95, subsampling=0)
else:
shutil.copy(fp, os.path.join(dst, i))
# The originals vary in size and are mostly high-resolution, so resizing during training
# is slow; we therefore resize once to a small size and save the results.
# Image.thumbnail() also acts as a filter: if width/height are already within the bounds
# it does not resize, otherwise it scales down proportionally.
# The dataset shrinks from 114 GB to 86 GB. On Tesla V100 32GB*2, one epoch of the baseline
# took about 2400 s (40 min) before processing and about 1400 s (23 min) after, a large
# speed-up with essentially no accuracy impact; a smaller size limit could be faster still,
# since the training input is 224x224. |
8,585 | 707e3e60d6d9a3db5b9bc733e912b34e2cec5974 | from .models import RecommendedArtifact
from .serializers import RecommendedArtifactSerialize
from rest_framework.decorators import api_view
from rest_framework.response import Response
from datetime import datetime
import requests, bs4
# constant value
service_key = "{jo's museum key}"
@api_view(['GET'])
def artifact_save_recommend(request,pageNo):
    # 1. Pick a page and fetch every artifact entry on that page
artifact_url = f"http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}"
#http://www.emuseum.go.kr/openapi/relic/list?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&numOfRows=100&pageNo=1
response = requests.get(artifact_url)
response_dict = bs4.BeautifulSoup(response.content, 'html.parser')
search_list = []
for data in response_dict.findAll('data'):
for item in data.findAll('item'):
if item['key'] == 'id':
id_num = item['value']
search_list.append(id_num)
    # 2-1. Set up variables
detail_list = []
dataDict = {
'id_num': '',
'name': '',
'desc': '',
'museum_name': '',
'nationality_name': '',
'image_uri': '',
}
    # 2-2. Check each artifact for a desc
for i in range(len(search_list)):
artifact_num = search_list[i]
artifact_url = f"http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}"
# http://www.emuseum.go.kr/openapi/relic/detail?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&id=PS0100100100100021500000
response = requests.get(artifact_url)
response_dict = bs4.BeautifulSoup(response.content, 'html.parser')
for data in response_dict.findAll('list'):
for item in data.findAll('item'):
if item['key'] == 'id':
dataDict['id_num'] = item['value']
elif item['key'] == 'desc':
dataDict['desc'] = item['value']
elif item['key'] == 'nameKr':
dataDict['name'] = item['value']
elif item['key'] == 'nationalityName2':
dataDict['nationality_name'] = item['value']
elif item['key'] == 'museumName2':
dataDict['museum_name'] = item['value']
elif item['key'] == 'imgThumUriM':
dataDict['image_uri'] = item['value']
        # 2-3. Save to the db
if dataDict['desc'] != '':
serializer = RecommendedArtifactSerialize(data=dataDict)
if serializer.is_valid(raise_exception=True):
serializer.save()
dataDict = {
'id_num': '',
'name': '',
'desc': '',
'museum_name': '',
'nationality_name': '',
'image_uri': '',
}
return Response(serializer.data)
@api_view(['GET'])
def artifact_recommend(request):
    ## What day of the year is it today?
now = datetime.now()
nowYear = now.year
nowMonth = now.month
nowDay = now.day
daySum = 0
if nowYear%4==0 and nowYear%100!=0 or nowYear%400==0:
month = [31,29,31,30,31,30,31,31,30,31,30,31]
else:
month = [31,28,31,30,31,30,31,31,30,31,30,31]
for i in range(nowMonth-1):
daySum += month[i]
daySum += nowDay
Recommended_list = RecommendedArtifact.objects.all()
Recommended_artifact = Recommended_list[daySum]
dataDict = {
'id_num': Recommended_artifact.id_num,
'name': Recommended_artifact.name,
'desc': Recommended_artifact.desc,
'museum_name': Recommended_artifact.museum_name,
'nationality_name': Recommended_artifact.nationality_name,
'image_uri': Recommended_artifact.image_uri,
}
# print(Recommended_artifact.name)
return Response(dataDict)
|
8,586 | 5a4a014d07cf312f148e089ea43484f663ce32bc | import requests
from bs4 import BeautifulSoup
import re
# if no using some headers, wikiloc answers HTML error 503, probably they protect their servers against scrapping
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0',}
def main():
print("##############################")
response=requests.get("http://www.muenchen.de/rathaus/Serviceangebote/familie/kinderbetreuung/corona.html#geschlossene-kitas-oder-kitagruppen-_6", headers=headers)
soup=BeautifulSoup(response.text, "lxml")
p = soup.find("p", text =re.compile("geschlossen"))
if p != None:
kitaUl = p.findNext("ul")
kitaList = kitaUl.find_all("li")
# for kita in kitaList:
# print("KITA: " + kita.text)
print("TOTAL closed Kitas=", len(kitaList))
else:
print("Error, Kita list not found")
print("##############################")
response=requests.get("http://www.lgl.bayern.de/gesundheit/infektionsschutz/infektionskrankheiten_a_z/coronavirus/karte_coronavirus/", headers=headers)
soup2=BeautifulSoup(response.text, "lxml")
munich = soup2.find("td", text =re.compile("München Stadt"))
if munich != None:
change = munich.findNext("td").findNext("td")
average=change.findNext("td").findNext("td").findNext("td")
print("Munich 7-day average %s, today´s increase %s" %(re.sub(r"\s+", "", average.text), re.sub(r"\s+", "", change.text)))
else:
print("Error, Munich row not found")
print("##############################")
if __name__ == "__main__":
main() |
8,587 | e3a59a1ae65dd86ff2f5dcc15d4df9e8dc451990 | def is_prime(x):
divisor = 2
while divisor <= x**(1/2.0):
if x % divisor == 0:
return False
divisor += 1
return True
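# e.g. is_prime(7) -> True, is_prime(9) -> False (illustrative)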
for j in range(int(raw_input())):
a, b = map(int, raw_input().split())
count = 0
if a == 2:
a += 1
count += 1
elif a % 2 == 0:
a += 1
elif a == 1:
a += 2
count += 1
    for i in range(a, b + 1, 2):  # make the upper bound b inclusive
if is_prime(i):
count += 1
print count
|
8,588 | 45d5c75a993ff50e1a88510bdb16e963403c5356 | # Tip Calculator
# Dan Soloha
# 9/12/2019
total = float(input("What was the total your bill came to? "))
print(f"With a total of ${total}, you should tip ${round(total * 0.15, 2)}. If the waiter did a really good job, you should tip ${round(total * 0.20, 2)}. ")  # round() keeps the cents; int() was truncating the result downward |
8,589 | 2ec5e43860a1d248a2f5cd1abc26676342275425 | from django.shortcuts import render, redirect
from django.utils.crypto import get_random_string
def index(request):
if not "word" in request.session:
request.session["word"] = 'Empty'
if not "count" in request.session:
request.session["count"] = 0
if request.method == "GET":
return render(request, "app_one/index.html")
if request.method == "POST":
request.session['word'] = get_random_string(length=14)
request.session['count'] += 1
return redirect('/')
# def generator(request):
# return redirect('/')
def reset(request):
request.session['count'] = 0
return redirect('/') |
8,590 | 80531ac3cc247d48ee36bff581925b8f29f9e235 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
import time
import os
import csv
import matplotlib.pyplot as plt
from GELu import GELu
from My_Dataset import MyDataset
from pytorchtools import EarlyStopping
from LSTM import LSTM
'''
Written by KKL on 2020-12-1
This file is used to train LSTM
'''
def train_model(model, DEVICE, patience, n_epochs, csv_record=False):
train_losses = []
valid_losses = []
avg_train_losses = []
avg_valid_losses = []
# initialize the early_stopping object
early_stopping = EarlyStopping(patience=patience, verbose=True)
t1 = time.time()
for epoch in range(1, n_epochs + 1):
###################
# train the model #
###################
model.train() # prep model for training
for step, (feature, label) in enumerate(train_loader, 1):
feature = feature.to(DEVICE)
label = label.to(DEVICE).squeeze()
# print(feature.size(), label.size())
optimizer.zero_grad()
output = model(feature).squeeze()
# print(output.size(), label.size())
loss = loss_func(output, label)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
######################
# test the model #
######################
model.eval() # prep model for evaluation
with torch.no_grad():
for feature, label in valid_loader:
feature = feature.to(DEVICE)
label = label.to(DEVICE)
output = model(feature)
loss = loss_func(output.squeeze(), label.squeeze())
# record validation loss
valid_losses.append(loss.item())
# print training/validation statistics
# calculate average loss over an epoch
train_loss = np.average(train_losses)
valid_loss = np.average(valid_losses)
avg_train_losses.append(train_loss)
avg_valid_losses.append(valid_loss)
epoch_len = len(str(n_epochs))
print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}] ' +
f'train_loss: {train_loss:.5f} ' +
f'valid_loss: {valid_loss:.5f}'+ f'| Using time: {time.time()-t1:.5f}')
t1 = time.time()
print(print_msg)
if csv_record==True:
with open(train_log_dir, "a", newline="") as train_log:
writer = csv.writer(train_log)
writer.writerow([epoch, train_loss])
with open(valid_log_dir, "a", newline="") as test_log:
writer = csv.writer(test_log)
writer.writerow([epoch, valid_loss])
# clear lists to track next epoch
train_losses = []
valid_losses = []
# early_stopping needs the validation loss to check if it has decresed,
# and if it has, it will make a checkpoint of the current model
early_stopping(valid_loss, model)
if early_stopping.early_stop:
print("Early stopping")
break
# load the last checkpoint with the best model
model.load_state_dict(torch.load('checkpoint.pt'))
return model, avg_train_losses, avg_valid_losses
if __name__ == '__main__':
# Hyper Parameters
EPOCH = 1000
# BATCH_SIZE = 16
    BATCH_SIZE = 64  # try 16 later
LR = 0.001
patience = 100
csv_record = True
    # whether to use multiple GPUs
MultiGPU = False
torch.set_default_dtype(torch.float64)
# torch.backends.cudnn.enabled = False
print('Epoch = ', EPOCH, '|Batch size = ', BATCH_SIZE, '|Learning rate =', LR)
if MultiGPU:
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
torch.cuda.set_device(0)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('DEVICE=', DEVICE, "| PyTorch", torch.__version__, '| CUDA version ', torch.version.cuda, '| cudnn version', torch.backends.cudnn.version())
cPath = os.getcwd() # current path
    hdf5_dir = r'C:\Users\...\语音信号处理\data.hdf5'
train_data = MyDataset(hdf5_dir, 'train')
valid_data = MyDataset(hdf5_dir, 'valid')
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True,)
valid_loader = torch.utils.data.DataLoader(dataset=valid_data, batch_size=BATCH_SIZE, shuffle=False)
train_log_dir = os.path.join(r'C:\Users\...\语音信号处理\train_log.csv')
valid_log_dir = os.path.join(r'C:\Users\...\语音信号处理\valid_log.csv')
print('train data len:',train_data.__len__())
# log file
with open(train_log_dir, "w", newline="") as train_log:
writer = csv.writer(train_log)
writer.writerow(['epoch', 'loss'])
with open(valid_log_dir, "w", newline="") as valid_log:
writer = csv.writer(valid_log)
writer.writerow(['epoch', 'loss'])
net = LSTM().to(DEVICE)
print(net, '\n\n------------------training start-----------------')
# net.load_state_dict(torch.load('./workspace/'+model_name))
# optimizer = torch.optim.Adam(net.parameters(), lr=LR)
optimizer = torch.optim.Adam(net.parameters(), lr=LR, weight_decay=0.001)
loss_func = nn.MSELoss()
#--------------- training -----------------------
net, train_loss, valid_loss = train_model(net, DEVICE, patience, EPOCH, csv_record)
print('---------------result------')
print('train_loss:',train_loss[-1],'valid_loss:',valid_loss[-1])
torch.save(net.state_dict(), './VAD.pkl')
print('save model successfully')
|
8,591 | dffcaf47ec8e0daa940e7047f11681ef3eabc772 | import sys, os
class Extractor:
def __init__(self, prefix=''):
self.variables = {}
self.prefix = os.path.basename(prefix)
    def find_variable_name(self, value):
        '''
        Returns the variable name if a variable with
        the value <value> is found.
        '''
        for var, val in self.variables.items():
            if value == val:
                return var
    def scan(self, lines):
        '''
        Scans a list of <lines> containing CSS and
        yields the rendered LESS version line by line.
        '''
        yield "@import '%s_variables.less'\n\n" % self.prefix
        for line in lines:
            found_prop = False
            for prop in ('background-color', 'background', 'color'):
                if prop in line:
                    found_prop = True
                    value = line.split(':')[1].strip().replace('}', '')
                    if not (value in self.variables.values()):
                        self.variables['@var%i' % (len(self.variables) + 1)] = value
                    yield line.replace(value, self.find_variable_name(value) + ';')
                    # stop at the first (longest) match: a 'background-color' line
                    # also contains 'background' and 'color' and would otherwise
                    # be yielded more than once
                    break
            if not found_prop:
                yield line
    def get_variables(self):
        '''
        Yields the output lines for the variables.less file.
        '''
        for var, val in self.variables.items():
            yield var + ': ' + val
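# Example (illustrative): feeding scan() the line "color: #fff;" twice records
# '@var1' -> '#fff;' and yields "color: @var1;" both times; get_variables() then
# yields "@var1: #fff;".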
if __name__ == '__main__':
if len(sys.argv) > 1:
for path in sys.argv[1:]:
name = '.'.join(path.split('.')[:-1])
extractor = Extractor(name)
read = open(path)
write = open(name + '.less', 'w')
variables = open(name + '_variables.less', 'w')
try:
for line in extractor.scan(read.readlines()):
write.write(line)
for line in extractor.get_variables():
variables.write(line + os.linesep)
finally:
variables.close()
write.close()
read.close()
else:
print('usage: python extract.py [file]')
|
8,592 | b3f4815495c781fe6cc15f77b4ee601680117419 | from ctypes import *
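# A bare ctypes sketch of a three-byte header; judging by the name it mirrors
# GPAC's GF_IPMPX_Data struct (an assumption), with each field a single byte.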
class GF_IPMPX_Data(Structure):
_fields_=[
("tag", c_char),
("Version", c_char),
("dataID", c_char)
] |
8,593 | 8787126e654808a5fec52283780d9b4f668fa50f | import Numberjack as Nj
class Teachers(object):
"""Will be expanded to allow constraints for individual teachers"""
def __init__(self):
self.store = list()
def add(self, teachers):
if isinstance(teachers, (list, tuple)):
self.store.extend(teachers)
elif isinstance(teachers, str):
self.store.append(teachers)
else:
raise TypeError('only lists, tuples and strings '
'of teachers can be added')
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError('only lists, tuples and strings '
'of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
way to standardized time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers '
'than timeslots')
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects),
self.timeslots,
len(self.teachers)+1)
self.model = Nj.Model(
[Nj.AllDiffExcept0(row) for row in self.matrix.row],
[Nj.AllDiffExcept0(col) for col in self.matrix.col]
)
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
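# A minimal usage sketch (hypothetical data; requires Numberjack's Mistral backend):
# teachers = Teachers(); teachers.add(['Ada', 'Grace'])
# subjects = Subjects(); subjects.add(['Maths', 'Physics'])
# timetable = Solver(teachers, subjects, TimeSlots(3))
# timetable.solve()
# print(timetable.solution)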
|
8,594 | 2317a2fff493588ad6cc3a4ac2b600fbf1c5583c | import numpy as np
import dl_style_transfer.workspace.data_helpers
import os
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, 'yelp_sentences.txt')) as yelp, open(os.path.join(here, 'shake_sentences.txt')) as shake:
    sents = list(yelp) + list(shake)
thresh = 5
col = dict()
word_to_ind = dict()
ind_to_word = dict()
def _line_into_col(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(" ")
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
for l in sents:
    _line_into_col(l)
lis = list(col.items())
lis.sort(key=lambda count: count[1], reverse=True)
for i, word in enumerate(lis):
word_to_ind[word[0]] = i
ind_to_word[i] = word[0]
voc_len = len(word_to_ind)
shape = (len(sents), voc_len)
def get_small_bag():
bag = []
for sent in sents:
sbag =[]
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "):
sbag.append(word_to_ind[wor])
bag.append(sbag)
return bag
def get_bag():
    bag = np.zeros(shape)
    for j, sent in enumerate(sents):
        for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "):
            bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1
    log_bag = np.log(1 + bag)
    # keepdims=True so each row's maximum broadcasts against the (n_sents, vocab) matrix
    return log_bag / np.max(log_bag, axis=1, keepdims=True)
def string_to_vec(string):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(" ")
vec = np.zeros(voc_len)
for wor in tokens:
vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1
return vec
def get_ryans_strange_input():
vec = []
for l in sents:
vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))
return np.array([word_to_ind[i] for l in vec for i in l.split(" ")])
def vocab_length():
return voc_len
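# A quick sanity-check sketch (assumes the two sentence files are present):
# print(vocab_length())    # vocabulary size V
# print(get_bag().shape)   # (len(sents), V) normalized log-count matrix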
|
8,595 | 5f471fb75b1c4f6fc7aa4cb4f99f9c1a1a9f0ea1 | import pytest
from chess.board import Board, ImpossibleMove
from chess.pieces import King, Rook, Pawn, Knight
def test_board_has_32_pieces():
board = Board()
assert board.pieces_quantity() == 32
def test_board_can_be_instantiated_with_any_set_of_pieces():
board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})
assert board.pieces_quantity() == 2
def test_piece_cant_capture_an_ally():
board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})
with pytest.raises(ImpossibleMove):
board.move('f3', 'e5')
def test_alternating_between_players():
board = Board()
assert board.turn == 'white'
board.move('g2', 'g3') # white pawn moves
assert board.turn == 'black'
board.move('b7', 'b6') # black pawn moves
assert board.turn == 'white'
board.move('f1', 'g2') # white bishop moves
assert board.turn == 'black'
def test_only_white_pieces_can_start():
board = Board()
assert board.turn == 'white'
with pytest.raises(ImpossibleMove):
board.move('b7', 'b6')
def test_players_can_put_opponent_in_check():
board = Board({'e1': King('black'), 'f8': Rook('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
def test_players_can_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
board.move('e1', 'f1')
assert board.check is None
def test_player_has_to_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
with pytest.raises(ImpossibleMove):
board.move('e1', 'e2')
def test_pieces_can_capture_opponent_pieces():
board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'), 'f3': Knight('white')})
assert board.pieces_quantity() == 3
knight = board.get_piece('f3')
board.move('f3', 'e5')
assert board.get_piece('e5') is knight
assert board.pieces_quantity() == 2
|
8,596 | 7c65d0bdd4fd808b3d87706357a651601368e43b | import os
from unittest import TestCase
from pyfibre.gui.file_display_pane import FileDisplayPane
from pyfibre.tests.fixtures import (
directory,
test_image_path)
from pyfibre.tests.probe_classes.parsers import ProbeParser
from pyfibre.tests.probe_classes.readers import ProbeMultiImageReader
source_dir = os.path.dirname(os.path.realpath(__file__))
pyfibre_dir = os.path.dirname(os.path.dirname(source_dir))
class TestFileDisplayPane(TestCase):
def setUp(self):
self.file_display = FileDisplayPane(
supported_readers={'Probe': ProbeMultiImageReader()},
supported_parsers={'Probe': ProbeParser()}
)
self.file_path = test_image_path
def test_add_file(self):
self.file_display.add_files(self.file_path)
self.assertEqual(1, len(self.file_display.file_table))
table_row = self.file_display.file_table[0]
self.assertEqual('/path/to/some/file', table_row.name)
self.assertEqual('Probe', table_row.tag)
self.assertDictEqual(
{'Probe': test_image_path},
table_row.file_set.registry)
self.file_display.add_files(test_image_path)
self.assertEqual(1, len(self.file_display.file_table))
def test_add_directory(self):
self.file_display.add_files(directory)
self.assertEqual(1, len(self.file_display.file_table))
table_row = self.file_display.file_table[0]
self.assertEqual('/path/to/some/file', table_row.name)
self.assertEqual('Probe', table_row.tag)
self.assertDictEqual(
{'Probe': test_image_path},
table_row.file_set.registry)
def test_remove_file(self):
self.file_display.add_files(self.file_path)
self.file_display.remove_file(
[self.file_display.file_table[0]])
self.assertEqual(0, len(self.file_display.file_table))
self.file_display.add_files(self.file_path)
self.assertEqual(1, len(self.file_display.file_table))
def test_filter_files(self):
self.file_display.add_files(self.file_path)
self.file_display.filter_files('some')
self.assertEqual(1, len(self.file_display.file_table))
self.file_display.filter_files('sci-pyfibre')
self.assertEqual(0, len(self.file_display.file_table))
|
8,597 | 88a379747f955b0410ab2bb33c1165034c701673 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
data.guid package.
"""
__author__ = 'wenchao.hao'
from .guid import Guid
|
8,598 | 360881cecbad88ea5d150548fba6a39d8dc30681 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
db = {
'host': "localhost",
'user': "root",
'passwd': "m74e71",
'database': "dw_toner"
}
data_inicial = '1990-01-01'  # start date
ano_final = 2018  # final year
feriados = "feriados.csv"  # holidays file
meses_de_ferias = (1, 2, 7, 12)  # vacation months: January, February, July, December
dias_final_semana = (1, 6, 7)  # weekend days: Sunday, Friday and Saturday
|
8,599 | 9e21a39358d97633b49ad83805990c29c19a80ed | import argparse
import glob
import importlib
import inspect
import math
import os
import re
import subprocess
import sys
import moviepy.audio.fx.all as afx
import moviepy.video.fx.all as vfx
import numpy as np
from _appmanager import get_executable
from _shutil import format_time, get_time_str, getch, print2
from moviepy.config import change_settings
from moviepy.editor import *
from open_with.open_with import open_with
import codeapi
import core
import coreapi
import datastruct
SCRIPT_ROOT = os.path.dirname(os.path.abspath(__file__))
ignore_undefined = False
change_settings({"FFMPEG_BINARY": get_executable("ffmpeg")})
# def _get_markers(file):
# marker_file = file + ".marker.txt"
# if os.path.exists(marker_file):
# with open(marker_file, "r") as f:
# s = f.read()
# return [float(x) for x in s.split()]
# else:
# return None
# def _load_and_expand_img(f):
# fg = Image.open(f).convert("RGBA")
# bg = Image.new("RGB", (1920, 1080))
# bg.paste(fg, ((bg.width - fg.width) // 2, (bg.height - fg.height) // 2), fg)
# return np.array(bg)
def _update_mpy_clip(
clip, subclip, speed, frame, norm, loop, duration, pos, scale, vol, **kwargs,
):
assert duration is not None
# video clip operations / fx
if subclip is not None:
if isinstance(subclip, (int, float)):
clip = clip.subclip(subclip).set_duration(duration)
else:
subclip_duration = subclip[1] - subclip[0]
if duration > subclip_duration:
c1 = clip.subclip(subclip[0], subclip[1])
c2 = clip.to_ImageClip(subclip[1]).set_duration(
duration - subclip_duration
)
clip = concatenate_videoclips([c1, c2])
# HACK: workaround for a bug: 'CompositeAudioClip' object has no attribute 'fps'
if clip.audio is not None:
clip = clip.set_audio(clip.audio.set_fps(44100))
else:
clip = clip.subclip(subclip[0], subclip[1]).set_duration(duration)
if speed is not None:
clip = clip.fx(
# pylint: disable=maybe-no-member
vfx.speedx,
speed,
)
if frame is not None:
clip = clip.to_ImageClip(frame).set_duration(duration)
# Loop or change duration
if loop:
clip = clip.fx(
# pylint: disable=maybe-no-member
vfx.loop
)
if subclip is None:
clip = clip.set_duration(duration)
if pos is not None:
# (x, y) marks the center location of the of the clip instead of the top
# left corner.
if pos == "center":
clip = clip.set_position(("center", "center"))
elif isinstance(pos, (list, tuple)):
pos = list(pos)
half_size = [x // 2 for x in clip.size]
for i in range(2):
if isinstance(pos[i], (int, float)):
pos[i] = pos[i] - half_size[i]
pos[i] = int(coreapi.global_scale * pos[i])
clip = clip.set_position(pos)
else:
clip = clip.set_position(pos)
if scale[0] != 1.0 or scale[1] != 1.0:
clip = clip.resize((int(clip.w * scale[0]), int(clip.h * scale[1])))
return clip
def _update_clip_duration(track):
def is_connected(prev_clip, cur_clip):
return math.isclose(
prev_clip.start + prev_clip.duration, cur_clip.start, rel_tol=1e-3,
)
prev_clip_info = None
for clip_info in track:
if prev_clip_info is not None:
if prev_clip_info.auto_extend:
prev_clip_info.duration = clip_info.start - prev_clip_info.start
prev_clip_info.auto_extend = False
assert prev_clip_info.duration > 0
# Apply fadeout to previous clip if it's not connected with
# current clip.
if prev_clip_info.crossfade > 0 and not is_connected(
prev_clip_info, clip_info
):
prev_clip_info.fadeout = prev_clip_info.crossfade
prev_clip_info = clip_info
# Update last clip duration
if prev_clip_info is not None:
if prev_clip_info.auto_extend:
duration = prev_clip_info.duration
# Extend the last video clip to match the voice track
if "re" in coreapi.pos_dict:
                duration = max(duration, coreapi.pos_dict["re"] - prev_clip_info.start)
prev_clip_info.duration = duration
prev_clip_info.auto_extend = False
if prev_clip_info.crossfade > 0:
prev_clip_info.fadeout = prev_clip_info.crossfade
def _export_video(*, resolution, audio_only):
resolution = [int(x * coreapi.global_scale) for x in resolution]
audio_clips = []
# Update clip duration for each track
for track in datastruct.video_tracks.values():
_update_clip_duration(track)
# TODO: post-process video track clips
# Update MoviePy clip object in each track.
video_clips = []
for track_name, track in datastruct.video_tracks.items():
for i, clip_info in enumerate(track):
assert clip_info.mpy_clip is not None
assert clip_info.duration is not None
# Unlink audio clip from video clip (adjust audio duration)
if clip_info.no_audio:
clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None)
elif clip_info.mpy_clip.audio is not None:
audio_clip = clip_info.mpy_clip.audio
clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None)
# Audio timing
# TODO: audio subclip
if clip_info.subclip is not None:
duration = clip_info.subclip[1] - clip_info.subclip[0]
audio_clip = audio_clip.subclip(
clip_info.subclip[0], clip_info.subclip[1]
)
else:
duration = clip_info.duration
duration = min(duration, audio_clip.duration)
audio_clip = audio_clip.set_duration(duration)
audio_clip = audio_clip.set_start(clip_info.start)
# Adjust volume
if clip_info.norm:
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.audio_normalize
)
if clip_info.vol is not None:
if isinstance(clip_info.vol, (int, float)):
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.volumex,
clip_info.vol,
)
else:
audio_clip = _adjust_mpy_audio_clip_volume(
audio_clip, clip_info.vol
)
audio_clips.append(audio_clip)
# If the next clip has crossfade enabled
crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0
if crossfade_duration:
# clip_info.fadeout = crossfade_duration # Fadeout current clip
clip_info.duration += crossfade_duration
clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info))
# Deal with video fade in / out / crossfade
if clip_info.fadein:
assert isinstance(clip_info.fadein, (int, float))
# TODO: crossfadein and crossfadeout is very slow in moviepy
if track_name != "vid":
clip_info.mpy_clip = clip_info.mpy_clip.crossfadein(
clip_info.fadein
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadein,
clip_info.fadein,
)
elif (
clip_info.crossfade > 0
): # crossfade and fadein should not happen at the same time
video_clips.append(
clip_info.mpy_clip.set_duration(clip_info.crossfade)
.crossfadein(clip_info.crossfade)
.set_start(clip_info.start)
)
clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade)
clip_info.start += clip_info.crossfade
if clip_info.fadeout:
assert isinstance(clip_info.fadeout, (int, float))
if track_name != "vid":
# pylint: disable=maybe-no-member
clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout(
clip_info.fadeout
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadeout,
clip_info.fadeout,
)
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start))
if len(video_clips) == 0:
video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2))
# raise Exception("no video clips??")
final_clip = CompositeVideoClip(video_clips, size=resolution)
# Resize here is too late, does not speed up the video encoding at all.
# final_clip = final_clip.resize(width=480)
# Deal with audio clips
for _, track in datastruct.audio_tracks.items():
clips = []
for clip_info in track.clips:
if clip_info.loop:
# HACK: reload the clip.
#
# still don't know why using loaded mpy_clip directly will cause
# "IndexError: index -200001 is out of bounds for axis 0 with
# size 0"...
clip = AudioFileClip(clip_info.file, buffersize=400000)
else:
clip = clip_info.mpy_clip
if clip_info.subclip is not None:
clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1])
duration = clip_info.duration
if duration is not None:
if clip_info.loop:
# pylint: disable=maybe-no-member
clip = clip.fx(afx.audio_loop, duration=duration)
else:
duration = min(duration, clip.duration)
if clip_info.subclip:
duration = min(
duration, clip_info.subclip[1] - clip_info.subclip[0]
)
clip = clip.set_duration(duration)
if clip_info.start is not None:
clip = clip.set_start(clip_info.start)
# Adjust volume by keypoints
if len(clip_info.vol_keypoints) > 0:
clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints)
clips.append(clip)
if len(clips) > 0:
clip = CompositeAudioClip(clips)
audio_clips.append(clip)
if final_clip.audio:
audio_clips.append(final_clip.audio)
if len(audio_clips) > 0:
final_audio_clip = CompositeAudioClip(audio_clips)
# XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'.
# See: https://github.com/Zulko/moviepy/issues/863
# final_audio_clip.fps = 44100
final_clip = final_clip.set_audio(final_audio_clip)
# final_clip.show(10.5, interactive=True)
os.makedirs("tmp/out", exist_ok=True)
if audio_only:
final_audio_clip.fps = 44100
final_audio_clip.write_audiofile("%s.mp3" % out_filename)
open_with("%s.mp3" % out_filename, program_id=0)
else:
final_clip.write_videofile(
"%s.mp4" % out_filename,
temp_audiofile="%s.mp3" % out_filename,
remove_temp=False,
codec="libx264",
threads=8,
fps=coreapi.FPS,
ffmpeg_params=["-crf", "19"],
)
subprocess.Popen(
["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"],
close_fds=True,
)
def _adjust_mpy_audio_clip_volume(clip, vol_keypoints):
xp = []
fp = []
print("vol_keypoints:", vol_keypoints)
for (p, vol) in vol_keypoints:
if isinstance(vol, (int, float)):
xp.append(p)
fp.append(vol)
else:
raise Exception("unsupported bgm parameter type:" % type(vol))
def volume_adjust(gf, t):
factor = np.interp(t, xp, fp)
factor = np.vstack([factor, factor]).T
return factor * gf(t)
return clip.fl(volume_adjust)
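# e.g. vol_keypoints=[(0, 0.0), (2, 1.0)] fades the audio in linearly over the
# first two seconds; np.interp holds the last keypoint's value afterwards.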
# def _export_srt():
# with open("out.srt", "w", encoding="utf-8") as f:
# f.write("\n".join(_srt_lines))
def _convert_to_readable_time(seconds):
seconds = int(seconds)
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
if hour > 0:
return "%d:%02d:%02d" % (hour, minutes, seconds)
else:
return "%02d:%02d" % (minutes, seconds)
def _write_timestamp(t, section_name):
os.makedirs(os.path.dirname(out_filename), exist_ok=True)
if not hasattr(_write_timestamp, "f"):
_write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8")
_write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t)))
_write_timestamp.f.flush()
@core.api
def include(file):
with open(file, "r", encoding="utf-8") as f:
s = f.read()
cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(file)))
_parse_text(s)
os.chdir(cwd)
def _remove_unused_recordings(s):
used_recordings = set()
unused_recordings = []
apis = {"record": (lambda f, **kargs: used_recordings.add(f))}
_parse_text(s, apis=apis)
files = [f for f in glob.glob("record/*") if os.path.isfile(f)]
files = [f.replace("\\", "/") for f in files]
for f in files:
if f not in used_recordings:
unused_recordings.append(f)
print2("Used : %d" % len(used_recordings), color="green")
print2("Unused : %d" % len(unused_recordings), color="red")
assert len(used_recordings) + len(unused_recordings) == len(files)
print("Press y to clean up: ", end="", flush=True)
if getch() == "y":
for f in unused_recordings:
try:
os.remove(f)
except:
print("WARNING: failed to remove: %s" % f)
def _parse_text(text, apis=core.apis, **kwargs):
    # Honor an explicit ignore_undefined=... keyword (previously swallowed by
    # **kwargs and silently ignored); fall back to the module-level global.
    ignore_undefined_ = kwargs.get("ignore_undefined", ignore_undefined)
def find_next(text, needle, p):
pos = text.find(needle, p)
if pos < 0:
pos = len(text)
return pos
# Remove all comments
text = re.sub(r"<!--[\d\D]*?-->", "", text)
p = 0 # Current position
while p < len(text):
if text[p : p + 2] == "{{":
end = find_next(text, "}}", p)
python_code = text[p + 2 : end].strip()
p = end + 2
            if ignore_undefined_:
try:
exec(python_code, apis)
except NameError: # API is not defined
pass # simply ignore
else:
exec(python_code, apis)
continue
if text[p : p + 1] == "#":
end = find_next(text, "\n", p)
line = text[p:end].strip()
_write_timestamp(coreapi.pos_dict["a"], line)
p = end + 1
continue
match = re.match("---((?:[0-9]*[.])?[0-9]+)?\n", text[p:])
if match is not None:
if match.group(1) is not None:
coreapi.audio_gap(float(match.group(1)))
else:
coreapi.audio_gap(0.2)
            p += match.end(0)  # the match already consumes the trailing newline
continue
# Parse regular text
end = find_next(text, "\n", p)
line = text[p:end].strip()
p = end + 1
if line != "" and "parse_line" in apis:
apis["parse_line"](line)
# Call it at the end
core.on_api_func(None)
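# The markup grammar accepted by _parse_text, for reference:
#   <!-- ... -->    comment, stripped before parsing
#   {{ python }}    executed against the `apis` namespace
#   # Title         section heading, logged with a timestamp
#   ---[secs]       audio gap (0.2s when no number is given)
#   anything else   passed line-by-line to apis['parse_line'] if defined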
def _show_stats(s):
TIME_PER_CHAR = 0.1334154351395731
total = 0
def parse_line(line):
nonlocal total
total += len(line)
_parse_text(s, apis={"parse_line": parse_line}, ignore_undefined=True)
total_secs = TIME_PER_CHAR * total
print("Estimated Time: %s" % format_time(total_secs))
input()
def load_config():
import yaml
CONFIG_FILE = "config.yaml"
DEFAULT_CONFIG = {"fps": 30}
if os.path.exists(CONFIG_FILE):
with open(CONFIG_FILE, "r") as f:
config = yaml.load(f.read(), Loader=yaml.FullLoader)
else:
with open(CONFIG_FILE, "w", newline="\n") as f:
yaml.dump(DEFAULT_CONFIG, f, default_flow_style=False)
config = DEFAULT_CONFIG
coreapi.fps(config["fps"])
if __name__ == "__main__":
out_filename = "tmp/out/" + get_time_str()
parser = argparse.ArgumentParser()
parser.add_argument("--stdin", default=False, action="store_true")
parser.add_argument("--proj_dir", type=str, default=None)
parser.add_argument("-i", "--input", type=str, default=None)
parser.add_argument("-a", "--audio_only", action="store_true", default=False)
parser.add_argument(
"--remove_unused_recordings", action="store_true", default=False
)
parser.add_argument("--show_stats", action="store_true", default=False)
parser.add_argument("--preview", action="store_true", default=False)
args = parser.parse_args()
if args.proj_dir is not None:
os.chdir(args.proj_dir)
elif args.input:
os.chdir(os.path.dirname(args.input))
print("Project dir: %s" % os.getcwd())
# Load custom APIs (api.py) if exists
if os.path.exists("api.py"):
sys.path.append(os.getcwd())
mymodule = importlib.import_module("api")
global_functions = inspect.getmembers(mymodule, inspect.isfunction)
core.apis.update({k: v for k, v in global_functions})
# HACK
if args.audio_only:
coreapi.audio_only()
# Read text
if args.stdin:
s = sys.stdin.read()
elif args.input:
with open(args.input, "r", encoding="utf-8") as f:
s = f.read()
else:
raise Exception("Either --stdin or --input should be specified.")
load_config()
if args.preview:
coreapi.preview()
if args.remove_unused_recordings:
ignore_undefined = True
_remove_unused_recordings(s)
elif args.show_stats:
ignore_undefined = True
_show_stats(s)
else:
_parse_text(s, apis=core.apis)
_export_video(resolution=(1920, 1080), audio_only=args.audio_only)
|