index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,000 | 0233b46da3b9351f110ffc7f8622ca8f9ee9944d | import asyncio
import secrets
import pytest
from libp2p.host.ping import ID, PING_LENGTH
from libp2p.tools.factories import pair_of_connected_hosts
@pytest.mark.asyncio
async def test_ping_once():
    """Open a ping stream between two connected hosts and verify one echo round-trip."""
    async with pair_of_connected_hosts() as (host_a, host_b):
        stream = await host_b.new_stream(host_a.get_id(), (ID,))
        payload = secrets.token_bytes(PING_LENGTH)
        await stream.write(payload)
        echoed = await stream.read(PING_LENGTH)
        # The ping protocol must echo exactly the bytes it received.
        assert echoed == payload
        await stream.close()
SOME_PING_COUNT = 3
@pytest.mark.asyncio
async def test_ping_several():
    """Verify several consecutive ping round-trips on a single stream."""
    async with pair_of_connected_hosts() as (host_a, host_b):
        stream = await host_b.new_stream(host_a.get_id(), (ID,))
        for _ in range(SOME_PING_COUNT):
            payload = secrets.token_bytes(PING_LENGTH)
            await stream.write(payload)
            echoed = await stream.read(PING_LENGTH)
            assert echoed == payload
            # Mirror real-world periodic pinging; a zero-second sleep still
            # yields control to the event loop between pings.
            await asyncio.sleep(0)
        await stream.close()
|
3,001 | 89059915df8891efcbe742174bd468a1390598e3 | from unittest import TestCase
from utils.fileutils import is_empty_dir, clear_attributes
class FileUtilsTest(TestCase):
    """Smoke tests for the utils.fileutils helpers."""

    def test_is_empty_dir(self):
        # c:\Windows always has contents on a Windows host.
        # NOTE(review): this test is Windows-only — it will fail or error on
        # other platforms; confirm the suite is intended to run on Windows.
        self.assertFalse(is_empty_dir(r'c:\Windows'))

    def test_clear_attributes(self):
        # Only checks that clearing attributes on this very file does not raise.
        clear_attributes(__file__)
|
3,002 | 4a0d8e6b6205fa57b8614857e1462203a2a7d2c5 | from django.conf.urls import url
from ..views import (buildings_upload, keytype_upload, key_upload, keystatus_upload, keyissue_upload)
from django.contrib.auth.decorators import login_required
# CSV-upload endpoints: one URL per entity type, each mapped to its view.
# NOTE(review): `login_required` is imported at module level but never applied
# here — presumably the views enforce authentication themselves; confirm.
urlpatterns = [
    url(r'^buildings_csv/$',  # NOQA
        buildings_upload,
        name="buildings_upload"),
    url(r'^keytype_csv/$',  # NOQA
        keytype_upload,
        name="keytype_upload"),
    url(r'^key_csv/$',  # NOQA
        key_upload,
        name="key_upload"),
    url(r'^keystatus_csv/$',  # NOQA
        keystatus_upload,
        name="keystatus_upload"),
    url(r'^keyissue_csv/$',  # NOQA
        keyissue_upload,
        name="keyissue_upload"),
]
|
3,003 | 01b14da7d081a67bab6f9921bb1a6a4c3d5ac216 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
import datetime
class Document(models.Model):
    """An uploaded file stored under MEDIA_ROOT/documents/."""
    document = models.FileField(upload_to='documents/')
    uploaded_at = models.DateTimeField(auto_now_add=True)  # set once on insert

    def __str__(self):
        # The storage path of the file (FileField's string form).
        return str(self.document)
class Assignment(models.Model):
    """A practice assignment belonging to a technology track."""
    name = models.CharField(max_length=250)
    technology = models.CharField(max_length=100)
    directory = models.CharField(max_length=500, default="NA")

    def __str__(self):
        # e.g. "Basics-Python"
        return self.name + '-' + self.technology
class Assestment(models.Model):
    """A user's assessment record for a technology.

    NOTE(review): "Assestment" looks like a typo for "Assessment"; renaming
    the model would require a schema migration, so it is left unchanged.
    """
    name = models.CharField(max_length=250)
    technology = models.CharField(max_length=100)
    username = models.CharField(max_length=100, default="NA")
    # The callable is passed uncalled, so each row gets the save-time value.
    date = models.DateTimeField(default=datetime.datetime.now, blank=True)

    def __str__(self):
        return self.name + '-' + self.technology
class UserProfile(models.Model):
    """Extra per-user data linked one-to-one to auth.User."""
    # Positional OneToOneField without on_delete => Django < 2.0 style
    # (consistent with the django.core.urlresolvers import above).
    user = models.OneToOneField(User)
    email = models.CharField(max_length=100)
    # NOTE(review): IntegerField for a phone number drops leading zeros;
    # a CharField is the usual choice — confirm before changing the schema.
    phone = models.IntegerField(default=0)
    city = models.CharField(max_length=100)
def create_profile(sender, **kwargs):
    """post_save handler: create a UserProfile for each newly created User."""
    # kwargs['created'] is True only on the initial INSERT, not on updates.
    if kwargs['created']:
        user_profile = UserProfile.objects.create(user=kwargs['instance'])
post_save.connect(create_profile, sender=User) |
3,004 | 1a7a28a2264ed0204184ab1dd273b0b114657fa7 | # -*- coding:utf-8 -*-
from spider.driver.base.driver import Driver
from spider.driver.base.mysql import Mysql
import time
from pyquery import PyQuery
from spider.driver.base.field import Field,FieldName,Fieldlist,FieldType
from spider.driver.base.page import Page
from spider.driver.base.listcssselector import ListCssSelector
from spider.driver.base.mongodb import Mongodb
from spider.driver.base.tabsetup import TabSetup
fl_weixin1 = Fieldlist(
Field(fieldname='public_name', css_selector='div > div.txt-box > p.tit > a', regex=r'[^\u4e00-\u9fa5]*'),
)
fl_weixin2 = Fieldlist(
Field(fieldname='article_name', css_selector='div > div > h4'),
Field(fieldname='article_time', css_selector='div > div > p.weui_media_extra_info'),
)
page_weixin_1 = Page(name='微信公众号列表页面', fieldlist=fl_weixin1, listcssselector=ListCssSelector(list_css_selector='#main > div.news-box > ul > li'))
page_weixin_2 = Page(name='微信公众号文章列表页面', fieldlist=fl_weixin2, tabsetup=TabSetup(click_css_selector='div > div.txt-box > p.tit > a'), listcssselector=ListCssSelector(list_css_selector='#history > div'))
class WeixinSpider(Driver):
    """Sogou-Weixin spider: searches public accounts and walks their article lists."""

    def __init__(self, isheadless=False, ismobile=False, isvirtualdisplay=False, spider_id='', name=''):
        # spider_id doubles as the log file name for this run.
        Driver.__init__(self, log_file_name=spider_id, ismobile=ismobile,
                        isvirtualdisplay=isvirtualdisplay, isheadless=isheadless)
        self.name = name
        self.debug_log(name=name)

    def get_article(self, data_list=None):
        """Click through each article entry on the account page, then navigate back.

        BUG FIX: `data_list` previously defaulted to a shared mutable `[]`;
        it is unused in this body, but the None-default idiom avoids the
        shared-mutable-default pitfall for any future use.
        """
        if data_list is None:
            data_list = []
        article_list = self.until_presence_of_all_elements_located_by_css_selector(
            css_selector=page_weixin_2.listcssselector.list_css_selector)
        for i in range(1, len(article_list) + 1):
            # :nth-child() is 1-based, hence the shifted range.
            self.until_scroll_to_center_click_by_css_selector(
                css_selector='%s:nth-child(%s)' % (page_weixin_2.listcssselector.list_css_selector, i))
            time.sleep(3)
            self.driver.back()

    def run_spider(self):
        """For each public-account name from MySQL, search sogou and harvest data."""
        for public in Mysql().query_data(table='weixin_public', field='public_name')[:1]:
            self.fast_get_page(url='http://weixin.sogou.com/', min_time_to_wait=15, max_time_to_wait=30)
            self.until_send_text_by_css_selector(css_selector='#query', text=public[0])
            time.sleep(3)
            self.fast_enter_page_by_css_selector(css_selector='#query')
            time.sleep(2)
            self.fast_click_same_page_by_css_selector(click_css_selector='#scroll-header > form > div > input.swz2')
            public_name_list = self.from_page_get_data_list(page=page_weixin_1)
            article_name_list = self.from_page_add_data_list_to_data_list(
                page=page_weixin_2, pre_page=page_weixin_1,
                data_list=public_name_list, extra_page_func=self.get_article)
3,005 | 5d618acc0962447554807cbb9d3546cd4e0b3572 | #Calculadora mediante el terminal
numero1 = 0
numero2 = 0
#Preguntamos los valores
operacion = input("¿Qué operación quiere realizar (Suma / Resta / Division / Multiplicacion)?: ").upper()
numero1 = int(input("Introduzca el valor 1: "))
numero2 = int(input("Introduzca el valor 2: "))
#Realizamos las operaciones
if operacion == "SUMA":
resultado = numero1 + numero2
elif operacion == "RESTA":
resultado = numero1 - numero2
elif operacion == "DIVISION":
resultado = numero1 / numero2
elif operacion == "MULTIPLICACION":
resultado = numero1 * numero2
#Mostramos en pantalla el resultado
print("Resultado : {}".format(resultado)) |
3,006 | 9bbf0953d228c970764b8ba94675346820bc5d90 | #!../virtual_env/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from models.base import metadata
from sqlalchemy import create_engine
import os.path
engine = create_engine(SQLALCHEMY_DATABASE_URI)
metadata.create_all(engine)
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO)) |
3,007 | 5d05351cd6cd6c0d216e8bc09308532605bfd26e | from sys import exit
def hard():
    """Harder question; exits on a correct answer, falls back to easy() otherwise."""
    print("Nice! Let's try something harder")
    print("Could you calculate this for me?")
    print("4 * 35 + 18 / 2 = ")
    aws = input(">")
    # BUG FIX: under standard operator precedence 4*35 + 18/2 = 140 + 9 = 149.
    # The original accepted "176" (= 4 * (35 + 18/2)), marking the
    # mathematically correct answer as wrong.
    # The surrounding `while True` was removed: both branches terminate
    # (exit(0) here, easy() always exits), so a second iteration was unreachable.
    if aws == "149":
        print("Nice, you correctly answer all the questions")
        exit(0)
    else:
        print("Ummm not quite right, let's try something easier")
        easy()
def easy():
    """Fallback question; always terminates the program afterwards."""
    for line in (
        "Ok, seems like you are not good at math.",
        "What about this.",
        "Say you have 10 apples, your Mom gave you another 2.",
        "How many apples you have now?",
    ):
        print(line)
    answer = input("> ")
    # 10 + 2 = 12; either way the program ends with status 0.
    if answer == "12":
        print("You did a good job!")
    else:
        print("Oh well, it's not end of the world if you did badly in math")
    exit(0)
def start():
    """Entry point: age warm-up, then dispatch to hard() or easy()."""
    print("Let's do some math")
    print("How old are you?")
    choice = input("> ")
    # BUG FIX: a non-numeric age previously crashed with an unhandled
    # ValueError; now we re-prompt via the same fallback path.
    try:
        age = int(choice) + 20
    except ValueError:
        print("I don't know what that mean")
        start()
        return
    print(f"So after 20 years, you'll be {age}, right? (y/n)")
    choice = input("> ")
    while True:
        if "y" in choice:
            hard()
        elif "n" in choice:
            easy()
        else:
            print("I don't know what that mean")
            start()
|
3,008 | 4f870e0d86d9f9b8c620115a618ea32abc24c52d | # 只放置可执行文件
#
# from ..src import package
# data_dict = package.pack()
# from ..src.plugins import * #解释一遍全放入内存
# from ..src import plugins #导入这个文件夹(包,模块,类库),默认加载init文件到内存
#
#
# plugins.pack()
from ..src.script import run
if __name__ == '__main__':
run()
|
3,009 | f2c592a0ea38d800510323a1001c646cdbecefff | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:tom_tao626
@license: Apache Licence
@file: 17.列表中的元素统计.py
@time: 2020/12/09
@contact: tp320670258@gmail.com
@site: xxxx.suizhu.net
@software: PyCharm
"""
# collections.Counter(): count element occurrences in a list
from collections import Counter
list1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']
count = Counter(list1)
print(count)
# Counter({'b': 3, 'a': 2, 'e': 2, 'c': 1, 'd': 1})   ('b' occurs three times)
print(count['b'])
# 3
# the most common element (top-1)
print(count.most_common(1))
# [('b', 3)]
print(count.items())
# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])
|
3,010 | 1a72da7f436e6c5e73e396b771f8ce1a3affba1a | DEFAULT_LL_URL = "https://ll.thespacedevs.com"
DEFAULT_VERSION = "2.0.0"
DEFAULT_API_URL = "/".join([DEFAULT_LL_URL, DEFAULT_VERSION])
|
3,011 | 422a4945ebf453d3e09e9e7e76dd32b30488680e | import pandas as pd
df = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})
print(df.head())
#print(df['col2'].unique())
#print(df['col1'] > 2)
newdf = df[(df['col1']>0) & (df['col2'] == 444)]
print("========================")
print(newdf)
def times2(x):
return x*2
print("========================")
print(df['col1'].apply(times2))
print("========================")
print(df.sort_values(by='col2'))
print("========================")
print(df) |
3,012 | 41f70cdfc9cbe5ec4560c1f3271a4636cca06d16 | #!/usr/bin/env python
''' export_claims -- export claims in CSV format
https://sfreeclaims.anvicare.com/docs/forms/Reference-CSV%20Specifications.txt
'''
import csv
from itertools import groupby
from operator import itemgetter
import wsgiref.handlers
import MySQLdb
import ocap
from hhtcb import Xataface, WSGI
def cgi_main(xf, cal):
    """Run the claims report as a CGI application under a web server."""
    app = ReportApp(xf, cal)
    wsgiref.handlers.CGIHandler().run(app)
def _test_main(argv, xf):
    """Command-line harness: argv = [prog, out_file, visit_id, ...].

    Writes the CSV chunks to out_file and prints the page summaries.
    (Python 2 source: note the print statement below.)
    """
    outfn, visits = argv[1], argv[2:]
    host, user, password, name = xf.dbopts()
    content, pages = format_claims(MySQLdb.connect(host=host, user=user,
                                                   passwd=password, db=name),
                                   map(int, visits))
    outfp = open(outfn, 'w')
    for part in content:
        outfp.write(part)
    print pages
class ReportApp(object):
    """WSGI application that serves the claims CSV as a file download."""

    def __init__(self, xf, cal):
        self._xf = xf        # Xataface config: supplies DB credentials per request
        self._datesrc = cal  # date source (e.g. datetime.date) used in the filename

    def __call__(self, env, start_response):
        try:
            host, user, password, name = self._xf.webapp_login(env)
        except KeyError:
            # Missing report key in the request.
            start_response('400 bad request', WSGI.PLAIN)
            return ['missing key parameter ']
        except OSError:
            start_response('401 unauthorized', WSGI.PLAIN)
            return ['report key does not match.']
        conn = MySQLdb.connect(host=host, user=user, passwd=password, db=name)
        start_response('200 ok',
                       [('content-type', 'text/plain'),
                        ('Content-Disposition',
                         'attachment; filename=claims-%s.csv'
                         % self._datesrc.today())])
        # NOTE(review): format_claims requires (conn, visit_ids) but is called
        # with only conn here — this path raises TypeError at runtime. Confirm
        # which visit set the web report is supposed to cover.
        content, pages = format_claims(conn)
        return content
def format_claims(conn, visit_ids):
    """Render unbilled visits as CMS-1500-style CSV rows.

    Returns (csv_parts, pages): csv_parts is the list of CSV text chunks
    accumulated by ListWriter; pages is a list of per-claim summary dicts.
    """
    cursor = conn.cursor(MySQLdb.cursors.DictCursor)
    # QUERY is %-interpolated rather than parameterized; visit_ids are forced
    # through int() by callers, which bounds the injection risk.
    cursor.execute(QUERY % dict(
        visit_ids=', '.join([str(i) for i in visit_ids])))
    pages = []
    buf = ListWriter()
    out = csv.DictWriter(buf, COLUMNS, quoting=csv.QUOTE_ALL)
    # Header row: column names serve as both keys and values.
    out.writerow(dict(zip(COLUMNS, COLUMNS)))
    # One claim per client per page, at most 6 service lines each.
    for client_id, records in by_page(groupby(cursor.fetchall(),
                                              itemgetter('client_id')),
                                      pg_size=6):
        claim = records[0]
        tot = claim['28-TotalCharge']
        # Fold service lines 2..n into the first record's 24.x columns,
        # rewriting '.1.' to '.2.', '.3.', ... and accumulating charges.
        for idx in range(1, len(records)):
            for k, v in records[idx].items():
                if k.startswith('24.'):
                    kk = k.replace('.1.', '.%d.' % (idx + 1))
                    claim[kk] = v
                if k == '24.1.f-Charges':
                    tot += v
        claim['28-TotalCharge'] = tot
        # is there ever an amount paid?
        claim['30-BalanceDue'] = tot
        visit_ids = [r['visit_id'] for r in records]
        pages.append(dict(client_id=client_id,
                          total=tot,
                          visit_ids=visit_ids,
                          items=records,
                          detail=claim))
        # Internal keys are not CSV columns; remove before writing the row.
        del claim['client_id']
        del claim['visit_id']
        out.writerow(claim)
    return buf.parts, pages
def by_page(record_groups, pg_size):
    """Split each (key, records) group into pages of at most pg_size records.

    Yields (key, page_list) tuples; the key repeats for every page of its group.
    """
    for key, group in record_groups:
        records = list(group)
        for start in range(0, len(records), pg_size):
            yield key, records[start:start + pg_size]
class ListWriter(object):
    """Minimal file-like sink that collects every written chunk in a list.

    Used as the csv.writer target so the CSV output can be streamed as parts.
    """

    def __init__(self):
        # Chunks in write order; join or iterate to recover the full text.
        self.parts = []

    def write(self, txt):
        """Append one chunk to the accumulated output."""
        self.parts.append(txt)
QUERY = r'''
select c.id client_id, v.id visit_id
, co.name as `Insurance Company Name`
, co.address `Insurance Company Address 1`
, co.city_st_zip `Insurance Company Address 2`
, ins.payer_type `1-InsuredPlanName`
, ins.id_number `1a-InsuredIDNo`
, c.name as `2-PatientName`
, date_format(c.DOB, '%%m/%%d/%%Y') as `3-PatientDOB`
, ins.patient_sex `3-PatientGender`
, case when upper(ins.patient_rel) = 'SELF'
then c.name
else ins.insured_name end `4-InsuredName`
, c.address `5-PatientAddress`
, c.city `5-PatientCity`
, c.state `5-PatientState`
, c.zip `5-PatientZip`
, c.phone `5-PatientPhone`
, upper(ins.patient_rel) `6-PatientRel`
, case when upper(ins.patient_rel) = 'SELF'
then c.address
else ins.insured_address end `7-InsuredAddr`
, case when upper(ins.patient_rel) = 'SELF'
then c.city
else ins.insured_city end `7-InsAddCity`
, case when upper(ins.patient_rel) = 'SELF'
then c.state
else ins.insured_state end `7-InsAddState`
, case when upper(ins.patient_rel) = 'SELF'
then c.zip
else ins.insured_zip end `7-InsAddZip`
, case when upper(ins.patient_rel) = 'SELF'
then c.phone
else ins.insured_phone end `7-InsAddPhone`
, ins.patient_status `8-MaritalStatus`
, ins.patient_status2 `8-Employed?`
, 'NO' as `10a-CondEmployment`
, 'NO' as `10b-CondAutoAccident`
, 'NO' as `10c-CondOtherAccident`
, ins.insured_policy `11-InsuredGroupNo`
, date_format(case when upper(ins.patient_rel) = 'SELF'
then c.dob
else ins.insured_dob end, '%%m/%%d/%%Y') `11a-InsuredsDOB`
, case when upper(ins.patient_rel) = 'SELF'
then ins.patient_sex
else ins.insured_sex end `11a-InsuredsGender`
, 'Signature on file' `12-PatientSign`
, date_format(current_date, '%%m/%%d/%%Y') `12-Date`
, 'Signature on file' as `13-AuthSign`
, 'NO' as `20-OutsideLab`
, '0.00' as `20-Charges`
, ins.dx1 `21.1-Diagnosis`
, ins.dx2 `21.2-Diagnosis`
, ins.approval `23-PriorAuth`
, date_format(s.session_date, '%%m/%%d/%%Y') `24.1.a-DOSFrom`
, date_format(s.session_date, '%%m/%%d/%%Y') `24.1.a-DOSTo`
, v.cpt as `24.1.d-CPT`
, '11' as `24.1.b-Place`
, 1 as `24.1.e-Code`
, p.price `24.1.f-Charges`
, 1 as `24.1.g-Units`
, bp.npi `24.1.j-ProvNPI`
, bp.tax_id `25-TaxID`
, 'SSN' as `25-SSN/EIN`
, concat(upper(substr(c.name, 1, 3)), '.',
upper(substr(c.name, instr(c.name, ',') + 2, 3)), '.',
convert(c.id, char)) as `26-PatientAcctNo`
, 'Y' as `27-AcceptAssign`
, p.price as `28-TotalCharge`
, 0 `29-AmountPaid`
, p.price as `30-BalanceDue`
, bp.name as `31-PhysicianSignature`
, date_format(current_date, '%%m/%%d/%%Y') `31-Date`
, bp.name `33-ClinicName`
, bp.address as `33-ClinicAddressLine1`
, bp.city_st_zip as `33-ClinicCityStateZip`
, bp.npi as `33-a-NPI`
from Insurance ins
join Client c on ins.Client_id = c.id
join Carrier co on ins.Carrier_id = co.id
join Visit v on v.Client_id = c.id
join `Procedure` p on p.cpt = v.cpt
join `Session` s on v.Session_id = s.id
join `Group` g on s.Group_id = g.id
join Therapist as bp on bp.tax_id is not null
where v.bill_date is null and v.check_date is null
and v.id in (%(visit_ids)s)
order by c.name, c.id, s.session_date, v.id
'''
COLUMNS = [literal.strip()[1:-1] for literal in '''
"Insurance Company Name"
"Insurance Company Name 2"
"Insurance Company Address 1"
"Insurance Company Address 2"
"1-InsuredPlanName"
"1a-InsuredIDNo"
"2-PatientName"
"3-PatientDOB"
"3-PatientGender"
"4-InsuredName"
"5-PatientAddress"
"5-PatientCity"
"5-PatientState"
"5-PatientZip"
"5-PatientPhone"
"6-PatientRel"
"7-InsuredAddr"
"7-InsAddCity"
"7-InsAddState"
"7-InsAddZip"
"7-InsAddPhone"
"8-MaritalStatus"
"8-Employed?"
"9-InsuredName2"
"9a-InsuredGroupNo2"
"9b-Insureds2DOB"
"9b-Insureds2Gender"
"9c-EmployerName"
"9d-InsuredPlanName2"
"10a-CondEmployment"
"10b-CondAutoAccident"
"10c-CondOtherAccident"
"10b2-AccidentState"
"10d-LocalUse"
"11-InsuredGroupNo"
"11a-InsuredsDOB"
"11a-InsuredsGender"
"11b-EmployerName"
"11c-InsuredPlanName"
"11d-OtherHealthPlan"
"12-PatientSign"
"12-Date"
"13-AuthSign"
"14-DateOfCondition"
"15-FirstDateOfCondition"
"16-DateFromNoWork"
"16-DateToNoWork"
"17-ReferringPhysician"
"17a-PhysicianNo"
"17b-ReferNPI"
"18-DateFromHosp"
"18-DateToHosp"
"19-LocalUse"
"20-OutsideLab"
"20-Charges"
"21.1-Diagnosis"
"21.2-Diagnosis"
"21.3-Diagnosis"
"21.4-Diagnosis"
"22-MedicaidResubmissionCode"
"22-MedicaidResubmissionRefNo"
"23-PriorAuth"
"24.1.a-DOSFrom"
"24.1.a-DOSTo"
"24.1.b-Place"
"24.1.c-EMG"
"24.1.d-CPT"
"24.1.d-Modifier"
"24.1.e-Code"
"24.1.f-Charges"
"24.1.g-Units"
"24.1.h-Epsot"
"24.1.i-Qualifier"
"24.1.j-ProvLegacyNo"
"24.1.j-ProvNPI"
"24.2.a-DOSFrom"
"24.2.a-DOSTo"
"24.2.b-Place"
"24.2.c-EMG"
"24.2.d-CPT"
"24.2.d-Modifier"
"24.2.e-Code"
"24.2.f-Charges"
"24.2.g-Units"
"24.2.h-Epsot"
"24.2.i-Qualifier"
"24.2.j-ProvLegacyNo"
"24.2.j-ProvNPI"
"24.3.a-DOSFrom"
"24.3.a-DOSTo"
"24.3.b-Place"
"24.3.c-EMG"
"24.3.d-CPT"
"24.3.d-Modifier"
"24.3.e-Code"
"24.3.f-Charges"
"24.3.g-Units"
"24.3.h-Epsot"
"24.3.i-Qualifier"
"24.3.j-ProvLegacyNo"
"24.3.j-ProvNPI"
"24.4.a-DOSFrom"
"24.4.a-DOSTo"
"24.4.b-Place"
"24.4.c-EMG"
"24.4.d-CPT"
"24.4.d-Modifier"
"24.4.e-Code"
"24.4.f-Charges"
"24.4.g-Units"
"24.4.h-Epsot"
"24.4.i-Qualifier"
"24.4.j-ProvLegacyNo"
"24.4.j-ProvNPI"
"24.5.a-DOSFrom"
"24.5.a-DOSTo"
"24.5.b-Place"
"24.5.c-EMG"
"24.5.d-CPT"
"24.5.d-Modifier"
"24.5.e-Code"
"24.5.f-Charges"
"24.5.g-Units"
"24.5.h-Epsot"
"24.5.i-Qualifier"
"24.5.j-ProvLegacyNo"
"24.5.j-ProvNPI"
"24.6.a-DOSFrom"
"24.6.a-DOSTo"
"24.6.b-Place"
"24.6.c-EMG"
"24.6.d-CPT"
"24.6.d-Modifier"
"24.6.e-Code"
"24.6.f-Charges"
"24.6.g-Units"
"24.6.h-Epsot"
"24.6.i-Qualifier"
"24.6.j-ProvLegacyNo"
"24.6.j-ProvNPI"
"25-TaxID"
"25-SSN/EIN"
"26-PatientAcctNo"
"27-AcceptAssign"
"28-TotalCharge"
"29-AmountPaid"
"30-BalanceDue"
"31-PhysicianSignature"
"31-Date"
"32-FacilityName"
"32-FacilityAddressLine1"
"32-FacilityAddressLine2"
"32-FacilityCityStateZip"
"32-FacilityNPI"
"33-ClinicName"
"33-ClinicAddressLine1"
"33-ClinicAddressLine2"
"33-ClinicCityStateZip"
"33-PIN#"
"33-GRP#"
"33-a-NPI"
"33-b-GrpLegacyNo"
'''.strip().split('\n')]
if __name__ == '__main__':
    def _with_caps():
        """Gather ambient authority (env, filesystem) and dispatch CGI vs CLI."""
        from os import environ, path as os_path
        import datetime
        # ocap style: hand the app a read-only capability rooted at this dir.
        here = ocap.Rd(os_path.dirname(__file__), os_path,
                       open_rd=lambda n: open(n))
        xf = Xataface.make(here)
        if 'SCRIPT_NAME' in environ:
            # Running under a web server.
            cgi_main(xf, cal=datetime.date)
        else:
            from sys import argv
            _test_main(argv, xf)

    # BUG FIX: _with_caps was defined but never invoked, so running the
    # script as __main__ did nothing.
    _with_caps()
|
3,013 | 06a721c12e3140d4d1cf544a598f512595c4ab66 | #!/usr/bin/env python3
"""(Optional) Test for GameDealer class."""
import unittest
import os, sys
from functools import reduce
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], ".."))
import Lab19_Extending_Builtins.lab19_3 as game_dealer
WHOLE_DECK = sorted(game_dealer.Deck())
class ReportingDealer(game_dealer.GameDealer):
    """GameDealer only had methods that output strings,
    so here we provide a list version for testing.
    """
    def Report(self):
        """Return each player's hand as a list — one inner list per player."""
        return [p.hand for p in self.players]
class TestPlayCards(unittest.TestCase):
    """Exercises ReportingDealer over normal, empty and oversubscribed deals."""

    def testSmall(self):
        # One player, one card: the card must come from the deck.
        little = ReportingDealer(1, 1).Report()
        self.assertEqual(len(little), 1)
        self.assertEqual(len(little[0]), 1)
        self.assertTrue(little[0][0] in WHOLE_DECK)

    def testZilch(self):
        # Degenerate sizes: zero players and/or zero cards per hand.
        self.assertEqual([], ReportingDealer(0, 1).Report())
        self.assertEqual([[]], ReportingDealer(1, 0).Report())
        self.assertEqual([], ReportingDealer(0, 0).Report())

    def testWholeDealer(self):
        # 9 x 6 = 54 cards, asserted below to be exactly the whole deck
        # (so Deck() evidently holds 54 cards — presumably incl. jokers).
        all_hands = ReportingDealer(9, 6).Report()
        for hand in all_hands:
            self.assertEqual(len(hand), 6)
        self.assertEqual(len(all_hands), 9)
        all_hands_collapsed = sorted(
            reduce(lambda x, y: x + y, all_hands))
        self.assertEqual(all_hands_collapsed, WHOLE_DECK)

    def testTooMany(self):
        # 11 x 5 = 55 = deck + 1: exactly one "Sorry" filler card expected.
        too_many = ReportingDealer(11, 5).Report()
        too_many_collapsed = reduce(lambda x, y: x + y,
                                    too_many)
        self.assertTrue("Sorry" in too_many_collapsed)
        too_many_collapsed.remove("Sorry")
        too_many_collapsed.sort()
        self.assertEqual(too_many_collapsed, WHOLE_DECK)

    def testWayTooMany(self):
        # 11 x 6 = 66 = deck + 12: twelve "Sorry" fillers expected.
        way_too_many = ReportingDealer(11, 6).Report()
        way_too_many_collapsed = reduce(lambda x, y: x + y,
                                        way_too_many)
        self.assertEqual(len(way_too_many_collapsed), 66)
        self.assertEqual(way_too_many_collapsed.count("Sorry"),
                         12)
        for i in range(12):
            way_too_many_collapsed.remove("Sorry")
        way_too_many_collapsed.sort()
        self.assertEqual(way_too_many_collapsed, WHOLE_DECK)
if __name__ == "__main__":
unittest.main()
|
3,014 | 3efa5eb97af116929a7426ed3bfb5e4a170cfacd | import sys, math
nums = sys.stdin.readline().split(" ")
my_set = set()
my_list = []
for i in xrange(int(nums[1])):
inpt = int(sys.stdin.readline())
my_set.add(inpt)
my_list.append(inpt)
x = 0
for i in xrange(1, int(nums[0]) + 1):
if (i in my_set):
continue
while (x < len(my_list) and my_list[x] < i):
print my_list[x]
x += 1
print i
while (x < len(my_list)):
print my_list[x]
x += 1
|
3,015 | d44f8a2dee35d76c152695d49d73f74e9c25bfa9 | #read file
my_file=open("file.txt","r")
#print(my_file.read())
#print(my_file.readline())
#print(my_file.read(3))#read 3 caracteres
"""
for line in my_file:
print(line)
my_file.close()
"""
print(my_file.readlines())#list
#close file
my_file.close()
#create new file and writing
new_file=open("newfile.txt",mode="w",encoding="utf-8")
for i in range (5) :
new_file.write("new line "+str(i+1)+"\n")
new_file.close()
#append
a=["new line 5\n","new line 6\n"]
new_file=open("newfile.txt",mode="a+",encoding="utf-8")
new_file.writelines(a)
new_file.close()
|
3,016 | 1b4a012f5b491c39c0abd139dd54f2095ea9d221 | import re
from captcha.fields import CaptchaField
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from news.models import News, Comment, Profile
class UserRegisterForm(UserCreationForm):
    """User registration form (labels/help texts are user-facing, in Russian)."""
    username = forms.CharField(label='Имя пользоватьеля', help_text='Максимум 150 символов',
                               widget=forms.TextInput(attrs={"class": "form-control"}))
    password1 = forms.CharField(label='Пароль', widget=forms.PasswordInput(attrs={"class": "form-control"}))
    password2 = forms.CharField(label='Подтверждение пароля',
                                widget=forms.PasswordInput(attrs={"class": "form-control"}))
    email = forms.EmailField(label='Адрес электронной почты', widget=forms.EmailInput(attrs={"class": "form-control"}))

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')
        # Widgets are attached on the explicit field declarations above,
        # so the Meta-level overrides stay commented out.
        # widgets = {
        #     'username': forms.TextInput(attrs={"class": "form-control"}),
        #     'email': forms.EmailInput(attrs={"class": "form-control"}),
        #     'password1': forms.PasswordInput(attrs={"class": "form-control"}),
        #     'password2': forms.PasswordInput(attrs={"class": "form-control"}),
        # }
class UserLoginForm(AuthenticationForm):
    """Login form (labels are user-facing, in Russian)."""
    username = forms.CharField(label='Имя пользоватьеля',
                               widget=forms.TextInput(attrs={"class": "form-control"}))
    password = forms.CharField(label='Пароль', widget=forms.PasswordInput(attrs={"class": "form-control"}))
class NewsForm(forms.ModelForm):
    """News creation form."""
    class Meta:
        model = News
        fields = ['title', 'slug', 'content', 'photo', 'category']
        widgets = {
            'title': forms.TextInput(attrs={"class": "form-control"}),
            'content': forms.Textarea(attrs={"class": "form-control", "rows": 5}),
            'category': forms.Select(attrs={"class": "form-control"}),
        }

    # (Russian note below: "let's write our own validator for title")
    """Напишем собственный валидатор для title"""
    def clean_title(self):
        """Validate the cleaned title: it must not start with a digit."""
        title = self.cleaned_data['title']
        if re.match(r'\d', title):
            # User-facing validation message, intentionally left in Russian.
            raise ValidationError('Название не должно начинаться с цифры')
        return title
class ContactForm(forms.Form):
    """Feedback/contact form with a CAPTCHA (labels user-facing, in Russian)."""
    subject = forms.CharField(label='Тема',
                              widget=forms.TextInput(attrs={"class": "form-control"}))
    content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={"class": "form-control",
                                                                          'rows': 5}))
    captcha = CaptchaField()
class CommentForm(forms.ModelForm):
    """Comment form."""
    class Meta:
        model = Comment
        fields = ['text', ]
        widgets = {
            'text': forms.Textarea(attrs={"class": "form-control", "rows": 5}),
        }
class UserForm(forms.ModelForm):
    """Edit form for the built-in User's basic fields."""
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')
class ProfileForm(forms.ModelForm):
    """Edit form for the user's extended Profile fields."""
    class Meta:
        model = Profile
        fields = ['location', 'birth_date', ]
|
3,017 | 0f2d215a34758f85a29ef7ed8264fccd5e85b66f | #Peptide Encoding Problem: Find substrings of a genome encoding a given amino acid sequence.
# Input: A DNA string Text, an amino acid string Peptide, and the array GeneticCode.
# Output: All substrings of Text encoding Peptide (if any such substrings exist).
def reverse_string(seq):
    """Return seq with its characters in reverse order."""
    return ''.join(reversed(seq))
def complement(seq):
    """Return the complementary DNA strand, uppercased ('N' maps to 'N').

    Raises KeyError for characters outside A/C/G/T/N, as before.
    """
    pairs = {"A": "T", "C": "G", "G": "C", "T": "A", "N": "N"}
    return ''.join(pairs[base] for base in seq.upper())
def reversecomplement(seq):
    """Return the reverse complement of the DNA string.

    Complementing is per-character, so it commutes with reversal.
    """
    return complement(reverse_string(seq))
def DNA_To_AA(seq):
    """Translate a DNA string codon-by-codon into a protein string.

    Stop codons are emitted as '*'; trailing bases that do not form a full
    codon are ignored. Raises KeyError on codons with non-ACGT characters.
    """
    codon_table = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA': '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H',
                   'CGT': 'R', 'CGG': 'R', 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG': 'Q',
                   'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I', 'CAT': 'H', 'GGC': 'G', 'GGG': 'G',
                   'GCT': 'A', 'GAT': 'D', 'GCA': 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',
                   'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG': 'W', 'AGA': 'R', 'TTT': 'F',
                   'TAG': '*', 'TGC': 'C', 'GGA': 'G', 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V',
                   'CGC': 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E', 'TCT': 'S', 'ATT': 'I',
                   'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA': 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}
    residues = []
    for start in range(0, (len(seq) // 3) * 3, 3):
        residues.append(codon_table[seq[start:start + 3]])
    return ''.join(residues)
def Peptide_Encoding(DNA,AA_input):
    """Return all in-frame (offset 0) substrings of DNA that encode AA_input.

    Translates DNA once, then scans the protein for every occurrence of
    AA_input — including overlapping ones — and maps each hit back to the
    corresponding DNA substring (3 nucleotides per residue).
    """
    AA= DNA_To_AA(DNA)
    print(AA)  # debug: full translation of this reading frame
    l=len(AA_input)
    return_DNA=[]
    find_position=0
    while AA_input in AA[find_position:]:
        # Absolute index of the next match in the protein string.
        AA_position = find_position + AA[find_position:].find(AA_input)
        DNA_position = 3 * AA_position
        return_DNA.append(DNA[DNA_position:DNA_position + 3 * l])
        # Advance by one residue so overlapping matches are also found.
        find_position = AA_position + 1
    return return_DNA
# Driver script: load a genome from a text file (lines concatenated,
# uppercased), then search all six reading frames — 3 forward and 3 on the
# reverse complement — for DNA substrings encoding the requested peptide.
DNA=""
filename = input("Enter file name: ")
fileread = open(filename, "r")
for i in fileread:
    read = i.strip()
    DNA+=read.upper()
print(DNA[:200])  # preview of the loaded sequence
F_position=0
R_position=0
Aa_input=input("what is the aa?")
# Forward frames 1-3 (offsets 0, 1, 2).
DNA_F_1=DNA
print1=Peptide_Encoding(DNA_F_1,Aa_input)
if print1!=[]:
    for i in print1:
        print("1",i)
DNA_F_2=DNA[1:]
print2=Peptide_Encoding(DNA_F_2,Aa_input)
if print2!=[]:
    for i in print2:
        print("2",i)
DNA_F_3=DNA[2:]
print3=Peptide_Encoding(DNA_F_3,Aa_input)
if print3!=[]:
    for i in print3:
        print("3",i)
# Reverse-complement frames 4-6; each hit is printed back in
# forward-strand orientation via reversecomplement(i).
RC_DNA=reversecomplement(DNA)
DNA_R_1=RC_DNA
print4=Peptide_Encoding(DNA_R_1,Aa_input)
if print4!=[]:
    for i in print4:
        print("4",reversecomplement(i))
DNA_R_2=RC_DNA[1:]
print5=Peptide_Encoding(DNA_R_2,Aa_input)
if print5!=[]:
    for i in print5:
        print("5",reversecomplement(i))
DNA_R_3=RC_DNA[2:]
print6=Peptide_Encoding(DNA_R_3,Aa_input)
if print6!=[]:
    for i in print6:
        print("6",reversecomplement(i))
# Purpose: find genome substrings encoding the given amino-acid sequence.
# Sample inputs: "Bacillus brevis.txt" / "VKLFPWFNQY"
3,018 | 6f35c29f6f2dcc6c1dae3e9c1ddf595225748041 | #import cvxopt
from cvxopt import matrix, spmatrix, solvers
#import scipy
from scipy.special import expit
import numpy as np
import sys
import pandas as pd
import time
class KernelNC():
    """
    distance based classifier for spectrum kernels
    """
    def __init__(self, classes):
        # `classes`: one feature matrix per class (sparse, given the
        # .toarray() calls below — presumably scipy.sparse; confirm).
        self.classes = classes

    def compute_dist(self, X, Y):
        """Squared kernel distance from each row of X to the mean of class sample Y."""
        K_x = np.dot(X, X.T).toarray()
        K_y = np.dot(Y, Y.T).toarray()
        K_xy = np.dot(X, Y.T).toarray()
        # ||phi(x) - mean(phi(y))||^2 expanded into kernel terms.
        return np.diag(K_x) - 2*K_xy.mean(axis=1) + K_y.mean()

    def predict(self, X):
        """Assign each sample to the class with the nearest centroid."""
        dists = np.array([self.compute_dist(X, classe) for classe in self.classes])
        return dists.argmin(axis=0)

    def score(self, X, y):
        """Accuracy against labels y, in percent."""
        y__ = self.predict(X)
        return 100*(y__==y).mean()
class MultiKerOpt():
    """Multiple-kernel learning: alternates between fitting a base learner
    (KRR / KLR / SVM) on the weighted kernel combination and gradient-updating
    the kernel weights.
    """

    def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=False):
        # alpha: regularization strength; degree: polynomial power applied to
        # the combined kernel; method: 'klr', 'svm', anything else => KRR.
        self.alpha = alpha
        self.tol = tol
        self.degree = degree
        self.method = method
        self.hide = hide  # silence the cvxopt solver output when True

    def scale(self, u, norm):
        """Normalize u to unit l1 or l2 norm."""
        if norm=='l1':
            # NOTE(review): divides by sum(u), not sum(|u|) — for vectors with
            # negative entries this is not a true l1 normalization; confirm.
            return u/np.sum(u)
        elif norm=='l2':
            return u / np.sqrt(np.sum(u**2))
        else:
            raise Exception('l1 and l2 are the only available norms')

    def bound(self, u, u_0, gamma, norm):
        """Project u into a ball of radius gamma around u_0 (elementwise abs)."""
        u__ = u - u_0
        u__ = np.abs(self.scale(u__, norm) * gamma)
        return u__ + u_0

    def KrrIterate(self, Kernels, y, coef, weights = None):
        """
        Weighted KRR iterations
        """
        # Combined kernel: weighted sum raised to the polynomial degree.
        K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree
        N, D = K_w.shape
        if weights is None:
            # NOTE(review): solve(inv(K+aI), y) multiplies by (K+aI) rather
            # than solving (K+aI)c = y, which is the usual KRR system —
            # confirm whether inv() here is intentional.
            c = np.linalg.solve(np.linalg.inv(K_w + self.alpha * np.eye(N, D)), y[:, np.newaxis])
        else:
            # Sample-weighted variant: symmetrize with W^(1/2) on both sides.
            W_r = np.diag(np.sqrt(weights))
            A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N,D)
            Y = np.dot(W_r, y[:, np.newaxis])
            x_sol = np.linalg.solve(A, Y)
            c = np.dot(W_r, x_sol)
        return c

    def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
        """
        KLR iterations (IRLS: repeated weighted KRR solves)
        """
        c_old = self.KrrIterate(Kernels, y, coef)
        K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree
        y_enc = 2*y-1  # {0,1} labels -> {-1,+1}
        for i in range(max_iters):
            m_t = np.dot(K_w, c_old)                      # current margins
            p_t = -expit(-y_enc[:, np.newaxis]*m_t)       # negative class probabilities
            w_t = expit(m_t)*expit(-m_t)                  # IRLS weights p(1-p)
            # Working response; epsilon guards against zero weights.
            z_t = m_t - (p_t * y_enc[:, np.newaxis]) /(w_t+ 1e-05)
            c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=w_t.flatten())
            if np.linalg.norm(c_new - c_old)<tol:
                break
            else:
                c_old = c_new
        return c_old

    def SvmIterate(self, Kernels, y, coef):
        """
        SVM Estimation (cvxopt QP in the dual)
        """
        nb_samples = y.shape[0]
        C = 1 / ( 2 * self.alpha * nb_samples)  # box constraint from alpha
        r = np.arange(nb_samples)
        o = np.ones(nb_samples)
        z = np.zeros(nb_samples)
        K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree)
        y_enc = 2*y-1
        # QP: minimize (1/2) x'Px + q'x  s.t.  Gx <= h  (0 <= y*x <= C).
        P = matrix(K_w.astype(float), tc='d')
        q = matrix(-y_enc, tc='d')
        G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[r, r], tc='d')
        h = matrix(np.r_[o * C, z], tc='d')
        if self.hide:
            solvers.options['show_progress'] = False
        sol = solvers.qp(P, q, G, h)
        c = np.ravel(sol['x'])[:,np.newaxis]
        return c

    def gradUpdate(self, Kernels, coef, delta):
        """
        Gradient of the objective w.r.t. the kernel weights (chain rule
        through the degree-power of the combined kernel).
        """
        K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree-1)
        grad = np.zeros(len(Kernels))
        for m in range(len(Kernels)):
            grad[m] = delta.T.dot((K_t * Kernels[m])).dot(delta)
        return - self.degree * grad

    def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1, weights=None):
        """Alternating optimization: base-learner solve, then a projected
        gradient step on the kernel weights. Stores self.coef and self.delta.
        """
        coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)
        coef = self.bound(coef, u_0, gamma, norm)
        new_coef = 0
        score_prev = np.inf
        for i in range(n_iter):
            if self.method=='klr':
                delta = self.KlrIterate(Kernels, y, coef, tol=1e-07, max_iters=5)
            elif self.method=='svm':
                delta = self.SvmIterate(Kernels, y, coef)
            else:
                delta = self.KrrIterate(Kernels, y, coef, weights = weights)
            grad = self.gradUpdate(Kernels, coef, delta)
            new_coef = coef - step * grad
            new_coef = self.bound(new_coef, u_0, gamma, norm)
            # Convergence measure: sup-norm change of the weights.
            score = np.linalg.norm(new_coef - coef, np.inf)
            if score>score_prev:
                step *= 0.9  # backoff when the update grows
            if score<self.tol:
                # NOTE(review): sets the attributes but does not break, so the
                # loop keeps iterating after convergence — confirm intent.
                self.coef = coef
                self.delta = delta
            coef = new_coef
            score_prev = score.copy()
        self.coef, self.delta = coef, delta

    def predict(self, Kernels):
        """Sign prediction on the fitted combined kernel; remapped to {0,1}
        for classifiers (klr/svm), left as {-1,+1}-style sign output for krr."""
        K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** (self.degree)
        y__ = np.sign(K_w.dot(self.delta)).flatten()
        if self.method != 'krr':
            y__ = 0.5 * (y__ + 1)
        return y__

    def score(self, Kernels, y):
        """Accuracy in percent for classifiers; mean squared error for krr."""
        y__ = self.predict(Kernels)
        if self.method!='krr':
            score = 100*(y__==y).mean()
        else:
            score = np.mean((y__- y)**2)
        return score
def CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5, n_iter=5):
    """Cross-validated grid search over (degree, alpha) for MultiKerOpt.

    K_xx : stacked kernel matrices indexed K_xx[m, i, j] -- presumably one
        precomputed kernel per base kernel m; confirm with callers.
    K_yx : NOTE(review): never used inside this function (dead parameter?).
    Returns a DataFrame with one row per (degree, alpha) holding the mean
    train/validation scores over the cv folds.

    NOTE(review): degrees=[4] / alphas=[0.01] are mutable default arguments,
    shared across calls -- safe only while callers never mutate them.
    """
    tt = time.time()
    n_iters = cv * len(degrees) * len(alphas)
    n_samples = y.shape[0]
    DEG, ALPH, TRAIN, VAL = [], [], [], []
    i=0
    for degree in degrees:
        for alpha in alphas:
            DEG.append(degree)
            ALPH.append(alpha)
            # SPLITTING: shuffle the sample indices, then cut them into cv folds.
            INDS = np.array(range(n_samples))
            idx = np.random.permutation(n_samples)
            INDS = INDS[idx]
            vals = np.array_split(INDS, cv)
            perfs_train = []
            perfs_val = []
            for val in vals:
                i += 1
                sys.stderr.write('\rIteration %d/%d -- degree %d --alpha %.3f' %(i, n_iters, degree, alpha))
                sys.stderr.flush()
                train = np.setdiff1d(range(n_samples),val)
                clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree, method=method, hide=True)
                # Sub-kernels: rows restricted to the evaluated indices, columns to train.
                clf.fit(K_xx[:,train.reshape(-1,1), train], y[train], n_iter=n_iter)
                score_train = clf.score(K_xx[:,train.reshape(-1,1), train], y[train])
                score_val = clf.score(K_xx[:,val.reshape(-1,1), train], y[val])
                perfs_train.append(score_train)
                perfs_val.append(score_val)
            TRAIN.append(np.mean(np.array(perfs_train)))
            VAL.append(np.mean(np.array(perfs_val)))
    df = pd.DataFrame({'degree':DEG, 'alpha':ALPH, 'train':TRAIN, 'val':VAL})
    tt = time.time() - tt
    print('Done in %.3f'%(tt/60))
    return df
#
def get_best(df):
    """Return (degree, alpha, val) of the row with the highest validation score.

    Assumes df has 'degree', 'alpha' and 'val' columns with a default
    RangeIndex (positional argmax is used as a label).
    """
    vals = df.val.values
    winner = np.argmax(vals)
    return df.degree[winner], df.alpha[winner], np.max(vals)
|
3,019 | ac2edcd6ea71ebdc5b1df5fd4211632b5d8e2704 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 18:53:02 2020
@author: vinhe
I followed below tutorial to push newly created csv to google sheets:
https://medium.com/craftsmenltd/from-csv-to-google-sheet-using-python-ef097cb014f9
"""
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scopes required for both Sheets and Drive access.
scope = ["https://spreadsheets.google.com/feeds",
         "https://www.googleapis.com/auth/spreadsheets",
         "https://www.googleapis.com/auth/drive.file",
         "https://www.googleapis.com/auth/drive"]
# Service-account credentials; client_secret.json must sit next to this script.
credentials = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(credentials)
spreadsheet = client.open('golf-csv-to-sheets')
# Read the whole CSV and replace the spreadsheet's contents with it.
with open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:
    content = file_obj.read()
client.import_csv(spreadsheet.id, data=content)
|
3,020 | 14f7f31fa64799cdc08b1363b945da50841d16b5 |
class Component:
    # Base/marker class for ECS components (pure data holders); no behavior of its own.
    pass
class Entity:
    """A game object: an id plus a bag of components keyed by component type."""

    def __init__(self, id):
        self.id = id
        self.components = {}

    def add_component(self, component):
        """Attach *component*; at most one component per concrete type is allowed."""
        key = type(component)
        if key in self.components:
            raise Exception("This entity already has a component of that type")
        # Only one component of each type exists, so the type itself is the key.
        self.components[key] = component

    def has_component(self, component_type):
        """True if a component of exactly *component_type* is attached."""
        return component_type in self.components

    def get_component(self, component_type):
        """Return the attached component of *component_type* (KeyError if absent)."""
        return self.components[component_type]
class System:
    """Base ECS system: tracks the ids of entities that own every required
    component type and processes them each update tick."""

    def __init__(self, *required):
        self.required = required
        self.entity_ids = set()

    def bind_manager(self, manager):
        """Remember the owning manager (used to resolve ids back to entities)."""
        self.manager = manager

    def update(self, deltaTime):
        """Run one tick: begin(), process() every registered entity, end()."""
        self.begin()
        for eid in self.entity_ids:
            self.process(self.manager.get_entity_by_id(eid), deltaTime)
        self.end()

    def process(self, entity, deltaTime):
        """Override in subclasses with the system's per-entity behavior."""
        pass

    def begin(self):
        """Optional hook run before the first entity of a tick."""
        pass

    def end(self):
        """Optional hook run after the last entity of a tick."""
        pass

    def update_entity_registration(self, entity):
        """Add or drop *entity* from this system as its component set changes."""
        registered = entity.id in self.entity_ids
        if self.matches(entity):
            if not registered:
                self.entity_ids.add(entity.id)
        elif registered:
            self.entity_ids.remove(entity.id)

    def matches(self, entity):
        """True if *entity* carries every required component type."""
        return all(entity.has_component(need) for need in self.required)
class Manager:
    """Owns every entity and system; routes registration updates between them."""

    def __init__(self):
        self.entities = {}
        self.current_id = 0
        self.systems = []

    def create_entity(self):
        """Allocate a fresh Entity with the next sequential id and store it."""
        entity = Entity(self.current_id)
        self.current_id += 1
        self.entities[entity.id] = entity
        return entity

    def get_entity_by_id(self, id):
        """Look an entity up by its id (KeyError if unknown)."""
        return self.entities[id]

    # Use this to add components, not the entity method!! Wish there was a way
    # to enforce that in python -- it keeps the systems' registrations in sync.
    def add_component_to_entity(self, entity, component):
        entity.add_component(component)
        self.update_entity_registration(entity)

    def add_system(self, system):
        """Bind *system* to this manager and start ticking it on update()."""
        system.bind_manager(self)
        self.systems.append(system)

    def update(self, deltaTime):
        """Tick every system once, in registration order."""
        for sys_ in self.systems:
            sys_.update(deltaTime)

    def update_entity_registration(self, entity):
        """Re-evaluate *entity* against every system's component requirements."""
        for sys_ in self.systems:
            sys_.update_entity_registration(entity)
|
3,021 | cb0be932813a144cfb51b3aa2f6e0792e49c4945 | # encoding=UTF-8
# This file serves the project in production
# See http://wsgi.readthedocs.org/en/latest/
from __future__ import unicode_literals
from moya.wsgi import Application
# WSGI entry point: Moya assembles the app from local.ini + production.ini,
# using the "main" server section and the production logging config.
application = Application(
    "./", ["local.ini", "production.ini"], server="main", logging="prodlogging.ini"
)
|
3,022 | 73d1129418711c35046a99c1972a413357079836 | ../../2.0.2/mpl_examples/axes_grid/simple_axesgrid2.py |
3,023 | b7d75c2523dba0baaf06ba270045a4a344b8156c | """A simple script to create a motion plan."""
import os
import json
import logging
from logging.config import dictConfig
import argparse
import numpy as np
from opentrons_hardware.hardware_control.motion_planning import move_manager
from opentrons_hardware.hardware_control.motion_planning.types import (
AxisConstraints,
SystemConstraints,
MoveTarget,
vectorize,
Coordinates,
)
from typing import Dict, Any, List, cast
AXIS_NAMES = ["X", "Y", "Z", "A", "B", "C"]
log = logging.getLogger(__name__)
LOG_CONFIG: Dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
},
"handlers": {
"stream_handler": {
"class": "logging.StreamHandler",
"formatter": "basic",
"level": logging.INFO,
},
},
"loggers": {
"": {
"handlers": ["stream_handler"],
"level": logging.DEBUG,
},
},
}
def main() -> None:
    """Entry point.

    Reads a motion-parameter JSON file, plans a motion with MoveManager and
    writes the resulting moves (plus the origin vector) to an output JSON.
    """
    parser = argparse.ArgumentParser(description="Motion planning script.")
    parser.add_argument(
        "--params-file-path",
        "-p",
        type=str,
        required=False,
        default=os.path.join(os.path.dirname(__file__) + "/motion_params.json"),
        help="the parameter file path",
    )
    # NOTE(review): type=bool parses ANY non-empty string as True -- confirm
    # this flag is always passed as intended.
    parser.add_argument(
        "--debug",
        "-d",
        type=bool,
        required=False,
        default=False,
        help="set logging level to debug",
    )
    parser.add_argument(
        "--output",
        "-o",
        type=str,
        required=False,
        default=os.path.join(os.path.dirname(__file__) + "/motion_output.json"),
        help="the output file path",
    )
    # NOTE(review): --blend-log is parsed but never consulted below; only the
    # last blend-log entry is ever written.
    parser.add_argument(
        "--blend-log",
        "-b",
        choices=["last", "all"],
        required=False,
        default="last",
        help="output the last list or all of the blend log",
    )
    args = parser.parse_args()
    if args.debug:
        LOG_CONFIG["handlers"]["stream_handler"]["level"] = logging.DEBUG
        LOG_CONFIG["loggers"][""]["level"] = logging.DEBUG
    dictConfig(LOG_CONFIG)
    with open(args.params_file_path, "r") as f:
        params = json.load(f)
    # Per-axis constraints, origin and targets all come from the params file.
    constraints: SystemConstraints[str] = {
        axis: AxisConstraints.build(**params["constraints"][axis])
        for axis in AXIS_NAMES
    }
    origin_from_file: List[float] = cast(List[float], params["origin"])
    origin: Coordinates[str, np.float64] = dict(
        zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))
    )
    target_list = [
        MoveTarget.build(
            dict(zip(AXIS_NAMES, target["coordinates"])), target["max_speed"]
        )
        for target in params["target_list"]
    ]
    manager = move_manager.MoveManager(constraints=constraints)
    _, blend_log = manager.plan_motion(
        origin=origin,
        target_list=target_list,
        iteration_limit=params["iteration_limit"],
    )
    output = {
        "moves": [v.to_dict() for v in blend_log[-1]],
        "origin": list(vectorize(origin)),
    }
    def myconverter(obj: Any) -> Any:
        # json can't serialize numpy scalars; coerce them to builtin types.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        return obj
    with open(args.output, "w") as f:
        json.dump(output, f, indent=2, default=myconverter)
if __name__ == "__main__":
main()
|
3,024 | 0c8eb90c1d8a58f54186a30ce98a67310955a367 | import pygame
import utils
from random import randint
class TileSurface():
    """Off-screen pygame surface that owns a 150x150 grid of 16px tiles."""

    # Class-level (shared) sprite group and grid storage.
    tileGroup = pygame.sprite.Group()
    tileGrid = []

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.surface = pygame.Surface((width, height))

    def updatePos(self, x, y):
        """Move the surface origin to (x, y).

        Bug fix: the original body read ``self.x = self.x`` and
        ``self.y = self.y`` -- it ignored both arguments and was a no-op.
        """
        self.x = x
        self.y = y

    def generateTiles(self):
        """Build the 150x150 grid of Dirt tiles, 16px apart, starting at y=368."""
        tiles = []
        x = 0
        y = 368
        for i in range(0, 150):
            row = []
            for j in range(0, 150):
                newTile = Dirt(x, y, self)
                newTile.rect.x = x
                newTile.rect.y = y
                row.append(newTile)
                x += 16
            x = 0
            y += 16
            tiles.append(row)
        self.tileGrid = tiles

    def drawTiles(self):
        """Blit every tile in the grid onto this surface."""
        for row in self.tileGrid:
            for tile in row:
                tile.update()
class Tile(pygame.sprite.Sprite):
    """Base sprite for one map tile that draws itself onto a TileSurface."""
    # NOTE(review): these class-level defaults are immediately shadowed by
    # the instance attributes assigned in __init__.
    x = 0
    y = 0
    def __init__(self, sprite, x, y, surface):
        # Call pygame sprite init method
        super().__init__()
        self.image = pygame.image.load(sprite).convert_alpha() #load a sprite image
        self.rect = self.image.get_rect() # set collision rectangle
        self.x = x
        self.y = y
        self.parentSurface = surface
        self.parentSurface.tileGroup.add(self)
    def update(self):
        # Blit this tile onto its parent surface at its stored coordinates.
        self.parentSurface.surface.blit(self.image, (self.x, self.y))
class Dirt(Tile):
    """Dirt tile; picks one of three sprite variants (dirt01..dirt03) at random."""
    def __init__(self, x, y, surface):
        spriteVariant = randint(1, 3)
        super().__init__("./assets/dirt0" + str(spriteVariant) + ".png", x, y, surface)
class Air(Tile):
    """Empty/air tile backed by a single static sprite."""
    def __init__(self, x, y, surface):
        super().__init__("./assets/air.png", x, y, surface)
3,025 | b147a22d6bd12a954c0d85c11e578a67f0a51332 | number = int(input("Enter a number, and I'll tell you if it's even or odd: "))
# Parity check: remainder 0 mod 2 means even, anything else means odd.
if number % 2 == 0:
    print(f"{number} is an even number.")
else:
    print(f"{number} is an odd number.")
3,026 | a9531fb020428e573d189c377652692e301ea4d3 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
school = "Old boy"
def chang_name(name):
    """Demo of rebinding a module-level name from inside a function.

    Rebinds the global ``school``, prints the state before and after
    rebinding the local ``name``, and returns None. Rebinding ``name``
    inside the function never affects the caller's variable.
    """
    global school  # declare that 'school' refers to the module-level variable
    school = "Mage Linux"
    print("Before change:", name, school)
    name = 'Stack Cong'  # rebinds only the local parameter
    # (removed dead local: age = 33 was assigned but never used)
    print("After change:", name)
    print("School:", school)
name = "Stack"
chang_name(name)
print(name)
|
3,027 | 4e5e1be289b32655736d8c6c02d354a85d4268b7 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""BatchNorm (BN) utility functions and custom batch-size BN implementations"""
from functools import partial
import torch
import torch.nn as nn
from pytorchvideo.layers.batch_norm import (
NaiveSyncBatchNorm1d,
NaiveSyncBatchNorm3d,
) # noqa
def get_norm(cfg):
    """
    Args:
        cfg (CfgNode): model building configs, details are in the comments of
        the config file.
    Returns:
        nn.Module: the normalization layer.
    """
    # Plain (or apex-synced) BN: the stock PyTorch 3-D BatchNorm class.
    if cfg.BN.NORM_TYPE in {"batchnorm", "sync_batchnorm_apex"}:
        return nn.BatchNorm3d
    elif cfg.BN.NORM_TYPE == "sub_batchnorm":
        # Split-batch BN: stats computed per batch split (see SubBatchNorm3d).
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    elif cfg.BN.NORM_TYPE == "sync_batchnorm":
        # Cross-device synchronized BN from pytorchvideo.
        return partial(
            NaiveSyncBatchNorm3d,
            num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,
            global_sync=cfg.BN.GLOBAL_SYNC,
        )
    else:
        raise NotImplementedError(
            "Norm type {} is not supported".format(cfg.BN.NORM_TYPE)
        )
class SubBatchNorm3d(nn.Module):
    """
    The standard BN layer computes stats across all examples in a GPU. In some
    cases it is desirable to compute stats across only a subset of examples
    (e.g., in multigrid training https://arxiv.org/abs/1912.00998).
    SubBatchNorm3d splits the batch dimension into N splits, and run BN on
    each of them separately (so that the stats are computed on each subset of
    examples (1/N of batch) independently. During evaluation, it aggregates
    the stats from all splits into one BN.
    """
    def __init__(self, num_splits, **args):
        """
        Args:
            num_splits (int): number of splits.
            args (list): other arguments.
        """
        super(SubBatchNorm3d, self).__init__()
        self.num_splits = num_splits
        num_features = args["num_features"]
        # Keep only one set of weight and bias.
        if args.get("affine", True):
            self.affine = True
            args["affine"] = False  # inner BNs stay plain; scale/shift applied once in forward()
            self.weight = torch.nn.Parameter(torch.ones(num_features))
            self.bias = torch.nn.Parameter(torch.zeros(num_features))
        else:
            self.affine = False
        self.bn = nn.BatchNorm3d(**args)  # used at eval time, fed the aggregated stats
        args["num_features"] = num_features * num_splits
        self.split_bn = nn.BatchNorm3d(**args)  # used at train time on the folded batch
    def _get_aggregated_mean_std(self, means, stds, n):
        """
        Calculate the aggregated mean and stds.
        Args:
            means (tensor): mean values.
            stds (tensor): standard deviations.
            n (int): number of sets of means and stds.
        """
        mean = means.view(n, -1).sum(0) / n
        # NOTE: despite the parameter name, callers pass running_var here; this
        # combines per-split variances with the variance of the per-split
        # means (law of total variance, without Bessel correction).
        std = (
            stds.view(n, -1).sum(0) / n
            + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
        )
        return mean.detach(), std.detach()
    def aggregate_stats(self):
        """
        Synchronize running_mean, and running_var. Call this before eval.
        """
        if self.split_bn.track_running_stats:
            (
                self.bn.running_mean.data,
                self.bn.running_var.data,
            ) = self._get_aggregated_mean_std(
                self.split_bn.running_mean,
                self.split_bn.running_var,
                self.num_splits,
            )
    def forward(self, x):
        # Training: fold the split dimension into the channel dimension so
        # split_bn computes independent stats per split; eval: use the single
        # BN carrying the aggregated stats. x is (N, C, T, H, W).
        if self.training:
            n, c, t, h, w = x.shape
            x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
            x = self.split_bn(x)
            x = x.view(n, c, t, h, w)
        else:
            x = self.bn(x)
        if self.affine:
            x = x * self.weight.view((-1, 1, 1, 1))
            x = x + self.bias.view((-1, 1, 1, 1))
        return x
3,028 | f26dc3139413c4ed4b04484c095a433e53039cdb | import requests as r
import re
class web_scrap:
    """Naive link crawler: fetch the seed page, collect <a href> targets that
    start with '/', and recursively fetch them relative to the seed.

    NOTE(review): seed/result/tag_attr are class-level and therefore shared
    by all instances; visited flags are never set to 1, so cyclic sites can
    recurse forever; attr_parser indexes data[1] without checking the href
    is longer than one character. Review before real use.
    """
    seed=""
    result=""
    tag_attr=[]  # list of {href: visited_flag} one-entry dicts
    def __init__(self,seed):
        self.seed=seed
        self.set_tag()
        self.set_attr()
        self.fetch_web(self.seed)
        self.crawl()
    def fetch_web(self,link):
        # Download the page and harvest its anchor tags.
        self.result=r.get(link)
        self.extract_tags()
    def set_tag(self):
        # Regex matching an opening <a ...> tag.
        self.re_tag=r"(<a [^>]+>)"
    def set_attr(self):
        # Regex extracting the href="..." value from a tag.
        self.re_attr_parser=r"href\=\"([^\"]+)\""
    def extract_tags(self):
        # Print the page title (if any), then parse every anchor tag found.
        title=re.findall(r"<title>([^<]+)</title>",self.result.text)
        if len(title)!=0:
            print(title[0])
        else:
            print("No Title")
        tags=re.findall(self.re_tag,self.result.text)
        for i in tags:
            self.attr_parser(i)
    def attr_parser(self,tag):
        # Keep only hrefs starting with '/'; '//'-prefixed ones are stored
        # with one leading slash stripped -- presumably to keep them joinable
        # with the seed URL (TODO confirm against real pages).
        attributes=re.findall(self.re_attr_parser,tag)
        for data in attributes:
            if data[0]=="/":
                if data[1]=="/":
                    self.tag_attr.append({data[1:]:0})
                else:
                    self.tag_attr.append({data:0})
    def crawl(self):
        # Visit every collected link whose flag is still falsy (never updated!).
        for i in self.tag_attr:
            link=list(i.keys())[0]
            if(not i[link]):
                print(link)
                self.fetch_web(self.seed+link)
print("\t HELLO WELCOME TO EMAIL SCRAPPER")
scrap=web_scrap(input("enter the link \t"))
|
3,029 | 9d0d4707cc9a654752dd0b98fe0fec6a0c1419a1 | # -*- coding: utf-8 -*-
from handlers.base import Base
class Home(Base):
    """Front-page handler: wires the DB, page metadata, latest articles and ads."""
    def start(self):
        # Local imports keep the framework model setup off module import time.
        from movuca import DataBase, User
        from datamodel.article import Article, ContentType, Category
        from datamodel.ads import Ads
        self.db = DataBase([User, ContentType, Category, Article, Ads])
    def pre_render(self):
        # Required: a config plus self.response/self.request exposing a render (self.response.render).
        self.response = self.db.response
        self.request = self.db.request
        self.config = self.db.config
        #self.view = "app/home.html"
        self.response.meta.title = self.db.config.meta.title
        self.response.meta.description = self.db.config.meta.description
        self.response.meta.keywords = self.db.config.meta.keywords
        self.context.use_facebook = self.db.config.auth.use_facebook
    def last_articles(self):
        # Newest articles for the landing page.
        from helpers.article import latest_articles
        self.context.latest_articles = latest_articles(self.db)
    def ads(self):
        # Up to five random top-slider ads; placeholders when none are configured.
        self.context.ads = self.db(self.db.Ads.place == "top_slider").select(limitby=(0, 5), orderby="<random>")
        if not self.context.ads:
            from gluon.storage import Storage
            self.context.ads = [Storage(id=1, thumbnail='', link=self.db.CURL('contact', 'ads')),
                                Storage(id=2, thumbnail="http://placehold.it/250x220&text=%s" % self.db.T("Your add here!"), link=self.db.CURL('contact', 'ads')),
                                Storage(id=3, thumbnail="http://placekitten.com/250/220", link=self.db.CURL('contact', 'ads')),
                                Storage(id=3, thumbnail="http://placehold.it/250x220&text=%s" % self.db.T("Your Logo"), link=self.db.CURL('contact', 'ads'))
                                ]
    def featured(self):
        # Four random featured articles; otherwise fall back to the four most liked.
        self.context.featured = self.db(self.db.Article.featured == True).select(limitby=(0, 4), orderby="<random>")
        if not self.context.featured:
            self.context.featured = self.db(self.db.Article).select(limitby=(0, 4), orderby=~self.db.Article.likes)
|
3,030 | 886101e5d86daf6c2ac0fe92b361ccca6132b1aa | #!/usr/bin/env python
#_*_ coding:utf-8 _*_
'''
@author: tanglei
@contact: tanglei_0315@163.com
@file: index.py
@time: 2017/11/1 16:26
'''
#需求:
#1.每个客户端需要监控的服务不同
#2.每个服务的监控间隔不同
#3.允许模板的形式批量修改监控指标
#4.不同设备的监控阀值不同
#5.可自定义最近n分钟内hit\max\avg\last\... 指标超过阀值
#6.报警策略,报警等级,报警自动升级
#7.历史数据的存储和优化 时间越久数据越失真
#8.跨机房,跨区域代理服务器
#第三方的socket框架:twisted
|
3,031 | 5e17299e6a409e433e384935a815bab6ce178ff5 | import tkinter as tk # Import tkinker for GUI creation
from PIL import Image, ImageTk # Allow images to be used as backgrounds
import socket # Importing sockets for low level implementation of networks
import select # Importing select to poll between the user input and received message
import sys # Getting input from terminal and writing output to terminal
# Size of GUI
HEIGHT = 714
WIDTH = 1000
root = tk.Tk() #Define root to begin window
def sigint_handler(signum, frame):
    """SIGINT (Ctrl+C) handler: announce the disconnect, then stop the process."""
    print('\n Disconnecting from server')
    raise SystemExit
# creating the client_socket object and adding the TCP/IP and IPv4 protocol
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# IP and PORT of the socket
IP = "127.0.0.1"
PORT = 42069
# Let's connect to the server!
client_socket.connect((IP, PORT))
# Handling Ctrl+C in a very cool way
import signal
signal.signal(signal.SIGINT, sigint_handler)
# Clever function to send username to the server
# Format
# Header: length_of_username
# Body: Username
# Header length used to receive the username
HEADER_LENGTH = 10
def sendUsernameToServer(username_entry):
    """Send the username with a fixed-width length header, then poll for a reply.

    Wire format: a 10-character left-justified byte-length header followed by
    the UTF-8 encoded username (matching the server's HEADER_LENGTH protocol).
    """
    username = username_entry.encode('utf-8')
    username_header = f"{len(username):<{HEADER_LENGTH}}".encode('utf-8')
    client_socket.send(username_header + username)
    checkIO()
def checkIO():
    """Block until stdin or the server socket is readable, then handle it.

    A server message (up to 2048 bytes) is mirrored into the GUI text_label
    and stdout; an empty read means the server closed, so the process exits.
    """
    # polling between user input and message received from the server
    sockets_list = [sys.stdin, client_socket]
    # checking for I/O in read_sockets
    read_sockets, write_socket, error_socket = select.select(
        sockets_list, [], [])
    # NOTE(review): the loop variable shadows the imported 'socket' module.
    for socket in read_sockets:
        # If socket == client_socket, we got a message
        if socket == client_socket:
            message = socket.recv(2048)
            if not len(message):
                text_label['text'] = "Connection closed by server"
                print("Connection closed by server")
                sys.exit()
            text_label['text'] = message.decode('utf-8')
            print(message.decode('utf-8'))
def sendY():
    """Send a literal 'y' answer to the server, then wait for its reply."""
    message = 'y'
    message = message.encode('utf-8')
    client_socket.send(message)
    #sys.stdout.write(str(my_username) + " > ")
    # sys.stdout.write(message.decode('utf-8'))
    sys.stdout.flush()
    checkIO()
def sendN():
    """Send a literal 'n' answer to the server, then wait for its reply."""
    message = 'n'
    message = message.encode('utf-8')
    client_socket.send(message)
    #sys.stdout.write(str(my_username) + " > ")
    # sys.stdout.write(message.decode('utf-8'))
    sys.stdout.flush()
    checkIO()
#client_socket.close()
#-----------------------------------------------------
#-------------GUI-LAYOUT------------------------------
#-----------------------------------------------------
canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)
canvas.pack()
background_image = tk.PhotoImage(file='background.gif')
background_label = tk.Label(root, image=background_image)
background_label.place(relwidth=1, relheight=1)
covid_label = tk.Label(root, text="COVID-19 Helper", bg="sky blue")
covid_label.config(font=("Arial", 40))
covid_label.place(relx=0.12, rely=0.1, relwidth=0.76, relheight=0.1)
main_frame = tk.Frame(root, bg="light blue")
main_frame.place(relx=0.12, rely=0.2, relwidth=0.76, relheight=0.7)
#----------------------------------------------------------
right_frame = tk.Frame(main_frame, bg="sky blue")
right_frame.place(relx=0.74, rely=0.05, relwidth=0.23, relheight=0.9)
heat_button = tk.Button(right_frame, text="View HeatMap", bg="deep sky blue", activebackground="steel blue")
heat_button.place(relx=0.05, rely=0.04, relwidth=0.9, relheight=0.2)
info_button = tk.Button(right_frame, text="Covid-19 HSE Info", bg="deep sky blue", activebackground="steel blue")
info_button.place(relx=0.05, rely=0.28, relwidth=0.9, relheight=0.2)
contact_button = tk.Button(right_frame, text="Heathcare Contacts", bg="deep sky blue", activebackground="steel blue")
contact_button.place(relx=0.05, rely=0.52, relwidth=0.9, relheight=0.2)
doctor_button = tk.Button(right_frame, text="Speak with a doctor", bg="orange2", activebackground="DarkOrange1")
doctor_button.place(relx=0.05, rely=0.76, relwidth=0.9, relheight=0.2)
#----------------------------------------------------------
left_frame = tk.Frame(main_frame, bg="sky blue")
left_frame.place(relx=0.03, rely=0.05, relwidth=0.69, relheight=0.9)
text_frame = tk.Frame(left_frame, bg="ghost white")
text_frame.place(relx=0.05, rely=0.05, relwidth= 0.9, relheight=0.6)
text_label = tk.Label(text_frame, bg="ghost white", font=('Courier', 10))
text_label['text'] = "Please enter your username and click\n'Connect to testing server'"
text_label.place(relwidth=1, relheight=1)
server_button = tk.Button(left_frame, text="Connect to testing server", bg="deep sky blue", activebackground="steel blue", command=lambda: sendUsernameToServer(username_entry.get()))
server_button.place(relx=0.05, rely=0.7, relwidth=0.9, relheight=0.05)
username_label = tk.Label(left_frame, text="Username:", bg="DarkSeaGreen1")
username_label.place(relx=0.05, rely=0.77, relwidth=0.2, relheight=0.05)
username_entry = tk.Entry(left_frame, bg="PaleGreen1")
username_entry.place(relx=0.3, rely=0.77, relwidth=0.65, relheight=0.05)
yes_button = tk.Button(left_frame, text="Yes", bg="deep sky blue", activebackground="steel blue", command=lambda: sendY())
yes_button.place(relx=0.05, rely=0.84, relwidth=0.44, relheight=0.12)
no_button = tk.Button(left_frame, text="No", bg="deep sky blue", activebackground="steel blue", command=lambda: sendN())
no_button.place(relx=0.51, rely=0.84, relwidth=0.44, relheight=0.12)
#----------------------------------------------------------
root.mainloop() |
3,032 | e7494104ab98df2b640f710fa69584802b3e1259 | class Solution:
def maximumTime(self, time: str) -> str:
ans = ''
for i in range(5):
if time[i] != '?':
ans += time[i]
continue
if i == 0:
if time[1] in ['0', '1', '2', '3', '?']:
ans += '2'
else:
ans += '1'
elif i == 1:
if ans[0] == '1' or ans[0] == '0':
ans += '9'
else:
ans += '3'
elif i == 3:
ans += '5'
elif i == 4:
ans += '9'
return ans |
3,033 | a01783e3687278d1ec529c5123b9151721ba3364 | # coding=utf8
def InsertSort(array_a, n):
    """Insertion-sort the first n elements of array_a in place (ascending).

    Returns the (mutated) list.
    """
    for i in range(1, n):
        temp = array_a[i]
        j = i - 1
        # Guard j >= 0 BEFORE indexing: the original tested array_a[j] first
        # and silently relied on Python's negative indexing when j hit -1.
        while j >= 0 and temp < array_a[j]:
            array_a[j + 1] = array_a[j]  # shift larger predecessors right
            j -= 1
        array_a[j + 1] = temp
    return array_a
def ShellSort(array_a, n):
    """Shell-sort the first n elements of array_a in place (ascending).

    Gap sequence n//2, n//4, ..., 1. Returns the (mutated) list.
    Fixes: // keeps the gap an int on Python 3 (was /), range replaces the
    Python-2-only xrange, and the bounds check precedes the index access.
    """
    dk = n // 2
    while dk >= 1:
        for i in range(0, dk):  # one gapped insertion sort per offset chain
            for j in range(i + dk, n, dk):
                temp = array_a[j]
                k = j - dk
                while k >= 0 and temp < array_a[k]:
                    array_a[k + dk] = array_a[k]
                    k -= dk
                array_a[k + dk] = temp
        dk = dk // 2
    return array_a
def ShellSort2(array_a, n):
    """Shell sort, single-pass-per-gap variant; sorts array_a[:n] in place.

    Returns the (mutated) list. Fixes: // keeps the gap an int on Python 3
    (was /), and the bounds check precedes the index access.
    """
    dk = n // 2
    while dk >= 1:
        # Walk every index once per gap; interleaved chains sort together.
        for i in range(dk, n):
            temp = array_a[i]
            k = i - dk
            while k >= 0 and temp < array_a[k]:
                array_a[k + dk] = array_a[k]
                k -= dk
            array_a[k + dk] = temp
        dk = dk // 2
    return array_a
def BubbleSort(array_a, n):
    """Bubble sort with early exit: sort array_a[:n] in place, ascending.

    Returns the (mutated) list; bails out of the outer loop as soon as a
    full pass performs no swap.
    """
    for i in range(n - 1):
        swapped = False
        for j in range(n - 1, i, -1):
            if array_a[j] < array_a[j - 1]:
                array_a[j], array_a[j - 1] = array_a[j - 1], array_a[j]
                swapped = True
        if not swapped:
            return array_a  # already ordered -- nothing moved this pass
    return array_a
def QuickSort(array_a, low, high):
    """Quicksort array_a[low..high] in place (ascending) around Partition's pivot."""
    if low >= high:
        return  # zero or one element: nothing to do
    split = Partition(array_a, low, high)
    QuickSort(array_a, low, split - 1)
    QuickSort(array_a, split + 1, high)
def Partition(array_a, low, high):
    """Partition array_a[low..high] in place around the pivot array_a[low].

    Elements <= pivot end up to its left, >= pivot to its right; returns the
    pivot's final index.
    """
    pivot = array_a[low]
    while low < high:
        while low < high and array_a[high] >= pivot:
            high -= 1
        array_a[low] = array_a[high]  # move the smaller-than-pivot element left
        while low < high and array_a[low] <= pivot:
            low += 1
        array_a[high] = array_a[low]  # move the larger-than-pivot element right
    array_a[low] = pivot
    return low
def SelectSort(array_a, n):
    """Selection-sort the first n elements of array_a in place (ascending).

    Bug fix: the parameter was misspelled 'arrau_a', so the body silently
    operated on a module-level global named array_a instead of the argument.
    Also replaces Python-2-only xrange with range and avoids shadowing the
    built-in min().
    """
    for i in range(n - 1):
        min_idx = i
        # Find the smallest remaining element ...
        for j in range(i + 1, n):
            if array_a[j] < array_a[min_idx]:
                min_idx = j
        # ... and swap it into position i.
        array_a[i], array_a[min_idx] = array_a[min_idx], array_a[i]
def BuildMaxHeap(array_a, n):
    """Heapify array_a[1..n-1] into a max-heap (index 0 is a scratch slot).

    Fix: // keeps the range bound an int on Python 3 (the original used /).
    Sifts each internal node down, from the last parent up to the root.
    """
    for i in range(n // 2, 0, -1):
        AdjustDown(array_a, i, n - 1)
def AdjustDown(array_a, k, n):
    """Sift array_a[k] down so the subtree rooted at k obeys the max-heap rule.

    Heap layout: elements live in array_a[1..n]; array_a[0] serves as a
    scratch slot for the value being sifted (it is clobbered).
    """
    array_a[0] = array_a[k]
    i = 2 * k
    while (i <= n):  # walk down along k's children
        if i < n:
            if array_a[i] < array_a[i + 1]:
                i += 1  # pick the larger of the two children
        if array_a[0] > array_a[i]:
            break
        else:
            array_a[k] = array_a[i]  # promote the child into the parent slot
            k = i
            i *= 2
    array_a[k] = array_a[0]  # drop the sifted value into its final position
def HeapSort(array_a, n):
    """Heap sort. Afterwards array_a[1:] holds the ascending result.

    A scratch slot is prepended, so the input list grows by one element and
    array_a[0] ends up holding leftover scratch data.
    NOTE(review): the n argument is immediately overwritten with
    len(array_a); the parameter is effectively unused.
    """
    array_a.insert(0, 0)  # shift everything right; index 0 becomes the scratch slot
    n = len(array_a)
    BuildMaxHeap(array_a, n)
    for i in range(n - 1, 1, -1):
        temp = array_a[i]
        array_a[i] = array_a[1]
        array_a[1] = temp  # move the current maximum to the end of the unsorted part
        AdjustDown(array_a, 1, i - 1)  # restore the heap on the remaining i-1 elements
def Merge(array_a, low, mid, high):
# 合并array_a的[low,...mid]和[mid+1,...high]的各自有序的两部分为一个新的有序表
b = []
for each in array_a[low:high + 1]:
b.append(each)
i, j = low, mid + 1
k = i
while i <= mid and j <= high:
if b[i - low] <= b[j - low]:
array_a[k] = b[i - low]
i += 1
else:
array_a[k] = b[j - low]
j += 1
k += 1
while i <= mid:
array_a[k] = b[i - low]
k += 1
i += 1
while j <= high:
array_a[k] = b[j - low]
k += 1
j += 1
def MergeSort(array_a, low, high):
    """Recursively merge-sort array_a[low..high] in place (ascending).

    Fix: // keeps the midpoint an int on Python 3 (the original used /).
    """
    if low < high:
        mid = (low + high) // 2
        MergeSort(array_a, low, mid)
        MergeSort(array_a, mid + 1, high)
        Merge(array_a, low, mid, high)  # combine the two sorted halves
array_a = [1, 2, 5, 3, 4, 9, 6, 5, 4, 76, 88, 0, -1]
MergeSort(array_a, 0, len(array_a) - 1)
print array_a
|
3,034 | 024bc95f7255bb8be5c3c4ade9d212c9555a4f01 | string="Rutuja MaluSare"
# Demo of basic str methods applied to the module-level 'string' variable.
print(string.casefold())
print(len(string))
"""string1=input("enter string 1")
print("string1")
print(len(string1))
string2=input("enter string 2")
print("string2")
print(len(string2))
string3=string1+string2
print(len(string3))"""
# lower case
print(string.lower())
# upper case
print(string.upper())
# strip = removes whitespace from the start and end
a=" hello "
print(a)
print(a.strip())
# isdigit (NOTE(review): b is an int literal, so str.isdigit is never actually called)
b= 12
print(b)
3,035 | 019e8d7159fe07adc245e6476ac1fed5e9c457b5 | import os, sys, string
import linecache, math
import numpy as np
import datetime , time
from pople import NFC
from pople import uniqatoms
from pople import orca_printbas
####### orca_run - S
def orca_run(method, basis,optfreq,custombasis, correlated, values, charge, multip, sym, R_coord):
    """
    Runs orca
    Parameters:
    method (char) : Name of functional to be used
    basis (char) : Basis set name
    optfreq (char) : true/false value of the optfreq keyword
    custombasis (char) : true/false value of the custombasis keyword
    correlated (char) : true/false value of the correlated keyword
    values (dict): Values of the control variables
    charge (int) : total molecular charge
    multip (int) : spin multiplicity
    sym (list) : element symbols, one per atom
    R_coord (list) : per-atom [x, y, z] Cartesian coordinates

    Writes input.com, invokes the ORCA executable on it, and appends the
    input/output to ORCA.inp / ORCA.out in the working directory.
    """
    with open("input.com", "w") as com_f:
        # --- Keyword line: functional, basis, optimization and frequency flags ---
        if optfreq == "true":
            if values["verticalIP"] != "true" or values["IPss"] != "true": # IPss not defined
                if values["MGGA"] == "true":
                    Freqstr="NumFreq"  # meta-GGAs need numerical frequencies
                else:
                    Freqstr="Freq"
                if custombasis == "true":
                    com_f.write("! " +str(method) + " " + values["String_Opt"] + " " + Freqstr + " \n")
                else:
                    com_f.write("! " +str(method) + " " + str(basis) +" "+values["String_Opt"] + " " + Freqstr + " \n")
            else:
                # NOTE(review): Freqstr is only assigned in the branch above;
                # this path would raise NameError -- confirm it is unreachable.
                if custombasis == "true":
                    com_f.write("! " +str(method) + " " + Freqstr + " \n")
                else:
                    com_f.write("! " +str(method) + " " + str(basis) + " " + Freqstr + " \n")
        else:
            if custombasis == "true":
                com_f.write("! " +str(method) + " \n")
            else:
                com_f.write("! " +str(method) + " " + str(basis) + " \n")
        # --- Geometry block: charge, multiplicity and one line per atom ---
        Nat=len(sym)
        com_f.write("*xyz "+str(charge)+" "+str(multip) + "\n")
        for tmp in range(Nat):
            R_x=float(R_coord[tmp][0])
            R_y=float(R_coord[tmp][1])
            R_z=float(R_coord[tmp][2])
            com_f.write(' {:2s}{:15.8f}{:15.8f}{:15.8f}\n'.format(sym[tmp],R_x,R_y,R_z))
        com_f.write("*\n")
        # --- SCF / memory settings ---
        com_f.write("%MaxCore " + values["maxcore_mb"] + "\n")
        com_f.write("%scf\n MaxIter 500 \n")
        com_f.write(" Convergence " + values["conv_scf"] + "\n")
        com_f.write("end\n")
        if values["switch_guess"] == "true": ### this is not part of the inp file!!!
            if values["guess_TM"] == "true" and values["G4MP2TM"]:
                com_f.write(" Guess = " + values["option_guess"] + "\n")
                com_f.write("end\n")
        if values["switch_load_rel_file"] == "true":
            # Splice a prepared relativistic-settings file into the input.
            f1 = open("rel_file.txt", "r")
            com_f.write(f1.read())
            f1.close()
            with open("Thermochemistry.out", "a") as ther_chem:
                ther_chem.write("check if rel_file.txt exists!!")
        # --- Optional SCF convergence helpers ---
        if values["SCFDIIS"] == "true":
            com_f.write("%scf\n DIISMaxEq 15\n")
            com_f.write(" directresetfreq 1\n")
            com_f.write("end\n")
        if values["LSHIFT"] == "true":
            com_f.write("%scf\n")
            com_f.write(" Shift Shift 0.1 ErrOff 0.1 end\n")
            com_f.write("end\n")
        if values["SOSCF"] == "true":
            com_f.write("%scf\n")
            com_f.write(" soscfmaxit 12\n")
            com_f.write(" directresetfreq 1\n")
            com_f.write("end\n")
        if values["switch_DLPNO_CCSDT"] == "true":
            com_f.write("%mdci\n")
            com_f.write(" UseFullLMP2Guess true\n")
            com_f.write(" TcutDOPre = " + str(values["TcutDOPre"]) +"\n") #TODO Is this really needed?
            com_f.write("end\n")
        # --- Parallelism: fall back to a single process for tiny electron counts ---
        if ( float(values["Ntotale"]) <= float(values["nproc"]) ) or ( (float(values["Ntotale"])-float(values["Ntotalecore"])) < float(values["nproc"]) ):
            com_f.write("%pal nprocs 1 \n")
        else:
            com_f.write("%pal nprocs "+values["nproc"]+" \n")
        com_f.write("end\n")
        com_f.write("%method\n") ## CHECK
        com_f.write(" IntAcc 7.0\n")
        if values["optdiis"] == "true":
            com_f.write(" Z_solver DIIS\n")
            com_f.write(" Z_MaxIter 300\n")
        if correlated == "true":
            # Frozen-core counts per unique element (0 for all-electron runs).
            uniq_atom_res = uniqatoms(sym)
            if values["ALLELE"] == "true": ### CHECK!!!!
                for iat in range(int(uniq_atom_res["N_ua"])):
                    pre1 = uniq_atom_res["uniq_sym"]
                    at_pr1 = pre1[iat]
                    com_f.write(" NewNCore " + at_pr1 + " " + " 0 end\n")
            else:
                for iat in range(int(uniq_atom_res["N_ua"])):
                    pre1 = uniq_atom_res["uniq_sym"]
                    at_pr1 = pre1[iat]
                    NFC_res = NFC(at_pr1)
                    com_f.write(" NewNCore " + at_pr1 + " " + str(NFC_res) +" end\n")
        com_f.write("end\n")
        # --- Geometry optimization / frequency settings ---
        if optfreq == "true":
            com_f.write("%geom\n")
            if values["MGGA"] == "true":
                com_f.write(" Calc_Hess true; NumHess true\n")
            else:
                com_f.write(" Calc_Hess true\n")
            com_f.write(" Recalc_Hess " + str(values["iterhess"]) +" \n") ## revisit !!!! CHECK!!! IMPORTANT
            com_f.write("end\n")
            com_f.write("%freq Temp 273.15, 298.15\n")
            com_f.write("end\n")
        if custombasis == "true":
            com_f.write("%basis \n")
    # Append the custom basis definitions for each unique element, then close the block.
    if custombasis == "true":
        uniq_atom_res = uniqatoms(sym)
        fname = basis
        if Nat == 1:
            orca_printbas(fname, sym[0])
        else:
            for iat1 in range(int(uniq_atom_res["N_ua"])):
                orca_printbas(fname, uniq_atom_res["uniq_sym"][iat1]) # GTBAS1 C
        with open("input.com", "a") as com_f:
            com_f.write("end\n")
    # Run ORCA and archive the input/output of this step.
    os.system(values["orca_exe"] + " input.com > input.out")
    os.system("cat input.com >> ORCA.inp")
    os.system("cat input.out >> ORCA.out")
    #os.system("rm -f input*")
####### orca_run - E
|
3,036 | cd1ada2d7979fffc17f707ed113efde7aa134954 | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path as osp
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch.distributed import barrier
from torch.nn import Module
from nncf.api.compression import CompressionAlgorithmController
from nncf.common.compression import BaseCompressionAlgorithmController as BaseController
from nncf.common.deprecation import warning_deprecated
from nncf.common.logging import nncf_logger
from nncf.common.utils.api_marker import api
from nncf.common.utils.debug import set_debug_log_dir
from nncf.config import NNCFConfig
from nncf.config.extractors import extract_algorithm_names
from nncf.config.telemetry_extractors import CompressionStartedFromConfig
from nncf.telemetry import tracked_function
from nncf.telemetry.events import NNCF_PT_CATEGORY
from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS
from nncf.torch.algo_selector import NoCompressionAlgorithmBuilder
from nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder
from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder
from nncf.torch.dynamic_graph.graph_tracer import create_input_infos
from nncf.torch.nncf_network import NNCFNetwork
# pylint:disable=too-many-branches
from nncf.torch.utils import is_dist_avail_and_initialized
from nncf.torch.utils import is_main_process
from nncf.torch.utils import maybe_convert_legacy_names_in_compress_state
from nncf.torch.utils import training_mode_switcher
@api(canonical_alias="nncf.torch.create_compressed_model")
@tracked_function(
    NNCF_PT_CATEGORY,
    [
        CompressionStartedFromConfig(argname="config"),
    ],
)
def create_compressed_model(
    model: Module,
    config: NNCFConfig,
    compression_state: Optional[Dict[str, Any]] = None,
    dummy_forward_fn: Callable[[Module], Any] = None,
    wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
    wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
    dump_graphs=True,
) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:
    """
    The main function used to produce a model ready for compression fine-tuning from an original PyTorch
    model and a configuration object.
    :param model: The original model. Should have its parameters already loaded from a checkpoint or another
        source.
    :param config: A configuration object used to determine the exact compression modifications to be applied
        to the model
    :type config: nncf.NNCFConfig
    :param compression_state: representation of the entire compression state to unambiguously restore
        the compressed model. Includes builder and controller states.
    :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
        the internal graph representation via tracing. Specifying this is useful when the original training pipeline
        has special formats of data loader output or has additional *forward* arguments other than input tensors.
        Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
        to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
        nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's
        args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code
        (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.
    :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
        forward call before passing the inputs to the underlying compressed model. This is required if the model's
        input tensors that are important for compression are not supplied as arguments to the model's forward call
        directly, but instead are located in a container (such as list), and the model receives the container as an
        argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the
        underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each
        tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs
        to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args
        and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified
        if dummy_forward_fn is specified.
    :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs
    :param dump_graphs: Whether to dump the internal graph representation of the
        original and compressed models in the .dot format into the log directory.
    :return: A controller for the compression algorithm (or algorithms, in which case the controller
        is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped
        as an object of NNCFNetwork.
    """
    if isinstance(model, NNCFNetwork):
        raise RuntimeError(
            "The model object has already been compressed.\n"
            "NNCF for PyTorch modifies the model object in-place, and repeat calls to "
            "`nncf.torch.create_compressed_model` with the same model object passed as argument "
            "will lead to an incorrect attempt to compress the model twice.\n"
            "Make sure that the model object you are passing has not already been compressed (for "
            "instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\n"
            "If you are encountering this in a Jupyter notebook context - make sure that when "
            "re-running cells involving `nncf.torch.create_compressed_model` the original model object "
            "is also re-created (via constructor call)."
        )
    if config.get("target_device") == "VPU":
        warning_deprecated("VPU device is deprecated and will no longer be supported in the future.")
    set_debug_log_dir(config.get("log_dir", "."))
    # A legacy checkpoint carries only a model state dict, without the builder/controller state sections.
    is_legacy_model_state_dict = (
        compression_state is not None
        and BaseController.BUILDER_STATE not in compression_state
        and BaseController.CONTROLLER_STATE not in compression_state
    )
    maybe_convert_legacy_names_in_compress_state(compression_state)
    # Initialize compression parameters from scratch only when no saved state is being restored.
    should_init = compression_state is None
    nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn)
    if dump_graphs and is_main_process():
        nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get("log_dir", "."), "original_graph.dot"))
    builder = create_compression_algorithm_builder(config, should_init)
    is_state_loadable = not is_legacy_model_state_dict and compression_state is not None
    if is_state_loadable:
        builder.load_state(compression_state[BaseController.BUILDER_STATE])
    compressed_model = builder.apply_to(nncf_network)
    compression_ctrl = builder.build_controller(compressed_model)
    if is_state_loadable:
        compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])
    compressed_model.nncf.set_compression_controller(compression_ctrl)
    # Required to ensure that the model leaving create_compressed_model has correct compressed graph.
    # In particular, this is currently required for correct functioning of RNNs.
    compressed_model.nncf.rebuild_graph()
    try:
        if is_legacy_model_state_dict:
            from nncf.torch import load_state  # pylint: disable=cyclic-import
            state_dict_to_load = compression_state.get("state_dict", compression_state)
            load_state(compressed_model, state_dict_to_load, is_resume=True)
    finally:
        # Dump the compressed graph even if loading a legacy state dict failed above.
        if dump_graphs and is_main_process():
            compressed_model_graph = compressed_model.nncf.get_graph()
            compressed_model_graph.visualize_graph(osp.join(config.get("log_dir", "."), "compressed_graph.dot"))
    synchronize_all_processes_in_distributed_mode()
    return compression_ctrl, compressed_model
def create_nncf_network(
    model: torch.nn.Module,
    config: NNCFConfig,
    dummy_forward_fn: Callable[[Module], Any] = None,
    wrap_inputs_fn: Callable = None,
    wrap_outputs_fn: Callable = None,
) -> NNCFNetwork:
    """
    The main function used to produce a model ready for adding compression from an original PyTorch
    model and a configuration object.
    :param model: The original model. Should have its parameters already loaded from a checkpoint or another
        source.
    :param config: A configuration object used to determine the exact compression modifications to be applied
        to the model
    :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
        the internal graph representation via tracing. Specifying this is useful when the original training pipeline
        has special formats of data loader output or has additional *forward* arguments other than input tensors.
        Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
        to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
        nncf.nncf_model_input
        functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these
        calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is
        specified, then wrap_inputs_fn also must be specified.
    :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
        forward call before passing the inputs to the underlying compressed model. This is required if the model's input
        tensors that are important for compression are not supplied as arguments to the model's forward call directly,
        but instead are located in a container (such as list), and the model receives the container as an argument.
        wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying
        model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among
        the supplied model's args and kwargs that is important for compression (e.g. quantization) with an
        nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced
        by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are
        the same as were supplied in input, but each tensor in the original input. Must be specified if
        dummy_forward_fn is specified.
    :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.
    :return: A model wrapped by NNCFNetwork, which is ready for adding compression."""
    if dummy_forward_fn is not None and wrap_inputs_fn is None:
        raise ValueError(
            "A custom dummy forward function was specified, but the corresponding input wrapping function "
            "was not. In case a custom dummy forward function is specified for purposes of NNCF graph "
            "building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with "
            "the input wrapping done in dummy_forward_fn."
        )
    # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode
    with training_mode_switcher(model, is_training=False):
        # Compress model that will be deployed for the inference on target device. No need to compress parts of the
        # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with
        # weights. As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode.
        input_info_list = create_input_infos(config)
        scopes_without_shape_matching = config.get("scopes_without_shape_matching", [])
        ignored_scopes = config.get("ignored_scopes")
        target_scopes = config.get("target_scopes")
        nncf_network = NNCFNetwork(
            model,
            input_infos=input_info_list,
            dummy_forward_fn=dummy_forward_fn,
            wrap_inputs_fn=wrap_inputs_fn,
            wrap_outputs_fn=wrap_outputs_fn,
            ignored_scopes=ignored_scopes,
            target_scopes=target_scopes,
            scopes_without_shape_matching=scopes_without_shape_matching,
        )
        # Dynamic-graph tracing is only needed while building; disable it for subsequent forward calls.
        nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()
    synchronize_all_processes_in_distributed_mode()
    return nncf_network
def synchronize_all_processes_in_distributed_mode():
    """Barrier-synchronize all ranks if torch.distributed is initialized; no-op otherwise."""
    if not is_dist_avail_and_initialized():
        return
    try:
        barrier()
    except RuntimeError as err:
        # barrier() can raise if the backend is not in the supported list:
        # https://pytorch.org/docs/stable/distributed.html
        nncf_logger.warning("Training pipeline spawned an error while synchronizing distributed training processes:")
        nncf_logger.warning(err)
        nncf_logger.warning("Desynchronization of distributed processes may occur.")
def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:
    """
    Create a compression algorithm builder for the algorithms listed in the config.

    :param config: A configuration object used to determine the exact compression modifications to be applied
        to the model
    :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
        the training parameters of the model during model building.
    :return: compression algorithm builder
    """
    return create_compression_algorithm_builder_from_algo_names(
        extract_algorithm_names(config), config, should_init
    )
def create_compression_algorithm_builder_from_algo_names(
    algo_names: List[str], config: NNCFConfig, should_init: bool
) -> PTCompressionAlgorithmBuilder:
    """
    Create a compression algorithm builder by a given list of algorithm names.

    :param algo_names: list of algorithm names
    :param config: A configuration object used to determine the exact compression modifications to be applied
        to the model
    :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
        the training parameters of the model during model building.
    :return: compression algorithm builder
    """
    # More than one algorithm -> compose them (the composite builder re-reads
    # the algorithm list from the config itself).
    if len(algo_names) > 1:
        return PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)
    # Zero algorithms -> the no-op builder; exactly one -> its registered class.
    builder_cls = PT_COMPRESSION_ALGORITHMS.get(algo_names[0]) if algo_names else NoCompressionAlgorithmBuilder
    return builder_cls(config, should_init=should_init)
|
3,037 | 807f0094a9736abdfa3f5b629615a80f1e0d13ef | class Rect():
def __init__(self, w, h):
self.w = w
self.h = h
def half(self):
return self.w / 2;
# Palette of brick shapes (all 25 px tall, varying widths) to pick from at random.
bricks = [Rect(40, 25), Rect(30, 25), Rect(28, 25), Rect(13, 25)]
def setup():
    # Processing sketch entry point: fixed 500x500 canvas; noLoop() makes
    # draw() run only once.
    size(500, 500)
    noLoop()
def draw():
    # Tile the canvas with 20 rows of randomly chosen bricks, each filled
    # with a random gray shade (fill/rect/random are Processing builtins).
    posx = 0
    posy = 0
    i = 0  # running brick counter, passed to get_brick
    for y in range(20):
        posx = 0
        for x in range(50):
            fill(random(100, 250))
            brick = get_brick(i)
            rect(posx, posy, brick.w, brick.h)
            posx += brick.w
            i += 1
        # NOTE(review): row height advances by the *last* brick of the row --
        # uniform only because every brick in `bricks` is 25 px tall.
        posy += brick.h
def get_brick(index):
    # Pick a random brick shape; `index` is currently unused (the commented
    # line below would make the choice deterministic instead).
    i = int(random(len(bricks)))
    # i = index % len(bricks)
    return bricks[i]
|
3,038 | 6c8f690e1b43d459535238e24cccc8aa118e2d57 | # pylint: disable=W0621,C0114,C0116,W0212,W0613
import io
import textwrap
from typing import cast, Any, Dict
import toml
import pytest
from dae.testing import convert_to_tab_separated
from dae.configuration.gpf_config_parser import GPFConfigParser
from dae.configuration.schemas.person_sets import person_set_collections_schema
from dae.pedigrees.loader import FamiliesLoader
from dae.person_sets import PersonSetCollection
from impala_storage.schema1.impala_variants import ImpalaVariants
@pytest.fixture
def families_fixture():
    """Load a families fixture: f1 (5 members) and f2 (6 members), 11 people total."""
    ped_content = io.StringIO(convert_to_tab_separated(
        """
familyId personId dadId momId sex status role
f1 mom1 0 0 2 1 mom
f1 dad1 0 0 1 1 dad
f1 prb1 dad1 mom1 1 2 prb
f1 sib1 dad1 mom1 2 2 sib
f1 sib2 dad1 mom1 2 2 sib
f2 grmom2 0 0 2 0 maternal_grandmother
f2 grdad2 0 0 1 0 maternal_grandfather
f2 mom2 grdad2 grmom2 2 1 mom
f2 dad2 0 0 1 1 dad
f2 prb2 dad2 mom2 1 2 prb
f2 sib2_3 dad2 mom2 2 2 sib
        """))
    families = FamiliesLoader(ped_content).load()
    assert families is not None
    return families
def get_person_set_collections_config(content: str):
    """Parse TOML `content` and return the validated person-set-collections config."""
    parsed = cast(Dict[str, Any], toml.loads(content))
    schema = {"person_set_collections": person_set_collections_schema}
    config = GPFConfigParser.process_config(parsed, schema)
    return config.person_set_collections
@pytest.fixture
def status_collection(families_fixture):
    """PersonSetCollection over the pedigree `status` column:
    affected / unaffected, with default set "unknown"."""
    content = textwrap.dedent(
        """
        [person_set_collections]
        selected_person_set_collections = ["status"]
        status.id = "status"
        status.name = "Affected Status"
        status.sources = [{ from = "pedigree", source = "status" }]
        status.domain = [
            {
                id = "affected",
                name = "Affected",
                values = ["affected"],
                color = "#aabbcc"
            },
            {
                id = "unaffected",
                name = "Unaffected",
                values = ["unaffected"],
                color = "#ffffff"
            },
        ]
        status.default = {id = "unknown",name = "Unknown",color = "#aaaaaa"}
        """)
    config = get_person_set_collections_config(content)
    collection = PersonSetCollection.from_families(
        config.status, families_fixture)
    return collection
def test_status_person_set_collection(status_collection):
    """The status collection splits the 11 people into affected/unaffected/unknown."""
    assert status_collection is not None
    psc = status_collection
    expected_sizes = {"unknown": 2, "affected": 5, "unaffected": 4}
    assert len(psc.person_sets) == len(expected_sizes)
    for set_id, size in expected_sizes.items():
        assert len(psc.person_sets[set_id].persons) == size
def test_status_person_set_collection_all_selected(
        status_collection):
    """Selecting every set (including the default "unknown") yields the empty query."""
    selected = ("status", {"affected", "unaffected", "unknown"})
    assert ImpalaVariants.build_person_set_collection_query(
        status_collection, selected) == ()
def test_status_person_set_collection_some_selected_no_default(
        status_collection):
    """Without the default set, the selected sets appear as an include list."""
    selected = ("status", {"affected"})
    assert ImpalaVariants.build_person_set_collection_query(
        status_collection, selected) == ([{"status": "affected"}], [])
def test_status_person_set_collection_some_selected_and_default(
        status_collection):
    """With the default "unknown" selected, the unselected sets become excludes."""
    selected = ("status", {"affected", "unknown"})
    assert ImpalaVariants.build_person_set_collection_query(
        status_collection, selected) == ([], [{"status": "unaffected"}])
@pytest.fixture
def status_sex_collection(families_fixture):
    """PersonSetCollection keyed on (status, sex) pairs, with default set "other"."""
    config = get_person_set_collections_config(textwrap.dedent("""
    [person_set_collections]
    selected_person_set_collections = ["status_sex"]
    status_sex.id = "status_sex"
    status_sex.name = "Affected Status and Sex"
    status_sex.sources = [
        { from = "pedigree", source = "status" },
        { from = "pedigree", source = "sex" },
    ]
    status_sex.domain = [
        { id = "affected_male", name = "Affected Male",
          values = ["affected", "M"], color = "#ffffff" },
        { id = "affected_female", name = "Affected Female",
          values = ["affected", "F"], color = "#ffffff" },
        { id = "unaffected_male", name = "Unaffected Male",
          values = ["unaffected", "M"], color = "#ffffff" },
        { id = "unaffected_female", name = "Unaffected Female",
          values = ["unaffected", "F"], color = "#ffffff" },
    ]
    status_sex.default = { id="other", name="Other", color="#aaaaaa"}
    """))
    return PersonSetCollection.from_families(
        config.status_sex, families_fixture
    )
def test_status_sex_person_set_collection_all_selected(
        status_sex_collection):
    """Selecting every set (including the default "other") yields the empty query."""
    all_ids = {"affected_male", "affected_female",
               "unaffected_male", "unaffected_female", "other"}
    query = ImpalaVariants.build_person_set_collection_query(
        status_sex_collection, ("status_sex", all_ids))
    assert query == ()
def test_status_sex_person_set_collection_some_selected_no_default(
        status_sex_collection):
    """Without the default set selected, the query lists the selected sets as includes."""
    cases = [
        ({"affected_male", "affected_female"},
         [{"sex": "F", "status": "affected"},
          {"sex": "M", "status": "affected"}]),
        ({"unaffected_male", "unaffected_female"},
         [{"sex": "F", "status": "unaffected"},
          {"sex": "M", "status": "unaffected"}]),
        ({"affected_male", "unaffected_female"},
         [{"sex": "M", "status": "affected"},
          {"sex": "F", "status": "unaffected"}]),
    ]
    for selected, included in cases:
        query = ImpalaVariants.build_person_set_collection_query(
            status_sex_collection, ("status_sex", selected))
        assert query == (included, [])
def test_status_sex_person_set_collection_some_selected_with_default(
        status_sex_collection):
    """When the default "other" set is selected, the query excludes the unselected sets."""
    cases = [
        ({"affected_male", "affected_female", "other"},
         [{"sex": "F", "status": "unaffected"},
          {"sex": "M", "status": "unaffected"}]),
        ({"unaffected_male", "unaffected_female", "other"},
         [{"sex": "F", "status": "affected"},
          {"sex": "M", "status": "affected"}]),
        ({"affected_male", "unaffected_female", "other"},
         [{"sex": "F", "status": "affected"},
          {"sex": "M", "status": "unaffected"}]),
    ]
    for selected, excluded in cases:
        query = ImpalaVariants.build_person_set_collection_query(
            status_sex_collection, ("status_sex", selected))
        assert query == ([], excluded)
|
3,039 | 0d1fda864edc73cc6a9853727228c6fa3dfb19a1 | """
Author : Gülşah Büyük
Date : 17.04.2021
"""
import numpy as np
# Demo matrix for the QR decomposition below.
A = np.array([[22, -41, 2], [61, 17, -18], [-9, 74, -13]])
# For a square matrix A the QR Decomposition converts into the product of an orthogonal matrix Q
# (Q.T)Q= I and an upper triangular matrix R.
def householder_reflection(A):
    """QR-decompose the square matrix ``A`` via Householder reflections.

    :param A: square (n x n) array-like of real numbers.
    :return: tuple ``(Q, R)`` of n x n numpy arrays with ``Q`` orthogonal,
        ``R`` upper triangular and ``A = Q @ R``.
    """
    # Work in floating point regardless of the input dtype.
    A = np.asarray(A, dtype=float)
    size = len(A)
    # Set R equal to A, and create Q as an identity matrix of the same size.
    Q = np.identity(size)
    R = np.copy(A)
    for i in range(size - 1):
        # x is the sub-column of R that this reflection must zero out
        # below the diagonal.
        x = R[i:, i]
        # e is the first canonical basis vector scaled to ||x||
        # (the original comment called it an "eigenvector", which it is not).
        e = np.zeros_like(x)
        e[0] = np.linalg.norm(x)
        # Reflection direction u = x - ||x||*e1, normalized to v = u/||u||.
        u = x - e
        norm_u = np.linalg.norm(u)
        if norm_u == 0.0:
            # x is already a non-negative multiple of e1: the column is in the
            # desired form and v would be 0/0.  The original code produced
            # NaNs here (e.g. for an already upper-triangular input).
            continue
        v = u / norm_u
        Q_count = np.identity(size)
        # Householder matrix H = I - 2*v*v^T, embedded in the lower-right block.
        Q_count[i:, i:] -= 2.0 * np.outer(v, v)
        R = np.dot(Q_count, R)  # R = H(n-1)*...*H(2)*H(1)*A
        Q = np.dot(Q, Q_count)  # Q accumulates the (symmetric, self-inverse) H's
    return (Q, R)
# Demo: decompose A and verify that Q.R reproduces it.
(Q, R) = householder_reflection(A)
print("A:")
print(A)
print("Q:")
print(Q)
print("R:")
print(R)
print("A = QR control:")
print(np.dot(Q,R))
3,040 | d56c80b4822b1bd0f2d4d816ed29a4da9d19a625 | import collections
def solution(genres, plays):
    """Build the "best album" track list.

    Genres are ordered by total play count (descending); within a genre up to
    two songs are taken, ordered by play count (descending) and then by index
    (ascending) for ties.

    Fixes a bug in the previous implementation, which inverted the
    genre -> total-plays mapping into a dict keyed by the total: two genres
    with the same total collided in that reverse map, silently dropping one
    genre from the answer.

    :param genres: genre name per song index.
    :param plays: play count per song index (extra entries beyond
        ``len(genres)`` are ignored, matching the old behaviour).
    :return: list of song indices in album order.
    """
    songs_by_genre = {}   # genre -> [(index, plays), ...] in input order
    total_by_genre = {}   # genre -> summed play count
    for index, (genre, play_count) in enumerate(zip(genres, plays)):
        songs_by_genre.setdefault(genre, []).append((index, play_count))
        total_by_genre[genre] = total_by_genre.get(genre, 0) + play_count
    answer = []
    # sorted() is stable even with reverse=True, so genres with equal totals
    # keep their first-seen order instead of clobbering each other.
    for genre in sorted(total_by_genre, key=total_by_genre.get, reverse=True):
        ranked = sorted(songs_by_genre[genre], key=lambda song: (-song[1], song[0]))
        answer.extend(index for index, _ in ranked[:2])
    return answer
# Smoke run.  NOTE: 12 play counts vs 10 genres -- the two extra entries are ignored.
print(solution(["A", "A", "B", "A", "B", "B", "A", "A", "A", "A"],
               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))
'''
[베스트 앨범] hash
문제설명:
노래는 인덱스(고유번호)로 구분
각 노래들의 재생 횟수를 더해서 가장 횟수가 높은 장르의 곡들을 먼저 수록
해당 장르 내에서는 재생 횟수가 높은 노래를 먼저 수록
한 장르에서 최대 2곡까지 수록할 수 있고 1곡밖에 없다면 1곡만 수록
만약 재생횟수가 같은 노래가 있다면 고유번호가 낮은 것을 우선 수록
풀이과정:
해쉬를 어디에 적용해야 할지 고민을 많이 한 문제
결국 고유번호 배열, 장르 배열, 재생횟수 배열 이렇게 3가지 배열이 있는 것이고
해쉬도 3개가 만들어져야함.
그리고 최고 많이 재생된 장르, 최고 많이 재생된 노래 들이라서 정렬을 선택했는데
노래의 재생횟수는 중첩이 가능해서 같은 경우 고유번호가 낮아야한다.
이를 위해 고유번호와 재생횟수를 함께 배열화 해서 하나의 딕셔너리에 장르별로(키) 묶어두고
람다로 정렬 진행, 다만 리벌스로 정렬해야 같은 경우 고유번호가 낮은게 앞으로 감.
또, 최대 많이 재생된 장르도 해쉬로 만들어두면 쉽게 될 것 같지만
해쉬 구조 자체를 정렬해야해서 힘듬.
그래서 해쉬가 만들어진 후에는 역으로 int: string 형태로 또 하나의 해쉬를 만들어서 해결했음.
''' |
3,041 | 84a4a0a16aea08ee874b09de163fd777be925f18 | import numpy as np
import math
import matplotlib.pyplot as plt
def signif_conf(ts, p):
    ''' Given a timeseries (ts), and desired probability (p),
        compute the standard deviation of ts (s) and use the
        number of points in the ts (N), and the degrees of freedom (DOF)
        to calculate chi. '''
    s = np.std(ts)
    N = ts.size
    DOF = 2
    # chi = chi_sqr(1.-p, DOF)
    # NOTE(review): chi_sqr is not defined anywhere in this file nor imported,
    # so this call raises NameError at runtime.  Presumably it is a chi-square
    # quantile helper (e.g. scipy.stats.chi2.ppf) -- confirm and supply it.
    chi = chi_sqr(1.-p, DOF)
    signif = ((s**2)*chi) / ((N/2)*DOF)
    return signif
def fourier2(Flux, Delt,
             pad=None, rad=None, norm=None, signif=0.95, display=None):
    """Compute the one-sided FFT power/phase/amplitude spectrum of a time series.

    :param Flux: the (real) time series.
    :param Delt: sampling interval.
    :param pad: if truthy, zero-pad the series up to the next power of two.
    :param rad: if truthy, return phase in radians instead of degrees.
    :param norm: if truthy, normalize the power spectrum by N/variance.
    :param signif: confidence level forwarded to signif_conf (default 0.95).
    :param display: if truthy, plot the power spectrum.
    :return: (n, 4) array with columns [frequency, power, phase, amplitude].
    """
    # Subtract the mean.
    Flux = np.asarray(Flux, dtype=float)
    newflux = Flux - Flux.mean()
    N = newflux.size
    # Zero-pad up to the next power of two if requested.  The old code
    # discarded the result of np.append (it does not modify in place) and
    # printed the counts with N already reassigned.
    if pad:
        base2 = int(np.log(N) / np.log(2)) + 1
        if N != 2 ** (base2 - 1):
            n_zeros = 2 ** base2 - N
            newflux = np.append(newflux, np.zeros(n_zeros))
            N = newflux.size
            print("Padded " + str(N - n_zeros) + " data points with " +
                  str(n_zeros) + " zeros.")
            print("**RECOMMEND checking against fourier spectrum of non padded "
                  "time series**")
    # Make the one-sided frequency array.
    Freq = np.arange(N // 2 + 1) / (N * Delt)
    # Calculate the (forward) FFT of the form a(w) + ib(w).
    V = np.fft.fft(newflux)
    Power = 2 * (abs(V) ** 2)
    Amplitude = 2 * (abs(V))
    # Keep only the one-sided part and drop the zero-frequency element (it is
    # the mean, which was removed).  All arrays are sliced consistently: the
    # old code mixed [1:] and [1:N/2] and never *called* .flatten, leaving
    # bound methods in Freq/Power/Amplitude.
    half = N // 2
    Freq = Freq[1:half]
    Power = Power[1:half]
    Amplitude = Amplitude[1:half]
    real_part = (V.real)[1:half]
    imag_part = (V.imag)[1:half]
    # Phase: tan(phase) = imag/real, and np.arctan2 takes (y, x).  The old
    # code passed (real, imag), contradicting its own comment.
    if rad:
        Phase = np.arctan2(imag_part, real_part)
    else:
        Phase = np.degrees(np.arctan2(imag_part, real_part))
    conf = signif
    # NOTE(review): signif_conf() relies on a chi_sqr() helper that is not
    # defined in this file -- this call fails at runtime until it is supplied.
    sig_lvl = signif_conf(newflux, signif)
    if norm:
        var = np.var(newflux)
        Power = Power * (N / var)       # the old code referenced undefined `power`
        sig_lvl = sig_lvl * (N / var)
        print("White noise has an expectation value of 1")
    if display:
        if sig_lvl != 0:
            print("Confidence level at " + str(int(conf * 100)) +
                  " is: " + str(sig_lvl))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(Freq, Power)
        ax.plot(Freq, Power, '.')
        plt.show()
    # The final output is an array containing the power and phase at each frequency.
    Result = np.zeros((Power.size, 4))  # np.zeros(Power.size, 4) was invalid
    Result[:, 0] = Freq
    Result[:, 1] = Power
    Result[:, 2] = Phase
    Result[:, 3] = Amplitude
    print("Result[:,0] is frequency")
    print("Result[:,1] is the power spectrum")
    print("Result[:,2] is the phase")
    return Result
# Smoke run on a short integer series with unit sampling interval.
f = np.array([1, 3, 4, 5, 3, 2, 6, 4, 3, 4, 1])
blah = fourier2(f, 1)
|
3,042 | 882d265f14c04b2f2f626504d18e2cd07dcc8637 | """
This module is used to extract features from the lines extracted from documents
using BERT encodings. This package leverages the bert-as-a-server package to create the
embeddings.
Example:
feature_extractor = FeatureExtractor(document) # document is of class Document
encoded_doc = feature_extractor.encode()
feature_extractor.end()
Todo:
* lines --> sentences for a better representation of the embeddings
* try different BERT models
* train the BERT model for a specific task before encoding
"""
from bert_serving.client import BertClient
class FeatureExtractor:
    """Embeds a Document's text lines using a Bert-as-a-Service client.

    Attributes:
        _document (Document): Holds the lines extracted from one of the PDF
            documents; each Line exposes `text` (input) and `encoding`
            (output embedding filled in by encode()).
        _bc (BertClient): Open connection to the running BertServer.
    """

    def __init__(self, document):
        self._document = document
        self._bc = BertClient()

    def encode(self):
        """Embed every line's text and store the vector on `line.encoding`."""
        doc_lines = self._document.lines
        vectors = self._bc.encode([doc_line.text for doc_line in doc_lines])
        for doc_line, vector in zip(doc_lines, vectors):
            doc_line.encoding = vector
        return self._document

    def end(self):
        """Close the BertClient connection to the BertServer."""
        self._bc.close()
|
3,043 | 2e23225ec4cd693f5e9460a13d64206f184a86a0 | # -*- coding: utf-8 -*-
"""Code handling the concurrency of data analysis."""
|
3,044 | 836d712c811079f190eae9c2780131a844c9dddf | def twoSensorAvg(input_data, duration=1):
times = {}
for i in input_data:
data = i.split(',')
time = int(int(data[1]) / (duration * 1000))
if time not in times:
times[time] = [0, 0]
times[time][0] += int(data[2])
times[time][1] += 1
ans = []
for i, v in times.items():
i = int(i)
a = str(i * duration * 1000) + '-' + str(i * duration * 1000 + 1000 * (duration - 1) + 999) + ': ' + str(
round(float(v[0] / v[1]), 2))
ans.append(a)
return ans
def test(input, output, duration):
    """Run twoSensorAvg, print its result, and return whether it equals `output`."""
    results = twoSensorAvg(input, duration)
    print(results)
    # List equality has the same semantics as the old length-check plus
    # element-by-element comparison loop.
    return results == output
if __name__ == '__main__':
    # Sample records in "sensor_id,timestamp_ms,value" form, with the
    # expected 1-second-window averages.
    input_data = ['1,10000,40', '1,10002,45', '1,11015,50', '2,10005,42', '2,11051,45', '2,12064,42', '2,13161,42']
    ans = ['10000-10999: 42.33', '11000-11999: 47.5', '12000-12999: 42.0', '13000-13999: 42.0']
    print(test(input_data, ans, 1))
|
3,045 | fbb081fd52b14336ab4537bb795105bcd6a03070 | from os import environ
from flask import Flask
from flask_restful import Api
from flask_migrate import Migrate
from applications.db import db
from applications.gamma_api import add_module_gamma
# Flask application setup; the database URI comes from the DATABASE env var.
app = Flask(__name__)
app.config["DEBUG"] = True
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["PROPAGATE_EXCEPTIONS"] = True
api = Api(app)
db.init_app(app)
migrate = Migrate(app, db)
@app.before_first_request
def create_tables():
    # Intentionally a no-op: schema management is delegated to Flask-Migrate.
    # NOTE(review): before_first_request was removed in Flask 2.3 -- confirm
    # the pinned Flask version still supports it.
    pass
    # db.create_all()
add_module_gamma(api)
|
3,046 | 2de12085ddc73fed85dda8ce3d6908b42fdc4bcc | ## Import modules
import matplotlib, sys, datetime, time
matplotlib.use('TkAgg')
from math import *
from numpy import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import dates
import matplotlib.pyplot as plt
from Tkinter import *
## Load the data
# Columns: 0-5 = date/time fields, 6 = humidity, 7 = temperature,
# 8 = ambient light, 9 = elapsed timer.
data = loadtxt("data/data011c.txt", unpack = True, skiprows=1, comments = '#')
temperature = data[7]
humidity = data[6]
light = data[8]
timer = data[9]
year, month, day, hour, minute, second = data[0], data[1], data[2], data[3], data[4], data[5]
## Make empty are to append the formatted dates
date_times = []
## Format the dates to dd.mm.yyyy hh:mm:ss
for i in range(len(year)): # can be the length of any arbitrary data set
    # this makes a nice long string of the "day.month.year hour:min:sec"
    date_times.append(str(int(day[i])).zfill(2) + "." + str(int(month[i])).zfill(2) + "." + str(int(year[i])) +
    " " + str(int(hour[i])).zfill(2) + ":" + str(int(minute[i])).zfill(2) + ":" + str(int(second[i])).zfill(2) )
## String format of the date
pattern = '%d.%m.%Y %H:%M:%S'
## Convert the list of date_times to epoch time in seconds
epoch = []
for datetimes in date_times:
    epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))
## Convert epoch time to list of dateformatter objects
# NOTE(review): map() returning a list is Python 2 behaviour (as is the
# Tkinter/NavigationToolbar2TkAgg import above) -- this script targets Py2.
dts = map(datetime.datetime.fromtimestamp, epoch)
fds = dates.date2num(dts)
hfmt = dates.DateFormatter('%m/%d %H:%M')
## Create interface object
master = Tk()
## Set the title and size
master.title("Room Sensor")
master.geometry("1200x600")
## Create figure to add onto interface window
f = Figure(figsize=(9,5), dpi=100,)# facecolor='black')
## Not sure what zorder does
f.zorder
## within the figure create subplot called a
a = f.add_subplot(111)
## Add figure onto interface window
dataPlot = FigureCanvasTkAgg(f, master)
dataPlot.draw()
## Turn figure into a widget
dataPlot.get_tk_widget().place(x = 240, y = 40)
## Add plot toolbar widget
toolbar = NavigationToolbar2TkAgg(dataPlot, master)
toolbar.update()
toolbar.place(x = 240, y = 560)
## Functions to switch between plots
def show_temp():
    """Redraw the shared axes `a` with the temperature series (red theme)."""
    a.clear()
    a.plot(fds, temperature, "r.--")
    a.set_ylabel("Temperature (Degrees Celsius)", color="r")
    a.xaxis.set_major_formatter(hfmt)
    a.grid(color="r")
    # x tick labels: small, slightly rotated, red.
    for xtick in a.xaxis.get_major_ticks():
        label = xtick.label
        label.set_fontsize(7)
        label.set_rotation(15)
        label.set_color("r")
    # y tick labels only need the colour.
    for ytick in a.yaxis.get_major_ticks():
        ytick.label.set_color("r")
    toolbar.update()  # reset the toolbar's view history after the redraw
    f.canvas.draw()
def show_humidity():
    """Redraw the shared axes `a` with the humidity series (blue theme)."""
    a.clear()
    a.plot(fds, humidity, "b.--")
    a.set_ylabel("Humidity %", color="b")
    a.xaxis.set_major_formatter(hfmt)
    a.grid(color="blue")
    # x tick labels: small, slightly rotated, blue.
    for xtick in a.xaxis.get_major_ticks():
        label = xtick.label
        label.set_fontsize(7)
        label.set_rotation(15)
        label.set_color("b")
    for ytick in a.yaxis.get_major_ticks():
        ytick.label.set_color("b")
    toolbar.update()  # reset the toolbar's view history after the redraw
    f.canvas.draw()
def show_light():
    """Redraw the shared axes `a` with the ambient-light series (green theme)."""
    a.clear()
    a.plot(fds, light, "g.--")
    a.set_ylabel("Ambient Light", color="g")
    a.xaxis.set_major_formatter(hfmt)
    a.grid(color="g")
    # x tick labels: small, slightly rotated, green.
    for xtick in a.xaxis.get_major_ticks():
        label = xtick.label
        label.set_fontsize(7)
        label.set_rotation(15)
        label.set_color("g")
    for ytick in a.yaxis.get_major_ticks():
        ytick.label.set_color("g")
    toolbar.update()  # reset the toolbar's view history after the redraw
    f.canvas.draw()
## Load icon and button images
# NOTE: these PhotoImage objects must stay referenced at module scope --
# Tkinter does not keep its own reference and the images would vanish.
tempButton = PhotoImage(file="images/temp_button.gif")
hmdButton = PhotoImage(file="images/hmd_button.gif")
lightButton = PhotoImage(file="images/light_button.gif")
tempIcon = PhotoImage(file="images/temp_icon.gif")
hmdIcon = PhotoImage(file="images/hmd_icon.gif")
lightIcon = PhotoImage(file="images/light_icon.gif")
## Create button widgets
# Each button switches the shared figure to one sensor's plot.
Button1 = Button(master, image = tempButton, command = show_temp, height = 50, width = 109)
Button2 = Button(master, image = hmdButton, command = show_humidity, height = 50, width = 109)
Button3 = Button(master, image = lightButton, command = show_light, height = 50, width = 109)
## Create labels
Label1 = Label(master, image = tempIcon, height = 50, width = 50)
Label2 = Label(master, image = hmdIcon, height = 50, width = 50)
Label3 = Label(master, image = lightIcon, height = 50, width = 50)
## Place the buttons and labels to specific location
Button1.place(x=60,y=110)
Button2.place(x=60,y=260)
Button3.place(x=60,y=410)
Label1.place(x=180, y=111)
Label2.place(x=180, y=261)
Label3.place(x=180, y=411)
## Start with the temperature graph showing
show_temp()
## Run the main interface loop
master.mainloop()
|
3,047 | 4d68b663933070cb287689b70d6ded07958cef22 | # Should print 516
def final_frequency(path='input'):
    """Sum the signed integers stored one per line in *path*.

    Generalized from the original hard-coded filename; the default keeps
    the existing call site (``final_frequency()``) working unchanged.

    :param path: name of the file to read (default ``'input'``)
    :return: the total of all per-line frequency changes
    """
    frequency = 0
    with open(path) as f:
        for line in f:
            # int() accepts leading '+'/'-' and the trailing newline.
            frequency += int(line)
    return frequency
print(final_frequency())
|
3,048 | 32499688db51f701173ec0ea212c483bf902c109 | from django.db import models
# Create your models here.
class Tutorial(models.Model):
    """A single tutorial article: a title, its body text, and a publish date."""
    web_title = models.CharField(max_length=200)      # headline, shown in listings
    web_content = models.TextField()                  # full article body
    web_published = models.DateTimeField("date published")  # verbose name for admin
    def __str__(self):
        # Admin/shell representation: just the title.
        return self.web_title
|
3,049 | 22e6616fb98ecfb256587c3767c7c289decc6bf6 | #Copyright (c) 2020 Ocado. All Rights Reserved.
import vptree, itertools
import numpy as np
class _ExtendedVPTree(vptree.VPTree):
    """
    VPTree class extended to include the list of points within the tree

    NOTE(review): this subclass reaches into vptree's private API
    (`_AutoSortingList`, `_is_leaf`, `left_min`/`left_max`, ...) -- it is
    coupled to a specific vptree version; confirm on upgrade.
    """
    def __init__(self, points, dist_fn):
        """
        :param points: List of points to add to the vp-tree
        :param dist_fn: Metric distance function
        """
        super().__init__(points, dist_fn)
        # Keep the raw point list and its size so trees can be merged later.
        self.points = points
        self.size = len(points)
    def get_n_nearest_neighbors(self, query, n_neighbors):
        """
        Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance
        """
        if not isinstance(n_neighbors, int) or n_neighbors < 1:
            raise ValueError('n_neighbors must be strictly positive integer')
        # Auto-sorting bounded list keeps the n best (distance, point) pairs.
        neighbors = vptree._AutoSortingList(max_size=n_neighbors)
        nodes_to_visit = [(self, 0)]
        furthest_d = np.inf
        # Best-first traversal: skip subtrees whose lower distance bound d0
        # already exceeds the current n-th best distance.
        while len(nodes_to_visit) > 0:
            node, d0 = nodes_to_visit.pop(0)
            if node is None or d0 > furthest_d:
                continue
            d = self.dist_fn(query, node.vp)
            if d <= furthest_d: #Replaced < with <=
                neighbors.append((d, node.vp))
                furthest_d, _ = neighbors[-1]
            if node._is_leaf():
                continue
            # Visit a child immediately when the query distance falls inside
            # its [min, max] band; otherwise enqueue it with its lower bound.
            if node.left_min <= d <= node.left_max:
                nodes_to_visit.insert(0, (node.left, 0))
            elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:
                nodes_to_visit.append((node.left,
                                       node.left_min - d if d < node.left_min
                                       else d - node.left_max))
            if node.right_min <= d <= node.right_max:
                nodes_to_visit.insert(0, (node.right, 0))
            elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:
                nodes_to_visit.append((node.right,
                                       node.right_min - d if d < node.right_min
                                       else d - node.right_max))
        if len(neighbors) == 0:
            neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan
        return list(neighbors)
class DynamicVPTree:
    """
    Dynamic vp-tree implemented using index folding
    """
    def __init__(self, dist_fn, min_tree_size=4):
        """
        :param dist_fn: Metric distance function used for vp-trees
        :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)
        """
        self.dist_fn = dist_fn
        self.trees = []
        self.pool = []
        self.min_tree_size = min_tree_size
    def insert(self, item):
        """Add *item* to the pool; fold the pool into a tree once it is full,
        then repeatedly merge equal-sized trees (binary-counter folding)."""
        self.pool.append(item)
        if len(self.pool) == self.min_tree_size:
            self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
            self.pool = []
            # While the two newest trees have equal size, merge them into one.
            while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:
                newer = self.trees.pop()
                older = self.trees.pop()
                self.trees.append(_ExtendedVPTree(newer.points + older.points, self.dist_fn))
    def nearest(self, query):
        """Return the stored point closest to *query*, considering every
        internal tree and the not-yet-folded pool."""
        candidates = [tree.get_nearest_neighbor(query) for tree in self.trees]
        candidates += [(self.dist_fn(item, query), item) for item in self.pool]
        winner = None
        winning_cost = np.inf
        # `<=` keeps the original tie-breaking (later candidates win ties).
        for cost, point in candidates:
            if cost <= winning_cost:
                winner = point
                winning_cost = cost
        return winner
    def neighbourhood(self, query, radius):
        """Return all stored points within *radius* of *query*, collating
        each internal tree's range query with a linear scan of the pool."""
        hits = []
        for tree in self.trees:
            hits.extend(point for _, point in tree.get_all_in_range(query, radius))
        hits.extend(item for item in self.pool if self.dist_fn(item, query) < radius)
        return hits
|
3,050 | c69dcffc06146af610a7976e522b6e35cabde1aa | # class User:
# def __init__(self, name_parameter, email_parameter):
# self.nameofPerson = name_parameter
# self.emailofPerson = email_parameter
# self.account_balance = 0
#     def depositMoney(self, amount):
# self.account_balance += amount
# return self
# def transferMoney(self, otherUser, amount):
# self.account_balance -= 5
# otherUser.account_balance += 5
# return self
# To allow user1, user2 or user3 overdraw their account
# def withdrawMoney_overdraw(self, amount):
# self.account_balance -= amount
# To not allow user1, user2, user3 overdraw their account
# def withdrawMoney_no_overdraw(self, amount):
# if self.account_balance > amount:
# self.account_balance -= amount
# else:
# print("insufficient funds")
# user1 = User("Ben", "benjamin@yahoo.com")
# user2 = User("Tom", "tommy@yahoo.com")
# user3 = User("Sarah", "sarah@yahoo.com")
# print(user1.nameofPerson)
# prints the name of the user1
# print(user2.emailofPerson)
# prints the email of the user2
# print(user1.account_balance)
# prints the account balance of the user3 which in this case is 0 by default according to the class User
# user1.depositMoney(50)
# print(user1.account_balance)
# prints the account balance of user1 which by default is 0 and then adds the function depositMoney which is giving an arguemnt of 50 (0 + 50 / account_balance + depositMoney) The output is ($50)
# user1.transferMoney(user2, 5)
# print(user2.account_balance)
# print(user1.account_balance)
# prints user1 account balance which is now 50 then transfers money to user2 (50 - 5) which is now 5 to be added to the default account balance of 0 (0 + 5 / account_balance + transferMoney from user1)
# Also user1 account_balance (50 - 5) which is now 45 ($45)
# user1.depositMoney(50).depositMoney(30).transferMoney(user2, 5)
# print(user1.account_balance)
# prints user1 account banlace (50 + 30) which is 80 ($80), assuming user1 depositedMoney twice. we use "return self" at the end of the declared functions to add a "chain method" of repeating a chain of function or various types of functions, i.e repeating a particular function for user1 as many times as possible or even adding other functions to modify the final account balance for user 1)
# The final output for account balance user1 will be (80 - 50) which is 75 ($75) because we transfered money 5 (80 - 5) to user2 at the end in the "chain mathod of functions" for user1. This will only work since we added "return self"and this means (updating all the chain methods to the very last function of command in the declared function which in this case we tranfered 5 from user1 to user2)
# user1.withdrawMoney_overdraw(100)
# print(user1.account_balance)
# prints user1 current account balance which is currently 75 and then withdraws 100 which means (75 - 100) which is -25.
# the new user1 account balance is ( -25 which is -$25)
# The above assuming user1 is allowed to overdraw their account
# user1.withdrawMoney_no_overdraw(100)
# print(user1.account_balance)
# prints "insufficient funds" for user1 since user1 current account balance which is currently 75 and then wants to withdraw 100 which means (75 - 100) but is not allowed to because user1 still needs an additional 25 to fulfil the withdrawMoney function of 100. we give a conditional statement in our def withdrawMoney_no_overdraw above saying if user1 account balance is greater than amount allow user1 to withdraw money if not do not allow user1 to redraw money instead give "insuffiecient funds" (if 75 is greater than 100 which in this case is false go to the else statement which is "insufficient funds")
# The above assuming user1 is not allowed to overdraw their account if account balance for user1 is not greater than the withdraw amount and then user1 will get a message "insufficient funds"
|
3,051 | a91d2f32afdc20516e56036c352cc267c728e886 | import numpy as np
import matplotlib.pyplot as plt
import csv
def save_cp_csvdata(reward, err, filename):
    """Dump per-epoch reward/error arrays to *filename* as CSV.

    Writes a header row, then one ``epoch, reward, error`` row per entry.
    """
    with open(filename, mode='w') as out:
        writer = csv.writer(out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['epoch', 'reward', 'error'])
        for epoch in range(len(reward)):
            writer.writerow([epoch, reward[epoch], err[epoch]])
def read_cp_csvdata(epoch, filename):
    """Load *epoch* reward/error rows back from a CSV written by save_cp_csvdata.

    The first row is treated as the header and skipped; the remaining rows
    fill two preallocated numpy arrays of length *epoch*.
    """
    reward = np.zeros(epoch)
    err = np.zeros(epoch)
    with open(filename) as csv_file:
        line_count = 0
        for row in csv.reader(csv_file, delimiter=','):
            if line_count > 0:  # skip the header row
                reward[line_count - 1] = row[1]
                err[line_count - 1] = row[2]
            line_count += 1
        print(f'Processed {line_count} lines.')
    return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
    """Scatter *data* vs episode index with *error* bars; save then show."""
    fig, ax = plt.subplots()
    plt.xlabel('episode')
    plt.ylabel('reward')
    ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')
    plt.savefig(filename, dpi=200)
    plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
    """Overlay one reward curve per key of *y_map*; save then show.

    *x* must contain the values 0, 49 and 99 (used as labelled ticks).
    """
    labels = list(y_map.keys())
    plt.xlabel('episode')
    plt.ylabel('reward')
    plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])
    for l in labels:
        plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
    plt.legend(loc='lower right')
    plt.savefig(filename, dpi=200)
    plt.show()
def draw_multi_err(x, y_map, filename):
    """Errorbar plot per key of *y_map*; each value is a (data, error) pair.

    NOTE(review): unlike the other helpers, *x* is used as an int here
    (``range(x)``) -- callers must pass an episode count, not a sequence.
    """
    labels = list(y_map.keys())
    fig, ax = plt.subplots()
    plt.xlabel('episode')
    plt.ylabel('reward')
    for l in labels:
        ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')
    plt.legend(loc='lower right')
    plt.savefig(filename, dpi=200)
    plt.show()
def draw_plot1():
    """Render individual errorbar plots for each saved SARSA/Q-learning run."""
    reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')
    draw_plot(reward, err, filename='sarsa_grid.png')
    reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')
    draw_plot(reward, err, filename='sarsa_cartpole.png')
    reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')
    draw_plot(reward, err, filename='qlearning_grid.png')
    reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')
    draw_plot(reward, err, filename='qlearning_cartpole.png')
def draw_plot3():
    """Compare SARSA / Q-learning / CEM rewards per environment."""
    grid_map = {}
    cp_map = {}
    grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
    cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
    grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
    cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
    grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]
    cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]
    draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
    draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
    """Compare epsilon-greedy vs softmax exploration for each algorithm/env."""
    sarsagrid_map = {}
    sarsacp_map = {}
    qgrid_map = {}
    qcp_map = {}
    sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
    sarsagrid_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_grid_f_1.csv')[0]
    sarsacp_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
    sarsacp_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_cartpole_f_1.csv')[0]
    qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
    qgrid_map['softmax'] = read_cp_csvdata(100, 'softmax/qlearning_grid_f_1.csv')[0]
    qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
    qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]
    draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')
    draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')
    draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')
    draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')
def draw_plot5():
    """Placeholder for a future comparison plot -- intentionally empty."""
    pass
# draw_plot1()
|
3,052 | eb403fbb307332c18ffdcdf52589c714f0719960 | import xarray as xr
def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
    """
    Calculates average climatology for annual data - either Jan to Dec or accummulation period

    :param fili: path to a netCDF file of annual fields with a 'time' coordinate
    :param start_year: first year (inclusive) of the averaging window
    :param end_year: last year (inclusive) of the averaging window

    Writes the time-mean dataset next to the input, with 'annual' replaced
    by 'annual.clm' in the filename. Returns None.

    NOTE(review): start_year/end_year must be ints -- comparing the integer
    `time.dt.year` against strings would fail (see the argparse caller).
    """
    nyear = end_year - start_year + 1  # currently only used by the commented-out mask logic
    ds = xr.open_dataset(fili)
    year = ds['time'].dt.year
    #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')
    # skipna=False: any NaN within the window makes the mean NaN (strict mask).
    dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)
    #dsClm = dsClm.where(dsMsk == nyear)
    #dsMsk.to_netcdf('era5.count.nc4')
    print (dsClm)
    filo = fili.replace('annual','annual.clm')
    print (f'Writing climatology to {filo}')
    dsClm.to_netcdf(filo)
    return
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )
    parser.add_argument('fili', type=str, help='path to annual file')
    # type=int is required: without it, values supplied on the command line
    # arrive as strings and the int-vs-str year comparison inside
    # precip_stats_to_climatology raises a TypeError.
    parser.add_argument('--start_year', '-sy', type=int, default=1981,
                        help='First year for climatology')
    parser.add_argument('--end_year', '-ey', type=int, default=2015,
                        help='Last year for climatology')
    args = parser.parse_args()
    precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)
|
3,053 | e05dac901228e6972c1cb48ce2def3d248b4c167 | # # -*- coding: utf-8 -*-
#
# """
# Py40 PyQt5 tutorial
#
# This example shows three labels on a window
# using absolute positioning.
#
# author: Jan Bodnar
# website: py40.com
# last edited: January 2015
# """
#
# import sys
# from PyQt5.QtWidgets import QWidget, QLabel, QApplication
#
#
# class Example(QWidget):
#
# def __init__(self):
# super().__init__()
#
# self.initUI()
#
# def initUI(self):
# lbl1 = QLabel('Zetcode', self)
# lbl1.move(15, 10)
#
# lbl2 = QLabel('tutorials', self)
# lbl2.move(35, 40)
#
# lbl3 = QLabel('for programmers', self)
# lbl3.move(55, 70)
#
# self.setGeometry(300, 300, 250, 150)
# self.setWindowTitle('Absolute')
# self.show()
#
#
# if __name__ == '__main__':
# app = QApplication(sys.argv)
# ex = Example()
# sys.exit(app.exec_())
import psycopg2
def absent(lectureid, sectionid):
    """Build a printable report of students absent from one lecture section.

    The per-lecture roster lives in a table named ``lec_<lectureid>``.
    Table names cannot be bound as query parameters, so the identifier is
    composed safely with psycopg2.sql; *sectionid* is a real bound parameter.
    (The original passed both through %s, which psycopg2 quotes as string
    literals -- producing invalid SQL for the table name, and a doubly
    quoted value for the ''%s'' section id.)

    :param lectureid: numeric suffix of the roster table (lec_<lectureid>)
    :param sectionid: section identifier to report on
    :return: newline-separated "Student ID / Family Name / Given Name" text
    """
    from psycopg2 import sql  # local import: only needed for identifier composition
    connection = psycopg2.connect(database="profmate", user="python", password="python", host="34.74.217.167",
                                  port="5432")
    try:
        cursor = connection.cursor()
        query = sql.SQL(
            "select * from {roster} "
            "where student_id not in (select base.studentid "
            "from (select S.SectionID,Lectures.Lecture_Name,P.StudentID "
            "from Sections As S "
            "Join POOL as P "
            "On (P.Time > S.Time_Start) "
            "and (P.Time < S.Time_End) "
            "Join Lectures "
            "ON S.LectureID = Lectures.Lecture_ID "
            "Order By SectionID) as base "
            "join Students "
            "ON base.studentid = Students.Student_ID "
            "where sectionid = %s );"
        ).format(roster=sql.Identifier('lec_{}'.format(lectureid)))
        cursor.execute(query, (sectionid,))
        print("Selecting rows from POOL table using cursor.fetchall")
        current_table = cursor.fetchall()
        print("Print each row and it's columns values")
        # Accumulate the report lines, then join once (avoids quadratic +=).
        lines = []
        for row in current_table:
            lines.append("Student ID = " + str(row[0]) + "\n")
            lines.append("Family Name = " + row[1] + "\n")
            lines.append("Given Name = " + row[2] + "\n")
        longstring = "".join(lines)
        cursor.close()
    finally:
        # Close the connection even when the query fails (the original leaked it).
        connection.close()
        print("PostgreSQL connection is closed")
    return longstring
if __name__ == '__main__':
a = '234567890'
b = 'Tester'
c = 'One'
# insert_students(a, b, c)
print(absent(101, 1001)) |
3,054 | f2c53efa4b7c2df592582e3093ff269b703be1e0 | # Generated by Django 3.2.5 on 2021-08-05 07:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the FollowUp model (a timestamped
    # follow-up note linked to the creating user and an organization).
    # Do not edit field definitions by hand once applied.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('organization', '0010_auto_20210801_1623'),
        ('quote', '0004_auto_20210805_1032'),
    ]
    operations = [
        migrations.CreateModel(
            name='FollowUp',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ثبت')),
                ('text', models.TextField(default=None, verbose_name='متن پیگیری')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='کاربر ثبت کننده')),
                ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.organization', verbose_name='سازمان')),
            ],
        ),
    ]
|
3,055 | 130f49028833bf57d7e4f9fbb0764801c3508c3b | print("n:",end="")
def _max_triangle_perimeter(sides):
    """Return the largest perimeter of any valid triangle from *sides* (0 if none).

    After sorting descending, if a value cannot form a triangle with the two
    values immediately after it, it cannot with any smaller pair either; so
    scanning consecutive triples finds the optimum in O(n log n) instead of
    the original O(n^3) brute force.
    """
    ordered = sorted(sides, reverse=True)
    for x, y, z in zip(ordered, ordered[1:], ordered[2:]):
        if y + z > x:  # triangle inequality; x is the largest of the triple
            return x + y + z
    return 0

n=int(input())
print("a:",end="")
a=list(map(int,input().split()))
# Only the first n entries participate, matching the original index loops.
ans = _max_triangle_perimeter(a[:n])
print(ans)
3,056 | 5a181b0c22faa47c6c887daac675dd7374037f30 | from typing import List, Optional
from backend.domain.well import FacilityState, Well
from backend.repository.persistence.well import WellPersistenceSchema
class WellRepository:
    """MongoDB-backed repository for Well domain objects.

    Persists/loads documents in the ``wells`` collection via a shared
    marshmallow-style schema (load = doc -> Well, dump = Well -> doc).
    """
    # Shared, stateless (de)serializer for well documents.
    schema = WellPersistenceSchema()
    def __init__(self, db):
        # `db` is a database handle exposing a `wells` collection
        # (pymongo-like interface: find / find_one / insert_many).
        self._db = db
    def list(self) -> List[Well]:
        """Return every stored well, deserialized."""
        return [self.schema.load(doc) for doc in self._db.wells.find({})]
    def save_many(self, wells: List[Well]):
        """Bulk-insert the given wells (no upsert/dedup semantics)."""
        self._db.wells.insert_many([self.schema.dump(well) for well in wells])
    def filter_by_facility_status(self, statuses: List[FacilityState]) -> List[Well]:
        """Return wells whose facility lifecycle name matches any of *statuses*."""
        return [
            self.schema.load(doc)
            for doc in self._db.wells.find({"facility.lifecycle.name": {"$in": [status.value for status in statuses]}})
        ]
    def find_well_by_facility_id(self, identifier: str) -> Optional[Well]:
        """Return the well whose facility id equals *identifier*, or None (implicit)."""
        doc = self._db.wells.find_one({"facility.id": identifier})
        if doc:
            return self.schema.load(doc)
3,057 | 22b6ea64cdb109e1c6b2536b50935d09d37a7e1a | from nmigen import *
class Top(Elaboratable):
    """Minimal nMigen design: a 3-bit counter whose MSB drives an LED.

    The LED mirrors counter bit 2, so it toggles every 4 sync-clock ticks
    (period of 8 ticks).
    """
    def __init__(self):
        self.counter = Signal(3)  # free-running 3-bit counter (wraps mod 8)
        self.led = Signal()       # output: follows counter[2]
    def elaborate(self, platform):
        m = Module()
        # Combinational: LED is the counter's most significant bit.
        m.d.comb += self.led.eq(self.counter[2])
        # Synchronous: increment each sync clock edge (wraps automatically).
        m.d.sync += self.counter.eq(self.counter + 1)
        return m
|
3,058 | 4246773a8da61ff21d5faa8ab8ad2d7e75fafb60 | import sqlite3
def to_string(pessoa):
    """Print each database row in *pessoa* as an 'id'/'Nome' pair.

    *pessoa* is a sequence of rows where index 0 is the id and index 1
    is the person's name. Prints to stdout; returns None.
    """
    for registro in pessoa:
        print('id: {}\nNome: {}'.format(registro[0], registro[1]))
if __name__ == '__main__':
    # Open the exercise database and dump the whole Pessoa table first.
    con = sqlite3.connect('lab05-ex01.sqlite')
    cursor = con.cursor()
    cursor.execute("SELECT * FROM Pessoa")
    print(cursor.fetchall())
    # Look up one person by name; the `?` placeholder keeps the query
    # parameterized (no SQL injection via the typed name).
    nome = input("Nome da pessoa: ")
    clausula = (nome,)
    cursor.execute("SELECT * FROM Pessoa WHERE nome = ?", clausula)
    pessoa = cursor.fetchall()
    to_string(pessoa)
    cursor.close()
con.close() |
3,059 | e652196f9c74be6f05c6148de152996e449670ea | import numpy as np
from input_parameters.program_constants import ITERATIONS_NUM, TIMESTEPS_NUMB
def init_zero_arrays():
    """Allocate the zeroed (ITERATIONS_NUM, TIMESTEPS_NUMB) state arrays.

    Returns a 14-tuple of independent float arrays, in this fixed order:
    radius, dot_radius, dotdot_radius, delta_radius, mass_out, total_mass,
    dot_mass, dot_rt, time, dot_time, luminosity_AGN,
    pressure_contact (pressures at the contact discontinuity),
    pressure_outer (pressures at the outer shock), bulge_mass.

    Callers unpack positionally, so the order above must not change.
    """
    shape = (ITERATIONS_NUM, TIMESTEPS_NUMB)
    # One np.zeros call per quantity -- the arrays never share memory.
    return tuple(np.zeros(shape) for _ in range(14))
3,060 | 1e4d21998b9f8915167166e5965b0c8c87fcf61d | def search_way(adjacency_list, points):
use = [False for i in range(points.__len__())]
way = [0 for i in range(points.__len__())]
cost = [100000 for i in range(points.__len__())]
cost[0] = 0
checkVar = 0
test = True
while test:
min = 100000
for i in range(points.__len__()):
if (cost[i] < min) and (not use[i]):
checkVar = i
min = cost[i]
for i in range(adjacency_list[checkVar + 1].__len__()):
bestStation = adjacency_list[checkVar + 1][i].arrivalPointId - 1
bestValue = adjacency_list[checkVar + 1][i].price
if(cost[i] + bestValue < cost[bestStation]):
way[bestStation] = adjacency_list[checkVar + 1][i]
cost[bestStation] = cost[checkVar] + bestValue
use[checkVar] = True
test = False
for i in range(adjacency_list[checkVar + 1].__len__()):
if use[i] == False:
test = True
print(cost)
print(points)
return way; |
3,061 | 052574be3f4a46bceefc0a54b1fe268a7cef18a9 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
"""
Using the django shell:
$ python manage.py shell
from django.contrib.auth.models import User
from accounts.models import Profile
from papers.models import Paper, Comment, Rating, UserSavedPaper
users = User.objects.all()
profiles = Profile.objects.all()
papers = Paper.objects.all()
comments = Comment.objects.all()
ratings = Rating.objects.all()
usps = UserSavedPaper.objects.all()
comments.create(text='this is an awesome paper!', profile=profiles[0], paper=papers[0])
"""
# Reversing migrations
# https://docs.djangoproject.com/en/3.0/topics/migrations/#reversing-migrations
# - Ex) $ python manage.py migrate papers zero <- reverses all migrations for app "papers", see all migrations with "$ python manage.py showmigrations"
# -> python manage.py makemigrations -> python manage.py migrate
# https://docs.djangoproject.com/en/3.0/ref/models/fields/
class Paper(models.Model):
    """A research paper plus its community interactions (comments, ratings, saves)."""
    title = models.CharField(max_length=200) # About data storage space when specifying a max_length: https://stackoverflow.com/questions/30663791/do-setting-the-max-length-to-a-very-large-value-consume-extra-space
    authors = models.CharField(max_length=200)
    abstract = models.CharField(max_length=2000, blank=True)
    journal = models.CharField(max_length=80, blank=True)
    date_published = models.DateField(blank=True) # https://www.django-rest-framework.org/api-guide/fields/#datefield
    doi = models.CharField(max_length=32, blank=True)
    pdflink = models.CharField(max_length=80, blank=True)
    # Denormalized rating summary, maintained by application code
    # (presumably updated whenever a Rating row changes -- confirm).
    avg_rating = models.FloatField(default=0)
    num_ratings = models.PositiveIntegerField(default=0)
    # Useful example for many-to-many in django: https://www.revsys.com/tidbits/tips-using-djangos-manytomanyfield/
    # - TO DO: Get rid of these fields below in the Paper model? The Comment/Rating/UserSavedPaper tables can exist without them being here!?
    commented_by_users = models.ManyToManyField(
        'accounts.Profile',
        related_name='comments_made',
        through='Comment',
        blank=True
    )
    rated_by_users = models.ManyToManyField(
        'accounts.Profile',
        related_name='ratings_given',
        through='Rating',
        blank=True
    )
    saved_by_users = models.ManyToManyField(
        'accounts.Profile',
        related_name="papers_saved",
        through='UserSavedPaper',
        blank=True
    )
    def __str__(self):
        return self.title
# Custom "through" models: https://docs.djangoproject.com/en/3.0/ref/models/fields/#django.db.models.ManyToManyField.through_fields
class Comment(models.Model):
    """Through model: one user comment on one paper, with a timestamp."""
    text = models.CharField(max_length=500)
    #rating = models.PositiveIntegerField(blank=True) # should rating be given simultaneously with posting a comment?
    time = models.DateTimeField(default=timezone.now) # - TO DO: Look into and decide format for the timestamping
    profile = models.ForeignKey('accounts.Profile', related_name='comments', on_delete=models.CASCADE)
    paper = models.ForeignKey('Paper', related_name='comments', on_delete=models.CASCADE)
    def __str__(self):
        return self.text
# No support for composite primary key, e.g. (profile_id, paper_id) in django? https://stackoverflow.com/questions/15440593/tell-djangos-model-to-use-as-primary-key-a-set-of-foreign-keys
# - https://code.djangoproject.com/wiki/MultipleColumnPrimaryKeys
# - possible to enforce it using SQL commands, using something other than the Django ORM, e.g. SQLAlchemy)
# - there are validators that can be used with a Serializer to enforce "unique together" - https://www.django-rest-framework.org/api-guide/validators/#uniquetogethervalidator
class Rating(models.Model):
    """Through model: one user's numeric rating of one paper.

    Uniqueness of (profile, paper) is not enforced at the DB level here --
    see the comments above about composite keys / serializer validators.
    """
    rating = models.PositiveIntegerField()
    profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE)
    paper = models.ForeignKey('Paper', related_name='ratings', on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}"
#
class UserSavedPaper(models.Model):
    """Through model: a paper bookmarked by a user, with an optional note."""
    profile = models.ForeignKey('accounts.Profile', related_name='saved_papers', on_delete=models.CASCADE)
    paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE)
    comment = models.CharField(max_length=500, blank=True)
    def __str__(self):
        return f"user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}"
|
3,062 | 602a7676129721dbfd318407dd972f80d681146c | class CardHolder:
acctlen = 8
retireage = 59.5
def __init__(self, acct, name, age, addr):
self.acct = acct
self.name = name
self.age = age
self.addr = addr
def __getattribute__(self, item): # __getattribute__ intercepts calls for all
# attributes (defined and undefined)
superget = object.__getattribute__ # We have to use __getattribute__ of object
# class (superclass) to prevent looping
if item == 'acct':
return superget(self, 'acct')[:-3] + '***'
elif item == 'remain':
return superget(self, 'retireage') - superget(self, 'age')
else:
return superget(self, item)
def __setattr__(self, key, value):
if key == 'name':
value = value.lower().replace(' ', '_')
elif key == 'age':
if value < 0 or value > 130:
raise ValueError('invalid age')
elif key == 'acct':
value = value.replace('-', '')
if len(value) != self.acctlen:
raise TypeError('invalid acct number')
elif key == 'remain':
raise TypeError('cannot set remain')
self.__dict__[key] = value
if __name__ == '__main__':
    # Demo/self-test: exercises masking, normalisation and each validation path.
    bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')
    print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')
    bob.name = 'Bob Q. Smith'
    bob.age = 50
    bob.acct = '23-45-67-89'
    print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')
    sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')
    print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')
    # Each of the following writes is expected to raise.
    try:
        sue.age = 200
    except Exception:
        print('Bad age for Sue')
    try:
        sue.remain = 5
    except Exception:
        print("Can't set sue.remain")
    try:
        sue.acct = '1234567'
    except Exception:
        print('Bad acct for Sue')
|
3,063 | 9bb8e0f732eac474dbc01c374f9c74178f65dc36 | import sys
from bs4 import BeautifulSoup
def get_classes(html):
    """
    returns a list of classes and titles, parsing through 'html'

    *html* is a BeautifulSoup tag/soup. Course codes are expected in
    <span class="code"> elements and their titles in <span class="title">,
    in matching order; non-breaking spaces are normalised to plain spaces.

    The body below restores the implementation that was left commented out
    (the stub returned None while the docstring promised a list).
    """
    elements = html.find_all("span", "code")
    titles = html.find_all("span", "title")
    classes = []
    for code_el, title_el in zip(elements, titles):
        classes.append((code_el.text.replace('\xa0', ' '),
                        title_el.text.replace('\xa0', ' ')))
    return classes
3,064 | fa271d3888dc60582fa0883eaf9f9ebbdffeed9d | # ELABORE UM PROGRAMA QUE CALCULE O A SER PAGO POR UM PRODUTO CONSIDERANDO O PRECO NORMAL E A FORMA DE PAGAMENTO
# a vista dinehiro ou cheque: 10%
# a vista no cartao: 5%
# 2x: preco normal
# 3x ou mais: 20% de juros |
3,065 | 7d3a33968a375141c1c451ecd531ce8d97906c7f | import FitImport as imp
import numpy as np
from math import *
from sklearn.kernel_ridge import KernelRidge
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
GSFOLDS = 3
FOLDS = 5
NPTS = 25
#Method Select function that allows you tyo use a differnet learning method
# (NN,decision trees, etc) Simply replace GKRR Methods section with functions
#that run your method and have method select return the rms. MethodSelect
#provides the X to test in TestSet and the target values in Y.
def MethodSelect(TestSet,Y):
    """Pluggable learner hook: fit/evaluate on (TestSet, Y) and return an RMS.

    Swap the body to use a different model (NN, trees, ...) per the module
    comment above; currently delegates to the GKRR pipeline.
    """
    return SplitFitGKRR(TestSet,Y)
##--------GKRR METHODS---------#
def GetPrediction(X,regr):
    """Return the fitted estimator's predictions for feature matrix X."""
    return regr.predict(X)
def GetRMSE(Y,YP):
    """Root-mean-squared error between targets Y and predictions YP."""
    return sqrt(mean_squared_error(Y,YP))
def SplitFitGKRR(X,Y):
    """Hold out 20% of (X, Y), grid-search-fit kernel ridge, return test RMSE."""
    Xt,XT,Yt,YT = cross_validation.train_test_split(
                    X, Y, test_size = 0.2)
    regr = setBestParameters(len(Xt))
    regr.fit(Xt,Yt)
    return GetRMSE(YT,GetPrediction(XT,regr))
def setBestParameters(L,ker=0,npts=NPTS,f=GSFOLDS):
    """Build a GridSearchCV over RBF kernel-ridge alpha/gamma grids.

    :param L: number of training samples (caps the CV fold count)
    :param ker: unused -- kept for call-site compatibility
    :param npts: grid resolution per hyperparameter
    :param f: requested CV folds; shrunk to L-2 when L is too small
    """
    if f > L:
        f = L-2
    return GridSearchCV(KernelRidge(kernel='rbf'), cv=f,
                param_grid={"alpha": np.logspace(-6,3,npts),
                            "gamma": np.logspace(-10,0,npts)})
#--------Descriptor Methods---------#
#Gets Descriptor from position n and returns it.
#If a label Array from fitimport is passed in as well gets grabs the label that
#corresponds to the descriptor returned
def GODesc(X,n,label=None):
    """Return descriptor column *n* of feature matrix *X* as a list, plus its label.

    :param X: 2-D numpy array of shape (samples, descriptors)
    :param n: column index of the descriptor to extract
    :param label: optional label array from FitImport; when given, the
        descriptor name label[0][n] is returned, otherwise the string "None"
    :return: (list of column values, label string)
    """
    # `is not None` instead of `!= None`: the latter raises/misbehaves when
    # `label` is a numpy array (elementwise comparison has no single truth value).
    if label is not None:
        L = label[0][n]
    else:
        L = "None"
    X = X[:,n]
    return list(X),L
##---Main-----##
def FWDS(i = 50):
    """Greedy forward descriptor selection driven by MethodSelect's RMS.

    :param i: number of refit repetitions per candidate descriptor
              (each with a fresh train/test split); their mean RMS scores it.

    Fixes vs the original: the best descriptor is D[0] (lowest mean RMS after
    the ascending sort) -- the original used an undefined name `t` and raised
    NameError; the final summary loop also skipped the last selected
    descriptor (range(len-1)).
    """
    X,Y,L = imp.FullImport(1)
    T = len(X[0]) #Number Of Descriptors
    BestDesc = [] #Holds Best Descriptors
    while ((len(BestDesc) != 2) and len(BestDesc) < T) :
        #continue until the target number of Best Descriptors is reached
        #or until no more Descr to add
        D = np.arange(T)
        TestRMS = []
        for z in range(T): #Test Each descriptor
            TestSet = []
            setRMS = []
            for n in range(len(BestDesc)): #Add already known bests
                desc,_ = GODesc(X,BestDesc[n])
                TestSet.append(desc)
            tryfit = True #add step new descriptors?
            if BestDesc.count(z) == 0: #Test if already one of best before try
                desc,_ = GODesc(X,z)
                TestSet.append(desc)
            else: tryfit = False
            print(TestSet)
            if tryfit == True: #If not best, test with current best
                TestSet = np.swapaxes(TestSet,0,1)
                for n in range(i):
                    rms = MethodSelect(TestSet,Y)
                    setRMS.append(rms)
                TestRMS.append(np.mean(setRMS))
                print("Descr ",z,"",L[0][z]+" has rms",np.mean(setRMS))
            else: #Skip if already a best descriptor
                # Sentinel RMS so already-chosen descriptors sort to the back.
                TestRMS.append(10)
        #Sort candidates by ascending mean RMS (best first) and report them.
        print("\n");
        TestRMS, D = (list(t) for t in zip(*sorted(zip(TestRMS, D))))
        for num in range(len(TestRMS)):
            print("Descr ",D[num],"",L[0][D[num]]+" has rms",TestRMS[num])
        #Add best Descriptor (lowest mean RMS) to list
        BestDesc.append(D[0])
        print("Desc",D[0],"("+str(L[0][D[0]])+
            ") added as descriptor number",len(BestDesc),"\n\n")
    #When done print best Descriptors in order
    print("Best Descs are:")
    for n in range(len(BestDesc)):
        print(L[0][BestDesc[n]])
#FWDS takes 1 argument, which is the number of iterations to test each
#descriptor; each iteration uses a different training and testing split.
if __name__ == '__main__':
    # BUG FIX: the entry point called the undefined name `FWS`;
    # the selection routine above is named FWDS.
    FWDS(1)
|
3,066 | 16d86c48c45ab0441046e968ea364d27f6dcfd12 | # -*- coding: utf-8 -*-
# Import packages
import matplotlib.pyplot as plt
import numpy as np
# Use the SimHei font so Chinese glyphs render, and fix minus-sign display
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Chart data: film titles (y axis) and box office in units of 100 million yuan
a = ["战狼2","速度与激情8","功夫瑜伽","西游伏妖篇","变形金刚5:最后的骑士","摔跤吧!爸爸","加勒比海盗5:死无对证","金刚:骷髅岛","极限特工:终极回归","生化危机6:终章","乘风破浪","神偷奶爸3","智取威虎山","大闹天竺","金刚狼3:殊死一战","蜘蛛侠:英雄归来","悟空传","银河护卫队2","情圣","新木乃伊",]
b = [56.01,26.94,17.53,16.49,15.45,12.96,11.8,11.61,11.28,11.12,10.49,10.3,8.75,7.55,7.32,6.99,6.88,6.86,6.58,6.23]
# Set the figure size
plt.figure(figsize=(20, 8), dpi=128)
# Horizontal bar chart; the first argument is an iterable (here a list).
# A vertical bar chart would use `width` instead of `height`.
plt.barh(a, b, height=0.5, color='red')
# Chart title and x-axis label
# NOTE(review): the title says 2018 — confirm the intended year for this data.
plt.title("2018年电影票房纪录", fontsize=24)
plt.xlabel("票房(亿元)", fontsize=14)
# X-axis ticks every 5 units (np.arange is used because range() has no float step)
my_x_ticks = np.arange(0, 61, 5)
plt.xticks(my_x_ticks)
# Dash-dot grid on both axes
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
# Render the figure
plt.show() |
3,067 | 6ca2a9040897e49c6407b9b0760240fec93b4df0 | from redstork import PageObject
class AnnotController:
    """Exposes the flattened page objects of a project's document pages."""

    def get_annotations(self, project, page_index):
        """Yield every flattened object on the page at `page_index`."""
        target_page = project.doc[page_index]
        for page_object in target_page.flat_iter():
            yield page_object
|
3,068 | 9f0e286268732e8cabb028b7c84f5ba72a6e8528 | """
Python asyncio Protocol extension for TCP use.
"""
import asyncio
import logging
import socket
class TcpTestProtocol(asyncio.Protocol):
    """
    Extension of asyncio protocol for TCP data.

    Used both for client connections made to a server and for incoming
    connections accepted by a server.  The owning stream/server object
    may be attached after construction via set_owner().
    """
    def __init__(self, test_stream=None, no_delay=False, window=None, server=None):
        """
        Initialize TCP Protocol object.

        :param test_stream: owning test stream (client side), may be None
        :param no_delay: disable Nagle's algorithm when True
        :param window: socket TX/RX buffer size in bytes, or None to keep defaults
        :param server: owning server (server side), may be None
        """
        self._transport = None
        self._socket = None
        self._stream = test_stream
        self._logger = logging.getLogger('py3iperf3')
        self._sock_id = None  # socket fileno, set in connection_made()
        self._no_delay = no_delay
        self._window = window
        self._server = server

    @property
    def socket_id(self):
        """Return socket id (the underlying socket's fileno, None before connect)."""
        return self._sock_id

    def set_owner(self, owner, is_stream=False):
        """Update owner to test from server once ready."""
        if is_stream:
            self._logger.debug('TCP Proto Stream is set!')
            self._stream = owner
        else:
            self._server = owner

    def connection_made(self, transport):
        """Connection established call-back."""
        self._transport = transport
        self._socket = transport.get_extra_info('socket')
        self._sock_id = self._socket.fileno()
        if self._server is None:
            # This is client connecting to the server
            self.connection_to_server_made(transport)
        else:
            # This is incoming connection from the client
            self.connection_from_client(transport)

    def connection_from_client(self, transport):
        """Connection from the client established to the server."""
        peer_data = transport.get_extra_info('peername')
        self._logger.info('[%s] incomming connection from %s port %s',
                          self._sock_id, peer_data[0], peer_data[1])
        self._server.tcp_connection_established(self)

    def connection_to_server_made(self, transport):
        """Connecton to the server established: log endpoints, apply socket options."""
        local_data = self._socket.getsockname()
        peer_data = transport.get_extra_info('peername')
        self._logger.info('[%s] local %s:%s connected to %s:%s',
                          self._sock_id, local_data[0], local_data[1],
                          peer_data[0], peer_data[1])
        # No delay OFF -> Nagle's alg used
        self._socket.setsockopt(
            socket.IPPROTO_TCP,
            socket.TCP_NODELAY,
            0)
        # If required - turn off Nagle's alg (No Delay ON)
        if self._no_delay:
            self._socket.setsockopt(
                socket.IPPROTO_TCP,
                socket.TCP_NODELAY,
                1)
        # Set Socket TX/RX buffer sizes if specified
        if self._window:
            self._logger.debug('Setting socket buffer sizes to %s B', self._window)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self._window)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self._window)
        # Log the buffer sizes actually granted by the OS:
        rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
        tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
        self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;', tx_buf, rx_buf)
        self._stream.connection_established(self)

    def data_received(self, data):
        """
        Data received call-back.
        """
        # Inform the server that we have data until the stream is ready
        if self._stream is None:
            self._server.control_data_received(self, data)
        else:
            self._stream.data_received(data)

    def connection_lost(self, exc):
        """
        Callback on connection lost.
        """
        # BUG FIX: on a server-side control connection self._stream may
        # still be None here; the original dereferenced it unconditionally
        # and raised AttributeError instead of logging the disconnect.
        if self._stream is not None and self._stream.done:
            # Stream is done, no need to panic
            pass
        else:
            self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)

    def send_data(self, data):
        """
        Write data to transport.
        """
        self._transport.write(data)

    def pause_writing(self):
        """
        Pause writing callback from transport.
        """
        self._stream.pause_writing()

    def resume_writing(self):
        """
        Resume writing callback from transport.
        """
self._stream.resume_writing() |
3,069 | 1438a268780217e647999ba031aa4a50a6912d2f | """ AuthService class module.
"""
from urllib.parse import urlencode
from http.client import HTTPConnection, HTTPResponse, HTTPException
from dms2021sensor.data.rest.exc import NotFoundError
class AuthService():
    """ REST client to connect to the authentication service.
    """

    def __init__(self, host: str, port: int):
        """ Constructor method.

        Stores the network location of the authentication service.
        ---
        Parameters:
            - host: The authentication service host string.
            - port: The authentication service port number.
        """
        self.__host: str = host
        self.__port: int = port

    def __get_connection(self) -> HTTPConnection:
        """ Open a fresh HTTP connection to the configured host and port.
        ---
        Returns:
            The connection object.
        """
        return HTTPConnection(self.__host, self.__port)

    def has_right(self, username: str, right: str) -> bool:
        """ Determines whether a given user from the authentication server
        has a certain right or not.
        ---
        Parameters:
            - username: The user name string.
            - right: The right name.
        Returns:
            True if the user has the given right.
        Throws:
            - NotFoundError: if the user does not have the right, the user
              does not exist, or the right does not exist.
            - HTTPException: On an unhandled 500 error.
        """
        body: str = urlencode({'username': username, 'right': right})
        request_headers: dict = {
            'Content-type': 'application/x-www-form-urlencoded'
        }
        connection: HTTPConnection = self.__get_connection()
        resource: str = '/users/' + str(username) + '/rights/' + str(right)
        connection.request('GET', resource, body, request_headers)
        response: HTTPResponse = connection.getresponse()
        status: int = response.status
        if status == 404:
            raise NotFoundError()
        if status == 500:
            raise HTTPException('Server error')
        return status == 200
|
3,070 | 1e7789b154271eb8407a027c6ddf6c941cc69a41 | import json
import time
from keySender import PressKey,ReleaseKey,dk
# Key bindings: logical game action -> DirectInput key name, resolved at
# press time through keySender.dk.
config = {
    "Up": "W",
    "Down": "S",
    "Left": "A",
    "Right": "D",
    "Grab": "LBRACKET",
    "Drop": "RBRACKET"
}
### Commands
# Move
def Move(direction, delay=.2):
    """Tap the key bound to `direction`, holding it for `delay` seconds."""
    key_code = dk[config[direction]]
    PressKey(key_code)
    time.sleep(delay)  # Replace with a better condition
    ReleaseKey(key_code)
# Push/Pull
def Action(direction, pull=None):
    """Grab-and-move in `direction`.

    If `pull` is given it is the direction to face first (e.g. 'Pull Right'
    needs to face left before grabbing), and the keys are held longer.
    """
    hold_time = .6
    if pull:
        hold_time = 1
        # Face the block before grabbing it.
        PressKey(dk[config[pull]])
        ReleaseKey(dk[config[pull]])
        PressKey(dk[config["Grab"]])
        PressKey(dk[config[direction]])
    else:
        PressKey(dk[config[direction]])
        PressKey(dk[config["Grab"]])
    time.sleep(hold_time)
    ReleaseKey(dk[config[direction]])
    ReleaseKey(dk[config["Grab"]])
# References for keywords in file
# Command vocabulary recognised by init(); climb/turn/pull/push commands
# carry their direction as the second word.
moveKeys = ["Up","Down","Left","Right"]
climbKeys = ["Climb Up", "Climb Down", "Climb Left", "Climb Right"]
turnKeys = ["Turn Up", "Turn Down", "Turn Left", "Turn Right"]
pullKeys = ["Pull Up", "Pull Down","Pull Left", "Pull Right"]
pushKeys = ["Push Up", "Push Down", "Push Left", "Push Right"]
# Simplify turning
# Opposite of each direction; used to face a block before pulling it.
inverseDirections = {
    "Up": "Down",
    "Down": "Up",
    "Left": "Right",
    "Right": "Left",
}
### Interpreter
def init(filePath):
    """Replay a JSON command file.

    'Manual' files hold a list of discrete commands executed in order;
    'Recorded' files hold timed press/release events replayed against
    the wall clock.
    """
    data = json.load(open(filePath))
    pushed_keys = {"Up": False, "Down": False, "Left": False, "Right": False, "Grab": False}
    if data['Style'] == "Manual":
        for c in data['Main']:
            try:
                if c in moveKeys:
                    Move(c)
                elif c in climbKeys:
                    Move(c.split(" ")[1], delay=.6)
                elif c in turnKeys:
                    Move(c.split(" ")[1], delay=.1)
                elif c in pullKeys:
                    direction = c.split(" ")[1]
                    Action(direction, pull=inverseDirections[direction])
                elif c in pushKeys:
                    Action(c.split(" ")[1])
                else:
                    print(c + " is not recognized as a command")
                print(c)
            except Exception as e:
                print(e)
    elif data['Style'] == "Recorded":
        print("Reading Recorded file")
        # Recording length = the latest 'End' timestamp of any event.
        total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']
        start_time = round(time.time(), 2)
        print("length of recording: " + str(total_time))
        while time.time() < start_time + total_time:
            timer = round(time.time() - start_time, 2)
            for c in data['Main']:
                if timer > c['Start'] and timer < c['End'] and not pushed_keys[c['State']]:
                    print("pressing key " + c['State'])
                    PressKey(dk[config[c['State']]])
                    pushed_keys[c['State']] = True
                # BUG FIX: the original released only when `timer == c['End']`,
                # an exact float comparison this polling loop almost never
                # hits, leaving keys stuck down.  Release once the end time
                # has passed instead.
                elif timer >= c['End'] and pushed_keys[c['State']]:
                    print("releasing " + c['State'])
                    ReleaseKey(dk[config[c['State']]])
pushed_keys[c['State']] = False |
3,071 | 2101299d6f6bfcd4726591fc256317968373ca1f | REGION_LIST = [
'Центральный',
'Северо-Западный',
'Южный',
'Северо-Кавказский',
'Приволжский',
'Уральский',
'Сибирский',
'Дальневосточный',
]
CITY_LIST = {
'Абакан': 7,
'Альметьевск': 5,
'Ангарск': 7,
'Архангельск': 2,
'Астрахань': 3,
'Барнаул': 7,
'Батайск': 3,
'Белгород': 1,
'Бийск': 7,
'Благовещенск': 8,
'Братск': 7,
'Брянск': 1,
'Великий Новгород': 2,
'Владивосток': 8,
'Владикавказ': 4,
'Владимир': 1,
'Волгоград': 3,
'Волжский': 3,
'Вологда': 2,
'Воронеж': 1,
'Грозный': 4,
'Дзержинск': 5,
'Екатеринбург': 6,
'Иваново': 1,
'Ижевск': 5,
'Иркутск': 7,
'Йошкар-Ола': 5,
'Казань': 5,
'Калининград': 2,
'Калуга': 1,
'Кемерово': 7,
'Киров': 5,
'Комсомольск-на-Амуре': 8,
'Кострома': 1,
'Краснодар': 3,
'Красноярск': 7,
'Курган': 6,
'Курск': 1,
'Липецк': 1,
'Магнитогорск': 6,
'Махачкала': 4,
'Миасс': 6,
'Минеральные Воды': 4,
'Москва и Подмосковье': 1,
'Москва': 1,
'Мурманск': 2,
'Набережные Челны': 5,
'Нальчик': 4,
'Нефтекамск': 5,
'Нижневартовск': 6,
'Нижнекамск': 5,
'Нижний Новгород': 5,
'Нижний Тагил': 6,
'Новокузнецк': 7,
'Новомосковск': 1,
'Новороссийск': 3,
'Новосибирск': 7,
'Ноябрьск': 6,
'Обнинск': 1,
'Октябрьский': 5,
'Омск': 7,
'Орел': 1,
'Оренбург': 5,
'Орск': 5,
'Пенза': 5,
'Пермь': 5,
'Петрозаводск': 2,
'Петропавловск-Камчатский': 8,
'Прокопьевск': 7,
'Псков': 2,
'Пятигорск': 4,
'Ростов-на-Дону': 3,
'Рязань': 1,
'Самара': 5,
'Санкт-Петербург': 2,
'Саранск': 5,
'Саратов': 5,
'Севастополь': 3,
'Северодвинск': 2,
'Симферополь': 3,
'Смоленск': 1,
'Сочи': 3,
'Ставрополь': 4,
'Старый Оскол': 1,
'Стерлитамак': 5,
'Сургут': 6,
'Сыктывкар': 2,
'Таганрог': 3,
'Тамбов': 1,
'Тверь': 1,
'Тольятти': 5,
'Томск': 7,
'Тула': 1,
'Тюмень': 6,
'Улан-Удэ': 7,
'Ульяновск': 5,
'Уфа': 5,
'Хабаровск': 8,
'Чебоксары': 5,
'Челябинск': 6,
'Череповец': 2,
'Чита': 7,
'Шахты': 3,
'Энгельс': 5,
'Южно-Сахалинск': 8,
'Якутск': 8,
'Ярославль': 1,
}
|
3,072 | b236abaa5e206a8244083ee7f9dcdb16741cb99d | from typing import List, Tuple
import pytest
def fit_transform(*args: str) -> List[Tuple[str, List[int]]]:
    """One-hot encode a sequence of categories, preserving input order.

    Accepts either multiple string arguments or a single iterable of
    categories.  Each unique category gets a distinct one-hot bit; the
    code width equals the number of unique categories.
    """
    if not args:
        raise TypeError('expected at least 1 arguments, got 0')
    cats = list(args) if isinstance(args[0], str) else list(args[0])
    code_width = len(set(cats))
    fmt = f'{{0:0{code_width}b}}'
    codes = {}
    encoded = []
    for cat in cats:
        if cat not in codes:
            codes[cat] = [int(bit) for bit in fmt.format(1 << len(codes))]
        encoded.append((cat, codes[cat]))
    return encoded
def test_str_fit_transformr():
    # Known-answer test: four city labels, three unique -> 3-bit one-hot codes.
    assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [
        ('Moscow', [0, 0, 1]),
        ('New York', [0, 1, 0]),
        ('Moscow', [0, 0, 1]),
        ('London', [1, 0, 0]),
    ]
def test_int_fit_str_transformr():
    # Non-string categories take the single-iterable calling convention.
    assert fit_transform([1, 2, 1, 3]) == [
        (1, [0, 0, 1]),
        (2, [0, 1, 0]),
        (1, [0, 0, 1]),
        (3, [1, 0, 0]),
    ]
# To check that the code raises an exception, use the pytest.raises context manager.
def test_error_type_fit_transformr():
    with pytest.raises(TypeError):
        fit_transform(1)
@pytest.fixture()
def randomize():
    # Fixture: a random-length (0..10) list of random digits 0..9.
    from random import randint
    return [randint(0, 9) for _ in range(randint(0, 10))]
def test_intv2_fit_transformr(randomize):
    # Property test: the output always has one entry per input category.
    print(randomize)
    result = fit_transform(randomize)
    assert (len(result) == len(randomize))
|
3,073 | cc6f70e328b774972e272e9600274dfd9fca93ee | import cv2
import matplotlib.pyplot as plt
import numpy as np
# Intrinsic-image demo: an image factors as albedo * shading.
ball = plt.imread('ball.png')
albedo = plt.imread('ball_albedo.png')
# Expand the grayscale shading map to 3 channels for elementwise RGB multiply.
shading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)
# Sample one non-black pixel to report the ball's albedo colour.
x,y,z = np.where(albedo != 0)
print('Albedo:', albedo[x[0],y[0]])
print("Albedo in RGB space:", albedo[x[0],y[0]]*255)
# Recolour every non-black albedo pixel pure green.
# conversion of shading to RGB mapped the values to [0,1], therefore (0,255,0) = (0,1,0)
albedo[np.where(albedo[:,:,] != (0,0,0))[:-1]] = (0,1.,0)
# Show the original next to the green-albedo reconstruction (rendered by the
# plt.show() call that follows).
plt.subplot(1,2,1)
plt.imshow(ball)
plt.subplot(1,2,2)
plt.imshow(albedo * shading)
plt.show() |
3,074 | c6cbd4d18363f00b73fac873ba45d6063bee7e64 | # -*- encoding: utf-8 -*-
"""
views: vistas sistema recomendador
@author Camilo Ramírez
@contact camilolinchis@gmail.com
camilortte@hotmail.com
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@satus Pre-Alpha
@version= 0..215
"""
from django.views.generic import TemplateView
from apps.recommender_system.models import EstablecimientosRecommender
from apps.establishment_system.models import Establecimiento
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from apps.externals.djangoratings.models import Vote
class RecomendacionView(TemplateView):
    """Login-only page listing up to 10 recommended establishments for the
    current user (Python 2 / Django class-based view)."""
    template_name = 'recommender/recomendacion.html'
    def get_context_data(self, **kwargs):
        # Expose the recommendation list to the template.
        context = super(RecomendacionView, self).get_context_data(**kwargs)
        #context['now'] = timezone.now()
        context['recomendaciones']=self.obtener_recomendacion(self.request.user)
        return context
    def obtener_recomendacion(self,user):
        # Ask the recommender for personalised suggestions, then pad the list
        # up to 10 entries with the top-rated establishments the user has not
        # voted on yet.
        print "Prediciendo recomendacion"
        recomendador_instance=EstablecimientosRecommender()
        recomendaciones=recomendador_instance.storage.get_recommendations_for_user(user)
        print recomendaciones
        if recomendaciones:
            print "Recomendando"
            # Unwrap the recommender results into model instances.
            result=[]
            for recomendacion in recomendaciones:
                result.append(recomendacion.object)
            recomendaciones=result
            recomendaciones_leng=len(recomendaciones)
            if recomendaciones_leng <10:
                query=Establecimiento.objects.all().order_by('-rating_score')
                for establecimiento in query:
                    if establecimiento not in recomendaciones:
                        if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):
                            recomendaciones.append(establecimiento)
                    if len(recomendaciones)>=10:
                        break
        else:
            # No personalised results: fall back to the global rating order.
            # NOTE(review): this assumes the falsy `recomendaciones` is an
            # empty list — if the storage returned None, append() would fail.
            query=Establecimiento.objects.all().order_by('-rating_score')
            for establecimiento in query:
                if establecimiento not in recomendaciones:
                    if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):
                        recomendaciones.append(establecimiento)
                if len(recomendaciones)>=10:
                    print "Se completo la lista de 10 recomendaciones"
                    break
        # NOTE(review): this message prints unconditionally, even when
        # recommendations were found — confirm whether that is intended.
        print "No se encontraron recomendaciones"
        return recomendaciones
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require authentication for every HTTP method.
        return super(RecomendacionView, self).dispatch(*args, **kwargs)
|
3,075 | bf83556b8e8855a0e410fcfb3b42161fbc681830 |
b=int(input('enter anum '))
for a in range(1,11,1):
print(b,'x',a,'=',a*b) |
3,076 | 4aefabf064cdef963f9c62bd5c93892207c301d3 | # Generated by Django 2.1.4 on 2019-04-17 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefines the `choices` list of
    Actualizacion.valoracion_medica (medical fitness assessment concept)."""
    dependencies = [
        ('historiasClinicas', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='actualizacion',
            name='valoracion_medica',
            field=models.CharField(choices=[('Apto para desempeñar el cargo sin patologia aparente', 'Apto para desempeñar el cargo sin patologia aparente'), ('Apto para desempañar el cargo con patologia que no limita la labor', 'Apto para desempañar el cargo con patologia que no limita la labor'), ('Apto con restricciones o adaptaciones para la labor', 'Apto con restricciones o adaptaciones para la labor'), ('Aplazado', 'Aplazado'), ('Apto para labor el alturas', 'Apto para labor el alturas'), ('Apto para continuar desempeñando su labor', 'Apto para continuar desempeñando su labor'), ('Examen de retiro', 'Examen de retiro'), ('Apto para manipulación de alimentos', 'Apto para manipulación de alimentos')], max_length=50, verbose_name='Concepto de valoracion medica'),
        ),
    ]
|
3,077 | 41013469e65e45f6c909d66c2a54eaf11dfd474c | """A number can be broken into different contiguous sub-subsequence parts.
Suppose, a number 3245 can be broken into parts like 3 2 4 5 32 24 45 324 245.
And this number is a COLORFUL number, since product of every digit of a contiguous subsequence is different
"""
def colorful(A):
    """Return 1 if A is a COLORFUL number, else 0.

    A number is colorful when the digit products of all of its contiguous
    digit substrings are pairwise distinct.  Single-digit numbers are
    trivially colorful.
    """
    digits = str(A)
    n = len(digits)
    if n == 1:
        return 1
    seen_products = set()
    for start in range(n):
        for end in range(start, n):
            product = 1
            for ch in digits[start:end + 1]:
                product *= int(ch)
            if product in seen_products:
                return 0
            seen_products.add(product)
    return 1
# Smoke-test: a single digit (colorful by definition), a repeated-digit
# number (not colorful) and the canonical colorful example.
print (colorful(0))
print (colorful(111))
print (colorful(3245))
|
3,078 | 00afab442f56d364c785324f816b52b4a6be609d | '''
Take list of iam users in a csv file like
S_NO, IAM_User_Name,Programatic_Access,Console_Access,PolicyARN
1,XYZ, Yes,No,arn:aws:iam::aws:policy/AdministratorAccess
2.pqr,Yes,Yes,arn:aws:iam::aws:policy/AdministratorAccess
3.abc,No,Yes,arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess
'''
import boto3,sys
from pprint import pprint
# NOTE(review): this loops forever by construction — the exit guard below
# (`each==509`) can never fire because `each` ranges over 701..1099, so
# sys.exit() is unreachable and the outer `while True` never terminates.
while True:
    session=boto3.session.Session(profile_name="dev_root")
    iam_re=session.resource(service_name="iam")
    for each in range(701,1100):
        try:
            # Creates users ixasisiidemo701 .. ixasisiidemo1099.
            iam_re.create_user(UserName="ixasisiidemo"+str(each))
            if each==509:
                sys.exit()
        except:
            # NOTE(review): bare except silently swallows every error,
            # including auth failures — narrow to the botocore exceptions.
            continue
|
3,079 | 088c77e090d444e7057a91cac606995fb523c8ef | print("Enter string:")
# Classify each character of one line of user input; the counts v, c, d and
# ws (vowels, consonants, digits, spaces) are reported by the final print.
s = input()
a = s.lower()
vowel_chars = "aeiou"
consonant_chars = "bcdfghjklmnpqrstvwxyz"
digit_chars = "1234567890"
v = sum(ch in vowel_chars for ch in a)
c = sum(ch in consonant_chars for ch in a)
d = sum(ch in digit_chars for ch in a)
ws = a.count(" ")
print(v,c,d,ws) |
3,080 | 226fc85dc8b6d549fddef0ca43ad629875ac0717 | from django.db import models
class Course(models.Model):
    """Course record: an id string, a display title and a link URL."""
    # cid: course identifier — presumably an external course code, not the
    # database primary key; confirm against the code that populates it.
    cid = models.CharField(max_length=100)
    title = models.CharField(max_length=500)
    link = models.CharField(max_length=300)
|
3,081 | 4fa9c00a07c8263a6a3afd460b84f21637a771ec |
'''
This file creates the model of Post, which maps to the post table in the mysql database.
The model Provider contains four attributes: author, title, content, and created time.
'''
from django.db import models
class Post(models.Model):
    '''
    The education post by provider database model.

    Holds an author (Provider), a required title and content, and a
    creation timestamp set automatically on first save.
    '''
    # Deleting the Provider cascades to their posts.
    author = models.ForeignKey('Provider', on_delete=models.CASCADE)
    title = models.CharField(max_length=255, null=False)
    content = models.TextField(null=False)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return "{}".format(self.title)
|
3,082 | 8ff9961c1415c04899bbc15ba64811a1b3ade262 | from keras.preprocessing.image import img_to_array
from keras.models import load_model
import tensorflow as tf
import numpy as np
import argparse
import imutils
import pickle
import cv2
# USAGE
# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle
# --colorbin output/color_lb.pickle --image examples/black_dress.jpg
# Inference script for a two-headed (category + colour) fashion classifier.
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="path to trained model model")
ap.add_argument("-l", "--categorybin", required=True, help="path to output category label binarizer")
ap.add_argument("-c", "--colorbin", required=True, help="path to output color label binarizer")
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())
# load the image; keep a display copy, and convert BGR (OpenCV order) to RGB
# for the network
image = cv2.imread(args["image"])
output = imutils.resize(image, width=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# pre-process the image for classification: resize to the 96x96 network
# input, scale to [0,1], and add a batch dimension
image = cv2.resize(image, (96, 96))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
# load the trained convolutional neural network from disk, followed
# by the category and color label binarizers, respectively
# NOTE(review): pickle is unsafe on untrusted input — only load binarizer
# files produced by your own training run.
print("[INFO] loading network...")
model = load_model(args["model"], custom_objects={"tf": tf})
categoryLB = pickle.loads(open(args["categorybin"], "rb").read())
colorLB = pickle.loads(open(args["colorbin"], "rb").read())
# classify the input image using Keras' multi-output functionality
print("[INFO] classifying image...")
(categoryProba, colorProba) = model.predict(image)
# find indexes of both the category and color outputs with the
# largest probabilities, then determine the corresponding class
# labels
categoryIdx = categoryProba[0].argmax()
colorIdx = colorProba[0].argmax()
categoryLabel = categoryLB.classes_[categoryIdx]
colorLabel = colorLB.classes_[colorIdx]
# draw the category label and color label on the image
categoryText = "category: {} ({:.2f}%)".format(categoryLabel, categoryProba[0][categoryIdx] * 100)
colorText = "color: {} ({:.2f}%)".format(colorLabel, colorProba[0][colorIdx] * 100)
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# display the predictions to the terminal as well
print("[INFO] {}".format(categoryText))
print("[INFO] {}".format(colorText))
# show the output image
cv2.imshow("Output", output)
cv2.waitKey(0)
|
3,083 | 4f8bc19bb113c9eac7c2ac774ac7b16f569d9704 | # operatorTest02.py
x = 5
x += 3 # compound (augmented) assignment operator
print("x : ", x)
print("-"*30)
total = 0
total += 1
# NOTE(review): the bare `total` expression on the next line only echoes a
# value in a REPL/notebook; run as a script it has no effect — use
# print(total) if output is wanted.
total |
3,084 | 339506777f5471ec99b39c67c28df8ec3d06ce19 | from django.shortcuts import render,redirect
from . import download_function
from django.http import HttpResponse
# Create your views here.
def download(request):
    """Resolve a download URL and return it as the plain response body.

    Expects GET parameters `session`, `download_title` and
    `download_quality`, forwarded to download_function.download_generator.
    NOTE(review): non-GET requests fall through and implicitly return None,
    which Django rejects — consider an explicit error response.
    """
    if request.method == "GET":
        session = request.GET['session']
        title = request.GET['download_title']
        download_quality = request.GET['download_quality']
        file_url = download_function.download_generator(session,download_quality,title)
        return HttpResponse(file_url)
|
3,085 | 3b803850418638bf65528088044918e93ecabff6 | class BucketSort:
    def __init__(self,a):
        # Sequence to sort; result() later replaces it with the sorted list.
        self.a = a
def result(self,bucketCount = 10):
buckets = [[] for i in range(bucketCount+1)]
maxElement = max(self.a)
minElement = min(self.a)
bucketRange = (maxElement-minElement+1)/bucketCount
for i in range(len(self.a)):
bucketIndex = int((self.a[i]-minElement)/bucketRange)
buckets[bucketIndex].append(self.a[i])
for i in range(len(buckets)):
buckets[i] = sorted(buckets[i])
self.a = []
for bucket in buckets:
self.a.extend(bucket)
return self.a
|
3,086 | 82abed3a60829eeabf6b9e8b791085d130ec3dd4 | #Purpose: find the bonds, angles in Zr/GPTMS .xyz outpuf file from simulation
from Tkinter import Tk
from tkFileDialog import askopenfilename
Tk().withdraw()
from pylab import *
from scipy import *
from numpy import *
import numpy as np
import math
################################################################################
################################################################################
def distance(x1, x2, y1, y2, z1, z2):
    """Euclidean distance between points (x1,y1,z1) and (x2,y2,z2)."""
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2) ** 0.5
def dist(dx, dy, dz):
    """Length of the displacement vector (dx, dy, dz)."""
    return (dx * dx + dy * dy + dz * dz) ** 0.5
def magnitude(x, y, z):
    """Magnitude (Euclidean norm) of the vector (x, y, z)."""
    return (x * x + y * y + z * z) ** 0.5
def angle(x1, x2, y1, y2, z1, z2):
    """Angle in degrees between vectors (x1,y1,z1) and (x2,y2,z2)."""
    norm_a = (x1 * x1 + y1 * y1 + z1 * z1) ** 0.5
    norm_b = (x2 * x2 + y2 * y2 + z2 * z2) ** 0.5
    cosine = (x1 * x2 + y1 * y2 + z1 * z2) / (norm_a * norm_b)
    return math.acos(cosine) * (180.0 / math.pi)
def vector(x1, x2, y1, y2, z1, z2):
    """Displacement vector from point 2 to point 1, as [dx, dy, dz]."""
    return [x1 - x2, y1 - y2, z1 - z2]
def unique_atoms(atoms_involved):
    """Return the unique entries of `atoms_involved`, first-seen order.

    A linear membership scan is used (not a set) because entries may be
    unhashable, e.g. lists of indices.
    """
    deduped = []
    for entry in atoms_involved:
        if entry not in deduped:
            deduped.append(entry)
    return deduped
def find_repeat(index1, index2, index3, index4, atom1, atom2, atom3, atom4):
    """Find the repeated atom among four index/atom pairs.

    Returns [center_atom, other_a, other_b] where the repeated (center)
    atom comes first, checking index pairs in the fixed order
    (1,2), (1,3), (1,4), (2,3), (2,4), (3,4).
    Falls through (returns None) if no pair of indices matches.
    """
    indices = (index1, index2, index3, index4)
    atoms = (atom1, atom2, atom3, atom4)
    for a, b in ((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)):
        if indices[a] == indices[b]:
            remainder = [atoms[k] for k in range(4) if k != a and k != b]
            return [atoms[a]] + remainder
#bin the data using the histogram function from numpy
def get_histogram_data(data_list, N_bins):
    """Histogram `data_list` into N_bins equal-width bins.

    The bin range is padded by 1 below the minimum and above the maximum.
    Returns [bin_midpoints, counts, midpoint_of_most_populated_bin]; ties
    for the most populated bin resolve to the last (highest) midpoint.
    """
    bin_edges = linspace(min(data_list) - 1, max(data_list) + 1, N_bins + 1)
    events, edges = histogram(data_list, bin_edges)
    lower = resize(edges, len(edges) - 1)
    tmid = lower + 0.5 * diff(edges)
    peak = max(events)
    peak_index = 0
    for i, count in enumerate(events):
        if count == peak:
            peak_index = i
    return [tmid, events, tmid[peak_index]]
def lists_overlap(a, b):
    """Return the elements of `a` that also occur in `b` (duplicates kept)."""
    return [item for item in a if item in b]
def remove_overlap(intersection, a):
    """Return the elements of `a` that do not appear in `intersection`."""
    return [item for item in a if item not in intersection]
################################################################################
################################################################################
#import data
filename = askopenfilename()
print "Working with file:", filename
atom_data = []
with open(filename) as inputfile:
for line in inputfile:
atom_data.append(line.strip().split( ))
N_bins_bonds = input('How many bins do you want for the bond histrogram? Note: on average,\
10 bins will correspond to 0.1 Angstroms) ')
N_bins_angles = input('How many bins do you want for the angle histogram? Note: on average,\
25 bins will correspond to 0.1 degrees) ')
####Lx = input('What is the final x-dimension of the simulation cell? ')
##
##N_O = input('How many oxygen atoms are in the simulation? ')
##N_Zr = input('How many Zr atoms are in the simulation? ')
##N_silane = input('How many silane molecules are in the simulation? ')
##
##N_C = N_silane*6
##N_Si = N_silane
##N_Q = N_silane
#access number of atoms from file and remove first 2 lines
N_atoms = int(atom_data[0][0])
atom_data = atom_data[2:]
#get atom type indicies
O_atom_index = []
Zr_atom_index = []
Si_atom_index = []
C_atom_index = []
Q_atom_index = []
for i in range(len(atom_data)):
if atom_data[i][0]=='2':
O_atom_index.append(int(i))
elif atom_data[i][0]=='1':
Si_atom_index.append(int(i))
elif atom_data[i][0]=='3':
C_atom_index.append(int(i))
elif atom_data[i][0]=='5':
Q_atom_index.append(int(i))
elif atom_data[i][0]=='4':
Zr_atom_index.append(int(i))
N_O = len(O_atom_index)
N_Zr = len(Zr_atom_index)
N_silane = len(Si_atom_index)
N_Si = len(Si_atom_index)
N_Q = len(Q_atom_index)
N_C = len(C_atom_index)
print N_O
print N_Zr
print N_silane
print N_Si
print N_Q
print N_C
#transpose data into columns
atom_data = [list(x) for x in zip(*atom_data)]
for i in range(len(atom_data)):
for j in range(N_atoms):
atom_data[i][j] = float(atom_data[i][j])
#create first column to index the atoms (0 to N_atoms-1)
atom_index = []
for i in range(N_atoms):
atom_index.append(i)
#access column to get specific data
atom_type = atom_data[0]
x_coord = atom_data[1]
y_coord = atom_data[2]
z_coord = atom_data[3]
x_min = min(x_coord)
x_max = max(x_coord)
Lx = x_max - x_min
y_min = min(y_coord)
y_max = max(y_coord)
Ly = y_max - y_min
z_min = min(z_coord)
z_max = max(z_coord)
Lz = z_max - z_min
##print Lx,Ly,Lz
print ''
print 'Data imported!'
################################################################################
#Find bonds in data; 1=Si, 2=O, 3=C, 4=Zr, 5=Q; rows in data will be
# [index1, index2, atom1, atom2]
print ''
print 'Finding bonds...'
CC_bonds = []
CC_bonds_across_BC = []
CQ_bonds = []
CQ_bonds_across_BC = []
CSi_bonds = []
CSi_bonds_across_BC = []
SiO_bonds = []
SiO_bonds_across_BC = []
QQ_bonds = []
QQ_bonds_across_BC = []
ZrO_bonds = []
ZrO_bonds_across_BC = []
bond_index = []
for i in range(N_atoms):
index1 = atom_index[i]
atom1 = atom_type[i]
x1 = x_coord[i]
y1 = y_coord[i]
z1 = z_coord[i]
#to account for the PBC's, check if the distance between atoms
# is greater than Lx/2. If so, translate the larger coord by Lx.
for j in range(i+1,N_atoms):
index2 = atom_index[j]
atom2 = atom_type[j]
x2 = x_coord[j]
y2 = y_coord[j]
z2 = z_coord[j]
dx = abs(x1-x2)
dy = abs(y1-y2)
dz = abs(z1-z2)
#determine C-C bonds (only bonded on chains); cutoff 2.0A
if atom1==3 and atom2==3:
## if abs(index1-index2)<10: #only for non-parallel computation
d = dist(dx,dy,dz)
if d<=2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds.append(index_atom)
#account for PBCs
if dx > Lx-2.0:
d = dist(dx+Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds_across_BC.append(index_atom)
d = dist(dx-Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds_across_BC.append(index_atom)
if dy > Ly-2.0:
d = dist(dx,dy+Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds_across_BC.append(index_atom)
d = dist(dx,dy-Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds_across_BC.append(index_atom)
if dz > Lz-2.0:
d = dist(dx,dy,dz+Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds_across_BC.append(index_atom)
d = dist(dx,dy,dz-Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CC_bonds_across_BC.append(index_atom)
## d = distance(x1,x2,y1,y2,z1,z2)
## if d<=2.0:
## atoms = sorted([atom1,atom2])
## indicies = sorted([index1,index2])
## index_atom = indicies + atoms + [d]
## if index_atom not in CC_bonds:
## CC_bonds.append(index_atom)
##
## elif d>Lx/2:
## if abs(x1-x2)>Lx/2:
## if x1>x2:
## x1=x1-Lx
## else:
## x2=x2-Lx
## if abs(y1-y2)>Lx/2:
## if y1>y2:
## y1=y1-Lx
## else:
## y2=y2-Lx
## if abs(z1-z2)>Lx/2:
## if z1>z2:
## z1=z1-Lx
## else:
## z2=z2-Lx
##
## d = distance(x1,x2,y1,y2,z1,z2)
## if d<=2.0:
## atoms = sorted([atom1,atom2])
## indicies = sorted([index1,index2])
## index_atom = indicies + atoms + [d]
## if index_atom not in CC_bonds_across_BC:
## CC_bonds_across_BC.append(index_atom)
#determine C-Q bonds (next to each other in chain); cutoff 2.0A
elif (atom1==3 and atom2==5) or (atom1==5 and atom2==3):
## if abs(index1-index2)==1:
d = dist(dx,dy,dz)
if d<=2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds.append(index_atom)
#account for PBCs
if dx > Lx-2.0:
d = dist(dx+Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds_across_BC.append(index_atom)
d = dist(dx-Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds_across_BC.append(index_atom)
if dy > Ly-2.0:
d = dist(dx,dy+Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds_across_BC.append(index_atom)
d = dist(dx,dy-Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds_across_BC.append(index_atom)
if dz > Lz-2.0:
d = dist(dx,dy,dz+Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds_across_BC.append(index_atom)
d = dist(dx,dy,dz-Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CQ_bonds_across_BC.append(index_atom)
## d = distance(x1,x2,y1,y2,z1,z2)
## if d<=2.0:
## atoms = sorted([atom1,atom2])
## indicies = sorted([index1,index2])
## index_atom = indicies + atoms + [d]
## if index_atom not in CQ_bonds:
## CQ_bonds.append(index_atom)
##
## elif d>Lx/2:
## if abs(x1-x2)>Lx/2:
## if x1>x2:
## x1=x1-Lx
## else:
## x2=x2-Lx
## if abs(y1-y2)>Lx/2:
## if y1>y2:
## y1=y1-Lx
## else:
## y2=y2-Lx
## if abs(z1-z2)>Lx/2:
## if z1>z2:
## z1=z1-Lx
## else:
## z2=z2-Lx
##
## d = distance(x1,x2,y1,y2,z1,z2)
## if d<=2.0:
## atoms = sorted([atom1,atom2])
## indicies = sorted([index1,index2])
## index_atom = indicies + atoms + [d]
## if index_atom not in CQ_bonds_across_BC:
## CQ_bonds_across_BC.append(index_atom)
#determine C-Si bonds (next to each other in chain); cutoff 2.3A
elif (atom1==3 and atom2==1) or (atom1==1 and atom2==3):
## if abs(index1-index2)==1:
d = dist(dx,dy,dz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds.append(index_atom)
#account for PBCs
if dx > Lx-2.3:
d = dist(dx+Lx,dy,dz)
if d <= 2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds_across_BC.append(index_atom)
d = dist(dx-Lx,dy,dz)
if d <= 2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds_across_BC.append(index_atom)
if dy > Ly-2.3:
d = dist(dx,dy+Ly,dz)
if d <= 2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds_across_BC.append(index_atom)
d = dist(dx,dy-Ly,dz)
if d <= 2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds_across_BC.append(index_atom)
if dz > Lz-2.3:
d = dist(dx,dy,dz+Lz)
if d <= 2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds_across_BC.append(index_atom)
d = dist(dx,dy,dz-Lz)
if d <= 2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
CSi_bonds_across_BC.append(index_atom)
## d = distance(x1,x2,y1,y2,z1,z2)
## if d<=2.3:
## atoms = sorted([atom1,atom2])
## indicies = sorted([index1,index2])
## index_atom = indicies + atoms + [d]
## if index_atom not in CSi_bonds:
## CSi_bonds.append(index_atom)
##
## elif d>Lx/2:
## if abs(x1-x2)>Lx/2:
## if x1>x2:
## x1=x1-Lx
## else:
## x2=x2-Lx
## if abs(y1-y2)>Lx/2:
## if y1>y2:
## y1=y1-Lx
## else:
## y2=y2-Lx
## if abs(z1-z2)>Lx/2:
## if z1>z2:
## z1=z1-Lx
## else:
## z2=z2-Lx
##
## d = distance(x1,x2,y1,y2,z1,z2)
## if d<=2.3:
## atoms = sorted([atom1,atom2])
## indicies = sorted([index1,index2])
## index_atom = indicies + atoms + [d]
## if index_atom not in CSi_bonds_across_BC:
## CSi_bonds_across_BC.append(index_atom)
#determine Si-O bonds; cutoff at 2.0A; as a slight overestimate, we can say
# that if the difference in the x-coords is <= 2.0 (we assume that y and z
# coord are 0) then we will calculate the distance
elif (atom1==1 and atom2==2) or (atom1==2 and atom2==1):
d = dist(dx,dy,dz)
if d<=2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds.append(index_atom)
#account for PBCs
if dx > Lx-2.0:
d = dist(dx+Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds_across_BC.append(index_atom)
d = dist(dx-Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds_across_BC.append(index_atom)
if dy > Ly-2.0:
d = dist(dx,dy+Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds_across_BC.append(index_atom)
d = dist(dx,dy-Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds_across_BC.append(index_atom)
if dz > Lz-2.0:
d = dist(dx,dy,dz+Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds_across_BC.append(index_atom)
d = dist(dx,dy,dz-Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
SiO_bonds_across_BC.append(index_atom)
#determine Q-Q bonds; cuttoff at 2.0A; take difference in x-coord <=2.0
elif (atom1==5 and atom2==5):
d = dist(dx,dy,dz)
if d<=2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds.append(index_atom)
#account for PBCs
if dx > Lx-2.0:
d = dist(dx+Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds_across_BC.append(index_atom)
d = dist(dx-Lx,dy,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds_across_BC.append(index_atom)
if dy > Ly-2.0:
d = dist(dx,dy+Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds_across_BC.append(index_atom)
d = dist(dx,dy-Ly,dz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds_across_BC.append(index_atom)
if dz > Lz-2.0:
d = dist(dx,dy,dz+Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds_across_BC.append(index_atom)
d = dist(dx,dy,dz-Lz)
if d <= 2.0:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
QQ_bonds_across_BC.append(index_atom)
#determine Zr-O bonds; cutoff at 2.3A (Note: clustering affect will
# change the amount of calculated bonds - some atoms within cutoff that
# arnt actually bonded - source of error)
elif (atom1==2 and atom2==4) or (atom1==4 and atom2==2):
d = dist(dx,dy,dz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds.append(index_atom)
#account for PBCs
if dx > Lx-2.3:
d = dist(dx+Lx,dy,dz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds_across_BC.append(index_atom)
d = dist(dx-Lx,dy,dz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds_across_BC.append(index_atom)
if dy > Ly-2.3:
d = dist(dx,dy+Ly,dz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds_across_BC.append(index_atom)
d = dist(dx,dy-Ly,dz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds_across_BC.append(index_atom)
if dz > Lz-2.3:
d = dist(dx,dy,dz+Lz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds_across_BC.append(index_atom)
d = dist(dx,dy,dz-Lz)
if d<=2.3:
atoms = sorted([atom1,atom2])
indicies = sorted([index1,index2])
index_atom = indicies + atoms + [d]
if indicies not in bond_index:
bond_index.append(indicies)
ZrO_bonds_across_BC.append(index_atom)
bonds_data = CC_bonds+CC_bonds_across_BC+CQ_bonds+CQ_bonds_across_BC+\
CSi_bonds+CSi_bonds_across_BC+SiO_bonds+SiO_bonds_across_BC+\
QQ_bonds+QQ_bonds_across_BC+ZrO_bonds+ZrO_bonds_across_BC
bonds_data_no_BC = CC_bonds+CQ_bonds+CSi_bonds+SiO_bonds+QQ_bonds+ZrO_bonds
N_CC_bonds = len(CC_bonds)
N_CC_bonds_across_BC = len(CC_bonds_across_BC)
N_CC_tot = N_CC_bonds+N_CC_bonds_across_BC
N_CQ_bonds = len(CQ_bonds)
N_CQ_bonds_across_BC = len(CQ_bonds_across_BC)
N_CQ_tot = N_CQ_bonds+N_CQ_bonds_across_BC
N_CSi_bonds = len(CSi_bonds)
N_CSi_bonds_across_BC = len(CSi_bonds_across_BC)
N_CSi_tot = N_CSi_bonds+N_CSi_bonds_across_BC
N_SiO_bonds = len(SiO_bonds)
N_SiO_bonds_across_BC = len(SiO_bonds_across_BC)
N_SiO_tot = N_SiO_bonds+N_SiO_bonds_across_BC
N_QQ_bonds = len(QQ_bonds)
N_QQ_bonds_across_BC = len(QQ_bonds_across_BC)
N_QQ_tot = N_QQ_bonds+N_QQ_bonds_across_BC
N_ZrO_bonds = len(ZrO_bonds)
N_ZrO_bonds_across_BC = len(ZrO_bonds_across_BC)
N_ZrO_tot = N_ZrO_bonds+N_ZrO_bonds_across_BC
#for clustering algorithm, the indicies of the Zr-O bonds and Si-O bonds are needed
N_CC_CQ_CSi = N_CC_tot + N_CQ_tot + N_CSi_tot
SiO_index = [str(N_CC_CQ_CSi), str(N_CC_CQ_CSi+N_SiO_tot)]
N_CC_CQ_CSi_SiO_QQ = N_CC_tot + N_CQ_tot + N_CSi_tot + N_SiO_tot + N_QQ_tot
ZrO_index = [str(N_CC_CQ_CSi_SiO_QQ), str(N_CC_CQ_CSi_SiO_QQ+N_ZrO_tot)]
#write out bond data
for bond in bonds_data:
bond[0]=str(bond[0])
bond[1]=str(bond[1])
bond[2]=str(bond[2])
bond[3]=str(bond[3])
bond[4]=str(bond[4])
all_bond_data = [SiO_index]+[ZrO_index]+bonds_data
dataFile = open("bonds.txt", 'w')
for eachitem in all_bond_data:
dataFile.write("\t".join(eachitem)+'\n')
dataFile.close()
print 'Bond data written to file!'
#change bond data back to int/float
for bond in bonds_data:
bond[0]=int(bond[0])
bond[1]=int(bond[1])
bond[2]=int(float(bond[2]))
bond[3]=int(float(bond[3]))
bond[4]=float(bond[4])
#find number of types of bonds and transpose data to columns
CC_bonds_tot = CC_bonds+CC_bonds_across_BC
CC_bonds_tot = [list(x) for x in zip(*CC_bonds_tot)]
CC_bonds = [list(x) for x in zip(*CC_bonds)]
CQ_bonds_tot = CQ_bonds+CQ_bonds_across_BC
CQ_bonds_tot = [list(x) for x in zip(*CQ_bonds_tot)]
CQ_bonds = [list(x) for x in zip(*CQ_bonds)]
CSi_bonds_tot = CSi_bonds+CSi_bonds_across_BC
CSi_bonds_tot = [list(x) for x in zip(*CSi_bonds_tot)]
CSi_bonds = [list(x) for x in zip(*CSi_bonds)]
SiO_bonds_tot = SiO_bonds+SiO_bonds_across_BC
SiO_bonds_tot = [list(x) for x in zip(*SiO_bonds_tot)]
SiO_bonds = [list(x) for x in zip(*SiO_bonds)]
QQ_bonds_tot = QQ_bonds+QQ_bonds_across_BC
QQ_bonds_tot = [list(x) for x in zip(*QQ_bonds_tot)]
QQ_bonds = [list(x) for x in zip(*QQ_bonds)]
ZrO_bonds_tot = ZrO_bonds+ZrO_bonds_across_BC
ZrO_bonds_tot = [list(x) for x in zip(*ZrO_bonds_tot)]
ZrO_bonds = [list(x) for x in zip(*ZrO_bonds)]
print 'bonds found!'
#find the number of non-bonded O, non-bridging O (only connected to 1 ion), and connected O
print ''
print 'Finding number of non-bonded O, non-bridging O, and bridging O'
N_bonds = len(bonds_data)
N_bonds_no_BC = len(bonds_data_no_BC)
bonds_data = [list(x) for x in zip(*bonds_data)]
bonds_data_no_BC = [list(x) for x in zip(*bonds_data_no_BC)]
nonbonded_O = 0
for index in O_atom_index:
if index not in bonds_data[0] and index not in bonds_data[1]:
nonbonded_O=nonbonded_O+1
nonbridging_O=0
nonbridging_O_Si=0
nonbridging_O_Zr=0
bridging_O=0
bridging_O_Si=0
bridging_O_Zr=0
O_coord_3=0
O_coord_3_Si=0
O_coord_3_Zr=0
O_coord_4=0
O_coord_4_Si=0
O_coord_4_Zr=0
O_coord_5=0
O_coord_5_Si=0
O_coord_5_Zr=0
SiOZr_bridging=0
SiOZr_bridging_coord_3=0
index1=bonds_data[0]
index2=bonds_data[1]
atom1=bonds_data[2]
atom2=bonds_data[3]
for i in range(len(O_atom_index)):
k=0
l=0
m=0
for j in range(len(index1)):
if O_atom_index[i]==index1[j] or O_atom_index[i]==index2[j]:
m=m+1
if atom1[j]==2: #O before Zr from sorted()
k=k+1
if atom2[j]==2: #Si before O from sorted()
l=l+1
if m==1:
nonbridging_O=nonbridging_O+1
elif m==2:
bridging_O=bridging_O+1
elif m==3:
O_coord_3=O_coord_3+1
elif m==4:
O_coord_4=O_coord_4+1
elif m==5:
O_coord_5=O_coord_5+1
if l==1 and k==0:
nonbridging_O_Si=nonbridging_O_Si+1
elif l==2 and k==0:
bridging_O_Si=bridging_O_Si+1
elif l==3 and k==0:
O_coord_3_Si=O_coord_3_Si+1
elif l==4 and k==0:
O_coord_4_Si=O_coord_4_Si+1
elif l==5 and k==0:
O_coord_5_Si=O_coord_5_Si+1
elif k==1 and l==0:
nonbridging_O_Zr=nonbridging_O_Zr+1
elif k==2 and l==0:
bridging_O_Zr=bridging_O_Zr+1
elif k==3 and l==0:
O_coord_3_Zr=O_coord_3_Zr+1
elif k==4 and l==0:
O_coord_4_Zr=O_coord_4_Zr+1
elif k==5 and l==0:
O_coord_5_Zr=O_coord_5_Zr+1
elif l==1 and k==1:
SiOZr_bridging=SiOZr_bridging+1
elif l==1 and k==2 or l==2 and k==1:
SiOZr_bridging_coord_3=SiOZr_bridging_coord_3+1
percent_nonbonded_O = float(nonbonded_O)/float(N_O)
percent_nonbridging_O = float(nonbridging_O)/float(N_O)
percent_bridging_O = float(bridging_O)/float(N_O)
percent_interface = float(SiOZr_bridging+SiOZr_bridging_coord_3)/float(N_O)
percent_silane_to_ZrO = float(SiOZr_bridging+SiOZr_bridging_coord_3)/float(N_silane)
#Find percent of oxygen atoms that formed bonds
#find unique atoms in Si-O and Zr-O bond list; Note: for SiO index[0]=Si, index[1]=O
# and for ZrO, index[0]=O index[1]=Zr
O_atoms_in_SiO = SiO_bonds_tot[1]
O_atoms_in_SiO = unique_atoms(O_atoms_in_SiO)
##O_atoms_in_ZrO = ZrO_bonds_tot[0]
##O_atoms_in_ZrO = unique_atoms(O_atoms_in_ZrO)
##
###find intersection of SiO and ZrO lists
##O_atoms_at_interface = lists_overlap(O_atoms_in_SiO,O_atoms_in_ZrO)
##
###remove intersection from SiO and ZrO lists
##O_atoms_in_SiO = remove_overlap(O_atoms_at_interface,O_atoms_in_SiO)
##O_atoms_in_ZrO = remove_overlap(O_atoms_at_interface,O_atoms_in_ZrO)
N_O_atoms_in_SiO = float(len(O_atoms_in_SiO))
##N_O_atoms_in_ZrO = float(len(O_atoms_in_ZrO))
percent_SiO = N_O_atoms_in_SiO/float(N_O)
##percent_ZrO = N_O_atoms_in_ZrO/float(N_O)
#find percent of silane molecules that bonded via Q-Q bond
Q_atoms_in_QQbonds = []
for i in range(N_QQ_tot):
index1 = QQ_bonds_tot[0][i]
index2 = QQ_bonds_tot[1][i]
if index1 not in Q_atoms_in_QQbonds:
Q_atoms_in_QQbonds.append(index1)
if index2 not in Q_atoms_in_QQbonds:
Q_atoms_in_QQbonds.append(index2)
N_Q_atoms_in_QQbonds = float(len(Q_atoms_in_QQbonds))
percent_silane_via_QQ = N_Q_atoms_in_QQbonds/float(N_silane)
#find average size of ZrO cluster
print 'Oxygen atoms statistics finished!'
################################################################################
#create bond bins and then calculate the most probable bonds
print ''
print 'Caclulating bond bins and most probable bond lengths...'
CC_hist_data = get_histogram_data(CC_bonds_tot[4],N_bins_bonds)
CQ_hist_data = get_histogram_data(CQ_bonds_tot[4],N_bins_bonds)
CSi_hist_data = get_histogram_data(CSi_bonds_tot[4],N_bins_bonds)
SiO_hist_data = get_histogram_data(SiO_bonds_tot[4],N_bins_bonds)
QQ_hist_data = get_histogram_data(QQ_bonds_tot[4],N_bins_bonds)
##ZrO_hist_data = get_histogram_data(ZrO_bonds_tot[4],N_bins_bonds)
high_prob_CC = CC_hist_data[2]
high_prob_CQ = CQ_hist_data[2]
high_prob_CSi = CSi_hist_data[2]
high_prob_SiO = SiO_hist_data[2]
high_prob_QQ = QQ_hist_data[2]
##high_prob_ZrO = ZrO_hist_data[2]
print 'Most probable bond lengths obtained!'
################################################################################
#plot bond RDFs
print ''
print 'Plotting bond distribution functions...'
plt.figure(8)
plt.plot(CC_hist_data[0],CC_hist_data[1],'k')
plt.ylabel('g(r)')
plt.xlabel('Distance, r (A)')
plt.title('C-C RDF')
plt.savefig('CC_bonds.png')
plt.figure(9)
plt.plot(CQ_hist_data[0],CQ_hist_data[1],'k')
plt.ylabel('g(r)')
plt.xlabel('Distance, r (A)')
plt.title('C-Q RDF')
plt.savefig('CQ_bonds.png')
plt.figure(10)
plt.plot(CSi_hist_data[0],CSi_hist_data[1],'g')
plt.ylabel('g(r)')
plt.xlabel('Distance, r (A)')
plt.title('C-Si RDF')
plt.savefig('CSi_bonds.png')
plt.figure(11)
plt.plot(SiO_hist_data[0],SiO_hist_data[1],'b')
plt.ylabel('g(r)')
plt.xlabel('Distance, r (A)')
plt.title('Si-O RDF')
plt.savefig('SiO_bonds.png')
plt.figure(12)
plt.plot(QQ_hist_data[0],QQ_hist_data[1],'m')
plt.ylabel('g(r)')
plt.xlabel('Distance, r (A)')
plt.title('Q-Q RDF')
plt.savefig('QQ_bonds.png')
##plt.figure(13)
##plt.plot(ZrO_hist_data[0],ZrO_hist_data[1],'r')
##plt.ylabel('g(r)')
##plt.xlabel('Distance, r (A)')
##plt.title('Zr-O RDF')
##plt.savefig('ZrO_bonds.png')
print 'RDFs obtained!'
################################################################################
#from bond data, represent bonds as vectors; then for vectors that share a
# common atom, calculate the angle between them
print ''
print 'Calculating bond vectors...'
vectors = []
for i in range(len(bonds_data_no_BC[0])):
index1=bonds_data_no_BC[0][i]
index2=bonds_data_no_BC[1][i]
atom1=bonds_data_no_BC[2][i]
atom2=bonds_data_no_BC[3][i]
x1=x_coord[index1]
y1=y_coord[index1]
z1=z_coord[index1]
x2=x_coord[index2]
y2=y_coord[index2]
z2=z_coord[index2]
v=vector(x1,x2,y1,y2,z1,z2)
v_index = v+[index1,index2,atom1,atom2]
vectors.append(v_index)
N_vectors = len(vectors)
N_v_index = len(zip(*vectors))
vectors = [list(x) for x in zip(*vectors)]
print 'Bond vectors obtained!'
print N_vectors
print N_bonds_no_BC
print N_bonds
################################################################################
#calculate the angles; vectors must share a common atom to calculate angle
print ''
print 'Calculating the angles... '
CCC_angles = []
CQQ_angles = []
CCQ_angles = []
CCSi_angles = []
CSiO_angles = []
SiOZr_angles = []
OZrO_angles = []
SiOSi_angles = []
OSiO_angles = []
ZrOZr_angles = []
for i in range(N_vectors):
x1 = vectors[0][i]
y1 = vectors[1][i]
z1 = vectors[2][i]
index1=vectors[3][i]
index2=vectors[4][i]
atom1=vectors[5][i]
atom2=vectors[6][i]
for j in range(i+1,N_vectors):
x2 = vectors[0][j]
y2 = vectors[1][j]
z2 = vectors[2][j]
index3=vectors[3][j]
index4=vectors[4][j]
atom3=vectors[5][j]
atom4=vectors[6][j]
if index1==index3 or index1==index4 or index2==index3 or index2==index4:
atoms_involved = unique_atoms([index1,index2,index3,index4])
if len(atoms_involved)==3:
theta=angle(x1,x2,y1,y2,z1,z2)
if theta < 100:
theta = 180.0-theta
#determine the center atom by finding the repeat atom
nonrepeat_list = find_repeat(index1,index2,index3,index4,\
atom1,atom2,atom3,atom4)
center_atom = nonrepeat_list[0]
Latom = nonrepeat_list[1]
Ratom = nonrepeat_list[2]
if center_atom == 3:
#C-C-C angles
if Latom==3 and Ratom==3:
CCC_angles.append(theta)
#C-C-Q angles
elif (Latom==3 or Latom==5) and (Ratom==5 or Ratom==3):
CCQ_angles.append(theta)
#C-C-Si angles
elif (Latom==3 or Latom==1) and (Ratom==1 or Ratom==3):
CCSi_angles.append(theta)
#C-Q-Q angles
elif center_atom == 5:
if (Latom==3 or Latom==5) and (Ratom==5 or Ratom==3):
CQQ_angles.append(theta)
elif center_atom == 1:
#C-Si-O angles
if (Latom==3 or Latom==2) and (Ratom==3 or Ratom==2):
CSiO_angles.append(theta)
#O-Si-O angles
if (Latom==2 and Ratom==2):
OSiO_angles.append(theta)
elif center_atom == 2:
## #Si-O-Zr angles
## if (Latom==1 or Latom==4) and (Ratom==4 or Ratom==1):
## SiOZr_angles.append(theta)
#Si-O-Si angles
if (Latom==1 and Ratom==1):
SiOSi_angles.append(theta)
## #Zr-O-Zr angles
## if (Latom==4 and Ratom==4):
## ZrOZr_angles.append(theta)
##
## #O-Zr-O angles
## elif center_atom == 4:
## if Latom==2 and Ratom==2:
## OZrO_angles.append(theta)
N_CCC_angles = len(CCC_angles)
N_CQQ_angles = len(CQQ_angles)
N_CCQ_angles = len(CCQ_angles)
N_CCSi_angles = len(CCSi_angles)
N_CSiO_angles = len(CSiO_angles)
##N_SiOZr_angles = len(SiOZr_angles)
##N_OZrO_angles = len(OZrO_angles)
N_SiOSi_angles = len(SiOSi_angles)
N_OSiO_angles = len(OSiO_angles)
##N_ZrOZr_angles = len(ZrOZr_angles)
print 'Angles found!'
################################################################################
#Create bond distribution functions and plot them
print ''
print 'Cacluating the angle distributions and most probable angles...'
CCC_hist_data = get_histogram_data(CCC_angles,N_bins_angles)
CQQ_hist_data = get_histogram_data(CQQ_angles,N_bins_angles)
CCQ_hist_data = get_histogram_data(CCQ_angles,N_bins_angles)
CCSi_hist_data = get_histogram_data(CCSi_angles,N_bins_angles)
CSiO_hist_data = get_histogram_data(CSiO_angles,N_bins_angles)
##SiOZr_hist_data = get_histogram_data(SiOZr_angles,N_bins_angles)
##OZrO_hist_data = get_histogram_data(OZrO_angles,N_bins_angles)
SiOSi_hist_data = get_histogram_data(SiOSi_angles,N_bins_angles)
OSiO_hist_data = get_histogram_data(OSiO_angles,N_bins_angles)
##ZrOZr_hist_data = get_histogram_data(ZrOZr_angles,N_bins_angles)
high_prob_CCC = CCC_hist_data[2]
high_prob_CQQ = CQQ_hist_data[2]
high_prob_CCQ = CCQ_hist_data[2]
high_prob_CCSi = CCSi_hist_data[2]
high_prob_CSiO = CSiO_hist_data[2]
##high_prob_SiOZr = SiOZr_hist_data[2]
##high_prob_OZrO = OZrO_hist_data[2]
high_prob_SiOSi = SiOSi_hist_data[2]
high_prob_OSiO = OSiO_hist_data[2]
##high_prob_ZrOZr = ZrOZr_hist_data[2]
print 'Angle distributions and most probable angles finished!'
print ''
print 'Plotting angle distributions...'
plt.figure(1)
plt.plot(CCC_hist_data[0],CCC_hist_data[1],'k')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('C-C-C Angle Distribution')
plt.savefig('CCC_angles.png')
plt.figure(2)
plt.plot(CQQ_hist_data[0],CQQ_hist_data[1],'k')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('C-Q-Q Angle Distribution')
plt.savefig('CQQ_angles.png')
plt.figure(3)
plt.plot(CCQ_hist_data[0],CCQ_hist_data[1],'k')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('C-C-Q Angle Distribution')
plt.savefig('CCQ_angles.png')
plt.figure(4)
plt.plot(CCSi_hist_data[0],CCSi_hist_data[1],'g')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('C-Si-O Angle Distribution')
plt.savefig('CCSi_angles.png')
plt.figure(5)
plt.plot(CSiO_hist_data[0],CSiO_hist_data[1],'m')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('C-Si-O Angle Distribution')
plt.savefig('CSiO_angles.png')
##plt.figure(6)
##plt.plot(SiOZr_hist_data[0],SiOZr_hist_data[1],'b')
##plt.ylabel('g($\\theta$)')
##plt.xlabel('Angle, $\\theta$ (degrees)')
##plt.title('Si-O-Zr Angle Distribution')
##plt.savefig('SiOZr_angles.png')
##
##plt.figure(7)
##plt.plot(OZrO_hist_data[0],OZrO_hist_data[1],'r')
##plt.ylabel('$\\theta$')
##plt.xlabel('Angle, $\\theta$ (degrees)')
##plt.title('O-Zr-O Angle Distribution')
##plt.savefig('OZrO_angles.png')
plt.figure(14)
plt.plot(SiOSi_hist_data[0],SiOSi_hist_data[1],'g')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('Si-O-Si Angle Distribution')
plt.savefig('SiOSi_angles.png')
plt.figure(15)
plt.plot(OSiO_hist_data[0],OSiO_hist_data[1],'g')
plt.ylabel('g($\\theta$)')
plt.xlabel('Angle, $\\theta$ (degrees)')
plt.title('O-Si-O Angle Distribution')
plt.savefig('OSiO_angles.png')
##plt.figure(16)
##plt.plot(ZrOZr_hist_data[0],ZrOZr_hist_data[1],'b')
##plt.ylabel('$\\theta$')
##plt.xlabel('Angle, $\\theta$ (degrees)')
##plt.title('Zr-O-Zr Angle Distribution')
##plt.savefig('ZrOZr_angles.png')
print 'Plots finished!'
###############################################################################
#Create summary of results and then write to text file
print ''
print 'Creating summary file...'
atoms_in_sim = [['Number of atoms in the simulation: '+str(N_atoms)]]
bond_results = [['Bond Results:'],\
[''],\
['Total number of bonds: '+str(N_bonds)],\
[''],\
['Number of C-C bonds: '+str(N_CC_tot)],\
['Most probable C-C bond length: '+str(high_prob_CC)],\
['Number of C-C bonds across BCs: '+str(N_CC_bonds_across_BC)],\
[''],\
['Number of C-Q bonds: '+str(N_CQ_tot)],\
['Most probable C-Q bond length: '+str(high_prob_CQ)],\
['Number of C-Q bonds across BCs: '+str(N_CQ_bonds_across_BC)],\
[''],\
['Number of C-Si bonds: '+str(N_CSi_tot)],\
['Most probable C-Si bond length: '+str(high_prob_CSi)],\
['Number of C-Si bonds across BCs: '+str(N_CSi_bonds_across_BC)],\
[''],\
['Number of Si-O bonds: '+str(N_SiO_tot)],\
['Most probable Si-O bond length: '+str(high_prob_SiO)],\
['Number of Si-O bonds across BCs: '+str(N_SiO_bonds_across_BC)],\
[''],\
['Number of Q-Q bonds: '+str(N_QQ_tot)],\
['Most probable Q-Q bond length: '+str(high_prob_QQ)],\
['Number of Q-Q bonds across BCs: '+str(N_QQ_bonds_across_BC)]]
## [''],\
## ['Number of Zr-O bonds: '+str(N_ZrO_tot)],\
## ['Most probable Zr-O bond length: '+str(high_prob_ZrO)],\
## ['Number of Zr-O bonds across BCs: '+str(N_ZrO_bonds_across_BC)]]
O_bonding_results = [['Oxygen atoms reacting statistics: '],\
['Percent of Oxygen atoms that formed Si-O bonds: '+\
str(percent_SiO)],\
## ['Percent of Oxygen atoms that formed Zr-O bonds: '+\
## str(percent_ZrO)],\
## ['Percent of Oxygen atoms that formed Si-O-Zr (interface) bonds: '\
## +str(percent_interface)],\
[''],\
['Percent of non-bonded O: '+str(percent_nonbonded_O)],\
['Number of non-bonded O: '+str(nonbonded_O)],\
[''],\
['Percent of non-bridging O: '+str(percent_nonbridging_O)],\
['Number of non-bridging O: '+str(nonbridging_O)],\
['Number of non-bridging O with Si: '+str(nonbridging_O_Si)],\
## ['Number of non-bridging O with Zr: '+str(nonbridging_O_Zr)],\
[''],\
['Percent of bridging O: '+str(percent_bridging_O)],\
['Number of bridging O: '+str(bridging_O)],\
['Number of bridging O with Si: '+str(bridging_O_Si)],\
## ['Number of bridging O with Zr: '+str(bridging_O_Zr)],\
## ['Number of bridging O with Si and Zr: '+str(SiOZr_bridging)],\
## ['Number of bridging O with Si and Zr (coord 3): '+str(SiOZr_bridging_coord_3)],\
[''],\
['Number of O with coordination 3: '+str(O_coord_3)],\
['Number of O with coordination 3 with Si: '+str(O_coord_3_Si)],\
## ['Number of O with coordination 3 with Zr: '+str(O_coord_3_Zr)],\
[''],\
['Number of O with coordination 4: '+str(O_coord_4)],\
['Number of O with coordination 4 with Si: '+str(O_coord_4_Si)],\
## ['Number of O with coordination 4 with Zr: '+str(O_coord_4_Zr)],\
[''],\
['Number of O with coordination 5: '+str(O_coord_5)],\
['Number of O with coordination 5 with Si: '+str(O_coord_5_Si)]]
## ['Number of O with coordination 5 with Zr: '+str(O_coord_5_Zr)]]
Silane_bonding_results = [['Silane molecule reacting statistics: '],\
['Percent of silane molecules that bonded via Q-Q bonds: '\
+str(percent_silane_via_QQ)]]
## ['Percent of silane molecules that bonded to ZrO cluster '\
## +'via Si-O bonds: '+str(percent_silane_to_ZrO)]]
angle_results = [['Angle Results: '],\
[''],\
['Number of C-C-C angles: '+str(N_CCC_angles)],\
['Most probable C-C-C angle: '+str(high_prob_CCC)],\
[''],\
['Number of C-Q-Q angles: '+str(N_CQQ_angles)],\
['Most probable C-Q-Q angle: '+str(high_prob_CQQ)],\
[''],\
['Number of C-C-Q angles: '+str(N_CCQ_angles)],\
['Most probable C-C-Q angle: '+str(high_prob_CCQ)],\
[''],\
['Number of C-C-Si angles: '+str(N_CCSi_angles)],\
['Most probable C-C-Si angle: '+str(high_prob_CCSi)],\
[''],\
['Number of C-Si-O angles: '+str(N_CSiO_angles)],\
['Most probable C-Si-O angle: '+str(high_prob_CSiO)],\
## [''],\
## ['Number of Si-O-Zr angles: '+str(N_SiOZr_angles)],\
## ['Most probable Si-O-Zr angle: '+str(high_prob_SiOZr)],\
## [''],\
## ['Number of O-Zr-O angles: '+str(N_OZrO_angles)],\
## ['Most probable O-Zr-O angle: '+str(high_prob_OZrO)],\
[''],\
['Number of Si-O-Si angles: '+str(N_SiOSi_angles)],\
['Most probable Si-O-Si angle: '+str(high_prob_SiOSi)],\
[''],\
['Number of O-Si-O angles: '+str(N_OSiO_angles)],\
['Most probable O-Si-O angle: '+str(high_prob_OSiO)]]
## [''],\
## ['Number of Zr-O-Zr angles: '+str(N_ZrOZr_angles)],\
## ['Most probable Zr-O-Zr angles: '+str(high_prob_ZrOZr)]]
results = atoms_in_sim+[['']]+[['']]+bond_results+[['']]+O_bonding_results+\
[['']]+Silane_bonding_results+[['']]+[['']]+angle_results
################################################################################
#write to a text file
#the .join() method takes an array, i, and concantenates all the elements together
# with a space " " between each element. Then a newline "\n" is added to make sure
# your output is broken up into separate lines
dataFile = open(filename[:-4]+"_results.txt", 'w')
for eachitem in results:
dataFile.write("\t".join(eachitem)+'\n')
dataFile.close()
print "All done!"
|
3,087 | 779445aa22145d5076940ea5b214c25ad233dd0e | """This module provides constants for locale-dependent providers."""
import typing as t
from mimesis.enums import Locale
from mimesis.exceptions import LocaleError
__all__ = ["Locale", "validate_locale"]
def validate_locale(locale: t.Union[Locale, str]) -> Locale:
if isinstance(locale, str):
try:
return Locale(locale)
except ValueError:
raise LocaleError(locale)
if not isinstance(locale, Locale):
raise LocaleError(locale)
return locale
|
3,088 | 74843dea00a88513c3a9237eb024e1e14e8b1ff8 | """
实战练习:
1.打开网页
https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable
2.操作窗口右侧页面,将元素1拖拽到元素2
3.这时候会有一个alert弹框,点击弹框中的‘确定’
3.然后再按’点击运行’
4.关闭网页
"""
import pytest
from selenium import webdriver
from time import sleep
from selenium.webdriver import ActionChains
class TestFrame:
    """Selenium demo: drag-and-drop inside an iframe, then handle the JS alert."""

    def setup(self):
        # Fresh Chrome session per test; the implicit wait covers slow loads.
        self.driver = webdriver.Chrome()
        self.driver.get("https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable")
        self.driver.implicitly_wait(5)
        self.driver.maximize_window()

    def teardown(self):
        self.driver.quit()

    def test_frame(self):
        # The draggable elements live inside an iframe, so we must first
        # switch into it with switch_to.frame(<frame id>).
        self.driver.switch_to.frame("iframeResult")
        # Drag-and-drop requires the ActionChains API.
        action=ActionChains(self.driver)
        drag=self.driver.find_element_by_id("draggable")
        drop=self.driver.find_element_by_id("droppable")
        action.drag_and_drop(drag,drop).perform()
        sleep(2)
        # Dropping triggers a JavaScript alert; switch to it and accept.
        self.driver.switch_to.alert.accept()
        # After accepting, the driver is still focused on the iframe; switch
        # back to the top-level document before clicking the "Run" button.
        self.driver.switch_to.default_content() # option 1: back to the default content
        #self.driver.switch_to.parent_frame() # option 2: back to the parent frame - both work here
        self.driver.find_element_by_id("submitBTN").click()
        sleep(3)
if __name__ == '__main__':
    # Allow running this test module directly: python <file>.py
    pytest.main()
|
3,089 | 0f37baf3b08ecf7bd8db43ecc2f29c3ca6e00af0 | version https://git-lfs.github.com/spec/v1
oid sha256:26be7fc8be181fad8e821179cce6be14e37a5f303e532e6fb00f848d5f33fe41
size 752
|
3,090 | a95e64877a1fc9f8109f1293b4ae9176f4f64647 | import requests
import json
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.types import VARCHAR,INT,FLOAT,BIGINT
import time
from tqdm import tqdm
# Database connection settings.
# NOTE(review): credentials are hard-coded in the DSN; move to config/env.
connect_info = 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4'
engine = create_engine(connect_info)
sql = '''
select * from smzdm;
'''
# Read the source comments from MySQL.
df = pd.read_sql_query(sql, engine)
# Drop comments shorter than 5 characters.
df_new = df[df['comment'].str.len()>=5]
# Baidu sentiment-analysis API setup: exchange client credentials for an
# OAuth access token, then build the sentiment_classify endpoint URL.
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'
response = requests.get(host)
if response:
    print(response.json())
access_token = response.json()['access_token']
url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token
print(url)
headers={'Content-Type':'application/json'}
# Sentiment-analysis helper (defined below).
def sentiment(text):
    """Return the Baidu NLP sentiment score for *text*, or None on failure.

    Posts *text* to the sentiment_classify endpoint (module-level ``url`` /
    ``headers``) and returns the ``sentiment`` field of the first result
    item.  The old code swallowed request errors and then referenced the
    unbound name ``dic``, raising NameError; failures now return None.
    A short sleep throttles calls to stay under the API's QPS quota.
    """
    body = {'text': text}
    try:
        r = requests.post(url, headers=headers, data=json.dumps(body))
        # Pull the score inside the try so a malformed response is also
        # treated as a failure rather than crashing the whole apply().
        score = r.json()['items'][0]['sentiment']
    except Exception:
        print('分析失败')  # "analysis failed"
        score = None
    time.sleep(0.3)  # throttle: avoid the API's QPS limit
    return score
tqdm.pandas()
df_new_senti = df_new.copy()
# progress_apply shows a tqdm progress bar while every comment is scored.
df_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)# tqdm progress bar
df_new_senti.sort_values(by='author',inplace=True)
df_new_senti['id']=df_new_senti.index
# Persist the scored comments back to MySQL, replacing any previous table.
df_new_senti.to_sql(name = 'smzdm_senti',con = engine,if_exists = 'replace',index = False,dtype = {'id':BIGINT,'author': VARCHAR(length=255),'comment':VARCHAR(length=255),'sentiment':FLOAT(12,10)})
3,091 | b9608208f71f25ae05ed9bd7bdf94b8882a26e06 | # Generated by Django 2.1.4 on 2019-04-23 23:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the request app: creates AboutRequest and Request.

    Auto-generated by Django 2.1.4; do not hand-edit operations.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('machine', '0001_initial'),
    ]

    operations = [
        # Order header: delivery address plus approval/payment flags.
        migrations.CreateModel(
            name='AboutRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),
                ('approved', models.BooleanField(default=False)),
                ('active', models.BooleanField(default=True)),
                ('paid', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ('-created',),
            },
        ),
        # Order line item: links an AboutRequest to a machine and a user.
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-quantity',),
            },
        ),
    ]
|
3,092 | 3f23a50f44ba17c9b0241a4e3b0e939afeb1f5f0 | from django import forms
class ListingForm(forms.Form):
    """Single-field form for entering an item to buy."""

    # Bootstrap class and placeholder are supplied through the widget attrs.
    text = forms.CharField(
        widget=forms.TextInput(attrs={
            "class": "form-control",
            "placeholder": "Things to Buy",
        }),
        max_length=50,
    )
|
3,093 | 3e1ca6ed4668e75a62baa65ef44346dd86a16491 | import sqlite3
conn = sqlite3.connect("19-BD/prove.db")
cursor = conn.cursor()
dipendenti = [
("Sofia","commessa"),
("Diego","tecnico"),
("Lucia","cassiera"),
("Luca","Magazziniere"),
("Pablo","Capo reparto")
]
cursor.executemany("INSERT INTO persone VALUES (null,?,?)", dipendenti)
conn.commit()
conn.close() |
3,094 | 9a672c17ee22a05e77491bc1449c1c1678414a8c | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.animation as animation
import pylab
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
class Hexapod:
def __init__(self, axis):
"""
Инициализация начальных параметров системы
:param axis: ось вращения системы (x, y, z)
"""
self.axis = axis # ось вращения тела
self.alpha = 30. # угол между приводами в точке крпления
self.beta = 30. # двугранный угол между приводами и платформой
self.L = 1.5 # длина привода
self.h_c = 2. # высота центра масс тела
self.r = 1. # радиус тела
self.m_p = 1000. # масса платформы
self.m = 4000. # масса тела
self.nu = 0.5 # частота
# тензор инерции тела для решения обратной задачи
self.J = np.array([[5000, 0, 0],
[0, 5000, 0],
[0, 0, 3500]], np.float32)
# начальное положение точек крепления приводов на ВЕРХНЕЙ платформе
self.A_0 = np.round([[self.r*np.sin(2*np.pi/3*i + np.pi),
self.r*np.cos(2*np.pi/3*i + np.pi),
-self.h_c] for i in range(-1, 2)], 5)
# точки крепления приводов на НИЖНЕЙ платформе (const)
self.B = np.array([])
# положение точек крепления приводов на ВЕРХНЕЙ платформе за все время
self.A = np.array([])
# длина каждого привода за все время
self.all_full_lengths = np.array([])
# плечи сил приводов за все время
self.r = np.array([])
# ампилитуда вращения, закон изменения угла и его производные по OX
self.fi_x_0 = 4. # градусы
self.fi_x = lambda t: self.fi_x_0 * np.sin(2*np.pi*self.nu*t)
self.prime_fi_x = lambda t: self.fi_x_0 * 2*np.pi*self.nu * np.cos(2*np.pi*self.nu*t)
self.prime2_fi_x = lambda t: -self.fi_x_0 * (2*np.pi*self.nu)**2 * np.sin(2*np.pi*self.nu*t)
# ампилитуда вращения, закон изменения угла и его производные по OY и OZ
self.fi_y_0 = 4. # градусы
self.fi_y = lambda t: self.fi_y_0 * np.sin(2*np.pi*self.nu*t)
self.prime_fi_y = lambda t: self.fi_y_0 * 2*np.pi*self.nu * np.cos(2*np.pi*self.nu*t)
self.prime2_fi_y = lambda t: -self.fi_y_0 * (2*np.pi*self.nu)**2 * np.sin(2*np.pi*self.nu*t)
# матрица поворота вокруг оси OX
self.R_matrix_x = lambda t: np.round([[np.cos(self.fi_x(t)*np.pi/180.), -np.sin(self.fi_x(t)*np.pi/180.), 0],
[np.sin(self.fi_x(t)*np.pi/180.), np.cos(self.fi_x(t)*np.pi/180.), 0],
[0, 0, 1]], 5)
# матрица поворота вокруг оси OY
self.R_matrix_y = lambda t: np.round([[1, 0, 0],
[0, np.cos(self.fi_y(t)*np.pi/180.), -np.sin(self.fi_y(t)*np.pi/180.)],
[0, np.sin(self.fi_y(t)*np.pi/180.), np.cos(self.fi_y(t)*np.pi/180.)]], 5)
# матрица поворота вокруг оси OZ
self.R_matrix_z = lambda t: np.round([[np.cos(self.fi_y(t)*np.pi/180.), 0, np.sin(self.fi_y(t)*np.pi/180.)],
[0, 1, 0],
[-np.sin(self.fi_y(t)*np.pi/180.), 0, np.cos(self.fi_y(t)*np.pi/180.)]], 5)
# для построения геометрии точек B
self.H = np.cos(np.pi/180. * self.beta) * np.cos(np.pi/180. * self.alpha/2) * self.L
self.h = self.L * np.cos(np.pi/180.*self.alpha/2) * np.sin(np.pi/180.*self.beta)
self.a = self.L * np.sin(np.pi/180.*self.alpha/2) # основание треугольника
self.r = (self.h**2 + self.a**2)**0.5
# отсчет времени для расчета законов
self.end_time = 2.0
self.start_time = 0.
self.steps = 100
self.time = np.linspace(self.start_time, self.end_time, self.steps)
# связь индексов нижней и верхней платформы
self.indexes = [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]]
    def set_B(self):
        """
        Compute the rig geometry: positions of the lower attachment points B_i.
        Sets: self.B (6x3 array, one row per actuator anchor).
        :return: None
        """
        for i, A in enumerate(self.A_0):
            a = A[:2]
            # Two anchor offsets (left/right of the actuator pair) in the
            # local frame of upper attachment point A_i.
            b1 = np.array([self.h, self.a])
            b2 = np.array([self.h, - self.a])
            # In-plane rotation by (30 - 120*i) degrees for each platform leg.
            kappa = np.array([[np.cos(np.pi / 180 * (30-120*i)), -np.sin(np.pi / 180 * (30-120*i))],
                              [np.sin(np.pi / 180 * (30-120*i)), np.cos(np.pi / 180 * (30-120*i))]])
            p1 = np.dot(kappa, b1) + a
            p2 = np.dot(kappa, b2) + a
            # Lower platform sits H below the upper attachment plane.
            p1 = np.append(p1, - self.H - self.h_c)
            p2 = np.append(p2, - self.H - self.h_c)
            self.B = np.hstack((self.B, p1))
            self.B = np.hstack((self.B, p2))
        self.B = self.B.reshape(6, 3)
        # Sanity check: each actuator |A_i - B_j| must equal its rest length L.
        i = 0
        for A in self.A_0:
            assert np.linalg.norm(np.subtract(A, self.B[i])) - self.L <= 1e-4
            assert np.linalg.norm(np.subtract(A, self.B[i + 1])) - self.L <= 1e-4
            # print(np.linalg.norm(np.subtract(A, self.B[i])))
            # print(np.linalg.norm(np.subtract(A, self.B[i + 1])))
            i += 2
def get_delta_L(self):
"""
Расчет геометрии положения точек A_i в каждый момент времени.
Отрисовка графиков изменения длин, скорости и ускорения для каждого привода по времени.
Задается: self.A, self.all_full_lengths, self.set_r
:return: None
"""
print('####################################################')
print('[INFO] solve delta L, Velocity, Acceleration ...')
print('####################################################')
# матрица поворота вокруг зазадной оси
R_matrix = None
if self.axis == 'x':
R_matrix = self.R_matrix_x
elif self.axis == 'y':
R_matrix = self.R_matrix_y
elif self.axis == 'z':
R_matrix = self.R_matrix_z
# удлинения каждого цилиндра за заданное время
dL_all = []
# длины всех цилиндров за все время
L_all = []
# координаты точек крепления на ВЕРХНЕЙ платформе
coordinates_A = []
# легенда для графиков
colors = {0: 'r+--', 1: 'rx-',
2: 'g+--', 3: 'gx-',
4: 'b+--', 5: 'bx-'}
for i, j in self.indexes:
print('[INFO] Поршень №{}'.format(j+1))
dl = [] # изменение длины поршня в момент времени t
l = [] # длины поршня в момент времени t
coord = [] # координата точки A_i в момент времени t
for t in self.time:
try:
A = np.dot(R_matrix(t), self.A_0[i])
except Exception:
print('Type error axis')
# текущая длина привода
L = np.linalg.norm(self.B[j] - A)
print(self.B[j] - A)
print(self.L, L)
# L = np.sum((A - self.B[j])**2)**0.5
print('dL[мм] = {:.5f}'.format((L - self.L) * 1e3))
l.append(L)
dl.append(round(((L - self.L) * 1e3), 5))
coord.append(A)
dL_all.append(dl)
L_all.append(l)
coordinates_A.append(coord)
# численно находим СКОРОСТЬ изменения длины приводов
v = [0.0]
for k in range(self.steps - 1):
v.append((dl[k+1] - dl[k]) / (self.time[k+1] - self.time[k]))
pylab.figure(1)
pylab.plot(self.time[5:], v[5:], colors[j])
print('[INFO] v_max =', np.max(np.abs(v[5:])))
# численно находим УСКОРЕНИЕ изменения длины приводов
a = [0.0]
for k in range(self.steps - 1):
a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))
pylab.figure(2)
pylab.plot(self.time[5:], a[5:], colors[j])
print('[INFO] a_max =', np.max(np.abs(a[5:])))
print('****************************************************')
# легенда для графика со скоростями
pylab.figure(1)
pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)
pylab.title('Velocity')
pylab.xlabel('Time [s]')
pylab.ylabel('Velocity [mm/s]')
pylab.grid()
# plt.savefig("output/velocity_{}.png".format(self.axis))
# легенда для графика с ускорениями
pylab.figure(2)
pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)
pylab.title('Acceleration')
pylab.xlabel('Time [s]')
pylab.ylabel('Acceleration [mm/s^2]')
pylab.grid()
# pylab.savefig("output/acceleration_{}.png".format(self.axis))
# график удлинения каждого поршня
pylab.figure(3)
for i in range(6):
pylab.plot(self.time, dL_all[i], colors[i])
pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)
pylab.title('Delta length')
pylab.xlabel('Time [s]')
pylab.ylabel('dL [mm]')
pylab.grid()
# pylab.savefig("output/length_{}.png".format(self.axis))
plt.show()
# исключим повторение вершин
self.A = np.array(coordinates_A[0::2])
self.all_full_lengths = np.array(L_all)
self.set_r()
# покадровая отрисовка геометрии стенда
self.plot_3d_lines()
# self.plot_animate(coordinates_A)
def plot_3d_lines(self):
"""
Покадровая отрисовка геометрии стенда в 3D.
:return: None
"""
pylab.figure(figsize=(12, 10))
ax = pylab.axes(projection='3d')
colors = {0: 'r', 1: 'orange',
2: 'g', 3: 'olive',
4: 'b', 5: 'navy'}
markers = {0: '^', 1: '^',
2: 'o', 3: 'o',
4: '*', 5: '*'}
# задать легенду
for i, j in self.indexes:
df_A = pd.Series(data=self.A_0[i], index=['x', 'y', 'z'])
df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])
x = [df_A.x, df_B.x]
y = [df_A.y, df_B.y]
z = [df_A.z, df_B.z]
ax.scatter(x, y, z, c=colors[j], marker=markers[j], s=20.)
ax.legend([r'1', '2', '3', '4', '5', '6'], loc=0)
# indexes = [[0, 0], [1, 2], [2, 4]]
# построить смещения каждого поршня
for i, j in self.indexes:
k = 0
for (a, r) in zip(self.A[i], self.r[j]):
df_A = pd.Series(data=a, index=['x', 'y', 'z'])
df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])
df_r = pd.Series(data=r, index=['x', 'y', 'z'])
# геометрия длины цилиндров
x = [df_A.x, df_B.x]
y = [df_A.y, df_B.y]
z = [df_A.z, df_B.z]
# геометрия плеч сил
x1 = [df_r.x, 0]
y1 = [df_r.y, 0]
z1 = [df_r.z, 0]
# продолжение оси цилиндров
x2 = [df_r.x, df_B.x]
y2 = [df_r.y, df_B.y]
z2 = [df_r.z, df_B.z]
# частичная раскадровка
if k % int(self.steps-1) == 0:
# if k:
# ax.plot(x1, y1, z1, c=colors[j], marker=markers[j])
# ax.plot(x2, y2, z2, c='gray', marker='+')
ax.plot(x, y, z, c=colors[j], marker=markers[j])
# print('H_A =', z[0])
k += 1
# посторить смещение верхней плтаформы
for i in range(0, self.steps, 9):
a = np.array([self.A[0, i], self.A[1, i], self.A[2, i]])
df_A = pd.DataFrame(data=a, columns=['x', 'y', 'z'])
df_A = pd.concat((df_A, df_A.take([0])), axis=0)
ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='gray')
# отрисовать начальные положения верхней и нижней платформы
df_B = pd.DataFrame(data=self.B, columns=['x', 'y', 'z'])
df_B = pd.concat((df_B, df_B.take([0])))
df_A = pd.DataFrame(data=self.A_0, columns=['x', 'y', 'z'])
df_A = pd.concat((df_A, df_A.take([0])), axis=0)
ax.plot(df_B.x.values, df_B.y.values, df_B.z.values, c='black', linewidth=4.)
ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='black', linewidth=4.)
ax.view_init(30, -39)
# pylab.savefig("output/plot_3d_{}.png".format(self.axis))
plt.show()
    def calculate_angles(self, l1, l2):
        """
        Law-of-cosines solution for an angle from three triangle sides.
        :param l1: side adjacent to the angle being computed
        :param l2: side opposite the angle
        :return: (alpha, teta, gamma) - angles of the force triangle, degrees
        """
        # NOTE(review): operator precedence looks wrong here. `/ 2*l1*2*self.a`
        # divides by 2 and then MULTIPLIES by l1*2*a; the law of cosines needs
        # `/ (2*l1*2*self.a)`. The same issue exists in cos_alpha below.
        # Confirm intent before relying on this method (it has no callers here).
        cos_teta = (l1**2 + (2*self.a)**2 - l2**2) / 2*l1*2*self.a
        teta = np.arccos(cos_teta) * 180. / np.pi
        # NOTE(review): `b` equals l1^2 + a^2 - 2*l1*a*cos(teta), i.e. a SQUARED
        # side length, yet it is used as `b**2` below - a sqrt seems missing.
        b = l1**2 + self.a**2 - 2*l1*self.a*cos_teta
        cos_alpha = (l1**2 + b**2 - l2**2) / 2*l1*self.a
        alpha = np.arccos(cos_alpha) * 180. / np.pi
        gamma = 180. - teta - alpha
        return alpha, teta, gamma
    def set_r(self):
        """
        Compute the moment-arm radius vectors for every cylinder.

        For each actuator and each time step, solves a 3x3 linear system
        built from the actuator's axis direction; appears to locate the
        foot of the force's moment arm (TODO confirm the geometric meaning).
        Sets: self.r with shape (6, steps, 3).
        :return: None
        """
        r_all = []
        for i, j in self.indexes:
            r = []
            for a in self.A[i]:
                # Unit direction of the actuator axis from B_j to A_i.
                L = np.array(a - self.B[j])
                direct_L = L / np.linalg.norm(L)
                t1 = np.array([0, -direct_L[2], direct_L[1]])
                b1 = np.array([a[2]*direct_L[1] - a[1]*direct_L[2]])
                t2 = direct_L
                b2 = np.array([0])
                # Rows of the cross-product constraint r x direct_L.
                t3 = np.array([a[1]*direct_L[2] - a[2]*direct_L[1],
                               -a[0]*direct_L[2] + a[2]*direct_L[0],
                               a[0]*direct_L[1] - a[1]*direct_L[0]])
                b3 = np.array([0])
                T = np.stack((t1, t2, t3))
                b = np.stack((b1, b2, b3))
                r.append(np.linalg.solve(T, b).reshape((3,)))
            r_all.append(r)
        self.r = np.array(r_all)
def solve_dynamic_forces(self):
"""
решение обратной задачи стенда
Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х
:return: минимальная и максимальная нагрузка на каждый цилиндр
"""
print('####################################################')
print('[INFO] solve DYNAMIC forces ...')
print('####################################################')
A = []
for i in range(self.steps):
a = []
for j in range(3):
a_ = self.A[j, i, :]
a.append(a_)
A.append(a)
R = []
for i in range(self.steps):
r = []
for j in range(6):
r_ = self.r[j, i, :]
r.append(r_)
R.append(r)
A = np.array(A)
R = np.array(R)
forces = []
for a, r, t in zip(A, R, self.time):
L = []
direct = [] # направления сил
shoulder = [] # плечи сил
for i, j in self.indexes:
len = np.array(self.B[j] - a[i])
dir = len / np.linalg.norm(len)
L.append(len)
direct_force_try = self.B[j] - r[j]
# direct.append(dir)
direct.append(direct_force_try)
# direct.append(direct_force_try)
shoulder.append(np.cross(r[j], direct_force_try))
L = np.array(L)
T_static = np.array(direct).T
T_dynamics = np.array(shoulder).T
b_static = np.array([-self.m*9.8, 0, 0]).reshape((3, 1))
# определение направления действующих сил
dynamic_comp = None
if self.axis == 'x':
comp = self.J[2, 2] * self.prime2_fi_x(t)
dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))
elif self.axis == 'y':
comp = self.J[1, 1] * self.prime2_fi_y(t)
dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))
elif self.axis == 'z':
comp = self.J[0, 0] * self.prime2_fi_y(t)
dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))
b_dynamic = dynamic_comp
# T = np.vstack((T_static, T_dynamics))
# b = np.vstack((b_static, b_dynamic))
T = T_dynamics[:, :3]
b = b_dynamic[:, :3]
# print(T)
# print(b)
dynamic_f = np.linalg.solve(T, b).reshape((3,))
forces.append(dynamic_f)
print('[INFO] time:', t)
print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])
print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in np.array(shoulder)])
print('[INFO] forces:', [round(f, 4) for f in dynamic_f])
print('[INFO] dynamic component:', b_dynamic.T)
print('****************************************************')
forces = np.array(forces).T
# график приложенной силы к цилиндрам от времени
colors = {0: 'r+--', 1: 'rx-',
2: 'g+--', 3: 'gx-',
4: 'b+--', 5: 'bx-'}
for i, j in self.indexes:
pylab.plot(self.time, forces[i], colors[j])
# colors = {0: 'r+--', 1: 'g+--', 2: 'b+--'}
# for i in range(3):
# pylab.plot(self.time, forces[i], colors[i], label='$F_{}$'.format(i))
pylab.legend([r'$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'], loc=0)
# plt.legend(loc="lower right")
pylab.title('Dynamic forces')
pylab.xlabel('Time [s]')
pylab.ylabel('Force [kg*m/s^2]')
pylab.grid()
plt.show()
def solve_static_forces(self):
"""
Решение обратной задачи стедна для статических нагрузок
:return: компоненты силы для каждой опоры
"""
print('####################################################')
print('[INFO] solve STATIC forces ...')
print('####################################################')
x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]
forces = []
for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):
L1 = np.array(a[0] - self.B[0])
L2 = np.array(a[0] - self.B[1])
L3 = np.array(a[1] - self.B[2])
direct_L1 = L1 / np.linalg.norm(L1)
direct_L2 = L2 / np.linalg.norm(L2)
direct_L3 = L3 / np.linalg.norm(L3)
T = np.stack((direct_L1, direct_L2, direct_L3))
b = np.array([-self.m*9.8/2, 0, 0]).reshape((3, 1))
static_f = np.linalg.solve(T, b).reshape((3,)) / 2
forces.append(static_f)
print('[INFO] time:', t)
print('[INFO] length:',
round(np.linalg.norm(L1), 4),
round(np.linalg.norm(L2), 4),
round(np.linalg.norm(L3), 4))
print('[INFO] forces:',
round(static_f[0]/2, 4),
round(static_f[1]/2, 4),
round(static_f[2]/2, 4))
print('****************************************************')
forces = np.array(forces).T
# график приложенной силы к цилиндрам от времени
colors = {0: 'r+--', 1: 'g+--', 2: 'b+--'}
for i, j in x_symmetry_ind:
pylab.plot(self.time, forces[j], colors[j])
pylab.legend([r'$F_1$', '$F_2$', '$F_3$'], loc=0)
pylab.title('Static forces')
pylab.xlabel('Time [s]')
pylab.ylabel('Force [kg*m/s^2]')
pylab.grid()
plt.show()
def plot_animate(self, A):
""""
try to create animate function to plot mechanisms
"""
fig = plt.figure()
fig.set_tight_layout(False)
ax = plt.axes(projection='3d')
global cnt
cnt = ax
global cur_A
global cur_B
cur_A = A[0]
cur_B = self.B[0]
def steps(count=1):
for i in range(count):
df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])
df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])
x = [df_A.x, df_B.x]
y = [df_A.y, df_B.y]
z = [df_A.z, df_B.z]
cnt.plot(x, y, z)
def animate(frame):
steps(1)
return cnt
anim = animation.FuncAnimation(fig, animate, frames=100)
plt.show()
if __name__ == "__main__":
hex = Hexapod(axis='y')
hex.set_B()
hex.get_delta_L()
hex.solve_static_forces()
hex.solve_dynamic_forces() |
3,095 | d4e62950f10efeb27d19c3d9c672969342ef8c7c | """------------------------------------------------------------------------
MODULE
FContactRegulatoryInfoBase -
DESCRIPTION:
This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods
VERSION: 1.0.25(0.25.7)
RESTRICTIONS/ LIMITATIONS:
1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.
2. This module is not customizable
3. The component may not work as expected with any modifications done to this module at user end
--------------------------------------------------------------------------"""
import string
import acm
import FIntegrationUtils
import FRegulatoryLogger
import ael
import FRegulatoryUtils
import FRegulatoryInfoException
logger = 'FContactRegulatoryInfoBase'
VALUE_NOT_SET = ()
class FContactRegulatoryInfoBase(object):
def __init__(self, contact = None):
"""class that maintains all data related to the regulatory on the FContact"""
try:
self.__contact = contact
if not self.__contact:
FRegulatoryLogger.ERROR(logger, "The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object")
return None
self.__reg_date_of_birth = None
self.__reg_first_name = None
self.__reg_last_name = None
self.__reg_national_id = None
self.__reg_crm_id = None
self.__crm_id_source = None
self.__reg_exchange_id = None
self.__reg_unique_name = None
self.__client_type = None
self.__is_general_partner = None
if contact:
self.__refresh(contact)
self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
except Exception as e :
FRegulatoryLogger.ERROR(logger, str(e))
def __refresh(self, contact):
self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)
self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)
self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)
self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)
self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)
self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)
self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)
try:
self.__reg_unique_name = self.__contact.UniqueName()
except:
self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)
def Contact(self):
"""returns the contact for which this wrapper has all the addinfo/column values"""
return self.__contact
def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):
"""Date of birth of the concerned natural person"""
ael_reg_dob = None
if reg_date_of_birth != VALUE_NOT_SET:
try:
ael_reg_dob = ael.date_from_string(reg_date_of_birth)
except:
if reg_date_of_birth not in ['', None]:
msg = "The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo"%reg_date_of_birth
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
if ael_reg_dob:
self.__reg_date_of_birth = reg_date_of_birth
else:
self.__reg_date_of_birth = None
try:
self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)
except:
pass
else:
return self.__reg_date_of_birth
def FirstName(self, reg_first_name = VALUE_NOT_SET):
"""First name of the concerned natural person"""
if reg_first_name != VALUE_NOT_SET:
self.__reg_first_name = reg_first_name
try:
self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)
except:
pass
else:
if not self.__reg_first_name:
self.__reg_first_name = None
return self.__reg_first_name
def LastName(self, reg_last_name = VALUE_NOT_SET):
"""Last name of the concerned natural person"""
if reg_last_name != VALUE_NOT_SET:
self.__reg_last_name = reg_last_name
try:
self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
except:
pass
else:
if not self.__reg_last_name:
self.__reg_last_name = None
return self.__reg_last_name
def NationalId(self, reg_national_id = VALUE_NOT_SET):
"""NationalId of the concerned natural person"""
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id
def CrmId(self, crm_id = VALUE_NOT_SET):
"""CrmId of the concerned natural person"""
if crm_id != VALUE_NOT_SET:
self.__reg_crm_id = crm_id
try:
self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)
except:
pass
else:
if not self.__reg_crm_id:
self.__reg_crm_id = None
return self.__reg_crm_id
def ExchangeId(self, exchange_id = VALUE_NOT_SET):
"""The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
if exchange_id != VALUE_NOT_SET:
if str(exchange_id).isdigit():
self.__reg_exchange_id = int(exchange_id)
elif str(exchange_id) in ['None', '']:
self.__reg_exchange_id = None
else:
msg = "The ExchangeId provided <%s> is not of the expected integer format"%str(exchange_id)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
try:
self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)
except:
pass
else:
if not self.__reg_exchange_id:
self.__reg_exchange_id = None
return self.__reg_exchange_id
def UniqueName(self, unique_name = VALUE_NOT_SET):
"""An optional unique name, if specified there can only be one contact with this name for each party."""
if unique_name != VALUE_NOT_SET:
try:
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(unique_name)
except:
pass
else:
msg = "The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name."%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name
def ClientType(self):
"""returns the ClientType based on where the CrmId is found on the linked objects"""
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type
def JointAccount(self):
"""Another trader that jointly owns the account with this trader"""
joint_accounts = []
if self.IsGeneralPartner():
for contact in self.__contact.Party().Contacts():
if contact.AdditionalInfo().RegGeneralPartner():
joint_accounts.append(contact)
else:
FRegulatoryLogger.WARN(logger, "<%s> is not a General Partner. Hence JointAccount is None"%self.__contact.Fullname())
joint_accounts = None
return joint_accounts
def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):
"""General partner has responsibility for the actions of the business, can legally bind
the business and is personally liable for all the business's debts and obligations."""
if is_general_partner != VALUE_NOT_SET:
self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is being set to <%s>."%(str(self.__is_general_partner)))
try:
self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)
except:
pass
else:
if str(self.__is_general_partner) == "None":
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is None. Hence defaulting it to False")
self.__is_general_partner = False
return self.__is_general_partner
    def __setattr__(self, attr, val):
        # Private/internal attributes (leading underscore) are stored normally.
        # Public names are routed through the accessor method of the same name,
        # so `obj.FirstName = x` behaves like `obj.FirstName(x)`.
        # NOTE(review): assignments to unknown public names are silently
        # dropped (no accessor and no storage) - confirm that is intended.
        if attr.startswith('_'):
            super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
        else:
            if hasattr(self, attr):
                getattr(self, attr)(val)
def Commit(self):
"""Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact"""
try:
acm.BeginTransaction()
self.__contact.Commit()
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)
self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)
self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)
self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)
self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)
self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)
self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)
self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)
acm.CommitTransaction()
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
FRegulatoryLogger.ERROR(logger, "ABORTING TRANSACTION***********")
acm.AbortTransaction()
def Delete(self):
"""Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
FRegulatoryUtils.Delete(self.__contact, "Contact")
FRegulatoryLogger.DEBUG(logger, "Deleted all AdditionalInfos on Contact related to Regulatory Reporting")
def Attributes(self):
"""returns the attributes on the FContactRegulatoryInfoBase instance"""
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
def RegulatoryInfo(self):
"""returns the FContactRegulatoryInfoBase instance for the given contact"""
conactRegInfo = FContactRegulatoryInfo(self)
return conactRegInfo
def Select(query):
"""Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query"""
party = None
if query.find('and party') != -1:#it means there is an additional condition added
pos = query.find('and party')
party_name = query[(pos + len('and party')):]
query = query[0:pos]
party_name = party_name.replace('=', '').replace("'", '')
party_name = party_name.strip()
party = acm.FParty[party_name]
return_result = FRegulatoryUtils.Select(query, "FContact", party)
return return_result
|
3,096 | 4fb1ece28cd7c6e2ac3a479dcbf81ee09ba14223 | from farmfs.fs import Path, ensure_link, ensure_readonly, ensure_symlink, ensure_copy, ftype_selector, FILE, is_readonly
from func_prototypes import typed, returned
from farmfs.util import safetype, pipeline, fmap, first, compose, invert, partial, repeater
from os.path import sep
from s3lib import Connection as s3conn, LIST_BUCKET_KEY
import re
_sep_replace_ = re.compile(sep)
@returned(safetype)
@typed(safetype)
def _remove_sep_(path):
    """Strip every OS path separator from *path* (e.g. "a/b/c" -> "abc")."""
    return _sep_replace_.subn("",path)[0]
#TODO we should remove references to vol.bs.reverser, as thats leaking format information into the volume.
def reverser(num_segs=3):
    """Returns a function which takes Paths into the user data and returns csums.

    Blobs are stored as ``num_segs`` hex directory segments followed by the
    remainder of the checksum (see _checksum_to_path); the returned closure
    matches that trailing pattern and strips the separators back out.
    """
    # num_segs+1 slash-led components: the leading segments plus the remainder.
    # Raw string replaces the old pattern's invalid "\/" escape sequence
    # (a DeprecationWarning, and a SyntaxWarning on newer CPython);
    # [0-9a-f] is equivalent to the old ([0-9]|[a-f]) alternation.
    r = re.compile(r"((/[0-9a-f]+){%d})$" % (num_segs + 1))
    def checksum_from_link(link):
        """Takes a path into the userdata, returns the matching csum."""
        m = r.search(safetype(link))
        if m:
            # Drop the leading slash, then remove the internal separators.
            csum_slash = m.group()[1:]
            return _remove_sep_(csum_slash)
        else:
            raise ValueError("link %s checksum didn't parse" %(link))
    return checksum_from_link
@returned(safetype)
@typed(safetype, int, int)
def _checksum_to_path(checksum, num_segs=3, seg_len=3):
    """Render *checksum* as a relative path: num_segs leading segments of
    seg_len characters each, then the remainder, joined with the platform
    separator. Inverse of the closure produced by reverser()."""
    segs = [ checksum[i:i+seg_len] for i in range(0, min(len(checksum), seg_len * num_segs), seg_len)]
    segs.append(checksum[num_segs*seg_len:])
    return sep.join(segs)
class Blobstore:
    """Abstract marker for blob stores; direct construction is not supported."""
    def __init__(self):
        raise NotImplementedError()
class FileBlobstore:
    """Blobstore backed by a directory tree of read-only files, each named by
    its checksum split into directory segments (see _checksum_to_path)."""
    def __init__(self, root, num_segs=3):
        # root: Path of the blobstore directory.
        # num_segs: how many directory levels each checksum is split across.
        self.root = root
        # Maps a Path inside the store back to its checksum string.
        self.reverser = reverser(num_segs)
    def _csum_to_name(self, csum):
        """Return string name of link relative to root"""
        #TODO someday when csums are parameterized, we inject the has params here.
        return _checksum_to_path(csum)
    def csum_to_path(self, csum):
        """Return absolute Path to a blob given a csum"""
        #TODO remove callers so we can make internal.
        return Path(self._csum_to_name(csum), self.root)
    def exists(self, csum):
        # True when a blob with this checksum is present on disk.
        blob = self.csum_to_path(csum)
        return blob.exists()
    def delete_blob(self, csum):
        """Takes a csum, and removes it from the blobstore"""
        blob_path = self.csum_to_path(csum)
        # clean=self.root presumably prunes now-empty parent directories up
        # to the blobstore root — confirm against farmfs.fs.Path.unlink.
        blob_path.unlink(clean=self.root)
    def import_via_link(self, path, csum):
        """Adds a file to a blobstore via a hard link.

        Returns True when the blob already existed (a duplicate),
        False when a new read-only link was created."""
        blob = self.csum_to_path(csum)
        duplicate = blob.exists()
        if not duplicate:
            ensure_link(blob, path)
            ensure_readonly(blob)
        return duplicate
    def fetch_blob(self, remote, csum):
        # Copy a blob from another blobstore unless it is already present here.
        src_blob = remote.csum_to_path(csum)
        dst_blob = self.csum_to_path(csum)
        duplicate = dst_blob.exists()
        if not duplicate:
            # NOTE(review): assumes ensure_copy(dst, src) argument order —
            # confirm against farmfs.fs.
            ensure_copy(dst_blob, src_blob)
    def link_to_blob(self, path, csum):
        """Forces path into a symlink to csum"""
        new_link = self.csum_to_path(csum)
        ensure_symlink(path, new_link)
        ensure_readonly(path)
    def blobs(self):
        """Iterator across all blobs"""
        # Walk root, keep regular files, and map each path back to its csum.
        blobs = pipeline(
            ftype_selector([FILE]),
            fmap(first),
            fmap(self.reverser),
        )(self.root.entries())
        return blobs
    def read_handle(self):
        """Returns a file like object which has the blob's contents"""
        raise NotImplementedError()
    def verify_blob_checksum(self, blob):
        """Returns True when the blob's contents do NOT match its checksum
        (i.e. the blob is corrupt); returns False when it verifies cleanly.

        NOTE(review): the previous docstring stated the opposite of what the
        code does (`csum != blob`) — callers must treat True as corruption."""
        path = self.csum_to_path(blob)
        csum = path.checksum()
        return csum != blob
    def verify_blob_permissions(self, blob):
        """Returns True when the blob's permissions is read only. Returns False when the blob is mutable."""
        path = self.csum_to_path(blob)
        return is_readonly(path)
class S3Blobstore:
    """Blobstore held in an S3 bucket, one object per blob under <prefix>/<csum>."""
    def __init__(self, bucket, prefix, access_id, secret):
        # bucket/prefix locate the blobs; access_id/secret are S3 credentials.
        self.bucket = bucket
        self.prefix = prefix
        self.access_id = access_id
        self.secret = secret
    def blobs(self):
        """Iterator across all blobs"""
        # Returns a zero-argument generator *function* (not a generator), so
        # each invocation of the result opens a fresh connection and listing.
        def blob_iterator():
            with s3conn(self.access_id, self.secret) as s3:
                key_iter = s3.list_bucket(self.bucket, prefix=self.prefix+"/")
                for key in key_iter:
                    # Strip "<prefix>/" from the key to recover the csum.
                    blob = key[len(self.prefix)+1:]
                    yield blob
        return blob_iterator
    def blob_stats(self):
        """Iterator across all blobs, retaining the listing information"""
        # Like blobs(), but yields the raw listing record with the decoded
        # csum added under the 'blob' key.
        def blob_iterator():
            with s3conn(self.access_id, self.secret) as s3:
                key_iter = s3.list_bucket2(self.bucket, prefix=self.prefix+"/")
                for head in key_iter:
                    blob = head[LIST_BUCKET_KEY][len(self.prefix)+1:]
                    head['blob'] = blob
                    yield head
        return blob_iterator
    def read_handle(self):
        """Returns a file like object which has the blob's contents"""
        raise NotImplementedError()
    def upload(self, csum, path):
        """Return a callable that uploads the file at *path* to <prefix>/<csum>,
        retried up to 3 times until a 2xx response."""
        key = self.prefix + "/" + csum
        def uploader():
            with path.open('rb') as f:
                with s3conn(self.access_id, self.secret) as s3:
                    #TODO should provide pre-calculated md5 rather than recompute.
                    result = s3.put_object(self.bucket, key, f)
                    return result
        # Success is an HTTP 2xx status from put_object's (status, headers) result.
        http_success = lambda status_headers: status_headers[0] >=200 and status_headers[0] < 300
        # NOTE(review): presumably s3lib signals request failures as ValueError,
        # which is treated as retryable here — confirm against s3lib.
        s3_exception = lambda e: isinstance(e, ValueError)
        upload_repeater = repeater(uploader, max_tries = 3, predicate = http_success, catch_predicate = s3_exception)
        return upload_repeater
|
3,097 | 8a04447f12a9cb6ba31a21d43629d887a0d1f411 | """
Example 1:
Input: J = "aA", S = "aAAbbbb"
Output: 3
Example 2:
Input: J = "z", S = "ZZ"
Output: 0
Note:
S and J will consist of letters and have length at most 50.
The characters in J are distinct.
Sum, over every character in J, of the number of times it appears in S.
Improvement:
J may contain duplicate characters.
Test data:
https://leetcode.com/problems/jewels-and-stones/description/
"""
# NOTE(review): this block was stored with tokens elided (c.. = class,
# ___ = def/for, __ = in, r_ = return, s.. = set/sum, .c.. = .count) and was
# not valid Python; reconstructed below from the surrounding docstring/examples.
class Solution:
    def numJewelsInStones(self, J, S):
        """
        :type J: str  (jewel characters)
        :type S: str  (stones)
        :rtype: int   (how many stones are jewels)
        """
        # Precompute per-character counts over set(S) so duplicate jewels
        # in J are answered by a dict lookup rather than a rescan of S.
        S_dict = {i: S.count(i) for i in set(S)}
        return sum(S_dict.get(i, 0) for i in J)
|
3,098 | 20518302b6a67f8f1ac01f1adf4fe06ab2eaf280 | """
Package for haasplugin.
"""
|
3,099 | 09bf7460b2c928bf6e1346d9d1e2e1276540c080 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
    """Create the venue Images model: an uploaded image with timestamps and an
    optional link to a venue Category."""

    dependencies = [
        # Requires the venue app's initial migration (which defines Category).
        ('venue', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Images',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Resizable image stored under MEDIA_ROOT/images.
                ('image', versatileimagefield.fields.VersatileImageField(upload_to=b'images', verbose_name=b'Image')),
                ('created_at', models.DateTimeField(help_text=b'Date when category created.', verbose_name=b'Created At', auto_now_add=True)),
                ('updated_at', models.DateTimeField(help_text=b'Date when category updated.', verbose_name=b'Updated At', auto_now=True)),
                # Optional Category link; reverse accessor is category.images.
                ('category', models.ForeignKey(related_name='images', blank=True, to='venue.Category', null=True)),
            ],
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.