content stringlengths 0 1.05M | origin stringclasses 2
values | type stringclasses 2
values |
|---|---|---|
#!/usr/bin/python3
"""
Importing models using the FileStorage class
"""
import json
import models
import os.path
class FileStorage:
    """Serializes instances to a JSON file and deserializes them back.

    Class attributes:
        __file_path: Path of the JSON file used for persistence.
        __objects: In-memory store mapping '<class name>.<id>' to instances.
    """

    __file_path = "file.json"
    __objects = {}

    def all(self):
        """Return the dictionary __objects of all stored instances."""
        return FileStorage.__objects

    def new(self, obj):
        """Store obj in __objects under the key '<obj class name>.<obj.id>'."""
        objkey = '{}.{}'.format(obj.__class__.__name__, obj.id)
        self.__objects[objkey] = obj

    def save(self):
        """Serialize __objects to the JSON file (path: __file_path)."""
        new_dict = {key: item.to_dict() for key, item in self.__objects.items()}
        with open(self.__file_path, "w", encoding='utf-8') as new_file:
            json.dump(new_dict, new_file)

    def classes(self):
        """Return a dictionary of valid class names mapped to their classes."""
        # Imported lazily to avoid a circular import with the models package.
        from models.base_model import BaseModel
        from models.user import User
        from models.state import State
        from models.city import City
        from models.amenity import Amenity
        from models.place import Place
        from models.review import Review
        return {"BaseModel": BaseModel,
                "User": User,
                "State": State,
                "City": City,
                "Amenity": Amenity,
                "Place": Place,
                "Review": Review}

    def reload(self):
        """Deserialize the JSON file into __objects, only if the file exists."""
        try:
            with open(self.__file_path, encoding="utf-8") as f:
                data = json.loads(f.read())
            for key, value in data.items():
                # BUG FIX: the original resolved the class name through
                # globals(), where the model classes are never defined (they
                # are imported only inside classes()); the resulting KeyError
                # was swallowed by the broad except, so reload() never loaded
                # anything. Resolve through classes() instead.
                cls_name = value['__class__']
                self.__objects[key] = self.classes()[cls_name](**value)
        except Exception:
            # Missing/unreadable file: keep the current in-memory state.
            pass
#coding: utf-8
from lxml import etree as ET
import re
import plumber
# Strip a leading "0 " / trailing " 0" left behind when a supplement number
# of 0 is interpolated into the issue label (see XMLArticleMetaIssuePipe).
SUPPLBEG_REGEX = re.compile(r'^0 ')
SUPPLEND_REGEX = re.compile(r' 0$')

# ISO 639-2 terminological (T) language codes mapped to their bibliographic
# (B) equivalents; only the languages where the two code sets differ are
# listed, so `.get(code, code)` falls through for all others.
ISO6392T_TO_ISO6392B = {
    u'sqi': u'alb',
    u'hye': u'arm',
    u'eus': u'baq',
    u'mya': u'bur',
    u'zho': u'chi',
    u'ces': u'cze',
    u'nld': u'dut',
    u'fra': u'fre',
    u'kat': u'geo',
    u'deu': u'ger',
    u'ell': u'gre',
    u'isl': u'ice',
    u'mkd': u'mac',
    u'msa': u'may',
    u'mri': u'mao',
    u'fas': u'per',
    u'ron': u'rum',
    u'slk': u'slo',
    u'bod': u'tib',
    u'cym': u'wel'
}
class SetupArticlePipe(plumber.Pipe):
    """Pipeline entry point: creates the root <records> element.

    Downstream pipes receive and pass along the (raw, xml) pair.
    """

    def transform(self, data):
        xml = ET.Element('records')
        return data, xml
class XMLArticlePipe(plumber.Pipe):
    """Appends an empty <record> element to the root for the current article.

    Later pipes locate it with xml.find('./record') and fill it in.
    """

    def transform(self, data):
        raw, xml = data
        article = ET.Element('record')
        xml.append(article)
        return data
class XMLJournalMetaJournalTitlePipe(plumber.Pipe):
    """Adds a <journalTitle> element with the journal's title."""

    def transform(self, data):
        raw, xml = data
        journaltitle = ET.Element('journalTitle')
        journaltitle.text = raw.journal.title
        xml.find('./record').append(journaltitle)
        return data
class XMLJournalMetaISSNPipe(plumber.Pipe):
    """Adds an <issn> element; raw.any_issn() picks whichever ISSN exists."""

    def transform(self, data):
        raw, xml = data
        issn = ET.Element('issn')
        issn.text = raw.any_issn()
        xml.find('./record').append(issn)
        return data
class XMLJournalMetaPublisherPipe(plumber.Pipe):
    """Adds one <publisher> element per publisher name (if any)."""

    def transform(self, data):
        raw, xml = data
        # `or []` guards against publisher_name being None.
        for item in raw.journal.publisher_name or []:
            publisher = ET.Element('publisher')
            publisher.text = item
            xml.find('./record').append(publisher)
        return data
class XMLArticleMetaIdPipe(plumber.Pipe):
    """Adds a <publisherRecordId> element with the publisher's article id."""

    def transform(self, data):
        raw, xml = data
        uniquearticleid = ET.Element('publisherRecordId')
        uniquearticleid.text = raw.publisher_id
        xml.find('./record').append(uniquearticleid)
        return data
class XMLArticleMetaArticleIdDOIPipe(plumber.Pipe):
    """Adds a <doi> element; the whole pipe is skipped when there is no DOI."""

    def precond(data):
        # Plain function, not a method: plumber's precondition decorator
        # calls it with the pipe data before transform runs.
        raw, xml = data
        if not raw.doi:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        articleiddoi = ET.Element('doi')
        articleiddoi.text = raw.doi
        xml.find('./record').append(articleiddoi)
        return data
class XMLArticleMetaTitlePipe(plumber.Pipe):
    """Adds a <title> element: the original title when present, otherwise
    the first translated title (if any), tagged with its ISO 639-2/B code.

    Improvements over the original: original_title()/translated_titles()
    are each called once instead of repeatedly, and the redundant
    `len(...) != 0` check after the truthiness test was dropped.
    """

    def transform(self, data):
        raw, xml = data
        original = raw.original_title()
        if original:
            lang = raw.original_language()
            title = ET.Element('title')
            title.text = original
            title.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
            xml.find('./record').append(title)
        else:
            translated = raw.translated_titles()
            if translated:
                # Fall back to the first translated title (dict order).
                lang, text = next(iter(translated.items()))
                title = ET.Element('title')
                title.text = text
                title.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
                xml.find('./record').append(title)
        return data
class XMLArticleMetaAuthorsPipe(plumber.Pipe):
    """Adds an <authors> group with one <author> per raw author.

    Each <author> gets a <name> ("given surname") and one <affiliationId>
    per xref entry. Skipped entirely when the article has no authors.
    """

    def precond(data):
        raw, xml = data
        if not raw.authors:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        contribgroup = ET.Element('authors')
        for author in raw.authors:
            names = [author.get('given_names', ''), author.get('surname', '')]
            contribname = ET.Element('name')
            contribname.text = ' '.join(names)
            contrib = ET.Element('author')
            contrib.append(contribname)
            # xref ids link the author to entries in <affiliationsList>.
            for xr in author.get('xref', []):
                xref = ET.Element('affiliationId')
                xref.text = xr
                contrib.append(xref)
            contribgroup.append(contrib)
        xml.find('./record').append(contribgroup)
        return data
class XMLArticleMetaAffiliationPipe(plumber.Pipe):
    """Adds an <affiliationsList> with one <affiliationName> per affiliation
    that has an institution; skipped when there are no affiliations.
    """

    def precond(data):
        raw, xml = data
        if not raw.mixed_affiliations:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        affs = ET.Element('affiliationsList')
        for affiliation in raw.mixed_affiliations:
            # Entries without an 'institution' key are silently skipped.
            if 'institution' in affiliation:
                aff = ET.Element('affiliationName')
                # 'affiliationId' matches the <affiliationId> xref values
                # emitted by XMLArticleMetaAuthorsPipe.
                aff.set('affiliationId', affiliation['index'])
                aff.text = affiliation['institution']
                affs.append(aff)
        xml.find('./record').append(affs)
        return data
class XMLArticleMetaPublicationDatePipe(plumber.Pipe):
    """Adds a <publicationDate> element with the raw publication date."""

    def transform(self, data):
        raw, xml = data
        pubdate = ET.Element('publicationDate')
        pubdate.text = raw.publication_date
        xml.find('./record').append(pubdate)
        return data
class XMLArticleMetaStartPagePipe(plumber.Pipe):
    """Adds a <startPage> element; skipped when there is no start page."""

    def precond(data):
        raw, xml = data
        if not raw.start_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        startpage = ET.Element('startPage')
        startpage.text = raw.start_page
        xml.find('./record').append(startpage)
        return data
class XMLArticleMetaEndPagePipe(plumber.Pipe):
    """Adds an <endPage> element; skipped when there is no end page."""

    def precond(data):
        raw, xml = data
        if not raw.end_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        endpage = ET.Element('endPage')
        endpage.text = raw.end_page
        xml.find('./record').append(endpage)
        return data
class XMLArticleMetaVolumePipe(plumber.Pipe):
    """Adds a <volume> element; skipped when the issue has no volume."""

    def precond(data):
        raw, xml = data
        if not raw.issue.volume:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        volume = ET.Element('volume')
        volume.text = raw.issue.volume
        xml.find('./record').append(volume)
        return data
class XMLArticleMetaIssuePipe(plumber.Pipe):
    """Adds an <issue> element built from the issue number plus any
    supplement labels; 'ahead' is mapped to '0' and bare supplement-0
    markers are stripped.

    NOTE: the original also built a <volume> element here (label_volume /
    vol) but never appended it anywhere — dead code, removed; <volume> is
    emitted by XMLArticleMetaVolumePipe.
    """

    def transform(self, data):
        raw, xml = data
        label_issue = raw.issue.number.replace('ahead', '0') if raw.issue.number else '0'
        label_suppl_issue = ' suppl %s' % raw.issue.supplement_number if raw.issue.supplement_number else ''
        if label_suppl_issue:
            label_issue += label_suppl_issue
        label_suppl_volume = ' suppl %s' % raw.issue.supplement_volume if raw.issue.supplement_volume else ''
        if label_suppl_volume:
            label_issue += label_suppl_volume
        # Drop leading "0 " / trailing " 0" placeholders (e.g. "0 suppl 1").
        label_issue = SUPPLBEG_REGEX.sub('', label_issue)
        label_issue = SUPPLEND_REGEX.sub('', label_issue)
        if label_issue.strip():
            issue = ET.Element('issue')
            issue.text = label_issue.strip()
            xml.find('./record').append(issue)
        return data
class XMLArticleMetaDocumentTypePipe(plumber.Pipe):
    """Adds a <documentType> element with the raw document type."""

    def transform(self, data):
        raw, xml = data
        documenttype = ET.Element('documentType')
        documenttype.text = raw.document_type
        xml.find('./record').append(documenttype)
        return data
class XMLArticleMetaFullTextUrlPipe(plumber.Pipe):
    """Adds a <fullTextUrl format="html"> element with the English HTML URL."""

    def precond(data):
        raw, xml = data
        # NOTE(review): transform calls raw.html_url(...) as a method, but
        # this tests the attribute's truthiness; if html_url is a plain
        # method (always truthy) this precondition never fires — confirm
        # whether html_url is a property upstream.
        if not raw.html_url:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        url = ET.Element('fullTextUrl')
        url.set('format', 'html')
        url.text = raw.html_url(language='en')
        xml.find('./record').append(url)
        return data
class XMLArticleMetaAbstractsPipe(plumber.Pipe):
    """Adds one <abstract> element per available abstract (original first,
    then translations), each tagged with its ISO 639-2/B language code.
    Skipped when the article has no abstracts at all.
    """

    def precond(data):
        raw, xml = data
        if not raw.original_abstract() and not raw.translated_abstracts():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        articlemeta = xml.find('./record')
        if raw.original_abstract():
            abstract = ET.Element('abstract')
            abstract.set('language', ISO6392T_TO_ISO6392B.get(raw.original_language(), raw.original_language()))
            abstract.text = raw.original_abstract()
            articlemeta.append(abstract)
        if raw.translated_abstracts():
            for lang, text in raw.translated_abstracts().items():
                abstract = ET.Element('abstract')
                abstract.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
                abstract.text = text
                articlemeta.append(abstract)
        return data
class XMLArticleMetaKeywordsPipe(plumber.Pipe):
    """Adds one <keywords> group per language, each containing <keyword>
    children. Skipped when the article has no keywords.
    """

    def precond(data):
        raw, xml = data
        if not raw.keywords():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        articlemeta = xml.find('./record')
        # This inner check is redundant with the precondition above, but
        # kept as-is (harmless belt-and-braces).
        if raw.keywords():
            for lang, keywords in raw.keywords().items():
                kwdgroup = ET.Element('keywords')
                kwdgroup.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
                for keyword in keywords:
                    kwd = ET.Element('keyword')
                    kwd.text = keyword
                    kwdgroup.append(kwd)
                articlemeta.append(kwdgroup)
        return data
class XMLClosePipe(plumber.Pipe):
    """Pipeline exit point: serializes the XML tree to UTF-8 bytes,
    discarding the raw article object.
    """

    def transform(self, data):
        raw, xml = data
        data = ET.tostring(xml, encoding="utf-8", method="xml")
        return data
| nilq/baby-python | python |
# Generated by Django 3.1.2 on 2020-10-12 22:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Dominio.uid_anterior: a temporary integer keeping the legacy id
    during data migration (per its help_text, to be deleted afterwards).
    """

    dependencies = [
        ('dominios', '0002_dominio_data_updated'),
    ]

    operations = [
        migrations.AddField(
            model_name='dominio',
            name='uid_anterior',
            field=models.IntegerField(default=0, help_text='to be deleted after migration'),
        ),
    ]
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from datetime import datetime
# National Bank of Ukraine discount rate used by the reference bid below.
DISCOUNT_RATE = 0.125

# Baseline bid fixture; the tables below vary one field at a time —
# presumably ESCO NPV test data, with results asserted elsewhere (confirm).
BASE_BID = {'NBUdiscountRate': DISCOUNT_RATE,
            'annualCostsReduction': [92.47] + [250] * 20,
            'yearlyPaymentsPercentage': 0.70,
            'contractDuration': {'years': 2, 'days': 10},
            'announcementDate': datetime(2017, 8, 18)}
# Fixture: contractDuration inputs (applied over BASE_BID) and the expected
# (amountContract, amountPerformance) string pairs, index-aligned with
# 'input'. Values are exact decimal strings — do not reformat.
CONTRACT_DURATION = {
    'input': [
        {'years': 0, 'days': 0}, {'years': 0, 'days': 1},
        {'years': 0, 'days': 8}, {'years': 0, 'days': 31},
        {'years': 0, 'days': 91}, {'years': 0, 'days': 92},
        {'years': 0, 'days': 180}, {'years': 0, 'days': 182},
        {'years': 0, 'days': 184}, {'years': 0, 'days': 256},
        {'years': 0, 'days': 360}, {'years': 0, 'days': 361},
        {'years': 0, 'days': 362}, {'years': 0, 'days': 363},
        {'years': 0, 'days': 364}, {'years': 1, 'days': 0},
        {'years': 2, 'days': 1}, {'years': 1, 'days': 8},
        {'years': 2, 'days': 31}, {'years': 1, 'days': 91},
        {'years': 2, 'days': 92}, {'years': 1, 'days': 180},
        {'years': 2, 'days': 182}, {'years': 1, 'days': 184},
        {'years': 2, 'days': 256}, {'years': 1, 'days': 360},
        {'years': 2, 'days': 361}, {'years': 1, 'days': 362},
        {'years': 2, 'days': 363}, {'years': 1, 'days': 364},
        {'years': 3, 'days': 0}, {'years': 7, 'days': 1},
        {'years': 3, 'days': 8}, {'years': 8, 'days': 31},
        {'years': 4, 'days': 91}, {'years': 9, 'days': 92},
        {'years': 5, 'days': 180}, {'years': 10, 'days': 182},
        {'years': 6, 'days': 184}, {'years': 11, 'days': 256},
        {'years': 7, 'days': 360}, {'years': 12, 'days': 361},
        {'years': 8, 'days': 362}, {'years': 13, 'days': 363},
        {'years': 9, 'days': 364}, {'years': 10, 'days': 0},
        {'years': 10, 'days': 1}, {'years': 11, 'days': 8},
        {'years': 14, 'days': 30}, {'years': 14, 'days': 31},
        {'years': 14, 'days': 90}, {'years': 14, 'days': 91},
        {'years': 14, 'days': 92}, {'years': 14, 'days': 180},
        {'years': 14, 'days': 181}, {'years': 14, 'days': 182},
        {'years': 14, 'days': 361}, {'years': 14, 'days': 362},
        {'years': 14, 'days': 363}, {'years': 14, 'days': 364},
        {'years': 15, 'days': 0}
    ],
    'expected_results': [
        {'amountContract': '0.00000000000',
         'amountPerformance': '1810.95435405817'},
        {'amountContract': '0.47947407407',
         'amountPerformance': '1810.49606787280'},
        {'amountContract': '3.83579259259',
         'amountPerformance': '1807.28806457523'},
        {'amountContract': '14.86369629630',
         'amountPerformance': '1796.74748231179'},
        {'amountContract': '43.63214074074',
         'amountPerformance': '1769.25031118977'},
        {'amountContract': '44.11161481481',
         'amountPerformance': '1768.79202500440'},
        {'amountContract': '86.30434246575',
         'amountPerformance': '1730.75511346897'},
        {'amountContract': '87.26324657534',
         'amountPerformance': '1729.94041988832'},
        {'amountContract': '88.22215068493',
         'amountPerformance': '1729.12572630767'},
        {'amountContract': '122.74269863014',
         'amountPerformance': '1699.79675740423'},
        {'amountContract': '172.60571232877',
         'amountPerformance': '1657.43269121038'},
        {'amountContract': '173.08516438356',
         'amountPerformance': '1657.02534442005'},
        {'amountContract': '173.56461643836',
         'amountPerformance': '1656.61799762973'},
        {'amountContract': '174.04406849315',
         'amountPerformance': '1656.21065083940'},
        {'amountContract': '174.52352054795',
         'amountPerformance': '1655.80330404908'},
        {'amountContract': '175.00297260274',
         'amountPerformance': '1655.39595725875'},
        {'amountContract': '350.48242465753',
         'amountPerformance': '1516.76226628463'},
        {'amountContract': '178.83858904110',
         'amountPerformance': '1652.13718293615'},
        {'amountContract': '364.86598630137',
         'amountPerformance': '1505.89968520929'},
        {'amountContract': '218.63310958904',
         'amountPerformance': '1618.32739933913'},
        {'amountContract': '394.11256164384',
         'amountPerformance': '1483.81243702275'},
        {'amountContract': '261.30434246575',
         'amountPerformance': '1584.11026895179'},
        {'amountContract': '437.26324657534',
         'amountPerformance': '1453.11558753945'},
        {'amountContract': '263.22215068493',
         'amountPerformance': '1582.66192480841'},
        {'amountContract': '472.74269863014',
         'amountPerformance': '1429.29837273721'},
        {'amountContract': '347.60571232877',
         'amountPerformance': '1518.93478249970'},
        {'amountContract': '523.08516438356',
         'amountPerformance': '1395.50367605835'},
        {'amountContract': '348.56461643836',
         'amountPerformance': '1518.21061042801'},
        {'amountContract': '524.04406849315',
         'amountPerformance': '1394.85996755018'},
        {'amountContract': '349.52352054795',
         'amountPerformance': '1517.48643835632'},
        {'amountContract': '525.00297260274',
         'amountPerformance': '1394.21625904202'},
        {'amountContract': '1225.48242465753',
         'amountPerformance': '1024.59792121607'},
        {'amountContract': '528.83858904110',
         'amountPerformance': '1391.64142500934'},
        {'amountContract': '1414.86598630137',
         'amountPerformance': '951.05679958049'},
        {'amountContract': '743.63310958904',
         'amountPerformance': '1258.93018757522'},
        {'amountContract': '1619.11256164384',
         'amountPerformance': '881.36067351043'},
        {'amountContract': '961.30434246575',
         'amountPerformance': '1143.34874877515'},
        {'amountContract': '1837.26324657534',
         'amountPerformance': '817.12906555064'},
        {'amountContract': '1138.22215068493',
         'amountPerformance': '1061.16755000753'},
        {'amountContract': '2047.74269863014',
         'amountPerformance': '763.74700436815'},
        {'amountContract': '1397.60571232877',
         'amountPerformance': '957.48662442295'},
        {'amountContract': '2273.08516438356',
         'amountPerformance': '714.14055865311'},
        {'amountContract': '1573.56461643836',
         'amountPerformance': '896.44297869644'},
        {'amountContract': '2449.04406849315',
         'amountPerformance': '680.27668462186'},
        {'amountContract': '1749.52352054795',
         'amountPerformance': '842.21724051892'},
        {'amountContract': '1750.00297260274',
         'amountPerformance': '842.07611953472'},
        {'amountContract': '1750.48242465753',
         'amountPerformance': '841.93499855052'},
        {'amountContract': '1928.83858904110',
         'amountPerformance': '793.16985845594'},
        {'amountContract': '2464.38653424658',
         'amountPerformance': '677.45744827975'},
        {'amountContract': '2464.86598630137',
         'amountPerformance': '677.36934714405'},
        {'amountContract': '2493.15365753425',
         'amountPerformance': '672.17138013828'},
        {'amountContract': '2493.63310958904',
         'amountPerformance': '672.08327900259'},
        {'amountContract': '2494.11256164384',
         'amountPerformance': '671.99517786690'},
        {'amountContract': '2536.30434246575',
         'amountPerformance': '664.68278360454'},
        {'amountContract': '2536.78379452055',
         'amountPerformance': '664.60447148393'},
        {'amountContract': '2537.26324657534',
         'amountPerformance': '664.52615936331'},
        {'amountContract': '2623.08516438356',
         'amountPerformance': '650.50828977336'},
        {'amountContract': '2623.56461643836',
         'amountPerformance': '650.42997765275'},
        {'amountContract': '2624.04406849315',
         'amountPerformance': '650.35166553213'},
        {'amountContract': '2624.52352054795',
         'amountPerformance': '650.27335341152'},
        {'amountContract': '2625.00297260274',
         'amountPerformance': '650.19504129090'}
    ]
}
# Fixture: announcementDate inputs (applied over BASE_BID) and expected
# results, index-aligned with 'input'. Exact decimal strings — do not edit.
ANNOUNCEMENT_DATE = {
    'input': [
        datetime(2017, 5, 2), datetime(2017, 5, 3), datetime(2017, 5, 4),
        datetime(2017, 5, 5), datetime(2017, 5, 6), datetime(2017, 5, 7),
        datetime(2017, 5, 8), datetime(2017, 5, 9), datetime(2017, 5, 10),
        datetime(2017, 5, 11), datetime(2017, 12, 30), datetime(2018, 1, 1),
        datetime(2018, 1, 31), datetime(2018, 2, 1), datetime(2018, 12, 30),
    ],
    'expected_results': [
        {'amountContract': '303.01667123288',
         'amountPerformance': '1493.11261864549'},
        {'amountContract': '303.49612328767',
         'amountPerformance': '1493.29714530232'},
        {'amountContract': '303.97557534247',
         'amountPerformance': '1493.48174786072'},
        {'amountContract': '304.45502739726',
         'amountPerformance': '1493.66642643300'},
        {'amountContract': '304.93447945205',
         'amountPerformance': '1493.85118113158'},
        {'amountContract': '305.41393150685',
         'amountPerformance': '1494.03601206895'},
        {'amountContract': '305.89338356164',
         'amountPerformance': '1494.22091935769'},
        {'amountContract': '306.37283561644',
         'amountPerformance': '1494.40590311049'},
        {'amountContract': '306.85228767123',
         'amountPerformance': '1494.59096344011'},
        {'amountContract': '307.33173972603',
         'amountPerformance': '1494.77610045941'},
        {'amountContract': '419.04406849315',
         'amountPerformance': '1540.63620088962'},
        {'amountContract': '245.00297260274',
         'amountPerformance': '1471.31191860622'},
        {'amountContract': '259.38653424658',
         'amountPerformance': '1476.62410121389'},
        {'amountContract': '259.86598630137',
         'amountPerformance': '1476.80218008027'},
        {'amountContract': '419.04406849315',
         'amountPerformance': '1540.63620088962'},
    ]
}
# Fixture: yearlyPaymentsPercentage inputs (applied over BASE_BID) and
# expected results, index-aligned with 'input'.
PAYMENTS_PERCENTAGE = {
    'input': [
        0.0000, 0.0001, 0.0009, 0.0010, 0.0100, 0.1000, 0.0499, 0.0500, 0.4900,
        0.4999, 0.5000, 0.7100, 0.7200, 0.7300, 0.7400, 0.7500, 0.7600, 0.7700,
        0.7800, 0.7900, 0.8000, 0.8900, 0.8990, 0.8999, 0.9000
    ],
    'expected_results': [
        {'amountContract': '0.00000000000',
         'amountPerformance': '1810.95435405817'},
        {'amountContract': '0.05068535616',
         'amountPerformance': '1810.91186107787'},
        {'amountContract': '0.45616820548',
         'amountPerformance': '1810.57191723547'},
        {'amountContract': '0.50685356164',
         'amountPerformance': '1810.52942425517'},
        {'amountContract': '5.06853561644',
         'amountPerformance': '1806.70505602822'},
        {'amountContract': '50.68535616438',
         'amountPerformance': '1768.46137375872'},
        {'amountContract': '25.29199272603',
         'amountPerformance': '1789.75035688874'},
        {'amountContract': '25.34267808219',
         'amountPerformance': '1789.70786390844'},
        {'amountContract': '248.35824520548',
         'amountPerformance': '1602.73875059087'},
        {'amountContract': '253.37609546575',
         'amountPerformance': '1598.53194554123'},
        {'amountContract': '253.42678082192',
         'amountPerformance': '1598.48945256093'},
        {'amountContract': '359.86602876712',
         'amountPerformance': '1509.25419393209'},
        {'amountContract': '364.93456438356',
         'amountPerformance': '1505.00489590214'},
        {'amountContract': '370.00310000000',
         'amountPerformance': '1500.75559787220'},
        {'amountContract': '375.07163561644',
         'amountPerformance': '1496.50629984225'},
        {'amountContract': '380.14017123288',
         'amountPerformance': '1492.25700181231'},
        {'amountContract': '385.20870684932',
         'amountPerformance': '1488.00770378236'},
        {'amountContract': '390.27724246575',
         'amountPerformance': '1483.75840575242'},
        {'amountContract': '395.34577808219',
         'amountPerformance': '1479.50910772247'},
        {'amountContract': '400.41431369863',
         'amountPerformance': '1475.25980969253'},
        {'amountContract': '405.48284931507',
         'amountPerformance': '1471.01051166258'},
        {'amountContract': '451.09966986301',
         'amountPerformance': '1432.76682939308'},
        {'amountContract': '455.66135191781',
         'amountPerformance': '1428.94246116613'},
        {'amountContract': '456.11752012329',
         'amountPerformance': '1428.56002434343'},
        {'amountContract': '456.16820547945',
         'amountPerformance': '1428.51753136313'},
    ]
}
# Fixture: NBUdiscountRate inputs (applied over BASE_BID) and expected
# results. Note amountContract is constant: the discount rate only affects
# the performance amount.
DISCOUNT_RATES = {
    'input': [
        0.0000, 0.0001, 0.0010, 0.0100, 0.1000, 1.0000, 0.1249, 0.1250, 0.1300,
        0.1500, 0.1800, 0.2000, 0.2200, 0.3000, 0.4000, 0.5000, 0.6000, 0.7000,
        0.8000, 0.9000, 0.9900, 0.9909, 0.9990, 0.9999
    ],
    'expected_results': [
        {'amountContract': '354.79749315068',
         'amountPerformance': '4645.20675342466'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '4640.02004460226'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '4593.68225030323'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '4162.36042333301'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '1821.63775269194'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '117.87571646511'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '1514.57663165387'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '1513.50349196203'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '1461.26832468564'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '1277.36248402751'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '1061.29444236423'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '947.18616001753'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '851.26540354660'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '588.29996204648'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '407.85685451746'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '303.61355861378'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '237.36786431275'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '192.32300937991'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '160.12224139859'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '136.19991118181'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '119.50923564960'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '119.36059287924'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '118.03729764291'},
        {'amountContract': '354.79749315068',
         'amountPerformance': '117.89185707063'},
    ]
}
# Fixture: annualCostsReduction inputs — 21-element lists (year 0 plus 20
# contract years, applied over BASE_BID) — and expected results.
ANNUAL_COSTS_REDUCTION = {
    'input': [
        [0] * 20 + [0.01],
        [0] * 18 + [0.01] * 3,
        [0] * 11 + [0.01] * 10,
        [0] * 3 + [0.01] * 18,
        [0] * 2 + [0.01] * 19,
        [0] + [0.01] * 20,
        [0.01] * 21,
        [0] * 20 + [1],
        [0] * 18 + [1] * 3,
        [0] * 11 + [1] * 10,
        [0] * 3 + [1] * 18,
        [0] * 2 + [1] * 19,
        [0] + [1] * 20,
        [1] * 21,
        [i * 100 for i in (range(1, 22))],
        [2200 - i * 100 for i in (range(1, 22))],
        [123456789] * 21
    ],
    'expected_results': [
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.00059563606'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.00276250500'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.01598505603'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.05285465322'},
        {'amountContract': '0.00460273973',
         'amountPerformance': '0.05693070745'},
        {'amountContract': '0.01160273973',
         'amountPerformance': '0.05947953451'},
        {'amountContract': '0.01860273973',
         'amountPerformance': '0.06234696495'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.05956360564'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.27625049981'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '1.59850560258'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '5.28546532151'},
        {'amountContract': '0.46027397260',
         'amountPerformance': '5.69307074472'},
        {'amountContract': '1.16027397260',
         'amountPerformance': '5.94795345066'},
        {'amountContract': '1.86027397260',
         'amountPerformance': '6.23469649485'},
        {'amountContract': '348.08219178082',
         'amountPerformance': '5211.30198080864'},
        {'amountContract': '3744.52054794521',
         'amountPerformance': '8505.03030786802'},
        {'amountContract': '229663451.31780821085',
         'amountPerformance': '769715609.64411020279'}
    ]
}
# Fixture: complete bids (several fields varied at once; announcementDate
# present only on some) and expected results, index-aligned with 'input'.
BIDS = {
    'input': [
        {
            'contractDuration': {'years': 0, 'days': 1},
            'NBUdiscountRate': 0.0000,
            'yearlyPaymentsPercentage': 0.7000,
            'annualCostsReduction': [0] * 20 + [0.01]
        },
        {
            'contractDuration': {'years': 0, 'days': 1},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 20 + [10000]
        },
        {
            'contractDuration': {'years': 0, 'days': 1},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 10 + [10000] * 11
        },
        {
            'contractDuration': {'years': 9, 'days': 1},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 10 + [10000] * 11
        },
        {
            'contractDuration': {'years': 9, 'days': 135},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 10 + [10000] * 11
        },
        {
            'contractDuration': {'years': 9, 'days': 136},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 10 + [10000] * 11
        },
        {
            'contractDuration': {'years': 9, 'days': 136},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 11 + [10000] * 10
        },
        {
            'contractDuration': {'years': 10, 'days': 136},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.8999,
            'annualCostsReduction': [0] * 11 + [10000] * 10
        },
        {
            'contractDuration': {'years': 2, 'days': 10},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.7000,
            'annualCostsReduction': [92.47] + [250] * 20,
            'announcementDate': datetime(2017, 12, 30)
        },
        {
            'contractDuration': {'years': 2, 'days': 10},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.7000,
            'annualCostsReduction': [92.47] + [250] * 20,
            'announcementDate': datetime(2017, 12, 31)
        },
        {
            'contractDuration': {'years': 2, 'days': 10},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.7000,
            'annualCostsReduction': [92.47] + [250] * 20,
            'announcementDate': datetime(2018, 1, 1)
        },
        {
            'contractDuration': {'years': 2, 'days': 10},
            'NBUdiscountRate': 0.1250,
            'yearlyPaymentsPercentage': 0.7000,
            'annualCostsReduction': [0] + [250] * 20,
            'announcementDate': datetime(2018, 12, 31)
        },
    ],
    'expected_results': [
        {'amountContract': '0.00000000000',
         'amountPerformance': '0.00630136986'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '595.63605641337'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '18928.43655328417'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '18928.43655328417'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '18928.43655328417'},
        {'amountContract': '24.65479452055',
         'amountPerformance': '18921.17970907397'},
        {'amountContract': '0.00000000000',
         'amountPerformance': '15985.05602575644'},
        {'amountContract': '24.65479452055',
         'amountPerformance': '15978.60549756960'},
        {'amountContract': '419.04406849315',
         'amountPerformance': '1540.63620088962'},
        {'amountContract': '354.79452054795',
         'amountPerformance': '1513.14383477073'},
        {'amountContract': '245.00297260274',
         'amountPerformance': '1471.31191860622'},
        {'amountContract': '354.79452054795',
         'amountPerformance': '1513.14383477073'},
    ]
}
| nilq/baby-python | python |
import copy
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from dashboard.models import Place
from api_v1.containers.place.serializers import PlaceSerializer
from api_v1.serializers import BatchRequestSerializer
class PlaceViewSet(viewsets.ModelViewSet):
    """Read-only Place endpoint supporting ?name= and ?id= query filters."""

    serializer_class = PlaceSerializer
    # Read-only: only safe methods are exposed.
    http_method_names = ['get', 'head']

    def get_queryset(self):
        """Return places filtered by the 'name' or 'id' query parameter,
        or all places when neither is given. 'name' takes precedence."""
        return_places = Place.objects.all()
        place_name = self.request.GET.get('name')
        if place_name:
            return return_places.filter(name__iexact=place_name).order_by('name').distinct('name')
        place_id = self.request.GET.get('id')
        if place_id:
            # BUG FIX: the original called filter(id=id), passing the Python
            # builtin `id` function instead of the query parameter's value.
            return return_places.filter(id=place_id)
        return return_places
@api_view(['POST'])
def request_multiple_places(request):
    """Batch endpoint: return serialized Place rows for the ids listed in
    the POST body (shape validated by BatchRequestSerializer; a 400 is
    raised automatically on invalid input).
    """
    # Deep-copy so validation cannot mutate the original request payload.
    data = copy.deepcopy(request.data)
    request_serializer = BatchRequestSerializer(data=data)
    request_serializer.is_valid(raise_exception=True)
    # distinct('id') guards against duplicate ids in the request body.
    query_set = Place.objects.filter(pk__in=request_serializer.data['ids']).distinct('id')
    response_serializer = PlaceSerializer(query_set, many=True)
    return Response(response_serializer.data)
| nilq/baby-python | python |
import re
import string
import numpy as np
from math import log
from typing import List
from collections import Counter
from .document import Document
class CountVectorizer:
    """Turns a document into a term-frequency vector."""

    @staticmethod
    def split_iter(document_content: str):
        """
        Splits document in words and returns it as generator.

        Args:
            document_content: Cleaned document content.

        Returns: Generator of document terms.
        """
        for match in re.finditer(r"[A-Za-z0-9]+", document_content):
            yield match.group(0)

    def clean_document(self, document: str) -> str:
        """
        Cleans text from any punctuation characters and lowers it.

        Args:
            document: Text to be cleaned.

        Returns: Lowered string wihout punctuation sings.
        """
        strip_punctuation = str.maketrans("", "", string.punctuation)
        return document.lower().translate(strip_punctuation)

    def count_term_freq(self, document: Document) -> dict:
        """
        Counts term frequency inside document.

        Args:
            document: Loaded document object.

        Returns: Counter with term: count items.
        """
        cleaned = self.clean_document(document.content)
        return Counter(self.split_iter(cleaned))

    def vectorize(self, document: Document) -> np.ndarray:
        """
        Counts document term frequency and returns it as vecotr.

        Args:
            document: Loaded document object.

        Returns: Numpy array with term frequency values.
        """
        frequencies = self.count_term_freq(document)
        return np.array(list(frequencies.values()))
class TfidfVectorizer(CountVectorizer):
    """TF-IDF variant of CountVectorizer."""

    def calculate_tfidf(self, term_freq: int, inverse_doc_freq: float) -> float:
        """
        Calculates term frequency - inverse document frequency.

        Args:
            term_freq: Term frequency.
            inverse_doc_freq: Inverse document frequency.

        Returns: Product of term and inverse document frequency (float).
        """
        return term_freq * inverse_doc_freq

    def calculate_inverse_doc_freq(self, doc_num: int, term_doc_freq: int) -> float:
        """
        Calculates inverse document frequency.

        Args:
            doc_num: Number of documents.
            term_doc_freq: Number of term apperances in documents.

        Returns: Inverse document frequency (float).
        """
        return 0 if not term_doc_freq else log(doc_num / term_doc_freq)

    def count_term_doc_freq(self, term: str, document: Document) -> int:
        """
        Returns number of appearances of term for given document.

        Args:
            term: String.
            document: Loaded document object.

        Returns: Number of appearances of term for given document.
        """
        return self.count_term_freq(document).get(term, 0)

    def vectorize(self, document: Document, comp_documents: List[Document]) -> np.ndarray:
        """
        Calculates TFIDF for given documents and returns it as matrix (numpy array).

        Args:
            document: Loaded document.
            comp_documents: List of loaded documents.

        Returns: Matrix (numpy array) representing TFIDF, ordered by the
        document's term first-occurrence order.
        """
        term_frequencies = self.count_term_freq(document)
        doc_number = len(comp_documents)
        # PERF FIX: tokenize and count each comparison document exactly once.
        # The original called count_term_doc_freq(term, comp_doc) inside the
        # term loop, re-counting every document once per term.
        comp_counters = [self.count_term_freq(comp_doc) for comp_doc in comp_documents]
        term_docs_frequencies = {}
        for term in term_frequencies:
            # Document frequency starts at 1 (not 0), reproducing the
            # original smoothing exactly. NOTE(review): a term present in
            # every comparison document therefore gets doc_number + 1 and a
            # negative IDF -- confirm this is the intended behaviour.
            term_docs_frequencies[term] = 1 + sum(
                1 for counter in comp_counters if counter.get(term, 0))
        _tfidf = list()
        for term, term_freq in term_frequencies.items():
            term_doc_freq = term_docs_frequencies.get(term)
            inverse_term_freq = self.calculate_inverse_doc_freq(doc_number, term_doc_freq)
            _tfidf.append(self.calculate_tfidf(term_freq, inverse_term_freq))
        return np.array(_tfidf)
| nilq/baby-python | python |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.models import User
from django.http import Http404,HttpResponse
from django.contrib import messages
from django.db.models import Q
from .forms import *
from .models import *
from .email import *
from .delete_notify import *
from .utils import *
from django.urls import reverse
import xlwt,datetime
from notifications.signals import notify
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt #checked
def home(request):
    """Landing Page: authenticated users go straight to the homepage;
    anonymous visitors see the intro page and may submit the contact form."""
    if request.user.is_authenticated:
        return redirect(reverse('homepage'))
    else:
        if request.method=='POST':
            # Contact form: relay name/email/message to the portal mailbox.
            name=request.POST.get('name')
            email = request.POST.get('email')
            message =f"{name} \n {email} \n {request.POST.get('message')} "
            mail_subject = 'Contact us : Sent by ' + name
            # send_mail returns the number of messages delivered (0 on failure).
            if(send_mail(mail_subject,message,'guru.online.classroom.portal@gmail.com',['guru.online.classroom.portal@gmail.com'])):
                messages.add_message(request,messages.SUCCESS,'Your message sent successfully.')
            else:
                messages.add_message(request,messages.ERROR,"An Error while sending your message.\
                Please try again or contact using given contact details.")
    return render(request,'intro.html')
@login_required#checked
def homepage(request):
    """
    Create a classroom, Join A classroom,
    """
    user = request.user
    # --- Join flow: a join key is the classroom's unique_id. ---
    if request.POST.get('join_key'):
        join_key = request.POST.get('join_key')
        try:
            classroom = Classroom.objects.get(unique_id=join_key)
        except Classroom.DoesNotExist:
            messages.add_message(request, messages.WARNING,"No such classroom exists.")
            return redirect(reverse('homepage'))
        if classroom.members.all().filter(username=user.username).exists():
            messages.add_message(request, messages.INFO,"You are already member of this class.")
            return redirect(reverse('homepage'))
        if classroom.need_permission:
            # Restricted classroom: queue the user and notify all admins.
            classroom.pending_members.add(user)
            messages.add_message(request, messages.SUCCESS,"Your request is sent.\
            You can access classroom material when someone lets you in.")
            user.profile.pending_invitations.add(classroom)
            notify.send(sender=user,verb=f"{user.username} wants to join {classroom.class_name}",recipient=classroom.special_permissions.all(),
                        url=reverse('classroom_page',kwargs={
                            'unique_id':classroom.unique_id
                        }
                        ))
        else:
            # Open classroom: join immediately and tell existing members.
            recipients = User.objects.filter(username__in=classroom.members.values_list('username', flat=True))
            url = reverse('profile',kwargs={'username':user.username})
            notify.send(sender=user,recipient=recipients,verb=f"{request.user.username} has joined {classroom.class_name}",url= url)
            classroom.members.add(user)
        return redirect(reverse('homepage'))
    # --- Create flow: the creator becomes both member and admin. ---
    if request.method=='POST':
        createclassform = CreateclassForm(request.POST ,request.FILES)
        if createclassform.is_valid():
            classroom=createclassform.save(commit=False)
            classroom.unique_id = unique_id()  # generated key used for invitations
            classroom.created_by = request.user
            classroom.save()
            classroom.members.add(request.user)
            classroom.special_permissions.add(request.user)
            return redirect(reverse('homepage'))
    else:
        createclassform = CreateclassForm()
    #queryset
    params={
        'createclassform':createclassform,
    }
    return render(request,'homepage.html',params)
@login_required#checked
def admin_status(request,unique_id,username):
    """
    Toggles admin status of users from a classroom.
    Only an existing admin may grant or revoke admin rights; the classroom
    creator's admin status can never be removed.
    """
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    admin = classroom.special_permissions.filter(username=request.user.username).exists()
    if admin:
        # check == True means the target user is currently an admin.
        check = classroom.special_permissions.filter(username = username).exists()
        user = User.objects.get(username=username)
        url = reverse('classroom_page',kwargs={ 'unique_id':unique_id})
        if check:
            # The creator's admin rights are immutable.
            if classroom.created_by == user:
                messages.add_message(request,messages.WARNING,"This user have created\
                this class. He can't be dropped")
                return redirect(reverse('classroom_page',kwargs={'unique_id':classroom.unique_id}))
            classroom.special_permissions.remove(user)
            notify.send(sender=request.user,recipient = user,verb=f"You are no longer admin of {classroom.class_name}",url=url)
        else:
            classroom.special_permissions.add(user)
            notify.send(sender=request.user,recipient = user,verb=f"Now you are admin of {classroom.class_name}",url=url)
        return redirect(reverse('classroom_page',kwargs={'unique_id':classroom.unique_id}))
    else:
        raise Http404()
@login_required#checked
def classroom_page(request,unique_id):
    """
    Classroom Setting Page: membership lists, pending join requests, and the
    classroom-update form.
    """
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    # NOTE(review): member_check presumably raises/redirects for non-members;
    # if it simply returns False this view returns None -- confirm its contract.
    if member_check(request.user, classroom):
        pending_members = classroom.pending_members.all()
        admins = classroom.special_permissions.all()
        members = admins | classroom.members.all()  # queryset union; deduplicated below
        is_admin = classroom.special_permissions.filter(username = request.user.username).exists()
        #classroom_update
        if request.method=="POST":
            form = CreateclassForm(request.POST,request.FILES,instance=classroom)
            if form.is_valid():
                form.save()
                return redirect(reverse('subjects',kwargs={'unique_id':classroom.unique_id}))
        else:
            form = CreateclassForm(instance=classroom)
        params={
            'members':members.distinct(),
            'admins':admins,
            'pending_members':pending_members,
            'classroom':classroom,
            'is_admin':is_admin,
            'form':form,
        }
        return render(request,'classroom_settings.html',params)
@login_required#checked
def subjects(request, unique_id,form=None):
    """
    Enlists all the subjects of a classroom ,
    subjects can be added by admins
    """
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    if member_check(request.user,classroom):
        #querysets
        members = classroom.members.all()
        subjects = Subject.objects.filter(classroom=classroom)
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        # Admins can add a subject and assign a teacher to it
        if admin_check and request.method=="POST":
            form = SubjectForm(request.POST)
            # Teacher username comes from a <select> of classroom members.
            teacher = User.objects.get(username=request.POST.get('teacher'))
            if form.is_valid():
                subject=form.save(commit=False)
                subject.classroom=classroom
                subject.teacher = teacher
                subject.save()
                # The assigned teacher can always upload notes for the subject.
                subject.upload_permission.add(teacher)
                recipients=User.objects.filter(username__in=classroom.members.values_list('username', flat=True))
                url = reverse('subjects',kwargs={'unique_id':classroom.unique_id})
                notify.send(sender=request.user,verb=f"subject {subject.subject_name} added in {classroom.class_name}",
                            recipient=recipients,url=url)
                messages.add_message(request,messages.INFO,f"A new Subject {subject.subject_name} added")
                classroom.teacher.add(teacher)
                return redirect(url)
        else:
            form = SubjectForm()
        params = {
            'subjects':subjects,
            'form':form,
            'classroom':classroom,
            'is_admin':admin_check,
            'members':members
        }
        return render(request,'subjects_list.html',params)
@login_required#checked
def notes_list(request,unique_id,subject_id,form = None):
    """List a subject's notes (searchable, paginated). Admins, the subject
    teacher, and users with upload permission may add a note via POST."""
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    if member_check(request.user, classroom):
        # querysets
        subject = Subject.objects.get(id=subject_id)
        notes = Note.objects.filter(subject_name=subject).order_by('-id')
        if request.GET.get('search'):
            search = request.GET.get('search')
            notes = notes.filter(Q(topic__icontains=search) | Q(description__icontains=search))
        query, page_range = pagination(request, notes)
        upload_permission = subject.upload_permission.all().filter(username=request.user.username).exists()
        admin_check = classroom.special_permissions.filter(username=request.user.username).exists()
        is_teacher = admin_check or upload_permission or request.user == subject.teacher
        # Add-note form handling
        if is_teacher:
            if request.method == "POST":
                form = NoteForm(request.POST, request.FILES)
                # BUG FIX: was `if form.is_valid:` -- a truthy bound method, so
                # validation never ran and invalid notes were saved. is_valid
                # must be CALLED.
                if form.is_valid():
                    data = form.save(commit=False)
                    data.subject_name = subject
                    data.uploaded_by = request.user
                    data.save()
                    messages.add_message(request, messages.SUCCESS, f"Your Note {data.topic} is added")
                    return redirect(reverse('resources', kwargs={'unique_id': classroom.unique_id, 'subject_id': subject.id}))
            else:
                form = NoteForm()
        params = {
            'form': form,
            'subject': subject,
            'classroom': classroom,
            'notes': notes,
            'page': query,
            'page_range': page_range,
            'is_teacher': is_teacher,
        }
        return render(request, 'notes/notes_list.html', params)
@login_required#checked
def note_details(request, unique_id, subject_id, id, form = None):
    """Display a single note; admins, the subject teacher, or the uploader
    may update it in place via POST."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    if member_check(request.user,classroom):
        #queryset
        subject = Subject.objects.get(id=subject_id)
        note = Note.objects.get(id=id)
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        is_teacher = admin_check or request.user==subject.teacher or note.uploaded_by == request.user
        if is_teacher:
            if request.method=="POST":
                form = NoteForm(request.POST,request.FILES,instance=note)
                if form.is_valid():
                    # NOTE(review): setting form.file from POST looks like a
                    # no-op -- ModelForm.save() already handles the upload.
                    # Confirm before removing.
                    form.file = request.POST.get('file')
                    form.save()
                    return redirect(reverse('read_note',kwargs={
                        'unique_id':classroom.unique_id,
                        'subject_id':subject.id,
                        'id':note.id
                    }))
            else:
                form= NoteForm(instance=note)
        params={
            'subject':subject,
            'updateform':form,
            'note':note,
            'classroom':classroom,
            'is_teacher': is_teacher,
            'extension':extension_type(note.file)  # drives inline preview choice in template
        }
        return render(request,'notes/note_detail.html',params)
@login_required#checked
def note_delete(request,unique_id,subject_id,id):
    """Delete a note. Allowed for classroom admins, the note's uploader, or
    the subject teacher; anyone else gets a 404."""
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    subject = get_object_or_404(Subject, id=subject_id)
    # BUG FIX: the note was previously looked up with id=subject_id, which
    # deleted the wrong note (or 404'd) whenever note id != subject id.
    note = get_object_or_404(Note, id=id)
    admin_check = classroom.special_permissions.filter(username=request.user.username).exists()
    is_teacher = admin_check or note.uploaded_by == request.user or request.user == subject.teacher
    if is_teacher:
        note.delete()
        note_delete_notify(request, note)
        return redirect(reverse('resources', kwargs={'unique_id': classroom.unique_id, 'subject_id': subject.id}))
    else:
        raise Http404()
@login_required#checked
def assignments_list(request ,unique_id, subject_id, form=None):
    """List a subject's assignments (searchable, paginated); admins and the
    subject teacher may create a new assignment via POST."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    if member_check(request.user,classroom):
        subject = Subject.objects.get(id=subject_id)
        assignments = Assignment.objects.filter(subject_name=subject).reverse()
        search = request.GET.get('search')
        if search:
            assignments = assignments.filter(Q(topic__icontains=search)|Q(description__icontains=search))
        query,page_range = pagination(request,assignments)
        assignments=query.object_list  # render only the current page
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        is_teacher = admin_check or subject.teacher==request.user
        if is_teacher:
            if request.method=="POST":
                form = AssignmentForm(request.POST,request.FILES)
                if form.is_valid():
                    assignment = form.save(commit=False)
                    assignment.subject_name = subject
                    assignment.assigned_by = request.user
                    assignment.save()
                    return redirect(reverse('assignments',kwargs=
                                            {'unique_id':classroom.unique_id,'subject_id':subject.id,}))
            else:
                form= AssignmentForm()
        params={
            'form':form,
            'subject':subject,
            'classroom':classroom,
            'assignments':assignments,
            'page':query,
            'page_range':page_range,
        }
        return render(request,'assignments/assignment_list.html',params)
@login_required#checked
def assignment_details(request,unique_id,subject_id,id):
    """Assignment detail page. Teachers/admins get an edit form; students get
    a submission form (only while the submission link is open)."""
    updateform = form = submission = submission_object = None
    classroom = Classroom.objects.get(unique_id=unique_id)
    if member_check(request.user, classroom):
        subject = Subject.objects.get(id=subject_id)
        assignment = Assignment.objects.get(id=id)
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        is_teacher = admin_check or request.user==subject.teacher
        if is_teacher:
            # Teachers/admins: edit the assignment itself.
            if request.method=="POST":
                updateform = AssignmentForm(request.POST,request.FILES,instance=assignment)
                if updateform.is_valid():
                    updateform.save()
                    return redirect(reverse('assignment_page',kwargs={
                        'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id}))
            else:
                updateform= AssignmentForm(instance=assignment)
        #submitting assignment
        else:
            # Students: re-submitting updates the existing Submission row.
            submission_object = Submission.objects.filter(Q(submitted_by=request.user) & Q(assignment=assignment)).first()
            if request.method=="POST":
                # submission_link acts as an open/closed switch set by the teacher.
                if assignment.submission_link:
                    form = SubmitAssignmentForm(request.POST, request.FILES,instance=submission_object)
                    if form.is_valid():
                        data=form.save(commit=False)
                        data.submitted_by=request.user
                        data.assignment= assignment
                        data.save()
                        assignment.submitted_by.add(request.user)
                        return redirect(reverse('assignment_page',kwargs=
                                                {'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id}))
                else:
                    messages.add_message(request,messages.WARNING,"Submission link is closed.")
            else:
                form = SubmitAssignmentForm(instance=submission_object)
        params={
            'assignment':assignment,
            'extension':extension_type(assignment.file),
            'subject':subject,
            'form':form,
            'updateform':updateform,
            'classroom':classroom,
            'submissionform':form,
            'submission':submission,
            'submission_object':submission_object,
            'is_teacher':is_teacher,
        }
        return render(request,'assignments/assignment_detail.html',params)
@login_required#checked
def assignment_handle(request,unique_id,subject_id,id):
    """Teacher/admin dashboard for one assignment: assign marks, list on-time/
    late/missing submissions, send reminders, and toggle the submission link."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    is_admin = classroom.special_permissions.filter(username = request.user.username).exists()
    subject = get_object_or_404(Subject,id=subject_id)
    is_teacher = request.user==subject.teacher
    if is_admin or is_teacher:
        assignment = Assignment.objects.get(id=id)
        # --- Marking: grade one submission, notify and email the student. ---
        if request.POST.get('marks_assigned'):
            id = request.POST.get('id')  # NOTE: shadows the assignment id parameter
            submission = Submission.objects.get(id=id)
            marks = request.POST.get('marks_assigned')
            submission.marks_assigned = marks
            submission.save()
            url = reverse('assignment_page',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id})
            notify.send(sender=request.user,verb=f'You got {marks} for your assignment {assignment.topic}',recipient=submission.submitted_by,url =url)
            email_marks(request,submission,assignment)
            return redirect(reverse('assignment-handle',kwargs={
                'unique_id':classroom.unique_id,
                'subject_id':subject.id,
                'id':assignment.id
            }))
        #list of submissions
        all_submissions = Submission.objects.filter(assignment=assignment)
        late_submissions = all_submissions.filter(submitted_on__gt=assignment.submission_date)
        ontime_submissions = all_submissions.filter(submitted_on__lte=assignment.submission_date)
        # Students = members who are not teachers; missing = students minus submitters.
        members = classroom.members.all()
        teachers = classroom.teacher.all()
        students = members.difference(teachers)
        submitted = assignment.submitted_by.all()
        not_submitted = students.difference(submitted)
        # --- Reminder: notify + email everyone who hasn't submitted. ---
        if request.POST.get('send_reminder')=='1':
            recepients = User.objects.filter(username__in=not_submitted.values_list('username', flat=True))
            url = reverse('assignment_page',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id})
            notify.send(sender=request.user,verb=f"Reminder to submit your assignment",recipient=recepients,url=url)
            send_reminder(request,assignment,not_submitted.values_list('email', flat=True))
        # --- Open/close the submission link. ---
        if request.POST.get('toggle_link'):
            if assignment.submission_link:
                assignment.submission_link = False
            else:
                assignment.submission_link = True
            assignment.save()
        params = {
            'assignment':assignment,
            'all_submissions':all_submissions,
            'late_submissions':late_submissions,
            'ontime_submissions':ontime_submissions,
            'is_teacher':is_teacher,
            'submitted':submitted,
            'not_submitted':not_submitted,
            'subject':subject,
            'classroom':classroom,
        }
        return render(request,'assignments/assignment_handle.html',params)
    else:
        raise Http404()
@login_required#checked
def assignment_delete(request,unique_id,subject_id,id):
    """Delete an assignment (classroom admins or the subject teacher only)."""
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    subject = get_object_or_404(Subject, id=subject_id)
    assignment = get_object_or_404(Assignment, id=id)
    admin_check = classroom.special_permissions.filter(username=request.user.username).exists()
    is_teacher = admin_check or request.user == subject.teacher
    if is_teacher:
        assignment.delete()
        assignment_delete_notify(request, assignment)
        # BUG FIX: previously reversed 'assignment_page' without its required
        # `id` kwarg, raising NoReverseMatch after every delete (and the
        # detail page no longer exists anyway). Redirect to the list view,
        # mirroring note_delete's redirect to its list.
        return redirect(reverse('assignments', kwargs={'unique_id': classroom.unique_id, 'subject_id': subject.id}))
    else:
        raise Http404()
@login_required#checked
def announcements_list(request, unique_id, subject_id,form = None):
    """List a subject's announcements (searchable, paginated); admins and the
    subject teacher may post a new announcement."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    if member_check(request.user, classroom):
        #querysets
        subject = get_object_or_404(Subject,id=subject_id)
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        is_teacher = admin_check or request.user==subject.teacher
        announcements = Announcement.objects.all().filter(subject_name=subject).reverse()
        if request.GET.get('search'):
            search = request.GET.get('search')
            announcements = announcements.filter(Q(subject__icontains=search)|Q(description__icontains=search))
        query,page_range = pagination(request,announcements)
        announcements=query.object_list  # render only the current page
        #announcement form handling
        if is_teacher:
            if request.method=="POST":
                form = AnnouncementForm(request.POST,request.FILES)
                if form.is_valid():
                    announcement = form.save(commit=False)
                    announcement.subject_name = subject
                    announcement.announced_by = request.user
                    announcement.save()
                    return redirect(reverse('announcement',kwargs=
                                            {'unique_id':classroom.unique_id,'subject_id':subject.id}))
            else:
                form= AnnouncementForm()
        params={
            'form':form,
            'subject':subject,
            'classroom':classroom,
            'announcements':announcements,
            'page':query,
            'page_range':page_range,
            'is_teacher':is_teacher
        }
        return render(request,'announcements/announcement_list.html',params)
@login_required#checked
def announcement_details(request,unique_id,subject_id,id,form = None):
    """Show one announcement; admins and the subject teacher may edit it."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    if member_check(request.user, classroom):
        subject = get_object_or_404(Subject,id=subject_id)
        announcement = get_object_or_404(Announcement,id=id)
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        is_teacher = admin_check or request.user==subject.teacher
        #announcement update handling
        if is_teacher:
            if request.method=="POST":
                form = AnnouncementForm(request.POST,request.FILES,instance=announcement)
                if form.is_valid():
                    announcementform = form.save(commit=False)
                    announcementform.subject_name = subject
                    announcementform.save()
                    return redirect(reverse('announcement_page',kwargs={
                        'unique_id':classroom.unique_id,
                        'subject_id':subject.id,
                        'id':announcement.id
                    }))
            else:
                form= AnnouncementForm(instance=announcement)
        params={
            'announcement':announcement,
            'extension':extension_type(announcement.file),  # drives preview widget in template
            'subject':subject,
            'updateform':form,
            'classroom':classroom,
            'is_teacher':is_teacher,
        }
        return render(request,'announcements/announcement_details.html',params)
@login_required #checked
def announcement_delete(request,unique_id,subject_id,id):
    """Delete an announcement (admins or the subject teacher); notifies the
    classroom afterwards. Others get a 404."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    subject = get_object_or_404(Subject,id=subject_id)
    announcement = get_object_or_404(Announcement,id=id)
    admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
    is_teacher = admin_check or request.user==subject.teacher
    #notify
    if is_teacher:
        announcement.delete()
        # Notification is sent after deletion; helper uses the in-memory copy.
        announcement_delete_notify(request,announcement)
        return redirect(reverse('announcement',kwargs={
            'unique_id':classroom.unique_id,
            'subject_id':subject.id
        }))
    else:
        raise Http404()
@login_required #checked
def subject_details(request,unique_id, subject_id):
    """Subject settings page: member/permission lists, activity feed, and an
    edit form for the subject itself."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    if member_check(request.user, classroom):
        subject = get_object_or_404(Subject,id=subject_id)
        admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
        upload_permission = subject.upload_permission.all()
        members = classroom.members.all()
        admins = classroom.special_permissions.all()
        teachers = classroom.teacher.all()
        teacher = subject.teacher
        # Show admins + non-teacher members, then make sure this subject's
        # teacher is present exactly once.
        members = list((admins| members.difference(teachers)).distinct())
        if teacher not in members:
            members.append(teacher)
        activities = Subject_activity.objects.filter(subject=subject).reverse()
        query,page_range = pagination(request,activities)
        activities=query.object_list
        if request.method=='POST':
            form = SubjectEditForm(request.POST , request.FILES,instance=subject)
            if form.is_valid():
                form.save()
        else:
            form = SubjectEditForm(instance=subject)
        params={
            'subject':subject,
            'classroom':classroom,
            'is_teacher':admin_check,
            'members':members,
            'upload_permissions':upload_permission,
            'admins':admins,
            'teacher':teacher,
            'page':query,
            'page_range':page_range,
            'form':form
        }
        return render(request,'subject_details.html',params)
@login_required #checked
def delete_subject(request,unique_id, subject_id):
    """Delete a subject (admins only) and notify every classroom member."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    subject = get_object_or_404(Subject,id=subject_id)
    admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
    if admin_check:
        # Notify before deleting so subject fields are still readable.
        verb = "A Subject "+subject.subject_name + " is deleted by "+ request.user.username
        url =reverse('subjects',kwargs={'unique_id':classroom.unique_id})
        recipient = User.objects.filter(username__in=classroom.members.values_list('username', flat=True))
        notify.send(sender=request.user,verb=verb,recipient=recipient,url=url)
        subject.delete()
        return redirect(url)
    else:
        raise Http404()
@login_required #checked
def remove_member(request,unique_id,username):
    """Remove a user from the classroom. Admins may remove anyone except the
    creator; any user may remove themselves (leave the class)."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
    remove_this_user = get_object_or_404(User,username=username)
    url = reverse('classroom_page',kwargs={'unique_id':classroom.unique_id})
    if admin_check or request.user==remove_this_user:
        # The creator can never be removed.
        if remove_this_user == classroom.created_by:
            messages.add_message(request,messages.WARNING,"This user can't be dropped. He has created this classroom.")
            return redirect(url)
        # Strip every role the user may hold in this classroom.
        classroom.members.remove(remove_this_user)
        classroom.teacher.remove(remove_this_user)
        classroom.special_permissions.remove(remove_this_user)
        verb = f"You are removed from {classroom.class_name}"
        notify.send(sender=request.user,verb=verb,recipient=remove_this_user,url='#')
        # Self-removal: the user can no longer see the classroom page.
        if request.user==remove_this_user:
            return redirect(reverse('homepage'))
        else:
            return redirect(url)
    else:
        raise Http404()
@login_required #checked
def accept_request(request,unique_id,username):
    """Approve a pending join request (classroom admins only): moves the user
    from pending to members and notifies them."""
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    admin_check = classroom.special_permissions.filter(username=request.user.username).exists()
    if admin_check:
        user = get_object_or_404(User, username=username)
        classroom.members.add(user)
        classroom.pending_members.remove(user)
        user.profile.pending_invitations.remove(classroom)
        url = reverse('subjects', kwargs={'unique_id': classroom.unique_id})
        # BUG FIX: user-facing typo "Yor" -> "Your".
        verb = f'Your request to join classroom {classroom.class_name} is accepted'
        notify.send(sender=request.user, verb=verb, recipient=user, url=url)
        return redirect(reverse('classroom_page', kwargs={'unique_id': classroom.unique_id}))
    else:
        # BUG FIX: non-admins previously fell through and the view returned
        # None (server error); deny explicitly like the sibling views.
        raise Http404()
@login_required#checked
def delete_request(request,unique_id,username):
    """Reject a pending join request (classroom admins only) and notify the
    requesting user."""
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    admin_check = classroom.special_permissions.filter(username=request.user.username).exists()
    if admin_check:
        user = User.objects.get(username=username)
        classroom.pending_members.remove(user)
        # BUG FIX: string was missing the f-prefix, so users literally saw
        # "{classroom.class_name}" in the notification.
        verb = f"Your request to join class {classroom.class_name} is rejected"
        url = "#"
        notify.send(sender=request.user, verb=verb, recipient=user, url=url)
        return redirect(reverse('classroom_page', kwargs={'unique_id': classroom.unique_id}))
    else:
        # BUG FIX: non-admins previously got an implicit None return
        # (server error) instead of an explicit denial.
        raise Http404()
@login_required #checked
def manage_upload_permission(request,unique_id,subject_id,username):
    """Toggle a member's permission to upload notes in a subject, notifying
    the affected user either way."""
    classroom = Classroom.objects.get(unique_id=unique_id)
    if member_check(request.user,classroom):
        user = User.objects.get(username=username)
        subject = Subject.objects.get(id=subject_id)
        # check == True means the user currently holds upload permission.
        check = subject.upload_permission.filter(username = user.username).exists()
        url = reverse('subjects',kwargs={'unique_id':classroom.unique_id})
        if check:
            verb = f"You can't upload notes in {subject.subject_name} of {classroom.class_name} anymore"
            notify.send(sender=request.user,verb=verb,recipient=user,url = url)
            subject.upload_permission.remove(user)
        else:
            verb = f"You got permission to upload notes in {subject.subject_name} of {classroom.class_name}"
            subject.upload_permission.add(user)
            notify.send(sender=request.user,verb=verb,recipient=user,url = url)
        return redirect(reverse('subject_details',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id}))
@login_required#checked
def unsend_request(request,unique_id):
    """Withdraw the current user's pending join request for a classroom.

    404s when the user has no pending request for this classroom.
    """
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    # Guard clause: nothing to withdraw -> not found.
    if classroom not in request.user.profile.pending_invitations.all():
        raise Http404()
    # Remove both sides of the pending relationship.
    request.user.profile.pending_invitations.remove(classroom)
    classroom.pending_members.remove(request.user)
    return redirect(reverse('profile', kwargs={'username': request.user.username}))
@login_required#checked
def export_marks(request,unique_id,subject_id,id):
    """Export an assignment's submissions (user, time, marks) as an .xls
    download. Admins and the subject teacher only."""
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
    subject = get_object_or_404(Subject,id=subject_id)
    if admin_check or request.user==subject.teacher:
        assignment = get_object_or_404(Assignment,id=id)
        response = HttpResponse(content_type='application/ms-excel')
        response['Content-Disposition'] = f'attachment; filename="mark_sheet of {assignment.topic}.xls"'
        wb = xlwt.Workbook(encoding='utf-8')
        ws = wb.add_sheet('Submissions')
        # Sheet header, first row
        row_num = 0
        font_style = xlwt.XFStyle()
        font_style.font.bold = True
        columns = ['Username','submitted_on','marks_assigned']
        for col_num in range(len(columns)):
            ws.write(row_num, col_num, columns[col_num], font_style)
        # Sheet body, remaining rows
        font_style = xlwt.XFStyle()
        rows = Submission.objects.all().filter(assignment=assignment).values_list('submitted_by','submitted_on','marks_assigned')
        # xlwt cannot serialize datetimes directly; stringify them first.
        rows = [[x.strftime("%Y-%m-%d %H:%M") if isinstance(x, datetime.datetime) else x for x in row] for row in rows ]
        for row in rows:
            row_num += 1
            # values_list returns the user's pk; resolve it to a username.
            row[0]=str(User.objects.get(id=row[0]))
            for col_num in range(len(row)):
                ws.write(row_num, col_num, row[col_num], font_style)
        wb.save(response)
        return response
    else:
        raise Http404()
def features(request):
    """Render the static features overview page."""
    return render(request, 'features.html')
def privacy(request):
    """Render the static privacy policy page."""
    return render(request, 'privacy.html')
import healpy as hp
import numpy as np
def iqu2teb(IQU, nside, lmax=None):
    # Convert Stokes (I, Q, U) maps to scalar (T, E, B) maps: polarized
    # spherical-harmonic analysis followed by unpolarized synthesis.
    alms = hp.map2alm(IQU, lmax=lmax, pol=True)
    return hp.alm2map(alms, nside=nside, lmax=lmax, pol=False)
def teb2iqu(TEB, nside, lmax=None):
    # Inverse of iqu2teb: unpolarized analysis of (T, E, B) maps followed by
    # polarized synthesis back to Stokes (I, Q, U).
    alms = hp.map2alm(TEB, lmax=lmax, pol=False)
    return hp.alm2map(alms, nside=nside, lmax=lmax, pol=True)
def messenger_1(data_vec, T_pixel, n_iter, s_cov_diag_grade, nside, noise_bar_diag, noise_diag):
    # Messenger-field Wiener filter for a complex map (Q + iU packed into one
    # complex array): alternate a pixel-space step (data/noise vs. messenger
    # covariance T) with a harmonic-space step (signal covariance vs. T).
    s = np.zeros(data_vec.shape, dtype='complex')
    # Messenger covariance in harmonic space; assumes T_pixel is uniform so
    # only T_pixel[0] is used -- TODO confirm.
    # NOTE(review): np.float is removed in NumPy >= 1.20; left untouched here.
    T_harmonic_grade = np.ones(hp.map2alm(hp.ud_grade(data_vec.real, nside),
                                          lmax=nside * 3 - 1).shape) * T_pixel[0] / np.float(nside * nside)
    # Precomputed diagonal Wiener-style operators (ratios of covariances).
    harmonic_operator = (s_cov_diag_grade / (s_cov_diag_grade + T_harmonic_grade))
    pixel_operator_signal = (noise_bar_diag / (noise_bar_diag + T_pixel))
    pixel_operator_data = (T_pixel / (T_pixel + noise_diag))
    for i in range(n_iter):
        # Pixel-space update of the messenger field t.
        t = pixel_operator_data * data_vec + pixel_operator_signal * s
        # t = hp.ud_grade(t,512)
        # Harmonic-space update: filter real and imaginary parts separately.
        t_alm1 = hp.map2alm(t.real, lmax=3 * nside - 1)
        t_alm2 = hp.map2alm(t.imag, lmax=3 * nside - 1)
        s1 = hp.alm2map(harmonic_operator * t_alm1, nside=nside, lmax=nside * 3 - 1, verbose=False)
        s2 = hp.alm2map(harmonic_operator * t_alm2, nside=nside, lmax=nside * 3 - 1, verbose=False)
        s = s1 + 1j * s2
        # s = hp.ud_grade(s, 128)
        # _ = hp.mollview(s.imag), plt.show()
        # Progress/diagnostic output: variance of the current signal estimate.
        print(np.var(s))
    return s
def messenger_2(data_vec, s_cov_diag, T_ell, noise_diag, T_pixel, noise_bar_diag, nside, n_iter):
    # Messenger-field Wiener filter working on concatenated [Q, U] vectors and
    # polarized (E/B) harmonic transforms, rather than a complex-packed map.
    data_vec_QU = np.concatenate([data_vec.real, data_vec.imag])
    s = np.zeros(data_vec_QU.shape, dtype='complex')
    convergence_test = [0.]  # variance history; only appended to, never checked
    # Diagonal Wiener-style operators (ratios of covariances).
    harmonic_operator = s_cov_diag / (s_cov_diag + T_ell)
    pixel_operator_signal = (noise_bar_diag / (noise_bar_diag + T_pixel))
    pixel_operator_data = (T_pixel / (T_pixel + noise_diag))
    for i in range(n_iter):
        # Pixel-space update of the messenger field.
        t = pixel_operator_data * data_vec_QU + pixel_operator_signal * s  # here t = concat[t_Q, t_U]
        t = np.real(t)
        # Build an (I, Q, U) triple with a zeroed temperature map so the
        # polarized transforms can be used.
        t = [t[int(t.shape[0] / 2):] * 0., t[:int(t.shape[0] / 2)], t[int(t.shape[0] / 2):]]  # here t = {t_I = 0, t_Q, t_U}
        t = hp.ud_grade(t, nside)  # now upgrade
        t_alm = hp.map2alm(t, lmax=3 * (nside) - 1, pol=True)
        # Harmonic update acts on the concatenated (E, B) alm vector.
        s = harmonic_operator * np.concatenate([t_alm[1], t_alm[2]])
        s = [s[int(s.shape[0] / 2):] * 0., s[:int(s.shape[0] / 2)], s[int(s.shape[0] / 2):]]
        # Diagnostic output: variances of the zeroed-T, E, and B components.
        print(np.var(s[0]), np.var(s[1]), np.var(s[2]))
        convergence_test.append(np.var(s[1]))
        s = hp.alm2map(s, nside=nside, lmax=nside * 3 - 1, verbose=False, pol=True)
        # s_qu = np.copy(s)
        s = np.concatenate([s[1], s[2]])
    return s
# Copyright 2014 Pierre de Buyl
#
# This file is part of pmi-h5py
#
# pmi-h5py is free software and is licensed under the modified BSD license (see
# LICENSE file).
import test_pmi_mod
# Exercise the pmi-h5py wrapper end-to-end: create the HDF5 file with a
# 1024-element dataset, fill it, and close the file handle.
mytest = test_pmi_mod.MyTest('myllfile.h5', 1024)
mytest.fill()
mytest.close()
| nilq/baby-python | python |
#!/usr/bin/env python
#-*- mode: Python;-*-
import ConfigParser
import json
import logging
import os
import sys
import tempfile
import traceback
import click
from requests.exceptions import HTTPError
from ecxclient.sdk import client
import util
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'commands'))
class MyCLI(click.MultiCommand):
    """click MultiCommand that discovers subcommands lazily from the
    commands/ folder: each cmd_<name>.py file exposing a `cli` object
    becomes subcommand <name>."""

    def list_commands(self, ctx):
        # One subcommand per cmd_*.py file; strip the "cmd_" prefix and
        # ".py" suffix.
        rv = []
        for filename in os.listdir(cmd_folder):
            if filename.endswith('.py') and filename.startswith('cmd_'):
                rv.append(filename[4:-3])
        rv.sort()
        return rv

    def get_command(self, ctx, name):
        # Import commands/cmd_<name>.py and return its `cli` attribute.
        # Returning None on ImportError makes click report an unknown command.
        try:
            if sys.version_info[0] == 2:
                # click on Python 2 requires byte-string command names.
                name = name.encode('ascii', 'replace')
            mod = __import__('ecxclient.cli.commands.cmd_' + name, None, None, ['cli'])
        except ImportError:
            logging.error(traceback.format_exc())
            return
        return mod.cli
def get_existing_session(username):
    """Return the cached ECX session id for *username* from the config file.

    Raises a friendly "provide credentials" error when no session is stored.
    BUG FIX: a present section without a 'sessionid' option previously
    escaped as an uncaught ConfigParser.NoOptionError; both missing-section
    and missing-option now yield the same friendly error.
    """
    parser = ConfigParser.RawConfigParser()
    parser.read([cfgfile])  # cfgfile is a module global set by main()
    try:
        return parser.get(username, 'sessionid')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        raise Exception('Please provide login credentials.')
def save_config(username, sessionid):
    """Persist the session id for *username*, overwriting the config file.

    NOTE(review): 'wb' + ConfigParser.write is Python-2 specific; under
    Python 3 writing str to a binary file raises TypeError -- confirm the
    target interpreter (the ConfigParser import suggests py2).
    """
    parser = ConfigParser.RawConfigParser()
    parser.add_section(username)
    parser.set(username, 'sessionid', sessionid)
    parser.write(open(cfgfile, 'wb'))
@click.command(cls=MyCLI)
@click.option('--url', envvar='ECX_URL', default='http://localhost:8082', metavar='URL', help='ECX url.')
@click.option('--user', envvar='ECX_USER', default='admin', metavar='USERNAME', help='ECX user.')
@click.option('--passwd', envvar='ECX_PASSWD', default=None, metavar='PASSWORD', help='ECX password.')
@click.option('--json', is_flag=True, help='Show raw json.')
@click.option('--links', is_flag=True, help='Include links in output. Implies --json option.')
@click.version_option('0.43')
@util.pass_context
def cli(ctx, url, user, passwd, json, links):
    """ecx is a command line tool with which ECX operations
    can be carried out.
    """
    # With explicit credentials, log in fresh and cache the session id;
    # otherwise reuse the session stored by a previous login.
    if user and passwd:
        ctx.ecx_session = client.EcxSession(url, username=user, password=passwd)
        save_config(user, ctx.ecx_session.sessionid)
    else:
        ctx.ecx_session = client.EcxSession(url, sessionid=get_existing_session(user))
    ctx.json = json
    ctx.links = links
    # Links are only rendered in raw-json mode, so --links implies --json.
    if ctx.links:
        ctx.json = True
# cli = MyCLI(help='Script to perform ECX operations. ')
def init_logging():
    """Send all log output (DEBUG and up) to a freshly created temp file."""
    handle, logfile = tempfile.mkstemp(suffix='.txt', prefix='ecxclient')
    os.close(handle)  # logging opens the file itself; release our descriptor
    logging.basicConfig(
        filename=logfile,
        level=logging.DEBUG,
        format='%(asctime)-15s: %(levelname)s: %(message)s',
    )
def process_http_error(e):
    """If *e* is an HTTPError with a body, log it and best-effort print the
    server's JSON error (id + description) in red. Non-HTTP errors and
    unparsable bodies are ignored silently."""
    if not isinstance(e, HTTPError):
        return
    if not e.response.content:
        return
    logging.error(e.response.content)
    try:
        d = json.loads(e.response.content)
        click.secho('%s (%s)' % (d.get('id', 'Unknown'), d.get('description', 'Unknown')), fg='red')
    except Exception:
        # Deliberate best-effort: a non-JSON body is already logged above.
        pass
def main():
    """Entry point: initialize logging and the config directory, run the CLI,
    and render any uncaught exception as a short red message instead of a
    full traceback (the traceback still goes to the log file)."""
    global cfgfile
    init_logging()
    cfgfile = os.path.join(click.get_app_dir("ecxcli"), 'config.ini')
    cfgdir = os.path.dirname(cfgfile)
    if not os.path.exists(cfgdir):
        os.makedirs(cfgdir)
    try:
        cli()
    except Exception as e:
        logging.error(traceback.format_exc())
        exctype, value = sys.exc_info()[:2]
        # One-line "ExceptionType: message" for the terminal.
        click.secho(traceback.format_exception_only(exctype, value)[0], fg='red')
        process_http_error(e)
| nilq/baby-python | python |
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import unittest.mock as mock
import unittest
import time
import pytest
import ly_test_tools.environment.waiter
pytestmark = pytest.mark.SUITE_smoke
@mock.patch('time.sleep', mock.MagicMock)
class TestWaitFor(unittest.TestCase):
    """Unit tests for ly_test_tools.environment.waiter.wait_for.

    time.sleep is mocked for the whole class so the polling loop spins
    without actually sleeping; wall-clock time still advances normally.
    """
    def test_WaitForFunctionCall_GivenExceptionTimeoutExceeded_RaiseException(self):
        # An always-False predicate must raise the supplied exception type
        # once the (tiny) timeout lapses.
        input_func = mock.MagicMock()
        input_func.return_value = False
        with self.assertRaises(Exception):
            ly_test_tools.environment.waiter.wait_for(input_func, .001, Exception, 0)
    def test_WaitForFunctionCall_TimeoutExceeded_RaiseAssertionError(self):
        # No explicit exception argument: per the test name, presumably an
        # AssertionError is raised by default — the assertion only requires
        # an Exception subclass; confirm against the waiter implementation.
        input_func = mock.MagicMock()
        input_func.return_value = False
        with self.assertRaises(Exception):
            ly_test_tools.environment.waiter.wait_for(input_func, .001, interval=0)
    def test_WaitForFunctionCall_TimeoutExceeded_EnoughTime(self):
        # Only time.sleep is mocked, so wait_for must still consume at least
        # its full 0.1 s timeout in real time before giving up.
        input_func = mock.MagicMock()
        input_func.return_value = False
        timeout_end = time.time() + 0.1
        try:
            ly_test_tools.environment.waiter.wait_for(input_func, 0.1, Exception, interval=0.01)
        except Exception:
            pass
        # It should have taken at least 1/10 second
        assert time.time() > timeout_end
# -*- encoding: utf-8 -*-
"""
keri.kli.commands module
"""
import argparse
import json
from hio.base import doing
from keri import kering
from keri.db import basing
from ... import habbing, keeping, agenting, indirecting, directing
# Command-line spec for the `interact` sub-command.
# Bug fixes: the description said 'Rotate keys' (copy-pasted from the rotate
# command) and the --proto help claimed the default was http while the actual
# default is "tcp".
parser = argparse.ArgumentParser(description='Create and publish an interaction event')
# Dispatch to interact() when this sub-command is selected.
parser.set_defaults(handler=lambda args: interact(args))
parser.add_argument('--name', '-n', help='Human readable reference', required=True)
parser.add_argument('--proto', '-p', help='Protocol to use when propagating ICP to witnesses [tcp|http] (defaults '
                                          'tcp)', default="tcp")
parser.add_argument('--data', '-d', help='Anchor data, \'@\' allowed', default=[], action="store", required=False)
def interact(args):
    """
    Create an interaction (ixn) event for the identifier environment named
    by ``args.name`` and publish it to that identifier's witnesses.

    Parameters:
        args (argparse.Namespace): parsed command line arguments
            (.name, .proto, .data)

    Returns:
        int: -1 on configuration or validation failure, otherwise None.

    Raises:
        kering.ConfigurationError: when the supplied anchor data is not
            valid JSON.
    """
    name = args.name

    # args.data defaults to [] (falsy). The previous `is not None` check let
    # the empty-list default through and crashed on `[].startswith`; only
    # parse when real data was supplied. A leading '@' means "read the JSON
    # from this file path".
    if args.data:
        try:
            if args.data.startswith("@"):
                # Context manager guarantees the file is closed (it used to leak).
                with open(args.data[1:], "r") as f:
                    data = json.load(f)
            else:
                data = json.loads(args.data)
        except json.JSONDecodeError:
            raise kering.ConfigurationError("data supplied must be valid JSON to anchor in a seal")

        # Seals are always carried as a list of dicts.
        if not isinstance(data, list):
            data = [data]
    else:
        data = None

    ixnDoer = InteractDoer(name=name, proto=args.proto, data=data)
    doers = [ixnDoer]

    try:
        directing.runController(doers=doers, expire=0.0)
    except kering.ConfigurationError:
        print(f"identifier prefix for {name} does not exist, incept must be run first", )
        return -1
    except kering.ValidationError as ex:
        # Event failed validation; exit non-zero without a traceback.
        return -1
class InteractDoer(doing.DoDoer):
    """
    DoDoer that launches Doers needed to create an interaction event and publication of the event
    to all appropriate witnesses
    """
    def __init__(self, name, proto, data: list = None):
        """
        Returns DoDoer with all registered Doers needed to perform interaction event.

        Parameters:
            name is human readable str of identifier
            proto is tcp or http method for communicating with Witness
            data is list of dicts of committed data such as seals
        """
        self.name = name
        self.proto = proto
        self.data = data

        ks = keeping.Keeper(name=self.name, temp=False)  # not opened by default, doer opens
        self.ksDoer = keeping.KeeperDoer(keeper=ks)  # doer do reopens if not opened and closes
        db = basing.Baser(name=self.name, temp=False, reload=True)  # not opened by default, doer opens
        self.dbDoer = basing.BaserDoer(baser=db)  # doer do reopens if not opened and closes
        # create=False: the habitat must already exist (incept must have run).
        self.hab = habbing.Habitat(name=self.name, ks=ks, db=db, temp=False, create=False)
        self.habDoer = habbing.HabitatDoer(habitat=self.hab)  # setup doer
        doers = [self.ksDoer, self.dbDoer, self.habDoer, doing.doify(self.interactDo)]
        super(InteractDoer, self).__init__(doers=doers)
    def interactDo(self, tymth, tock=0.0, **opts):
        """
        Returns: doifiable Doist compatible generator method

        Usage:
            add result of doify on this method to doers list
        """
        self.wind(tymth)
        self.tock = tock
        _ = (yield self.tock)
        # Build and sign the interaction event, anchoring any supplied seal data.
        msg = self.hab.interact(data=self.data)
        if self.proto == "tcp":
            mbx = None
            witDoer = agenting.WitnessReceiptor(hab=self.hab, klas=agenting.TCPWitnesser, msg=msg)
            self.extend(doers=[witDoer])
            yield self.tock
        else:  # "http"
            # HTTP witnesses deliver receipts via the mailbox director.
            mbx = indirecting.MailboxDirector(hab=self.hab)
            witDoer = agenting.WitnessReceiptor(hab=self.hab, klas=agenting.HTTPWitnesser, msg=msg)
            self.extend(doers=[mbx, witDoer])
            yield self.tock
        # Spin (cooperatively) until all witness receipts are collected.
        while not witDoer.done:
            _ = yield self.tock
        print(f'Prefix {self.hab.pre}')
        print(f'New Sequence No. {self.hab.kever.sn}')
        for idx, verfer in enumerate(self.hab.kever.verfers):
            print(f'\tPublic key {idx+1}: {verfer.qb64}')
        # Tear down the doers we started so the controller can exit cleanly.
        toRemove = [self.ksDoer, self.dbDoer, self.habDoer, witDoer]
        if mbx:
            toRemove.append(mbx)
        self.remove(toRemove)
        return
| nilq/baby-python | python |
from ucsmsdk.ucsexception import UcsException
import re, sys
# given an array and a string of numbers, make sure they are all in the array:
#
def check_values(array, csv):
    """Return True when every comma-separated, 1-based index in *csv*
    selects a valid element of *array*; otherwise False.

    A non-numeric entry prints a diagnostic and fails the whole check.
    """
    indexes = csv.split(',')
    for i in indexes:
        try:
            i = int(i) - 1  # user enters 1-based positions
        except ValueError:
            # Narrowed from a bare except: only a bad literal can fail here,
            # and at this point `i` is still the offending string.
            print("bad value: " + i)
            return False
        if i < 0 or i > len(array) - 1:
            return False
    return True
# get the available servers to put in the pool.
def select_kube_servers(handle):
    """Interactively prompt the operator for which discovered blades/rack
    servers to use for Kubernetes.

    Loops forever until a valid 1-based index list is entered AND confirmed
    with 'y'/'Y'; returns the chosen UCS managed objects. (Python 2 module:
    print statements and raw_input.)
    """
    from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
    from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
    print "Listing Available UCS Servers"
    filter_string = '(presence, "equipped")'
    # get blades
    blades = handle.query_classid("fabricComputeSlotEp", filter_string)
    # get all connected rack mount servers.
    servers = handle.query_classid("computeRackUnit")
    m = blades + servers
    while True:
        # Menu is re-printed on every pass until the operator confirms.
        for i, s in enumerate(m):
            if type(s) is FabricComputeSlotEp:
                print "[%d]: Blade %s/%s type %s" % (i+1, s.chassis_id, s.rn, s.model)
            if type(s) is ComputeRackUnit:
                print "[%d]: Rack %s type %s" % (i+1, s.rn, s.model)
        vals = raw_input("(E.g.: 2,4,8): ")
        if check_values(m, vals) == True:
            # Convert the confirmed 1-based selections into managed objects.
            k8servers = [m[int(x)-1] for x in vals.split(',')]
            print "Install Kubernetes on the following servers:"
            for s in k8servers:
                if type(s) is FabricComputeSlotEp:
                    print "\tBlade %s/%s type %s" % (s.chassis_id, s.rn, s.model)
                if type(s) is ComputeRackUnit:
                    print "\tServer %s type %s" % (s.rn, s.model)
            yn = raw_input("Is this correct? [N/y]: ")
            if yn == "y" or yn == "Y":
                return k8servers
def createKubeBootPolicy(handle):
    """Create the 'kube' boot policy: local disk first, remote CIMC-mapped
    virtual media (DVD) second. Idempotent: 'already exists' is tolerated."""
    print "Creating Kube Boot Policy"
    from ucsmsdk.mometa.lsboot.LsbootPolicy import LsbootPolicy
    from ucsmsdk.mometa.lsboot.LsbootVirtualMedia import LsbootVirtualMedia
    from ucsmsdk.mometa.lsboot.LsbootStorage import LsbootStorage
    from ucsmsdk.mometa.lsboot.LsbootLocalStorage import LsbootLocalStorage
    from ucsmsdk.mometa.lsboot.LsbootDefaultLocalImage import LsbootDefaultLocalImage
    # NOTE(review): descr="Kuberenetes" is misspelled in UCS; cosmetic only.
    mo = LsbootPolicy(parent_mo_or_dn="org-root", name="kube", descr="Kuberenetes", reboot_on_update="yes", policy_owner="local", enforce_vnic_name="yes", boot_mode="legacy")
    # The mo_* bindings look unused, but each constructor attaches itself to
    # its parent via parent_mo_or_dn, building the boot-order tree under `mo`.
    mo_1 = LsbootVirtualMedia(parent_mo_or_dn=mo, access="read-only-remote-cimc", lun_id="0", order="2")
    mo_2 = LsbootStorage(parent_mo_or_dn=mo, order="1")
    mo_2_1 = LsbootLocalStorage(parent_mo_or_dn=mo_2, )
    mo_2_1_1 = LsbootDefaultLocalImage(parent_mo_or_dn=mo_2_1, order="1")
    handle.add_mo(mo, modify_present=True)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def deleteKubeBootPolicy(handle):
mo = handle.query_dn("org-root/boot-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeLocalDiskPolicy(handle):
    """Create the 'kube' local-disk policy (RAID-1 mirror, FlexFlash off).
    Currently unused — createKubeServers calls the storage-profile path
    instead. Idempotent: 'already exists' is tolerated."""
    print "Creating Kube Local Disk Policy"
    from ucsmsdk.mometa.storage.StorageLocalDiskConfigPolicy import StorageLocalDiskConfigPolicy
    mo = StorageLocalDiskConfigPolicy(parent_mo_or_dn="org-root", protect_config="no", name="kube", descr="Kubernetes", flex_flash_raid_reporting_state="disable", flex_flash_state="disable", policy_owner="local", mode="raid-mirrored")
    handle.add_mo(mo)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def deleteKubeLocalDiskPolicy(handle):
print "Deleting Kube Local Disk Policy"
mo = handle.query_dn("org-root/local-disk-config-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeUUIDPools(handle):
    """Create the 'kube' UUID suffix pool with one block of 100 suffixes.
    Idempotent: 'already exists' is tolerated."""
    print "Creating Kube UUID Pools"
    from ucsmsdk.mometa.uuidpool.UuidpoolPool import UuidpoolPool
    from ucsmsdk.mometa.uuidpool.UuidpoolBlock import UuidpoolBlock
    mo = UuidpoolPool(parent_mo_or_dn="org-root", policy_owner="local", prefix="derived", descr="Kubernetes Pool", assignment_order="default", name="kube")
    # mo_1 looks unused, but the constructor attaches the block to the pool
    # via parent_mo_or_dn (suffixes ...001 through ...100).
    mo_1 = UuidpoolBlock(parent_mo_or_dn=mo, to="C888-888888888100", r_from="C888-888888888001")
    handle.add_mo(mo)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def deleteKubeUUIDPools(handle):
print "Deleting Kube UUID Pool"
mo = handle.query_dn("org-root/uuid-pool-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeServerPool(handle):
    """Create the (initially empty) 'Kubernetes' compute pool; servers are
    added later by addServersToKubePool. Idempotent."""
    print "Creating Kubernetes Compute Pool"
    from ucsmsdk.mometa.compute.ComputePool import ComputePool
    mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
    handle.add_mo(mo)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def addServersToKubePool(handle, servers):
    """Attach the selected blades/rack units to the 'Kubernetes' compute
    pool. *servers* is the list returned by select_kube_servers."""
    print "Adding servers to Kubernetes Pool"
    from ucsmsdk.mometa.compute.ComputePool import ComputePool
    from ucsmsdk.mometa.compute.ComputePooledSlot import ComputePooledSlot
    from ucsmsdk.mometa.compute.ComputePooledRackUnit import ComputePooledRackUnit
    from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
    from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
    mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
    for s in servers:
        # Pooled entries attach themselves to `mo` via parent_mo_or_dn;
        # the rn/slot_id prefixes are stripped to get the bare ids.
        if type(s) is FabricComputeSlotEp:
            ComputePooledSlot(parent_mo_or_dn=mo, slot_id=re.sub("slot-","", s.slot_id), chassis_id=str(s.chassis_id))
        if type(s) is ComputeRackUnit:
            ComputePooledRackUnit(parent_mo_or_dn=mo, id=re.sub("rack-unit-","", s.rn))
    # True == modify_present: merge with the existing pool object.
    handle.add_mo(mo, True)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the add as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def deleteKubeServerPool(handle):
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/compute-pool-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServiceProfileTemplate(handle):
print "Creating Kubernetes Service Profile Template"
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.vnic.VnicConnDef import VnicConnDef
from ucsmsdk.mometa.ls.LsRequirement import LsRequirement
from ucsmsdk.mometa.lstorage.LstorageProfileBinding import LstorageProfileBinding
mo = LsServer(parent_mo_or_dn="org-root",
policy_owner="local",
name="Kubernetes",
descr="Kubernetes Service Profile",
type="updating-template",
# Boot using Kubernetes Boot policy: local Disk, then Remote DVD
boot_policy_name="kube",
# Default Maintenance Policy
maint_policy_name="default",
# scrub policy
scrub_policy_name="kube",
# UUID Pool
ident_pool_name="kube",
# disks we use.
#local_disk_policy_name="kube",
#storage_profile_name="kube",
# virtual media policy
vmedia_policy_name="kube"
)
# create vNIC Connection Policy
VnicConnDef(parent_mo_or_dn=mo,
lan_conn_policy_name="kube")
# create server pool and add to template.
LsRequirement(parent_mo_or_dn=mo, name="Kubernetes")
# add storage profile.
mo_1 = LstorageProfileBinding(parent_mo_or_dn=mo, storage_profile_name="kube")
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
except Exception:
print Exception
def deleteServiceProfileTemplate(handle):
print "Deleting Kubernetes Service Profile Template"
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/ls-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServers(handle, servers):
    """Instantiate one service profile per selected server from the
    'Kubernetes' template, named kube01, kube02, ...

    NOTE(review): the "kube0%d" format stops zero-padding correctly after
    nine servers (kube010, ...) — confirm the expected naming scheme.
    """
    print "Creating Kubernetes Service Profiles"
    from ucsmsdk.ucsmethodfactory import ls_instantiate_n_named_template
    from ucsmsdk.ucsbasetype import DnSet, Dn
    for i, s in enumerate(servers):
        dn_set = DnSet()
        dn = Dn()
        sp_name = "kube0%d" % (i+1)
        dn.attr_set("value",sp_name)
        dn_set.child_add(dn)
        elem = ls_instantiate_n_named_template(cookie=handle.cookie,
                                               dn="org-root/ls-Kubernetes",
                                               in_error_on_existing="true",
                                               in_name_set=dn_set,
                                               in_target_org="org-root",
                                               in_hierarchical="false")
        try:
            mo_list = handle.process_xml_elem(elem)
        except UcsException as err:
            # 105 == instance already exists; any other UCS error is echoed.
            if err.error_code == "105":
                print "\t" + sp_name + " already exists."
            else:
                print err
def deleteServers(handle):
    """Delete every service profile whose DN matches ls-kube<digits>.

    NOTE(review): the UcsException handler references `k` from the loop;
    if the query returns no profiles that would raise NameError — confirm
    intended behavior for the empty case.
    """
    print "Deleting Kubernetes Nodes"
    # Regex DN filter: all service profiles named kube<digits>.
    filter_string = '(dn, "ls-kube[0-9]+", type="re")'
    kube = handle.query_classid("lsServer", filter_string)
    for k in kube:
        print "Deleting " + k.name
        handle.remove_mo(k)
    # One commit for all queued removals.
    try:
        handle.commit()
    except AttributeError:
        print "\talready deleted"
    except UcsException as err:
        print "\t"+ k.name + ": " + err.error_descr
def createKubeVirtualMedia(handle):
    """Create the 'kube' virtual-media policy from an operator-supplied
    boot-ISO URL.

    Two mount entries are created: the boot ISO as a virtual CD, and a
    per-profile kickstart image (image name derived from the service
    profile name) as a virtual HDD. Idempotent: 'already exists' tolerated.
    """
    print "Adding Virtual Media Policy"
    from urlparse import urlparse
    import os.path
    yn = False
    url = ""
    # Prompt until the operator confirms with exactly "y"; any other answer
    # resets yn to False and re-asks.
    while yn == False:
        print "What is the URL for the Boot ISO image?"
        url = raw_input("(E.g.: http://192.168.2.2/kubam/centos7.2-boot.iso) : ")
        print "You entered: " + url
        yn = raw_input("Is this correct? [y/N]: ")
        if yn != "y":
            yn = False
    # Split the URL into the pieces the vmedia mount entry needs.
    o = urlparse(url)
    paths = os.path.split(o.path)
    scheme = o.scheme  # http, https
    if scheme == "":
        scheme = "http"
    filename = paths[-1]
    address = o.hostname
    path = "/".join(paths[:-1])
    # image name without its extension
    name = ".".join(paths[-1].split(".")[:-1])
    from ucsmsdk.mometa.cimcvmedia.CimcvmediaMountConfigPolicy import CimcvmediaMountConfigPolicy
    from ucsmsdk.mometa.cimcvmedia.CimcvmediaConfigMountEntry import CimcvmediaConfigMountEntry
    mo = CimcvmediaMountConfigPolicy(name="kube",
                                     retry_on_mount_fail="yes",
                                     parent_mo_or_dn="org-root",
                                     policy_owner="local",
                                     descr="Kubernetes Boot Media")
    # Both entries attach themselves to `mo` via parent_mo_or_dn.
    mo_1 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
                                      mapping_name=name,
                                      device_type="cdd",
                                      mount_protocol=scheme,
                                      remote_ip_address=address,
                                      image_name_variable="none",
                                      image_file_name=filename,
                                      image_path=path)
    mo_2 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
                                      mapping_name="kickstartImage",
                                      device_type="hdd",
                                      mount_protocol=scheme,
                                      remote_ip_address=address,
                                      image_name_variable="service-profile-name",
                                      image_path=path)
    handle.add_mo(mo, modify_present=True)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def deleteVirtualMedia(handle):
print "Deleting Kubernetes Virtual Media Policy"
mo = handle.query_dn("org-root/mnt-cfg-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createScrubPolicy(handle):
    """Create the 'kube' scrub policy: wipe disks (but not BIOS settings or
    FlexFlash) when a service profile is disassociated. Idempotent."""
    from ucsmsdk.mometa.compute.ComputeScrubPolicy import ComputeScrubPolicy
    mo = ComputeScrubPolicy(flex_flash_scrub="no",
                            parent_mo_or_dn="org-root",
                            name="kube",
                            disk_scrub="yes",
                            bios_settings_scrub="no",
                            descr="Destroy data when SP is unassociated")
    handle.add_mo(mo, modify_present=True)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def deleteScrubPolicy(handle):
print "Deleting Kubernetes Scrub Policy"
mo = handle.query_dn("org-root/scrub-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteDiskGroupConfig(handle):
print "Deleting Disk Group config"
mo = handle.query_dn("org-root/disk-group-config-Kube_Boot")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteStorageProfile(handle):
print "Deleting Storage Profile"
mo = handle.query_dn("org-root/profile-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createDiskGroupConfig(handle):
    """Create the 'kube_boot' disk-group policy: a two-drive RAID-1 mirror
    with platform-default virtual-drive settings. Idempotent."""
    print "Adding Disk Group Config"
    from ucsmsdk.mometa.lstorage.LstorageDiskGroupConfigPolicy import LstorageDiskGroupConfigPolicy
    from ucsmsdk.mometa.lstorage.LstorageDiskGroupQualifier import LstorageDiskGroupQualifier
    from ucsmsdk.mometa.lstorage.LstorageVirtualDriveDef import LstorageVirtualDriveDef
    mo = LstorageDiskGroupConfigPolicy(parent_mo_or_dn="org-root",
                                       policy_owner="local",
                                       name="kube_boot",
                                       descr="Kubernetes Boot Disk",
                                       raid_level="mirror")
    # Qualifier and virtual-drive def attach to `mo` via parent_mo_or_dn.
    mo_1 = LstorageDiskGroupQualifier(parent_mo_or_dn=mo,
                                      use_remaining_disks="no",
                                      num_ded_hot_spares="unspecified",
                                      drive_type="unspecified",
                                      num_drives="2",
                                      min_drive_size="unspecified",
                                      num_glob_hot_spares="unspecified")
    mo_2 = LstorageVirtualDriveDef(parent_mo_or_dn=mo, read_policy="platform-default",
                                   drive_cache="platform-default",
                                   strip_size="platform-default",
                                   io_policy="platform-default",
                                   write_cache_policy="platform-default",
                                   access_policy="platform-default")
    handle.add_mo(mo, modify_present=True)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def createStorageProfile(handle):
    """Create the 'kube' storage profile with one auto-deployed boot LUN
    ('KubeLUN') on the 'kube_boot' disk group, expanded to all available
    space. Idempotent."""
    from ucsmsdk.mometa.lstorage.LstorageProfile import LstorageProfile
    from ucsmsdk.mometa.lstorage.LstorageDasScsiLun import LstorageDasScsiLun
    mo = LstorageProfile(parent_mo_or_dn="org-root",
                         policy_owner="local",
                         name="kube",
                         descr="Kubernetes Storage Profile")
    # The LUN attaches to `mo` via parent_mo_or_dn; size="1" (GB) is the
    # nominal size, grown by expand_to_avail="yes".
    mo_1 = LstorageDasScsiLun(parent_mo_or_dn=mo,
                              local_disk_policy_name="kube_boot",
                              auto_deploy="auto-deploy",
                              expand_to_avail="yes",
                              lun_map_type="non-shared",
                              size="1",
                              fractional_size="0",
                              admin_state="online",
                              deferred_naming="no",
                              order="not-applicable",
                              name="KubeLUN")
    handle.add_mo(mo, modify_present=True)
    try:
        handle.commit()
    except UcsException as err:
        # 103 == object already exists; treat the create as idempotent.
        if err.error_code == "103":
            print "\talready exists"
def createKubeServers(handle, org):
    """Orchestrate creation of all UCS objects needed for the Kubernetes
    nodes: policies and pools first, then the interactive server selection,
    the template, and finally the per-server service profiles.

    NOTE(review): `org` is currently unused — everything is created under
    org-root.
    """
    createKubeBootPolicy(handle)
    #createKubeLocalDiskPolicy(handle)
    createDiskGroupConfig(handle)
    createStorageProfile(handle)
    createScrubPolicy(handle)
    createKubeUUIDPools(handle)
    createKubeServerPool(handle)
    createKubeVirtualMedia(handle)
    servers = select_kube_servers(handle)
    addServersToKubePool(handle, servers)
    createServiceProfileTemplate(handle)
    createServers(handle, servers)
def deleteKubeServers(handle, org):
    """Tear down everything createKubeServers built, in reverse dependency
    order (profiles before template, template before pools/policies).

    NOTE(review): `org` is currently unused — everything lives under
    org-root.
    """
    deleteServers(handle)
    deleteServiceProfileTemplate(handle)
    deleteKubeServerPool(handle)
    deleteVirtualMedia(handle)
    deleteScrubPolicy(handle)
    deleteKubeBootPolicy(handle)
    deleteStorageProfile(handle)
    deleteDiskGroupConfig(handle)
    #deleteKubeLocalDiskPolicy(handle)
    deleteKubeUUIDPools(handle)
| nilq/baby-python | python |
from django.test import TestCase
class geopollTest(TestCase):
    """
    Tests for django-geopoll
    """
    def test_geopoll(self):
        # Placeholder: no assertions yet; keeps the app's test suite wired up.
        pass
import settings
import json
import unittest
import requests
from inventory.tests import fixture
class ApiTests(unittest.TestCase):
    """Integration test: POST a known-valid payload to the inventory API
    and expect 201 Created. Requires the API server (and Elasticsearch)
    to be up at the configured host/port."""
    def setUp(self):
        # Verify Server is running.
        # Verify Elastic Search is running.
        # NOTE(review): the endpoint is built from ELASTIC_URL/ELASTIC_PORT —
        # confirm the inventory API really listens on the Elastic host.
        self.endpoint = 'http://{hostname}:{port}/v1/inventory'.format(
            hostname=settings.ELASTIC_URL,
            port=settings.ELASTIC_PORT)
    def test_valid_request(self):
        json_str = fixture('valid_request.json')
        data = json.loads(json_str)
        response = requests.post(self.endpoint + '/inventory', json=data)
        # assertEqual: assertEquals is a long-deprecated alias.
        self.assertEqual(response.status_code, 201)
if __name__ == "__main__":
    # Allow running this module directly as well as via a test runner.
    unittest.main()
import requests
from bs4 import BeautifulSoup
import json
from smtp import send_mail
# Browser-like User-Agent so the shops don't reject the scraper outright.
header = {"User-agent": "Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.9.1.1) Gecko/20090715 Firefox/3.5.1 "}
# Hard-coded EUR -> PLN conversion rate, used for amazon.de prices only.
euro = 4.25
def items():
    """Load the watched-items catalogue from items.json into the module
    global ``list``.

    NOTE: the global deliberately shadows the builtin `list` because the
    rest of the script reads it by that name; renaming would break them.
    On failure the global is left unset and a message is printed.
    """
    try:
        with open('items.json', 'r') as file:
            data = file.read()
        global list
        list = json.loads(data)
    except (OSError, ValueError):
        # Narrowed from a bare except: covers a missing/unreadable file and
        # malformed JSON (json.JSONDecodeError subclasses ValueError).
        print("Error when reading JSON file")
global min,min_link
def check_price(link):
    """Scrape *link* and store its integer price (PLN) in the module global
    ``price``.

    Each supported shop has its own CSS-selector branch; amazon.de prices
    are converted from EUR via the `euro` rate (NOTE: that branch yields a
    float, the others ints — confirm downstream comparison expectations).
    On any scraping failure the link is printed and ``price`` keeps its
    previous value, which callers must be aware of.
    """
    # link = "www.cos2.pl/cos.html"
    site_content = BeautifulSoup(requests.get(link, headers=header).content, 'html.parser')
    try:
        global price
        # Hostname selects the per-shop parsing strategy.
        site_url = link.split('/')[2]
        if(site_url == 'www.x-kom.pl'):
            price = int(site_content.find(attrs={'class':'u7xnnm-4 gHPNug'}).get_text().split(',')[0].replace(" ",''))
        elif(site_url == 'www.komputronik.pl'):
            price = site_content.find('span',attrs={'class':'price'}).find('span').get_text()
            if(price == ''):
                # Fallback selector for the Angular-rendered price variant.
                price = site_content.find('span',attrs={'class':'price'}).find('span',attrs={'ng-if':'!$ctrl.changeBaseData'}).get_text()
            price = int(''.join([n for n in price if n.isdigit()]))
        elif(site_url == 'www.al.to'):
            name = site_content.find(attrs={'class':'sc-1x6crnh-5'}).get_text()
            price = int(site_content.find(attrs={'class':'u7xnnm-4 gHPNug'}).get_text().split(',')[0].replace(" ",''))
        elif(site_url == 'www.mediamarkt.pl'):
            price = int(site_content.find(attrs={'itemprop':'price'}).get_text())
        elif(site_url == 'www.empik.com'):
            price = int(site_content.find(attrs={'class':'productPriceInfo__price ta-price withoutLpPromo'}).get_text().split(",")[0].strip())
        elif(site_url == 'www.morele.net'):
            try:
                price = int(site_content.find('div','price-new').get_text().split(',')[0].replace(" ",''))
            except:
                # Price text wasn't "1 234,56"-shaped; keep digits only.
                price = site_content.find('div','price-new').get_text()
                price = int(''.join([n for n in price if n.isdigit()]))
        elif(site_url == 'www.euro.com.pl'):
            price = site_content.find('div','price-normal selenium-price-normal').get_text()
            price = int(''.join([n for n in price if n.isdigit()]))
        elif(site_url == 'www.mediaexpert.pl'):
            price = int(site_content.find('span','a-price_price').findNext('span','a-price_price').get_text().replace(" ",""))
        elif(site_url == 'www.amazon.de'):
            price = int(site_content.find('span','a-size-medium a-color-price priceBlockBuyingPriceString').get_text().split(',')[0].replace(".","")) * euro
        else:
            print("Site not supported: "+ site_url)
        # print("{} -> {}".format(link.split('/')[2],price))
    except:
        # Any selector miss lands here; the failing link is logged and the
        # stale global `price` is silently reused by the caller.
        print(link)
def main():
    """Walk the nested items catalogue (model -> year -> colour -> size),
    find the cheapest offer per item via check_price, then hand the result
    map to comp() for comparison/persistence.

    NOTE(review): check_price communicates through the global `price`; if a
    scrape fails mid-loop the previous item's price is silently reused.
    `min` intentionally shadows the builtin within this function.
    """
    items()
    # link = list["Macbook AIR"]["2019"]["Space grey"]["128"][0]
    data = {}
    for a in list:
        for b in list[a]:
            for c in list[a][b]:
                for d in list[a][b][c]:
                    # 10000 PLN acts as "no offer found yet" sentinel.
                    min = 10000
                    print("{} {} {} {}GB".format(a,b,c,d))
                    for e in list[a][b][c][d]:
                        check_price(e)
                        if(min>price):
                            min = price
                            min_link = e
                    print("{} -> {}".format(min_link.split('/')[2],min))
                    data["{} {} {} {}GB".format(a,b,c,d)] = [min,min_link]
    comp(data)
# test_data = {"Macbook AIR 2019 Space grey 128GB": [4900, "https://www.x-kom.pl/p/506277-notebook-laptop-133-apple-macbook-air-i5-8gb-128-uhd-617-mac-os-space-grey.html"], "Macbook AIR 2019 Space grey 256GB": [5649, "https://www.x-kom.pl/p/506278-notebook-laptop-133-apple-macbook-air-i5-8gb-256-uhd-617-mac-os-space-grey.html"], "Macbook AIR 2019 Silver 128GB": [4999, "https://www.morele.net/laptop-apple-macbook-air-13-3-2019-srebrny-mvfk2ze-a-6116788/"], "Macbook AIR 2019 Silver 256GB": [5097, "https://www.mediamarkt.pl/komputery-i-tablety/laptop-apple-macbook-air-13-retina-i5-8gb-256gb-ssd-macos-srebrny-mrec2ze-a"], "Macbook PRO 2019 Space grey 128GB": [5699, "https://www.euro.com.pl/laptopy-i-netbooki/apple-laptop-mbp-tb-i5-8gb-128ssd-space-grey.bhtml"], "Macbook PRO 2019 Silver 128GB": [5649, "https://www.euro.com.pl/laptopy-i-netbooki/apple-laptop-mbp-tb-i5-8gb-128ssd-silver.bhtml"], "Macbook PRO 2017 Space grey 128GB": [4797, "https://www.mediamarkt.pl/komputery-i-tablety/laptop-apple-macbook-pro-13-3-i5-8gb-128gb-ssd-iris-plus-640-macos-srebrny-mpxr2ze-a"], "Macbook PRO 2017 Silver 128GB": [4797, "https://www.mediamarkt.pl/komputery-i-tablety/laptop-apple-macbook-pro-13-3-i5-8gb-128gb-ssd-iris-plus-640-macos-gwiezdna-szarosc-mpxq2ze-a"], "Macbook PRO 2017 Silver 256GB": [5599, "https://www.euro.com.pl/laptopy-i-netbooki/apple-macbook-pro-13-13-3-intel-core-i5-7360u-8gb-ram-256gb-dysk-os-x-sierra.bhtml"]}
# test_comp = {"Macbook AIR 2019 Space grey 128GB": [4900, "https://www.x-kom.pl/p/506277-notebook-laptop-133-apple-macbook-air-i5-8gb-128-uhd-617-mac-os-space-grey.html"],"Macbook AIR 2019 Space grey 124GB": [41230, "https://www.x-kom.pl/p/506277-notebook-laptop-133-apple-macbook-air-i5-8gb-128-uhd-617-mac-os-space-grey.html"]}
def comp(data):
    """Compare freshly scraped prices against prices.json, persist changes,
    and e-mail a summary when any price dropped.

    body maps item -> [old, new, link] for drops and [new, old, link] for
    rises. NOTE(review): when both drops and rises occur in one run, only
    the 'lower' branch executes (elif) — rises are persisted but included
    in the mail body; confirm that ordering is intended.
    """
    with open('prices.json','r') as file:
        json_data = json.loads(file.read())
    lower = False
    higher = False
    body = {}
    for item in json_data:
        if(data[item][0] < json_data[item][0]):
            body[item] = [json_data[item][0],data[item][0],data[item][1]]
            lower = True
        elif(data[item][0] > json_data[item][0]):
            body[item] = [data[item][0],json_data[item][0],data[item][1]]
            higher = True
    if(lower):
        # Price drop: persist the new snapshot and notify by mail.
        print("Lower price")
        with open('prices.json','w') as file:
            json.dump(data,file, indent=4, sort_keys=True)
        send_mail(body)
        print("Update completed")
    elif(higher):
        # Price rise only: persist silently (no mail).
        print("Higher price")
        with open('prices.json','w') as file:
            json.dump(data,file, indent=4, sort_keys=True)
    else:
        print("No changes")
if __name__ == '__main__':
    # Run one scrape/compare cycle when executed directly.
    main()
| nilq/baby-python | python |
import subprocess
import time
from timeit import default_timer as timer
# Wall-clock start of the whole benchmark run.
start = timer()
# Shell preamble for node 3001 (NOTE(review): overwritten before first use
# by the loop below, which targets NODE_ID=3002 — confirm this is intended).
commands_node1 = '''
export NODE_ID=3001
'''
# Wallet addresses used as send targets; index 10 is the common receiver.
addresses = [
    '13XfCX8bLpdu8YgnXPD4BDeBC5RyvqBfPh',
    '14L3zLQWPiXM6hZXdfmgjET8crM52VJpXX',
    '1C4tyo8poeG1uFioZjtgnLZKotEUZFJyVh',
    '18Nt9jiYVjm2TxCTHNSeYquriaauh5wfux',
    '16uqNuajndwknbHSQw1cfTvSgsXxa5Vxi8',
    '1AqNL5SPcuWqUT1SjTEQ3WGDLfy47HK74c',
    '17aju9bJh3G7xC9PAkQ1j5czizA31rN77S',
    '1Ci67qmp8KerJA3zZhsDC7AcXz8RCZwbt',
    '1MzLjrr737WtVpubSGxN6CUECBD2vnQqef',
    '165KxLW2bFms5wtKs2sNQXfD8TLQrehGCT',
    '14RJHhG374XyuTLfZ48qRxUdxRLWj3BcA7',
    '13L7UYXjUCGUUKF5o4oExDFQnV6p3AkDoB',
]
# Command template: prefix + sender, then amount/receiver suffix.
send_repeat = ["./blockchain_ureca send -from ",
               " -amount 1 -to "]
def copy_db():
    """Copy node 3000's blockchain DB over node 3002's via a bash subprocess.

    NOTE(review): reads the module-level loop variable `i` that leaks out of
    the `for i in range(10)` loop below — fragile; it would raise NameError
    if called before that loop has run. The i%100 branch can only print for
    i == 0..9, i.e. never — confirm intent.
    """
    commands = "export NODE_ID=3002\n"
    if i % 100 == 0 and i > 0:
        print("i: ", i)
    commands += "cp blockchain_3000.db blockchain_3002.db" + '\n'
    process_node = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    process_node.communicate(commands.encode('utf-8'))
# Main driver: 10000 rounds; each round sends 1 coin from each of the first
# ten addresses to addresses[10] via a fresh bash subprocess, then syncs
# node 3002's database from node 3000.
for t in range(10000):
    print("t: ", t)
    for i in range(10):
        commands_node1 = "export NODE_ID=3002\n"
        # Only ever true for i in 0..9 when i%100==0 and i>0 — i.e. never.
        if i % 100 == 0 and i > 0:
            print("i: ", i)
        commands_node1 += send_repeat[0] + addresses[i] + send_repeat[1] + addresses[10] + '\n'
        process_node1 = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        out, err = process_node1.communicate(commands_node1.encode('utf-8'))
        time.sleep(1)
    # Make sure the new txs has been put into database
    time.sleep(1)
    copy_db()
    time.sleep(0.5)
# commands_node1 += "./blockchain_ureca startnode -port 9090\n"
# print(commands_node1)
# Re-runs the last round's final send command once more after the loop ends.
process_node1 = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = process_node1.communicate(commands_node1.encode('utf-8'))
# print(out)
end = timer()
print(end-start)
| nilq/baby-python | python |
from rest_framework import serializers
from can_server.models import DbcFile, CanSettings
class DbcFileSerializer(serializers.ModelSerializer):
    """Serialize DbcFile rows (file name + raw DBC contents) for the API."""
    class Meta:
        model = DbcFile
        fields = ('FileName', 'FileData')
class CanSettingsSerializer(serializers.ModelSerializer):
    """Serialize CAN bus connection settings (bus type, channel, bitrate)."""
    class Meta:
        model = CanSettings
        fields = ('bustype', 'channel', 'bitrate')
| nilq/baby-python | python |
from django.core import mail
from django.test import override_settings, TestCase
from django.urls import reverse
from opentech.apply.utils.testing.tests import BaseViewTestCase
from .factories import OAuthUserFactory, StaffFactory, UserFactory
@override_settings(ROOT_URLCONF='opentech.apply.urls')
class BaseTestProfielView(TestCase):
    """Shared base for profile-view tests: resolves the account URL once.

    NOTE: the class name misspells "Profile"; it is referenced by the
    subclasses below, so renaming must be done across all of them at once.
    """
    @classmethod
    def setUpTestData(cls):
        cls.url = reverse('users:account')
class TestProfileView(BaseTestProfielView):
    """Account page behaviour for a regular (applicant) user."""
    def setUp(self):
        self.user = UserFactory()
        self.client.force_login(self.user)
    def test_cant_acces_if_not_logged_in(self):
        self.client.logout()
        response = self.client.get(self.url, follow=True)
        # Initial redirect will be via to https through a 301
        self.assertRedirects(response, reverse('users_public:login') + '?next=' + self.url, status_code=301)
    def test_includes_change_password(self):
        # Password-backed accounts get a change-password link.
        response = self.client.get(self.url, follow=True)
        self.assertContains(response, reverse('users:password_change'))
    def test_doesnt_includes_change_password_for_oauth(self):
        # OAuth accounts have no local password to change.
        self.client.force_login(OAuthUserFactory())
        response = self.client.get(self.url, follow=True)
        self.assertNotContains(response, reverse('users:password_change'))
    def test_cant_set_slack_name(self):
        # The Slack-name field is staff-only (see TestStaffProfileView).
        response = self.client.get(self.url, follow=True)
        self.assertNotContains(response, 'Slack name')
class TestStaffProfileView(BaseTestProfielView):
    """Account page behaviour for a staff user."""
    def setUp(self):
        self.staff = StaffFactory()
        self.client.force_login(self.staff)
    def test_can_set_slack_name(self):
        # Unlike regular users, staff can edit their Slack name.
        response = self.client.get(self.url, follow=True)
        self.assertContains(response, 'Slack name')
class TestPasswordReset(BaseViewTestCase):
    """Password-reset flow: submitting an email sends the reset message."""
    user_factory = UserFactory
    url_name = 'users:{}'
    base_view_name = 'password_reset'
    def test_recieves_email(self):
        response = self.post_page(None, data={'email': self.user.email})
        self.assertRedirects(response, self.url(None, view_name='password_reset_done'))
        # Exactly one message, containing the confirm link.
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('https://testserver/account/password/reset/confirm', mail.outbox[0].body)
| nilq/baby-python | python |
import xlrd
class ReadExcel:
    """Render the first sheet of an .xls workbook as a minimal HTML page."""
    def readexcel(self, url):
        """Read the workbook at *url* (a file path) and return an HTML page
        whose body is the first sheet as a bordered <table>.

        The assembled HTML is also printed, preserving the original
        debug/inspection behaviour.
        """
        data = xlrd.open_workbook(url)  # open the .xls file
        table = data.sheets()[0]        # first worksheet
        nrows = table.nrows             # number of rows in the sheet
        htmlhead = '''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>'''
        htmltable = '<table border="1">'
        # Bug fix: a stray unclosed '<tr>' used to be appended here, putting
        # a malformed empty row at the top of every generated table.
        for row in range(nrows):
            htmltable += '<tr>'
            for e in table.row_values(row):
                htmltable += '<td>' + str(e) + '</td>'
            htmltable += '</tr>'
        htmltable += '</table>'
        htmltail = '</body></html>'
        html = htmlhead + htmltable + htmltail
        print(html)
        return html
| nilq/baby-python | python |
from __future__ import absolute_import
from six.moves import range
try:
import h5py
except:
pass
import logging
import scipy as sp
from fastlmm.pyplink.snpset import *
from fastlmm.pyplink.altset_list import *
#!!document the format
class Hdf5(object):
    def __init__(self,filename, order = 'F',blocksize=5000):
        """Create a lazy HDF5 SNP reader for *filename*; no I/O happens here
        (the file is opened on first access via run_once)."""
        self._ran_once = False
        self.h5 = None
        ##!! copy relevent comments from Bed reader
        self.filename=filename
        self.order = order          # target memory layout for reads: 'F' or 'C'
        self.blocksize = blocksize  # SNPs per chunk when reading in blocks
    def copyinputs(self, copier):
        """Register this reader's backing file with *copier* (cluster-run
        input-staging hook)."""
        copier.input(self.filename)
    @property
    def snp_to_index(self):
        """dict mapping SNP id -> column index; triggers lazy file load."""
        self.run_once()
        return self._snp_to_index
    def run_once(self):
        """Open the HDF5 file and cache its metadata (idempotent).

        Loads iids, SNP ids ('rs'), positions, builds the snp->index map,
        and validates that the 'snps' matrix shape is consistent with its
        'SNP-major' attribute.

        Raises:
            IOError: the file is missing or unopenable.
            Exception: a duplicated SNP id, a missing 'SNP-major' attribute,
                or a shape mismatch.
        """
        if (self._ran_once):
            return
        self._ran_once = True
        try:
            self.h5 = h5py.File(self.filename, "r")
        except IOError as e:
            raise IOError("Missing or unopenable file '{0}' -- Native error message: {1}".format(self.filename,e))

        self._original_iids = sp.array(sp.array(self.h5['iid']),dtype=str)
        self.rs = sp.array(sp.array(self.h5['rs']),dtype='str')
        self.pos = sp.array(self.h5['pos'])

        ## similar code in bed
        self._snp_to_index = {}
        logging.info("indexing snps");
        for i,snp in enumerate(self.rs):
            if snp in self._snp_to_index : raise Exception("Expect snp to appear in bim file only once. ({0})".format(snp))
            self._snp_to_index[snp]=i

        self.snpsInFile = self.h5['snps']

        if "SNP-major" not in self.snpsInFile.attrs: raise Exception("In Hdf5 the 'snps' matrix must have a Boolean 'SNP-major' attribute")
        self.is_snp_major = self.snpsInFile.attrs["SNP-major"]

        # Sanity check: matrix shape must agree with the declared orientation.
        S_original = len(self.rs)
        N_original = len(self.original_iids)
        if self.is_snp_major:
            if not self.snpsInFile.shape == (S_original, N_original) : raise Exception("In Hdf5, snps matrix dimensions don't match those of 'rs' and 'iid'")
        else:
            if not self.snpsInFile.shape == (N_original, S_original) : raise Exception("In Hdf5, snps matrix dimensions don't match those of 'rs' and 'iid'")
    @property
    def snp_count(self):
        """Total number of SNPs in the file; triggers lazy file load."""
        self.run_once()
        return len(self.rs);
    @property
    def original_iids(self):
        """Array of individual ids as stored in the file; triggers lazy load."""
        self.run_once()
        return self._original_iids
    #same code is in Bed. Could this be moved to an abstract class?
    def read(self,snp_set = AllSnps(), order="F", dtype=SP.float64, force_python_only=False):
        """Read the SNP matrix for *snp_set* (default: all SNPs) with the
        requested memory order and dtype; delegates to
        read_with_specification after binding the set to this reader."""
        self.run_once()
        snpset_withbed = snp_set.addbed(self)
        return self.read_with_specification(snpset_withbed, order=order, dtype=dtype, force_python_only=force_python_only)
@staticmethod
#should move into utils
def is_sorted_without_repeats(list):
if len(list) < 2:
return True
for i in range(1,len(list)):
if not list[i-1] < list[i]:
return False
return True
    def __del__(self):
        """Close the underlying HDF5 handle on garbage collection."""
        if self.h5 != None:  # we need to test this because Python doesn't guarantee that __init__ was fully run
            self.h5.close()
def read_direct(self, snps, selection=sp.s_[:,:]):
    """Copy raw genotype values from the HDF5 dataset straight into *snps*.

    *selection* is an (iid, snp) numpy index expression; when the file is
    stored SNP-major it is reversed to match the on-disk layout.
    """
    if self.is_snp_major:
        selection = tuple(reversed(selection))
    if snps.flags["F_CONTIGUOUS"]:
        # Presumably h5py's read_direct wants a C-ordered destination; the
        # transpose of an F-contiguous array is C-contiguous -- confirm.
        self.snpsInFile.read_direct(snps.T,selection)
    else:
        self.snpsInFile.read_direct(snps,selection)
#!! much code the same as for Bed
def create_block(self, blocksize, dtype, order):
    # Allocate an uninitialized (num_individuals x blocksize) buffer whose
    # memory layout matches the on-disk orientation of the snps matrix.
    num_iids = len(self.original_iids)  # similar code elsewhere -- make a method
    if self.is_snp_major == (order == "F"):
        layout = order
    else:
        layout = "C" if order == "F" else "F"
    return sp.empty([num_iids, blocksize], dtype=dtype, order=layout)
def read_with_specification(self, snpset_with_snpreader, order="F", dtype=SP.float64, force_python_only=False):
    """Read the requested SNP/individual submatrix from the HDF5 file.

    Returns a dict with keys 'rs', 'pos', 'snps' (N x S array in *order*
    layout) and 'iid'. When the request is contiguous-and-sorted and the
    layout matches the file, h5py can read directly into the destination;
    otherwise the data is fetched in blocks and reassembled.
    """
    self.run_once()
    order = order.upper()
    opposite_order = "C" if order == "F" else "F"
    snp_index_list = sp.array(list(snpset_with_snpreader)) # Is there a way to create an array from an iterator without putting it through a list first?
    S = len(snp_index_list)
    S_original = self.snp_count
    N_original = len(self.original_iids)
    # Check if snps and iids indexes are in order and in range
    snps_are_sorted = Hdf5.is_sorted_without_repeats(snp_index_list)
    if hasattr(self,'_ind_used'):
        iid_index_list = self._ind_used
        iid_is_sorted = Hdf5.is_sorted_without_repeats(iid_index_list)
    else:
        # No individual subset requested: take everyone, in file order.
        iid_index_list = sp.arange(N_original)
        iid_is_sorted = True
    N = len(iid_index_list)
    SNPs = sp.empty([N, S], dtype=dtype, order=order)
    matches_order = self.is_snp_major == (order =="F")
    is_simple = not force_python_only and iid_is_sorted and snps_are_sorted and matches_order #If 'is_simple' may be able to use a faster reader
    # case 1 - all snps & all ids requested
    if is_simple and S == S_original and N == N_original:
        self.read_direct(SNPs)
    # case 2 - some snps and all ids
    elif is_simple and N == N_original:
        self.read_direct(SNPs, sp.s_[:,snp_index_list])
    # case 3 all snps and some ids
    elif is_simple and S == S_original:
        self.read_direct(SNPs, sp.s_[iid_index_list,:])
    # case 4 some snps and some ids -- use blocks
    else:
        # NOTE(review): self.blocksize is assumed to be set in __init__
        # (not visible here) -- confirm.
        blocksize = min(self.blocksize, S)
        block = self.create_block(blocksize, dtype, order)
        if not snps_are_sorted:
            # Read in sorted order for I/O efficiency, then scatter back.
            snp_index_index_list = sp.argsort(snp_index_list)
            snp_index_list_sorted = snp_index_list[snp_index_index_list]
        else:
            snp_index_index_list = sp.arange(S)
            snp_index_list_sorted = snp_index_list
        for start in range(0, S, blocksize):
            #print start
            end = min(start+blocksize,S)
            if end-start < blocksize: #On the last loop, the buffer might be too big, so make it smaller
                block = self.create_block(end-start, dtype, order)
            snp_index_list_forblock = snp_index_list_sorted[start:end]
            snp_index_index_list_forblock = snp_index_index_list[start:end]
            self.read_direct(block, sp.s_[:,snp_index_list_forblock])
            SNPs[:,snp_index_index_list_forblock] = block[iid_index_list,:]
    rs = self.rs[snp_index_list]
    pos = self.pos[snp_index_list,:]
    iids = sp.array(self.original_iids[iid_index_list],dtype="str") #Need to make another copy of to stop it from being converted to a list of 1-d string arrays
    has_right_order = (order=="C" and SNPs.flags["C_CONTIGUOUS"]) or (order=="F" and SNPs.flags["F_CONTIGUOUS"])
    #if SNPs.shape == (1, 1):
    assert(SNPs.shape == (N, S) and SNPs.dtype == dtype and has_right_order)
    ret = {
            'rs'     :rs,
            'pos'    :pos,
            'snps'   :SNPs,
            'iid'    :iids
            }
    return ret
@property
def ind_used(self):
    # Indices (and order) of the individuals to return from read();
    # unset until the setter has been called.
    # doesn't need to self.run_once() because only uses original inputs
    return self._ind_used
@ind_used.setter
def ind_used(self, value):
    '''
    Tell the Bed reader to return data for only a subset (perhaps proper) of the individuals in a particular order
    e.g. 2,10,0 says to return data for three users: the user at index position 2, the user at index position 10, and the user at index position 0.
    '''
    # doesn't need to self.run_once() because only uses original inputs
    self._ind_used = value
@staticmethod
def write(snpMatrix, hdf5file, dtype='f8',snp_major=True,compression=None):
    """Write a {'snps','iid','pos','rs'} dict to *hdf5file*.

    *dtype* must be a two-character float spec ('f4' single, 'f8' double).
    The snps matrix is stored transposed when snp_major is True, and the
    orientation is recorded in the 'SNP-major' attribute.
    """
    if not isinstance(dtype, str) or len(dtype) != 2 or dtype[0] != 'f' : raise Exception("Expect dtype to start with 'f', e.g. 'f4' for single, 'f8' for double")
    data = (snpMatrix['snps'].T) if snp_major else snpMatrix['snps']
    with h5py.File(hdf5file, "w") as h5:
        h5.create_dataset('snps', data=data,dtype=dtype,compression=compression,shuffle=True)
        h5['snps'].attrs["SNP-major"] = snp_major
        h5.create_dataset('iid', data=snpMatrix['iid'])
        h5.create_dataset('pos', data=snpMatrix['pos'])
        h5.create_dataset('rs', data=snpMatrix['rs'])
| nilq/baby-python | python |
__author__ = 'lionel'
#!/usr/bin/python
# -*- coding: utf-8 -*-
import struct
import sys
# 搜狗的scel词库就是保存的文本的unicode编码,每两个字节一个字符(中文汉字或者英文字母)
# 找出其每部分的偏移位置即可
# 主要两部分
# 1.全局拼音表,貌似是所有的拼音组合,字典序
# 格式为(index,len,pinyin)的列表
# index: 两个字节的整数 代表这个拼音的索引
# len: 两个字节的整数 拼音的字节长度
# pinyin: 当前的拼音,每个字符两个字节,总长len
#
# 2.汉语词组表
# 格式为(same,py_table_len,py_table,{word_len,word,ext_len,ext})的一个列表
# same: 两个字节 整数 同音词数量
# py_table_len: 两个字节 整数
# py_table: 整数列表,每个整数两个字节,每个整数代表一个拼音的索引
#
# word_len:两个字节 整数 代表中文词组字节数长度
# word: 中文词组,每个中文汉字两个字节,总长度word_len
# ext_len: 两个字节 整数 代表扩展信息的长度,好像都是10
# ext: 扩展信息 前两个字节是一个整数(不知道是不是词频) 后八个字节全是0
#
# {word_len,word,ext_len,ext} 一共重复same次 同音词 相同拼音表
# 拼音表偏移,
startPy = 0x1540
# 汉语词组表偏移
startChinese = 0x2628
# 全局拼音表
GPy_Table = {}
# 解析结果
# 元组(词频,拼音,中文词组)的列表
GTable = []
def byte2str(data):
    """Decode a UTF-16-LE byte buffer from the .scel file into text.

    '\\r' becomes '\\n' and padding spaces are dropped, matching the
    original converter's output.
    """
    i = 0
    length = len(data)
    ret = u''
    while i < length:
        # On Python 3, indexing bytes yields ints, so slice two bytes and
        # unpack the little-endian UTF-16 code unit instead of concatenating.
        t = chr(struct.unpack('<H', data[i:i + 2])[0])
        if t == u'\r':
            ret += u'\n'
        elif t != u' ':
            ret += t
        i += 2
    return ret
# Parse the global pinyin table section of the .scel file.
def getPyTable(data):
    """Populate the module-level GPy_Table with {index: pinyin} entries."""
    # The section must start with these magic bytes. (Bytes literal: on
    # Python 3 comparing bytes to a str is always unequal, which made the
    # original check reject every file.)
    if data[0:4] != b"\x9D\x01\x00\x00":
        return None
    data = data[4:]
    pos = 0
    length = len(data)
    while pos < length:
        # 2-byte pinyin index.
        index = struct.unpack('<H', data[pos:pos + 2])[0]
        pos += 2
        # 2-byte byte-length of the pinyin string.
        size = struct.unpack('<H', data[pos:pos + 2])[0]
        pos += 2
        py = byte2str(data[pos:pos + size])
        GPy_Table[index] = py
        pos += size
# Build the pinyin string of one word from its 2-byte index list.
def getWordPy(data):
    """Concatenate GPy_Table entries for each little-endian uint16 in *data*."""
    pos = 0
    length = len(data)
    ret = u''
    while pos < length:
        # Slice two bytes (Python 3: data[pos] alone is an int).
        index = struct.unpack('<H', data[pos:pos + 2])[0]
        ret += GPy_Table[index]
        pos += 2
    return ret
# Get one word.
# NOTE(review): this is byte-for-byte identical to getWordPy and is not
# called anywhere below -- it looks like a copy-paste leftover. Kept (with
# the same Python 3 fix) for backward compatibility.
def getWord(data):
    pos = 0
    length = len(data)
    ret = u''
    while pos < length:
        index = struct.unpack('<H', data[pos:pos + 2])[0]
        ret += GPy_Table[index]
        pos += 2
    return ret
# Parse the Chinese word table, appending (count, pinyin, word) to GTable.
def getChinese(data):
    pos = 0
    length = len(data)
    while pos < length:
        # Number of words sharing this pinyin.
        same = struct.unpack('<H', data[pos:pos + 2])[0]
        pos += 2
        # Byte length of the pinyin index table.
        py_table_len = struct.unpack('<H', data[pos:pos + 2])[0]
        pos += 2
        # Pinyin of this group of homophones.
        py = getWordPy(data[pos: pos + py_table_len])
        pos += py_table_len
        # One (word, ext) record per homophone. ('xrange' was Python 2 only.)
        for _ in range(same):
            # Byte length of the Chinese word.
            c_len = struct.unpack('<H', data[pos:pos + 2])[0]
            pos += 2
            word = byte2str(data[pos: pos + c_len])
            pos += c_len
            # Extension block: its length, then a 2-byte count (word
            # frequency, presumably -- the format is undocumented).
            ext_len = struct.unpack('<H', data[pos:pos + 2])[0]
            pos += 2
            count = struct.unpack('<H', data[pos:pos + 2])[0]
            GTable.append((count, py, word))
            # Skip the whole extension block to reach the next word.
            pos += ext_len
def deal(file_name):
    """Convert one .scel file: print its metadata and parse its tables."""
    print('-' * 60)
    # Binary read with deterministic close.
    with open(file_name, 'rb') as f:
        data = f.read()
    # Magic header identifying a Sogou .scel file. (Bytes literal: on
    # Python 3 a bytes-vs-str comparison is always unequal, so the original
    # check rejected every file.)
    if data[0:12] != b"\x40\x15\x00\x00\x44\x43\x53\x01\x01\x00\x00\x00":
        print("确认你选择的是搜狗(.scel)词库?")
        sys.exit(0)
    # Fixed-offset metadata fields, stored as UTF-16 text.
    print("词库名:", byte2str(data[0x130:0x338]))  # .encode('GB18030')
    print("词库类型:", byte2str(data[0x338:0x540]))  # .encode('GB18030')
    print("描述信息:", byte2str(data[0x540:0xd40]))  # .encode('GB18030')
    print("词库示例:", byte2str(data[0xd40:startPy]))  # .encode('GB18030')
    getPyTable(data[startPy:startChinese])
    getChinese(data[startChinese:])
if __name__ == '__main__':
    # List the .scel files to convert here.
    o = ['明星【官方推荐】.scel']
    # o = ['全国大酒店名录.scel', '全国宾馆名录.scel', '全国旅行社名录.scel']
    # o = ['饮食大全【官方推荐】.scel']
    # o = ['最详细的全国地名大全.scel']
    for f in o:
        deal(f)
    # Write the accumulated (count, pinyin, word) triples to sougou.txt in
    # input order (unsorted), one "<count> <word>" line each. Opening with
    # an explicit utf-8 encoding replaces the Python-2-only
    # unicode(...).encode('utf-8') dance and closes the file deterministically.
    with open('sougou.txt', 'w', encoding='utf-8') as out:
        for count, py, word in GTable:
            out.write('%(count)s' % {'count': count} + ' ' + word)
            out.write('\n')
| nilq/baby-python | python |
from . import upgrade_0_to_1
from . import upgrade_2_to_3
from . import upgrade_7_to_8
from . import upgrade_8_to_9
def init_new_testsuite(engine, session, name):
    """When all the metadata fields are setup for a suite, call this
    to provision the tables."""
    # We only need to do the test-suite agnostic upgrades,
    # most of the upgrades target nts or compile only.
    # Each step commits before the next runs -- presumably so a failure
    # partway leaves earlier migrations durable; confirm with schema owners.
    upgrade_0_to_1.initialize_testsuite(engine, session, name)
    session.commit()
    upgrade_2_to_3.upgrade_testsuite(engine, session, name)
    session.commit()
    upgrade_7_to_8.upgrade_testsuite(engine, session, name)
    session.commit()
    upgrade_8_to_9.upgrade_testsuite(engine, session, name)
    session.commit()
| nilq/baby-python | python |
class Field:
    """One labelled Tk entry row: left label, StringVar, entry, right label,
    plus an arbitrary 'parameter' payload attached to the row."""

    def __init__(self, left_lb, sv, e, right_lb):
        self._parameter = None
        self._left_lb = left_lb
        self._sv = sv
        self._e = e
        self._right_lb = right_lb

    def set_parameter(self, parameter):
        self._parameter = parameter

    def get_parameter(self):
        return self._parameter

    def set_left_lb_text(self, text):
        # The left label always carries a trailing " :" suffix.
        self._left_lb.config(text="{} :".format(text))

    def set_right_lb_text(self, text):
        self._right_lb.config(text=text)

    def set_text(self, text):
        self.clear_text()
        self._e.insert(0, text)

    def get_text(self):
        return self._e.get()

    def clear_text(self):
        self._e.delete(0, "end")

    def set_callback_on_text_change(self, callback):
        # Invoke *callback* with the StringVar whenever its value changes.
        var = self._sv
        var.trace("w", lambda *_args, sv=var: callback(sv))

    def activate(self):
        self._e["state"] = "normal"

    def disable(self):
        self._e["state"] = "disable"

    def readonly(self):
        self._e["state"] = "readonly"
| nilq/baby-python | python |
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
class BusinessIDValidator(RegexValidator):
    """Validate a business ID of the form NNNNNNN-N (seven digits, a dash,
    one digit) -- presumably a Finnish Y-tunnus; confirm."""
    # \Z anchors at the absolute end of the string (unlike $, which also
    # matches before a trailing newline).
    regex = r"^[0-9]{7}\-[0-9]{1}\Z"
    message = _("Enter a valid business ID.")
| nilq/baby-python | python |
# Import libnacl libs
import libnacl.public
import libnacl.dual
# Import python libs
import unittest
class TestDual(unittest.TestCase):
    '''
    Round-trip tests for libnacl.dual.DualSecret, which carries both a
    curve25519 encryption keypair and an ed25519 signing keypair.
    '''
    def test_secretkey(self):
        '''
        Box encryption between two DualSecret keypairs round-trips in both
        directions, and the two ciphertexts differ.
        '''
        msg = b'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
        bob = libnacl.dual.DualSecret()
        alice = libnacl.dual.DualSecret()
        bob_box = libnacl.public.Box(bob.sk, alice.pk)
        alice_box = libnacl.public.Box(alice.sk, bob.pk)
        bob_ctxt = bob_box.encrypt(msg)
        self.assertNotEqual(msg, bob_ctxt)
        bclear = alice_box.decrypt(bob_ctxt)
        self.assertEqual(msg, bclear)
        alice_ctxt = alice_box.encrypt(msg)
        self.assertNotEqual(msg, alice_ctxt)
        # Decrypt with the *other* party's box. The original used alice_box
        # here; that still worked because crypto_box derives one shared key,
        # but bob_box is what a cross-party round trip is meant to exercise.
        aclear = bob_box.decrypt(alice_ctxt)
        self.assertEqual(msg, aclear)
        self.assertNotEqual(bob_ctxt, alice_ctxt)
    def test_publickey(self):
        '''
        A Box built from an explicit PublicKey wrapper decrypts traffic
        from the matching secret key.
        '''
        msg = b'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
        bob = libnacl.dual.DualSecret()
        alice = libnacl.dual.DualSecret()
        alice_pk = libnacl.public.PublicKey(alice.pk)
        bob_box = libnacl.public.Box(bob.sk, alice_pk)
        alice_box = libnacl.public.Box(alice.sk, bob.pk)
        bob_ctxt = bob_box.encrypt(msg)
        self.assertNotEqual(msg, bob_ctxt)
        bclear = alice_box.decrypt(bob_ctxt)
        self.assertEqual(msg, bclear)
    def test_sign(self):
        '''
        Signatures made by a DualSecret verify both as an attached signed
        message and as a detached signature + message.
        '''
        msg = (b'Well, that\'s no ordinary rabbit. That\'s the most foul, '
               b'cruel, and bad-tempered rodent you ever set eyes on.')
        signer = libnacl.dual.DualSecret()
        signed = signer.sign(msg)
        signature = signer.signature(msg)
        self.assertNotEqual(msg, signed)
        # NOTE(review): libnacl.sign is not imported at the top of this file;
        # it is assumed to be available via the libnacl.dual import -- confirm.
        veri = libnacl.sign.Verifier(signer.hex_vk())
        verified = veri.verify(signed)
        verified2 = veri.verify(signature + msg)
        self.assertEqual(verified, msg)
        self.assertEqual(verified2, msg)
| nilq/baby-python | python |
import numpy as np
import zengl
from objloader import Obj
from PIL import Image
from progress.bar import Bar
from skimage.filters import gaussian
import assets
from window import Window
window = Window(720, 720)
ctx = zengl.context()
# Multisampled color + depth targets for the on-screen render pass.
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)
# Resolution of the baked AO texture and the number of view directions sampled.
size = 1024
samples = 512
# Offscreen integer target: each covered fragment stores the flattened
# address of its UV texel; -1 marks "not covered".
temp_color = ctx.image((size, size), 'r32sint')
temp_depth = ctx.image((size, size), 'depth24plus')
temp_color.clear_value = -1
model = Obj.open(assets.get('ao-map-target.obj')).pack('vx vy vz nx ny nz tx ty')
vertex_buffer = ctx.buffer(model)
uniform_buffer = ctx.buffer(size=64)
ctx.includes['size'] = f'const int size = {size};'
texcoord_pipeline = ctx.pipeline(
vertex_shader='''
#version 330
layout (std140) uniform Common {
mat4 mvp;
};
layout (location = 0) in vec3 in_vertex;
layout (location = 1) in vec3 in_normal;
layout (location = 2) in vec2 in_texcoord;
out vec2 v_texcoord;
void main() {
gl_Position = mvp * vec4(in_vertex, 1.0);
v_texcoord = in_texcoord;
}
''',
fragment_shader='''
#version 330
#include "size"
in vec2 v_texcoord;
layout (location = 0) out int out_address;
void main() {
int tx = int(v_texcoord.x * size + 0.5);
int ty = int(v_texcoord.y * size + 0.5);
out_address = ty * size + tx;
}
''',
layout=[
{
'name': 'Common',
'binding': 0,
},
],
resources=[
{
'type': 'uniform_buffer',
'binding': 0,
'buffer': uniform_buffer,
},
],
framebuffer=[temp_color, temp_depth],
topology='triangles',
cull_face='back',
vertex_buffers=zengl.bind(vertex_buffer, '3f 3f 2f', 0, -1, 2),
vertex_count=vertex_buffer.size // zengl.calcsize('3f 3f 2f'),
)
bar = Bar('Progress', fill='-', suffix='%(percent)d%%', max=samples)
ao = np.zeros(size * size, 'f4')
for i in range(samples):
    # Fibonacci sphere: evenly distributed view directions around the model.
    phi = np.pi * (3.0 - np.sqrt(5.0))
    y = 1.0 - (i / (samples - 1.0)) * 2.0
    x = np.cos(phi * i) * np.sqrt(1.0 - y * y)
    z = np.sin(phi * i) * np.sqrt(1.0 - y * y)
    camera = zengl.camera((x * 5.0, y * 5.0, z * 5.0), (0.0, 0.0, 0.0), aspect=1.0, fov=45.0)
    uniform_buffer.write(camera)
    temp_color.clear()
    temp_depth.clear()
    texcoord_pipeline.render()
    # Count each texel at most once per direction: visible texels accumulate
    # one hit, so 'ao' ends up proportional to how often a texel is exposed.
    t = np.frombuffer(temp_color.read(), 'i4').reshape((size, size))
    ao[np.unique(t[t >= 0])] += 1.0
    bar.next()
# Normalize to [0, 1] and blur slightly to hide rasterization speckle.
ao -= ao.min()
ao /= ao.max()
ao = gaussian(ao, 1.0)
texture = ctx.image((size, size), 'r32float', ao)
Image.fromarray((ao.reshape(size, size) * 255.0).astype('u1'), 'L').save('generated-ao-map.png')
render_pipeline = ctx.pipeline(
vertex_shader='''
#version 330
layout (std140) uniform Common {
mat4 mvp;
};
layout (location = 0) in vec3 in_vertex;
layout (location = 1) in vec3 in_normal;
layout (location = 2) in vec2 in_texcoord;
out vec3 v_normal;
out vec2 v_texcoord;
void main() {
gl_Position = mvp * vec4(in_vertex, 1.0);
v_normal = in_normal;
v_texcoord = in_texcoord;
}
''',
fragment_shader='''
#version 330
uniform sampler2D Texture;
in vec2 v_texcoord;
layout (location = 0) out vec4 out_color;
void main() {
float lum = texture(Texture, v_texcoord).r;
vec3 color = vec3(1.0, 1.0, 1.0);
out_color = vec4(color * lum, 1.0);
}
''',
layout=[
{
'name': 'Common',
'binding': 0,
},
{
'name': 'Texture',
'binding': 0,
},
],
resources=[
{
'type': 'uniform_buffer',
'binding': 0,
'buffer': uniform_buffer,
},
{
'type': 'sampler',
'binding': 0,
'image': texture,
'wrap_x': 'clamp_to_edge',
'wrap_y': 'clamp_to_edge',
},
],
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_buffers=zengl.bind(vertex_buffer, '3f 3f 2f', 0, -1, 2),
vertex_count=vertex_buffer.size // zengl.calcsize('3f 3f 2f'),
)
# Interactive loop: orbit the camera around the model and present the frame.
while window.update():
    x, y = np.cos(window.time * 0.5) * 5.0, np.sin(window.time * 0.5) * 5.0
    camera = zengl.camera((x, y, 1.0), (0.0, 0.0, 0.0), aspect=1.0, fov=45.0)
    uniform_buffer.write(camera)
    image.clear()
    depth.clear()
    render_pipeline.render()
    image.blit()
| nilq/baby-python | python |
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
##process.load("AuxCode.CheckTkCollection.Run123151_RECO_cff")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Route only WARNING and above to stdout; throttle framework reports.
MessageLogger = cms.Service("MessageLogger",
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('WARNING')
    ),
    destinations = cms.untracked.vstring('cout')
)
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR09_R_34X_V2::All'
# Input ALCARECO files on CASTOR, read through rfio.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_123615.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124009.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124020.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124022.root')
    #
    #'rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124024.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124030.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124230.root'
    #,'rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124120.root' #2.36TeV run
)
# -1 == process all events in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.LhcTrackAnalyzer = cms.EDAnalyzer("LhcTrackAnalyzer",
    # TrackCollectionTag = cms.InputTag("generalTracks"),
    TrackCollectionTag = cms.InputTag("ALCARECOTkAlMinBias"),
    PVtxCollectionTag = cms.InputTag("offlinePrimaryVertices"),
    OutputFileName = cms.string("AnalyzerOutput_1.root"),
    Debug = cms.bool(False)
)
process.p = cms.Path(process.LhcTrackAnalyzer)
| nilq/baby-python | python |
import numpy as np

# 2x2x2 array holding 0..7 -- same values as the original nested literal
# [[[0, 1], [2, 3]], [[4, 5], [6, 7]]].
a = np.arange(8).reshape(2, 2, 2)

# Demonstrate how summing collapses different axes of a 3-D array.
print(a.sum(axis=0))
print(a.sum(axis=1))
| nilq/baby-python | python |
import numpy as np
import abc
import os
from typing import NamedTuple, Optional, List, Dict, Tuple, Iterable
from representation.code2vec.common import common
from representation.code2vec.vocabularies import Code2VecVocabs, VocabType
from representation.code2vec.config import Config
class ModelEvaluationResults(NamedTuple):
    """Aggregate metrics produced by one evaluation run."""
    topk_acc: float
    subtoken_precision: float
    subtoken_recall: float
    subtoken_f1: float
    loss: Optional[float] = None

    def __str__(self):
        # Metrics part is always present; a leading loss is prepended only
        # when a loss value was recorded.
        metrics = 'topk_acc: {}, precision: {}, recall: {}, F1: {}'.format(
            self.topk_acc, self.subtoken_precision,
            self.subtoken_recall, self.subtoken_f1)
        if self.loss is None:
            return metrics
        return 'loss: {}, '.format(self.loss) + metrics
class ModelPredictionResults(NamedTuple):
    """Per-example prediction output of the code2vec model."""
    original_name: str
    # Top-k candidate names and their scores, aligned index-for-index.
    topk_predicted_words: np.ndarray
    topk_predicted_words_scores: np.ndarray
    # Maps a (source-token, path, target-token) context triple to its
    # attention weight.
    attention_per_context: Dict[Tuple[str, str, str], float]
    # The example's code vector; only filled when requested.
    code_vector: Optional[np.ndarray] = None
class Code2VecModelBase(abc.ABC):
    """Framework-agnostic base for code2vec models: handles config, logging,
    vocabularies, and save/load plumbing; subclasses implement the network."""
    def __init__(self, config: Config):
        self.config = config
        self.config.verify()
        self._log_creating_model()
        # RELEASE builds skip the (possibly slow) dataset size counting.
        if not config.RELEASE:
            self._init_num_of_examples()
        self._log_model_configuration()
        self.vocabs = Code2VecVocabs(config)
        self.vocabs.target_vocab.get_index_to_word_lookup_table()  # just to initialize it (if not already initialized)
        self._load_or_create_inner_model()
        self._initialize()
    def _log_creating_model(self):
        # Banner marking model construction in the log.
        self.log('')
        self.log('')
        self.log('---------------------------------------------------------------------')
        self.log('---------------------------------------------------------------------')
        self.log('---------------------- Creating code2vec model ----------------------')
        self.log('---------------------------------------------------------------------')
        self.log('---------------------------------------------------------------------')
    def _log_model_configuration(self):
        # Dump every hyper-parameter, name-aligned for readability.
        self.log('---------------------------------------------------------------------')
        self.log('----------------- Configuration - Hyper Parameters ------------------')
        longest_param_name_len = max(len(param_name) for param_name, _ in self.config)
        for param_name, param_val in self.config:
            self.log('{name: <{name_len}}{val}'.format(
                name=param_name, val=param_val, name_len=longest_param_name_len+2))
        self.log('---------------------------------------------------------------------')
    @property
    def logger(self):
        return self.config.get_logger()
    def log(self, msg):
        self.logger.info(msg)
    def _init_num_of_examples(self):
        # Record train/test example counts on the config (used for epoch math).
        self.log('Checking number of examples ...')
        if self.config.is_training:
            self.config.NUM_TRAIN_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.train_data_path)
            self.log('    Number of train examples: {}'.format(self.config.NUM_TRAIN_EXAMPLES))
        if self.config.is_testing:
            self.config.NUM_TEST_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.TEST_DATA_PATH)
            self.log('    Number of test examples: {}'.format(self.config.NUM_TEST_EXAMPLES))
    @staticmethod
    def _get_num_of_examples_for_dataset(dataset_path: str) -> int:
        """Return the dataset's line count, caching it in a sidecar
        '<path>.num_examples' file to avoid recounting on later runs."""
        dataset_num_examples_file_path = dataset_path + '.num_examples'
        if os.path.isfile(dataset_num_examples_file_path):
            with open(dataset_num_examples_file_path, 'r') as file:
                num_examples_in_dataset = int(file.readline())
        else:
            num_examples_in_dataset = common.count_lines_in_file(dataset_path)
            with open(dataset_num_examples_file_path, 'w') as file:
                file.write(str(num_examples_in_dataset))
        return num_examples_in_dataset
    def load_or_build(self):
        self.vocabs = Code2VecVocabs(self.config)
        self._load_or_create_inner_model()
    def save(self, model_save_path=None):
        """Save vocabularies plus the inner model under *model_save_path*
        (defaults to config.MODEL_SAVE_PATH), creating directories as needed."""
        if model_save_path is None:
            model_save_path = self.config.MODEL_SAVE_PATH
        model_save_dir = '/'.join(model_save_path.split('/')[:-1])
        if not os.path.isdir(model_save_dir):
            os.makedirs(model_save_dir, exist_ok=True)
        self.vocabs.save(self.config.get_vocabularies_path_from_model_path(model_save_path))
        self._save_inner_model(model_save_path)
    def _write_code_vectors(self, file, code_vectors):
        # One whitespace-separated vector per line.
        for vec in code_vectors:
            file.write(' '.join(map(str, vec)) + '\n')
    def _get_attention_weight_per_context(
            self, path_source_strings: Iterable[str], path_strings: Iterable[str], path_target_strings: Iterable[str],
            attention_weights: Iterable[float]) -> Dict[Tuple[str, str, str], float]:
        """Zip per-context attention weights with their decoded
        (source, path, target) string triples."""
        attention_weights = np.squeeze(attention_weights, axis=-1)  # (max_contexts, )
        attention_per_context: Dict[Tuple[str, str, str], float] = {}
        # shape of path_source_strings, path_strings, path_target_strings, attention_weights is (max_contexts, )
        # iterate over contexts
        for path_source, path, path_target, weight in \
                zip(path_source_strings, path_strings, path_target_strings, attention_weights):
            string_context_triplet = (common.binary_to_string(path_source),
                                      common.binary_to_string(path),
                                      common.binary_to_string(path_target))
            attention_per_context[string_context_triplet] = weight
        return attention_per_context
    def close_session(self):
        # can be overridden by the implementation model class.
        # default implementation just does nothing.
        pass
    @abc.abstractmethod
    def train(self):
        ...
    @abc.abstractmethod
    def evaluate(self) -> Optional[ModelEvaluationResults]:
        ...
    @abc.abstractmethod
    def predict(self, predict_data_lines: Iterable[str]) -> List[ModelPredictionResults]:
        ...
    @abc.abstractmethod
    def _save_inner_model(self, path):
        ...
    def _load_or_create_inner_model(self):
        # Load an existing model when the config points at one, else build new.
        if self.config.is_loading:
            self._load_inner_model()
        else:
            self._create_inner_model()
    @abc.abstractmethod
    def _load_inner_model(self):
        ...
    def _create_inner_model(self):
        # can be overridden by the implementation model class.
        # default implementation just does nothing.
        pass
    def _initialize(self):
        # can be overridden by the implementation model class.
        # default implementation just does nothing.
        pass
    @abc.abstractmethod
    def _get_vocab_embedding_as_np_array(self, vocab_type: VocabType) -> np.ndarray:
        ...
    def save_word2vec_format(self, dest_save_path: str, vocab_type: VocabType):
        """Export the embedding matrix of *vocab_type* in word2vec text format."""
        if vocab_type not in VocabType:
            raise ValueError('`vocab_type` should be `VocabType.Token`, `VocabType.Target` or `VocabType.Path`.')
        vocab_embedding_matrix = self._get_vocab_embedding_as_np_array(vocab_type)
        index_to_word = self.vocabs.get(vocab_type).index_to_word
        with open(dest_save_path, 'w') as words_file:
            common.save_word2vec_file(words_file, index_to_word, vocab_embedding_matrix)
| nilq/baby-python | python |
from tkinter import *
from tkinter import filedialog
from tkinter.constants import *
import platform
import os
import re
class Window(Frame):
    """Main editor window: a File/Edit menu bar plus one Text widget."""
    desktop_path = os.path.expanduser("~/Desktop")

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.file = None  # path of the currently opened file, if any
        self.init_window()

    def init_window(self):
        """Build the menu bar and the main text area."""
        self.master.title("GUI")
        self.pack()
        # create Menu instance
        menu = Menu(self.master)
        self.master.config(menu=menu)
        self.init_file_menu(menu)
        self.init_edit_menu(menu)
        # main text field
        self.main_text_field = Text(self.master)
        self.main_text_field.config()
        self.main_text_field.pack(fill=BOTH, expand=1)

    def init_file_menu(self, menu_instance):
        """Attach the File menu (Open / Save / Exit)."""
        file_menu = Menu(menu_instance)
        file_menu.add_command(label="Open", command=self.open_file)
        file_menu.add_command(label="Save", command=self.save_file)
        file_menu.add_command(label="Exit", command=self.quit_client)
        menu_instance.add_cascade(label="File", menu=file_menu)

    def init_edit_menu(self, menu_instance):
        """Attach the Edit menu (Undo / Redo)."""
        edit = Menu(menu_instance)
        edit.add_command(label="Undo", command=self.undo_changes)
        edit.add_command(label="Redo", command=self.redo_changes)
        menu_instance.add_cascade(label="Edit", menu=edit)

    def open_file(self):
        """Ask for a file and append its contents to the text field."""
        print("Open file!")
        self.file = filedialog.askopenfilename(initialdir = self.desktop_path, title = "Select file", filetypes = (("TXT files","*.txt"),("all files","*.*")))
        # askopenfilename returns '' (not None) when the dialog is cancelled,
        # so test truthiness rather than 'is not None'.
        if self.file:
            self.main_text_field.insert(END, self.read_file(self.file))

    def read_file(self, filename):
        """Return the full contents of *filename* (closes the handle, which
        the original version leaked)."""
        with open(filename) as f:
            return f.read()

    def save_file(self):
        print("Save file!")  # TODO: not implemented

    def undo_changes(self):
        print("Undo changes!")  # TODO: not implemented

    def redo_changes(self):
        print("Redo changes!")  # TODO: not implemented

    def quit_client(self):
        exit()
root = Tk()
root.grid_columnconfigure(0, weight=1)
# The '-zoomed' attribute is only understood by X11 window managers;
# other platforms use '-fullscreen' instead.
if(platform.system() != 'Linux'):
    root.attributes("-fullscreen", True)
else:
    root.attributes("-zoomed", True)
app = Window(root)
root.mainloop()
import heroku3
from config import Config
# NOTE(review): module-level side effect -- an API client is created at
# import time from the global config key; it is separate from the
# per-instance client HerokuHelper builds below.
client = heroku3.from_key(Config.HEROKU_API_KEY)
class HerokuHelper:
    """Small convenience wrapper around a single Heroku application."""
    def __init__(self,appName,apiKey):
        self.API_KEY = apiKey
        self.APP_NAME = appName
        self.client = self.getClient()
        # Look the app up by name on the authenticated account.
        self.app = self.client.apps()[self.APP_NAME]
    def getClient(self):
        # Fresh heroku3 client authenticated with this helper's API key.
        return heroku3.from_key(self.API_KEY)
    def getAccount(self):
        return self.client.account()
    def getLog(self):
        # Recent log output of the app.
        return self.app.get_log()
    def addEnvVar(self,key,value):
        # Set a single config var on the app.
        self.app.config()[key] = value
    def restart(self):
        return self.app.restart()
| nilq/baby-python | python |
from django.apps import AppConfig
class LoverRecorderConfig(AppConfig):
    """Django application configuration for the lover_recorder app."""
    name = 'lover_recorder'
| nilq/baby-python | python |
import numpy
SCENARIO_VERSION = '2020a'  # default scenario version for writing scenario files
SUPPORTED_COMMONROAD_VERSIONS = {'2018b', '2020a'}  # supported version for reading scenario files
# Full circle in radians, precomputed once.
TWO_PI = 2.0 * numpy.pi
| nilq/baby-python | python |
import random

# Number-guessing game: 4 attempts to find a random number in [1, 50].
print("Hi, please enter your name")
name = input()  # input 1
secretNumber = random.randint(1, 50)
print(name + ' Guess the number between 1 & 50', '\nYou have 4 tries')
for attempts in range(1, 5):
    print('Take a guess')
    # Keep prompting until the user types a valid integer.
    while True:
        try:
            guess = int(input())
            break
        except ValueError:
            print('Please Enter a Number')
    if guess < secretNumber:
        print('Too Low, you have ' + str(4 - attempts) + ' attempts remaining')
    elif guess > secretNumber:
        print('Too High, you have ' + str(4 - attempts) + ' attempts remaining')
    else:
        # Correct guess: stop early; the result message is printed below.
        break
if guess == secretNumber:
    print('Well Done ' + name)
else:
    print('Too Many Attempts ' + str(attempts) + ', It was ' + str(secretNumber))
# NOTE(review): unfinished stub -- this is NOT valid Python and will not
# import as-is: RenderBasic is undefined, __init__ lacks 'self' and a body,
# and the two 'def' lines below have no argument lists, colons or bodies.
class PrettyEnv(RenderBasic):
    def __init__( ):
    def getBestEnv
    def getEnvList
| nilq/baby-python | python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""__init__"""
from .equal import equal_manual, equal_auto
from .greater_equal import greater_equal_manual, greater_equal_auto
from .less_equal import less_equal_manual, less_equal_auto
from .cast import cast_manual, cast_auto
from .tile import tile_manual, tile_auto
from .one_hot import one_hot_manual, one_hot_auto
from .sqrt import sqrt_manual, sqrt_auto
from .sub import sub_manual, sub_auto
from .add import add_manual, add_auto
from .addn import addn_manual, addn_auto
from .rsqrt import rsqrt_manual, rsqrt_auto
from .expand_dims import expand_dims_manual, expand_dims_auto
from .batch_matmul import batch_matmul_manual, batch_matmul_auto
from .mul import mul_manual, mul_auto
from .exp import exp_manual, exp_auto
from .divide import divide_manual, divide_auto
from .maximum import maximum_manual, maximum_auto
from .minimum import minimum_manual, minimum_auto
from .reshape import reshape_manual, reshape_auto
from .trans_data import trans_data_manual, trans_data_auto
from .log import log_manual, log_auto
from .pow import pow_manual, pow_auto
from .reduce_sum import reduce_sum_manual, reduce_sum_auto
from .abs import abs_manual, abs_auto
from .neg import neg_manual, neg_auto
from .round import round_manual, round_auto
from .select import select_manual, select_auto
from .reciprocal import reciprocal_manual, reciprocal_auto
from .reduce_min import reduce_min_manual, reduce_min_auto
from .reduce_max import reduce_max_manual, reduce_max_auto
from .pad import pad_manual, pad_auto
from .resize import resize_manual, resize_auto
from .resize_nearest_neighbor_grad import resize_nearest_neighbor_grad_manual, resize_nearest_neighbor_grad_auto
from .fused_pad import fused_pad_manual, fused_pad_auto
from .fused_bn_reduce import fused_bn_reduce_manual, fused_bn_reduce_auto
from .fused_bn_update import fused_bn_update_manual, fused_bn_update_auto
from .fused_bn_follow_relu import fused_bn_follow_relu_manual, fused_bn_follow_relu_auto
from .fused_bn_follow_relu_avgpool import fused_bn_follow_relu_avgpool_manual, fused_bn_follow_relu_avgpool_auto
from .fused_bn_double_follow_relu import fused_bn_double_follow_relu_manual, fused_bn_double_follow_relu_auto
from .fused_bn_reduce_grad import fused_bn_reduce_grad_manual, fused_bn_reduce_grad_auto
from .fused_relu_grad_bn_reduce_grad import fused_relu_grad_bn_reduce_grad_manual, fused_relu_grad_bn_reduce_grad_auto
from .fused_relu_grad_bn_double_reduce_grad import fused_relu_grad_bn_double_reduce_grad_manual, fused_relu_grad_bn_double_reduce_grad_auto
from .fused_l2loss_grad import fused_l2loss_grad_manual, fused_l2loss_grad_auto
from .fused_is_finite import fused_is_finite_manual, fused_is_finite_auto
from .fused_relu_grad_bn_update_grad import fused_relu_grad_bn_update_grad_manual, fused_relu_grad_bn_update_grad_auto
from .fused_relu_grad_bn_double_update_grad import fused_relu_grad_bn_double_update_grad_manual, fused_relu_grad_bn_double_update_grad_auto
from .fused_relu_grad import fused_relu_grad_manual, fused_relu_grad_auto
from .fused_bn_update_grad import fused_bn_update_grad_manual, fused_bn_update_grad_auto
from .fused_mul_div_rsqrt_mul_isfinite_red import fused_mul_div_rsqrt_mul_isfinite_red_manual, fused_mul_div_rsqrt_mul_isfinite_red_auto
| nilq/baby-python | python |
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: globalids.py
#
# Tests: libsim - connecting to simulation and retrieving data from it.
# mesh - 3D unstructured mesh.
# global node and cell ids
# unstructured ghost cell generation from global ids
#
# Programmer: Brad Whitlock
# Date: Tue Jun 17 16:32:51 PDT 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
def step(sim):
    # Advance the simulation one step via its console, then block until the
    # "-echo"-enabled simulation echoes the command back on its stderr --
    # a crude synchronization barrier so plots update after the step lands.
    # NOTE: Python 2 code (print statement), as is the rest of this script.
    sim.consolecommand("step")
    # Read from stderr to look for the echoed command. Sync.
    keepGoing = True
    while keepGoing:
        buf = sim.p.stderr.readline()
        print buf
        if "Command step" in buf:
            keepGoing = False
def set_the_view():
    # Apply the fixed 3D camera preset used by the test0() screenshots.
    v = GetView3D()
    for attr, value in (("viewNormal", (-0.707418, 0.404282, 0.579755)),
                        ("focus", (0.5, 0.5, 0.5)),
                        ("viewUp", (0.294715, 0.914272, -0.27794)),
                        ("viewAngle", 30),
                        ("parallelScale", 0.866025),
                        ("nearPlane", -1.73205),
                        ("farPlane", 1.73205),
                        ("imagePan", (0, 0.0589478)),
                        ("imageZoom", 1.0963),
                        ("perspective", 1),
                        ("eyeAngle", 2),
                        ("centerOfRotationSet", 0),
                        ("centerOfRotation", (0.5, 0.5, 0.5)),
                        ("axis3DScaleFlag", 0),
                        ("axis3DScales", (1, 1, 1)),
                        ("shear", (0, 0, 1)),
                        ("windowValid", 1)):
        setattr(v, attr, value)
    SetView3D(v)
def set_the_view2():
    # Apply the fixed 3D camera preset used by the test1() screenshots.
    v = GetView3D()
    for attr, value in (("viewNormal", (-0.542717, -0.70433, 0.457578)),
                        ("focus", (0.5, 0.5, 0.5)),
                        ("viewUp", (0.252732, 0.3826, 0.888675)),
                        ("viewAngle", 30),
                        ("parallelScale", 0.722842),
                        ("nearPlane", -1.44568),
                        ("farPlane", 1.44568),
                        ("imagePan", (-0.00135472, 0.013532)),
                        ("imageZoom", 1.12868),
                        ("perspective", 1),
                        ("eyeAngle", 2),
                        ("centerOfRotationSet", 0),
                        ("centerOfRotation", (0.5, 0.5, 0.5)),
                        ("axis3DScaleFlag", 0),
                        ("axis3DScales", (1, 1, 1)),
                        ("shear", (0, 0, 1)),
                        ("windowValid", 1)):
        setattr(v, attr, value)
    SetView3D(v)
def add_time(times):
    # Query VisIt for the current simulation time and append it, one value
    # per line, to the accumulating log string.
    Query("Time")
    return times + str(GetQueryOutputValue()) + "\n"
def start_time():
    # Start a fresh time log: header line plus the current simulation time.
    return add_time("Times:\n")
def test0(sim):
    # Exercise global node/zone ids on the 3D unstructured mesh, then check
    # that ghost cells generated from global ids keep a Subset plot hollow
    # across time steps (the sim resizes domains each step, so global ids
    # change too).
    TestSection("Topologically 3D meshes in 3D")
    DefineScalarExpression("gnid", "global_nodeid(mesh)")
    DefineScalarExpression("gzid", "global_zoneid(mesh)")
    DefineScalarExpression("nid", "nodeid(mesh)")
    AddPlot("Pseudocolor", "nid")
    DrawPlots()
    set_the_view()
    Test("globalids02")
    ChangeActivePlotsVar("gzid")
    DrawPlots()
    Test("globalids03")
    ChangeActivePlotsVar("gnid")
    DrawPlots()
    Test("globalids04")
    DeleteAllPlots()
    AddPlot("Subset", "Domains")
    DrawPlots()
    Test("globalids05")
    # Make sure that the plot is hollow
    s = SubsetAttributes(1)
    s_clear = SubsetAttributes(1)
    s_clear.opacity = 0.25
    s_clear.colorType = s_clear.ColorBySingleColor
    s_clear.singleColor = (200,200,200,255)
    SetPlotOptions(s_clear)
    DrawPlots()
    Test("globalids06")
    # Advance some steps and make sure that the plot
    # stays transparent. We're changing the size of the
    # domains at each time step and thus the global ids.
    times = start_time()
    idx = 7
    for i in xrange(3):
        # Advance some steps. This should make the plots update.
        nsteps = 5
        for j in xrange(nsteps):
            step(sim)
        DrawPlots()
        times = add_time(times)
        # Capture both the opaque and the translucent rendering.
        SetPlotOptions(s)
        Test("globalids%02d" % idx)
        idx = idx + 1
        SetPlotOptions(s_clear)
        Test("globalids%02d" % idx)
        idx = idx + 1
    # Record the list of simulation times we stepped through.
    TestText("globalids%02d" % idx, times)
    idx = idx + 1
    DeleteAllPlots()
def hideplot(id):
    # Hide plot `id`, but only if it is currently visible (hiddenFlag == 0);
    # HideActivePlots() toggles, so calling it on a hidden plot would show it.
    if GetPlotList().GetPlots(id).hiddenFlag == 0:
        SetActivePlots(id)
        HideActivePlots()
def showplot(id):
    # Show plot `id`, but only if it is currently hidden (hiddenFlag == 1);
    # HideActivePlots() toggles, so calling it on a visible plot would hide it.
    if GetPlotList().GetPlots(id).hiddenFlag == 1:
        SetActivePlots(id)
        HideActivePlots()
def test1(sim):
    # Same idea as test0() but for the topologically 2D "surface" mesh:
    # cycle through node id / global node id / global zone id renderings,
    # stepping the simulation between rounds.
    TestSection("Topologically 2D meshes in 3D")
    DefineScalarExpression("gnid2d", "global_nodeid(surface)")
    DefineScalarExpression("gzid2d", "global_zoneid(surface)")
    DefineScalarExpression("nid2d", "nodeid(surface)")
    # Plot 0: black wireframe of the material boundaries.
    AddPlot("FilledBoundary", "surfacemat")
    fb = FilledBoundaryAttributes(1)
    fb.colorType = fb.ColorBySingleColor
    fb.singleColor = (0,0,0,255)
    fb.wireframe = 1
    fb.lineWidth = 3
    SetPlotOptions(fb)
    # Plot 1: domain subset; plot 2: pseudocolor of the id expressions.
    AddPlot("Subset", "Domains(surface)")
    AddPlot("Pseudocolor", "nid2d")
    DrawPlots()
    set_the_view2()
    idx = 0
    times = start_time()
    ntests = 4
    for i in xrange(3):
        ids = [idx+j for j in range(ntests)]
        # Show the Subset plot
        showplot(1)
        hideplot(2)
        Test("globalids_1_%02d" % ids[0])
        # Show the Pseudocolor plot
        hideplot(1)
        showplot(2)
        ChangeActivePlotsVar("nid2d")
        Test("globalids_1_%02d" % ids[1])
        ChangeActivePlotsVar("gnid2d")
        Test("globalids_1_%02d" % ids[2])
        ChangeActivePlotsVar("gzid2d")
        Test("globalids_1_%02d" % ids[3])
        SetActivePlots(0)
        times = add_time(times)
        # Take a step.
        showplot(1)
        step(sim)
        idx = idx + ntests
    # Record the list of simulation times we stepped through.
    TestText("globalids_1_%02d" % idx, times)
    DeleteAllPlots()
def main():
    # Create our simulation object.
    # "-echo" makes the sim echo console commands back, which step() relies
    # on to synchronize with the simulation.
    sim = TestSimulation("globalids", "globalids.sim2")
    sim.addargument("-echo")
    # Test that we can start and connect to the simulation.
    started, connected = TestSimStartAndConnect("globalids00", sim)
    # Perform our tests.
    if connected:
        # Make sure the metadata is right.
        TestSimMetaData("globalids01", sim.metadata())
        test0(sim)
        test1(sim)
    # Close down the simulation.
    if started:
        sim.endsim()
# Script entry point: run the simulation tests, then exit the VisIt test CLI.
main()
Exit()
| nilq/baby-python | python |
import sys
import typing
def equation(a: int, b: int, c: int) -> typing.Tuple[int, int]:
    """Solve a*x**2 + b*x + c = 0 and return the two roots, truncated to int.

    Returns (x1, x2) with x1 using the '+' branch of the quadratic formula.
    Results are exact only when the true roots are integers (the callers'
    test cases all are). Raises ZeroDivisionError if a == 0; a negative
    discriminant yields complex roots, which int() will reject.
    """
    # Hoist the discriminant: it was computed twice in the original.
    disc_root = (b ** 2 - 4 * a * c) ** 0.5
    x1 = (-b + disc_root) / (2 * a)
    x2 = (-b - disc_root) / (2 * a)
    return int(x1), int(x2)
def test() -> None:
    """Regression checks for equation() on known integer-root cases."""
    cases = (
        ((1, -3, -4), (4, -1)),
        ((13, 236, -396), (1, -19)),
        ((23, -116, 96), (4, 1)),
    )
    for coefficients, expected in cases:
        assert equation(*coefficients) == expected
if __name__ == '__main__':
    # Run the self-checks first; test() raises AssertionError on regression.
    test()
    # With three coefficients on the command line, solve and print each root.
    # (The original built a throwaway list comprehension purely for the
    # print side effect and kept an unused `args` variable; both removed.)
    if len(sys.argv) >= 4:
        for root in equation(int(sys.argv[1]),
                             int(sys.argv[2]),
                             int(sys.argv[3])):
            print(root)
| nilq/baby-python | python |
#
# PySNMP MIB module CISCO-MOBILITY-TAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MOBILITY-TAP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:07:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
cTap2StreamIndex, cTap2MediationContentId = mibBuilder.importSymbols("CISCO-TAP2-MIB", "cTap2StreamIndex", "cTap2MediationContentId")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Unsigned32, iso, TimeTicks, ModuleIdentity, Counter32, IpAddress, NotificationType, ObjectIdentity, MibIdentifier, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "TimeTicks", "ModuleIdentity", "Counter32", "IpAddress", "NotificationType", "ObjectIdentity", "MibIdentifier", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "Gauge32")
TruthValue, TextualConvention, StorageType, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "StorageType", "DisplayString", "RowStatus")
# Module identity for CISCO-MOBILITY-TAP-MIB, anchored under ciscoMgmt
# (OID 1.3.6.1.4.1.9.9.672), plus revision history and contact metadata.
# This section is pysmi-generated; edit the ASN.1 source, not this file.
ciscoMobilityTapMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 672))
ciscoMobilityTapMIB.setRevisions(('2010-06-16 00:00', '2010-04-15 00:00', '2008-08-05 00:00',))
# NOTE(review): setRevisionsDescriptions is gated on the builder version
# being newer than (4, 4, 0) -- presumably an API availability check; confirm.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoMobilityTapMIB.setRevisionsDescriptions(('Added a new textual convention: CmtapLawfulInterceptID. Added following three objects to cmtapStreamTable. cmtapStreamLIIdentifier. cmtapStreamLocationInfo. cmtapStreamInterceptType. Added the following new MODULE-COMPLIANCE. ciscoMobilityTapMIBComplianceRev01. Added the following new OBJECT-GROUP. ciscoMobilityTapStreamGroupSup1.', "Added enumeration 'servedMdn' for mtapStreamCapabilities object and CmtapSubscriberIDType.", 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setLastUpdated('201006160000Z')
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setContactInfo('Cisco Systems Customer Service Postal:170 W. Tasman Drive San Jose, CA 95134 USA Tel:+1 800 553-NETS E-mail:cs-li@cisco.com')
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setDescription("This module manages Cisco's intercept feature for Mobility Gateway Products. This MIB is used along with CISCO-TAP2-MIB MIB to intercept Mobility Gateway traffic. CISCO-TAP2-MIB MIB along with specific filter MIBs like this MIB replace the CISCO-TAP-MIB MIB. To create a Mobility intercept, an entry cmtapStreamEntry is created which contains the filter details. An entry cTap2StreamEntry of CISCO-TAP2-MIB is created which is the common stream information for all kinds of intercepts and type of the specific stream is set to 'mobility' in this entry.")
# Top-level OID subtrees: notifications (.0), objects (.1), conformance (.2).
ciscoMobilityTapMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 0))
ciscoMobilityTapMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 1))
ciscoMobilityTapMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 2))
cmtapStreamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1))
class CmtapLawfulInterceptID(TextualConvention, OctetString):
    # Textual convention: a CALEA Lawful Intercept Identifier (LIID),
    # constrained to 3..256 octets.
    description = 'An octet string containing the Lawful Intercept Identifier (LIID)assigned to the intercepted target by a law enforcement agency defined by Communications Assistance for Law Enforcement Act (CALEA).'
    status = 'current'
    displayHint = '256a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 256)
class CmtapSubscriberIDType(TextualConvention, Integer32):
    # Textual convention: enumeration of subscriber identifier kinds
    # (unknown/msid/imsi/nai/esn/servedMdn); see `description` for details.
    description = "A value that represents the type of address that is used to identify a subscriber. The following types are currently supported: unknown: The Subscriber's identifier type is not known. msid: A Mobile Subscriber Identity (MSID). imsi: An International Mobile Subscriber Identity(IMSI) number. nai: A Network Access Identifier (NAI). esn: An Electronic Serial Number (ESN). servedMdn: Served Mdn(mobile directory number) is a vendor specific attribute. It is similar to the class IETF attribute. Refer to RFC 2865 for vendor specific attribute format. Example:dsg-mdn."
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("unknown", 1), ("msid", 2), ("imsi", 3), ("nai", 4), ("esn", 5), ("servedMdn", 6))
class CmtapSubscriberID(TextualConvention, OctetString):
    # Textual convention: a subscriber identifier string (3..256 octets),
    # always interpreted together with a CmtapSubscriberIDType value.
    description = "An octet string containing a subscriber's identification, preferably in human-readable form. A CmtapStreamSubscriberID value is always interpreted within the context of an CmtapStreamSubscriberIDType value. Every usage of the CmtapStreamSubscriberID textual convention is required to specify the identity that corresponds to a CmtapStreamSubscriberIDType object."
    status = 'current'
    displayHint = '256a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 256)
# Scalar advertising which intercept capabilities this device implements.
cmtapStreamCapabilities = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 1), Bits().clone(namedValues=NamedValues(("tapEnable", 0), ("interface", 1), ("calledSubscriberID", 2), ("nonvolatileStorage", 3), ("msid", 4), ("imsi", 5), ("nai", 6), ("esn", 7), ("servedMdn", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cmtapStreamCapabilities.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamCapabilities.setDescription("This object indicates the Mobility Gateway intercept features that are implemented by this device and are manageable through this MIB. tapEnable: set if table entries with cTap2StreamInterceptEnable set to 'false' are used to pre-screen packets for intercept; otherwise these entries are ignored. interface: SNMP ifIndex Value may be used to select interception of all data crossing an interface or set of interfaces. nonvolatileStorage: The cmTapStreamTable supports the ability to store rows in nonvolatile memory. calledSubscriberID: The cmtapStreamCalledSubscriberID can be used to specify intercepts. Otherwise, this field is disabled. msid: A Mobile Subscriber Identity (MSID) can be used in the ID strings to specify intercepts. imsi: An International Mobile Subscriber Identity (IMSI) number can be used ID strings to specify intercepts. nai: A Network Access Identifier (NAI) can be used in the ID strings to specify intercepts. esn: An Electronic Serial Number (ESN) can be used in the ID strings to specify intercepts. servedMdn: Vendor specific attribute Served-Mobile Directory Number(MDN) can be used in the ID strings to specify intercepts.")
# cmtapStreamTable: one row per intercepted data stream, indexed by the
# mediation device content id and the stream index from CISCO-TAP2-MIB.
cmtapStreamTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2), )
if mibBuilder.loadTexts: cmtapStreamTable.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamTable.setDescription('The Mobility Stream Table lists the data streams to be intercepted. The same data stream may be required by multiple taps. This essentially provides options for packet selection, only some of which might be used. For example, if all the traffic to or from a subscriber is to be intercepted, one would configure an entry listing SubscriberID along with the SubscriberIDType corresponding to the stream that one wishes to intercept. The first index indicates which Mediation Device the intercepted traffic will be diverted to. The second index, which indicates the specific intercept stream, permits multiple classifiers to be used together. For example, an IP stream and a Mobility stream could both be listed in their respective tables, yet still correspond to the same Mediation Device entry. Entries are added to this table via cmtapStreamStatus in accordance with the RowStatus convention.')
cmtapStreamEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-TAP2-MIB", "cTap2MediationContentId"), (0, "CISCO-TAP2-MIB", "cTap2StreamIndex"))
if mibBuilder.loadTexts: cmtapStreamEntry.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamEntry.setDescription('A stream entry indicates a single data stream to be intercepted to a Mediation Device. Many selected data streams may go to the same application interface and many application interfaces are supported.')
cmtapStreamCalledSubscriberIDType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 1), CmtapSubscriberIDType().clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberIDType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberIDType.setDescription('Identifies the type of address that is stored in the cmtapStreamCalledSubscriberID string.')
cmtapStreamCalledSubscriberID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 2), CmtapSubscriberID()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberID.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberID.setDescription('A string used to identify the party being contacted. The type of this identification is determined by the cmtapStreamCalledSubscriberIDType object.')
cmtapStreamSubscriberIDType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 3), CmtapSubscriberIDType().clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamSubscriberIDType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamSubscriberIDType.setDescription('Identifies the type of address that is stored in the cmtapStreamSubscriberID string.')
cmtapStreamSubscriberID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 4), CmtapSubscriberID()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamSubscriberID.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamSubscriberID.setDescription('A string used to identify the subscriber to tap. The type of this indentification is determined by the cmtapStreamSubscriberIDType object.')
cmtapStreamStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 5), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamStorageType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamStorageType.setDescription("This object specifies the storage type of this conceptual row. If it is set to 'nonVolatile', this entry can be saved into non-volatile memory.")
cmtapStreamStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamStatus.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamStatus.setDescription("The status of this conceptual row. This object manages creation, modification, and deletion of rows in this table. When any field must be changed, cmtapStreamStatus must be first set to 'notInService'.")
cmtapStreamLIIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 7), CmtapLawfulInterceptID().clone('not set')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamLIIdentifier.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamLIIdentifier.setDescription('This object is an identifier assigned by a Law Enforcement Agency (LEA) to facilitate LI operations as defined in 3GPP TS 33.108 v8.7.0 standards document.')
cmtapStreamLocationInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 8), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamLocationInfo.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamLocationInfo.setDescription('This object indicates, if the userLocationInfo object should be included in the Intercept Related Information (IRI) messages sent by the gateway to mediation gateway(s) for interception taps. The userLocationInfo is defined as part of the IRI messages in 3GPP 33.108 v8.7.0 standards document.')
cmtapStreamInterceptType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ccOnly", 1), ("iriOnly", 2), ("both", 3))).clone('both')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamInterceptType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamInterceptType.setDescription('This object indicates the intercept type of the tapped stream. The tap can be provisioned to intercept control messages (IRI) from the tapped stream, the payload (CC) messages from the tapped stream or both. The format of these messages in defined in 3GPP TS 33.108 v8.7.0 standards document. ccOnly(1) - Content of communication interception only. iriOnly(2) - Intercept Related Information only. both(3) - Intercept both: CC and IRI.')
# Conformance section: compliance statements and object groups.
ciscoMobilityTapMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 1))
ciscoMobilityTapMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2))
ciscoMobilityTapMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 1, 1)).setObjects(("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapCapabilityGroup"), ("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapStreamGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoMobilityTapMIBCompliance = ciscoMobilityTapMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoMobilityTapMIBCompliance.setDescription('The compliance statement for entities which implement the Cisco Intercept MIB for Mobility Gateways')
ciscoMobilityTapMIBComplianceRev01 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 1, 2)).setObjects(("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapCapabilityGroup"), ("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapStreamGroup"), ("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapStreamGroupSup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoMobilityTapMIBComplianceRev01 = ciscoMobilityTapMIBComplianceRev01.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapMIBComplianceRev01.setDescription('The compliance statement for entities which implement the Cisco Intercept MIB for Mobility Gateways.')
ciscoMobilityTapCapabilityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2, 1)).setObjects(("CISCO-MOBILITY-TAP-MIB", "cmtapStreamCapabilities"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoMobilityTapCapabilityGroup = ciscoMobilityTapCapabilityGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapCapabilityGroup.setDescription('A collection of objects which provide Mobility Gateway capabilities for the system.')
ciscoMobilityTapStreamGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2, 2)).setObjects(("CISCO-MOBILITY-TAP-MIB", "cmtapStreamCalledSubscriberIDType"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamCalledSubscriberID"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamSubscriberIDType"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamSubscriberID"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamStorageType"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoMobilityTapStreamGroup = ciscoMobilityTapStreamGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapStreamGroup.setDescription('A collection of objects which provide information about the stream from which we wish to intercept packets.')
ciscoMobilityTapStreamGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2, 3)).setObjects(("CISCO-MOBILITY-TAP-MIB", "cmtapStreamLIIdentifier"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamLocationInfo"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamInterceptType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoMobilityTapStreamGroupSup1 = ciscoMobilityTapStreamGroupSup1.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapStreamGroupSup1.setDescription('A collection of objects which provide additional information about the stream from which we wish to intercept packets.')
# Register every symbol with the MIB builder so importers can resolve them.
mibBuilder.exportSymbols("CISCO-MOBILITY-TAP-MIB", cmtapStreamInterceptType=cmtapStreamInterceptType, ciscoMobilityTapMIBConform=ciscoMobilityTapMIBConform, CmtapLawfulInterceptID=CmtapLawfulInterceptID, cmtapStreamStorageType=cmtapStreamStorageType, cmtapStreamGroup=cmtapStreamGroup, cmtapStreamCalledSubscriberIDType=cmtapStreamCalledSubscriberIDType, ciscoMobilityTapMIBNotifs=ciscoMobilityTapMIBNotifs, cmtapStreamCalledSubscriberID=cmtapStreamCalledSubscriberID, CmtapSubscriberID=CmtapSubscriberID, ciscoMobilityTapMIBComplianceRev01=ciscoMobilityTapMIBComplianceRev01, cmtapStreamTable=cmtapStreamTable, cmtapStreamSubscriberID=cmtapStreamSubscriberID, cmtapStreamEntry=cmtapStreamEntry, PYSNMP_MODULE_ID=ciscoMobilityTapMIB, ciscoMobilityTapStreamGroupSup1=ciscoMobilityTapStreamGroupSup1, cmtapStreamLocationInfo=cmtapStreamLocationInfo, cmtapStreamLIIdentifier=cmtapStreamLIIdentifier, ciscoMobilityTapStreamGroup=ciscoMobilityTapStreamGroup, ciscoMobilityTapMIBGroups=ciscoMobilityTapMIBGroups, cmtapStreamSubscriberIDType=cmtapStreamSubscriberIDType, cmtapStreamStatus=cmtapStreamStatus, ciscoMobilityTapMIBCompliance=ciscoMobilityTapMIBCompliance, ciscoMobilityTapMIB=ciscoMobilityTapMIB, ciscoMobilityTapMIBCompliances=ciscoMobilityTapMIBCompliances, cmtapStreamCapabilities=cmtapStreamCapabilities, ciscoMobilityTapCapabilityGroup=ciscoMobilityTapCapabilityGroup, CmtapSubscriberIDType=CmtapSubscriberIDType, ciscoMobilityTapMIBObjects=ciscoMobilityTapMIBObjects)
| nilq/baby-python | python |
from conans import ConanFile, tools
class McapConan(ConanFile):
    """Conan recipe packaging mcap as a header-only C++ library."""
    name = "mcap"
    version = "0.0.1"
    url = "https://github.com/foxglove/mcap"
    homepage = "https://github.com/foxglove/mcap"
    description = "A C++ implementation of the MCAP file format"
    license = "Apache-2.0"
    topics = ("mcap", "serialization", "deserialization", "recording")
    settings = ("os", "compiler", "build_type", "arch")
    requires = ("fmt/8.1.1", "lz4/1.9.3", "zstd/1.5.2")
    generators = "cmake"
    def validate(self):
        # mcap requires C++17; fail early if the consumer's profile is older.
        tools.check_min_cppstd(self, "17")
    def configure(self):
        pass
    def package(self):
        # Header-only: ship the license and the headers, nothing to build.
        self.copy(pattern="LICENSE", dst="licenses")
        self.copy("include/*")
    def package_id(self):
        # Identical package id for every settings combination (header-only).
        self.info.header_only()
| nilq/baby-python | python |
# -----------------------------------
# import
# -----------------------------------
from .basebox import FullBox
from heifpy.file import BinaryFileReader
# -----------------------------------
# define
# -----------------------------------
# -----------------------------------
# function
# -----------------------------------
# -----------------------------------
# class
# -----------------------------------
class MovieHeaderBox(FullBox):
    """
    ISO/IEC 14496-12
    Box Type: ‘mvhd’
    Container: Movie Box (‘moov’)
    Mandatory: Yes
    Quantity: Exactly One
    """
    def __init__(self):
        super(MovieHeaderBox, self).__init__()
        # All fields below are populated by parse(). Times and duration are
        # read as 64-bit values when the box version is 1, 32-bit otherwise.
        self.creation_time = 0
        self.modification_time = 0
        self.timescale = 0
        self.duration = 0
        self.rate = 0
        self.volume = 0
        self.matrix = 0       # becomes a 9-element list in parse()
        self.predefined = 0   # becomes a 6-element list in parse()
        self.next_track_ID = 0
    def parse(self, reader: BinaryFileReader) -> None:
        """Read the mvhd payload from `reader`, honoring the box version.

        Field order follows ISO/IEC 14496-12; do not reorder the reads.
        """
        super(MovieHeaderBox, self).parse(reader)
        if self.get_version() == 1:
            # version 1: 64-bit creation/modification times and duration
            self.creation_time = reader.read64()
            self.modification_time = reader.read64()
            self.timescale = reader.read32()
            self.duration = reader.read64()
        else:
            # version 0: all 32-bit
            self.creation_time = reader.read32()
            self.modification_time = reader.read32()
            self.timescale = reader.read32()
            self.duration = reader.read32()
        self.rate = reader.read32()
        self.volume = reader.read16()
        reader.read16()  # reserved
        reader.read32()  # reserved
        reader.read32()  # reserved
        self.matrix = []
        for _ in range(9):
            self.matrix.append(reader.read32())
        self.predefined = []
        for _ in range(6):
            self.predefined.append(reader.read32())
        self.next_track_ID = reader.read32()
        # Sanity check: the whole box payload must have been consumed.
        assert self.read_complete(reader), f'{self.type} num bytes left not 0.'
    def print_box(self) -> None:
        """Dump all parsed mvhd fields to stdout for debugging."""
        super(MovieHeaderBox, self).print_box()
        print("creation_time :", self.creation_time)
        print("modification_time :", self.modification_time)
        print("timescale :", self.timescale)
        print("duration :", self.duration)
        print("rate :", self.rate)
        print("volume :", self.volume)
        print("matrix :", self.matrix)
        print("predefined :", self.predefined)
        print("next_track_ID :", self.next_track_ID)
# -----------------------------------
# main
# -----------------------------------
# Library module: nothing to run when executed directly.
if __name__ == '__main__':
    pass
| nilq/baby-python | python |
import base64
def decode(data):
    """Decode a URL-safe base64 string, restoring any stripped '=' padding."""
    # urlsafe_b64decode requires the input length to be a multiple of 4;
    # (-len) % 4 is the number of '=' characters that were trimmed off.
    padded = data + "=" * (-len(data) % 4)
    return base64.urlsafe_b64decode(padded)
| nilq/baby-python | python |
import screendetect
import os
import sys
import time
import keyboard
import pyautogui
import playsound
import pydirectinput
def play():
    # Placeholder for the per-iteration bot logic driven by loop().
    pass
def start():
    """Drive the game's start sequence: click through the menus, wait for the
    loading screen to appear, then play a notification sound."""
    time.sleep(3)
    pydirectinput.click(900, 550)
    pydirectinput.click(1239, 957)
    pydirectinput.click(670, 1018)
    # Block until the 'loading' screen is detected with >= 0.9 confidence.
    screendetect.wait_for_screen('loading', 0.9)
    time.sleep(0.5)
    # Bug fix: `playsound` is the module (see the import above); calling it
    # directly raised TypeError. The function is playsound.playsound().
    playsound.playsound(os.getcwd() + '/media/sounds/xp.wav')
def loop():
    """Register 'q' as an emergency-quit hotkey, run the start sequence,
    then run play() forever."""
    # Bug fix: `args` must be an iterable of positional arguments. The
    # original passed args=(0), which is just the int 0 (not a tuple), so
    # invoking the hotkey would do sys.exit(*0) and raise TypeError.
    keyboard.add_hotkey('q', sys.exit, args=(0,))
    start()
    while True:
        play()
# Script entry point: register the quit hotkey and start the bot loop.
if __name__ == '__main__':
    loop()
#!/usr/bin/env python3
#
# Tool for upgrading/converting a db
# Requirements:
# 1) Databse Schema - schema for the new database you what to upgrade to
# 2) Config File - the config file that describes how to convert the db
#
# Notes:
# 1) Will attempt to convert the db defined in /etc/planetlab/plc_config
# 2) Does not automatically drop archived database. They must be removed
# manually
import sys
import os
import getopt
import pgdb
# Load the live PLC configuration; plc_config is executed into `config` and
# supplies PLC_DB_USER / PLC_DB_NAME used by connect().
config = {}
config_file = "/etc/planetlab/plc_config"
exec(compile(open(config_file).read(), config_file, 'exec'), config)
# Defaults; all of these can be overridden on the command line (see usage()).
upgrade_config_file = "plcdb.3-4.conf"
schema_file = "planetlab4.sql"
temp_dir = "/tmp"
def usage():
    """Print the command-line help for this tool and exit with status 1."""
    help_lines = [
        "Usage: %s [OPTION] UPGRADE_CONFIG_FILE " % sys.argv[0],
        "Options:",
        "  -s, --schema=FILE Upgraded Database Schema",
        "  -t, --temp-dir=DIR Temp Directory",
        "  --help This message",
    ]
    for line in help_lines:
        print(line)
    sys.exit(1)
# ---- command-line parsing --------------------------------------------------
# Bug fix: usage() documents -t/--temp-dir, but the parser only accepted the
# undocumented -d. Accept -t as well (keeping -d for backward compatibility).
try:
    (opts, argv) = getopt.getopt(sys.argv[1:],
                                 "s:t:d:",
                                 ["schema=",
                                  "temp-dir=",
                                  "help"])
except getopt.GetoptError as err:
    print("Error: ", err.msg)
    usage()
for (opt, optval) in opts:
    if opt in ("-s", "--schema"):
        # schema for the new (upgraded) database
        schema_file = optval
    elif opt in ("-d", "-t", "--temp-dir"):
        # scratch directory for the generated COPY data files
        temp_dir = optval
    elif opt == "--help":
        usage()
try:
    upgrade_config_file = argv[0]
except IndexError:
    print("Error: too few arguments")
    usage()
# Global state shared by the upgrade helpers below.
schema = {}                 # item name -> (type, body lines) from the new schema
inserts = []
schema_items_ordered = []
sequences = {}
temp_tables = {}            # table name -> path of its generated COPY data file
# load conf file for this upgrade
try:
    upgrade_config = {}
    exec(compile(open(upgrade_config_file).read(), upgrade_config_file, 'exec'), upgrade_config)
    upgrade_config.pop('__builtins__')
    db_version_previous = upgrade_config['DB_VERSION_PREVIOUS']
    db_version_new = upgrade_config['DB_VERSION_NEW']
except IOError as fault:
    print("Error: upgrade config file (%s) not found. Exiting" % \
        (fault))
    sys.exit(1)
except KeyError as fault:
    print("Error: %s not set in upgrade confing (%s). Exiting" % \
        (fault, upgrade_config_file))
    sys.exit(1)
def connect():
    """Open a connection to the PLC database using plc_config credentials."""
    db = pgdb.connect(user = config['PLC_DB_USER'],
                database = config['PLC_DB_NAME'])
    return db
def archive_db(database, archived_database):
    # Archive `database` by renaming it to `archived_database`, first
    # dropping any previous archive of that name. Exits the program on
    # failure; the archived db must later be dropped manually (see header).
    # NOTE(review): the shell command is built by string interpolation --
    # acceptable only because both names come from trusted local config.
    archive_db = " dropdb -U postgres %s > /dev/null 2>&1;" \
                 " psql template1 postgres -qc " \
                 " 'ALTER DATABASE %s RENAME TO %s;';" % \
                 (archived_database, database, archived_database)
    exit_status = os.system(archive_db)
    if exit_status:
        print("Error: unable to archive database. Upgrade failed")
        sys.exit(1)
    #print "Status: %s has been archived. now named %s" % (database, archived_database)
def encode_utf8(inputfile_name, outputfile_name):
    """Re-encode an ISO-8859-1 text file as UTF-8.

    Lines containing SET CLIENT_ENCODING (any case) are dropped, since the
    rewritten dump is UTF-8. Exceptions are reported and re-raised.

    Bug fix: the original called str(line, 'iso-8859-1') on a str read from
    a default-encoding text file, which raises TypeError on Python 3. The
    encodings now live on the file handles themselves.
    """
    try:
        with open(inputfile_name, 'r', encoding='iso-8859-1') as inputfile, \
             open(outputfile_name, 'w', encoding='utf-8') as outputfile:
            for line in inputfile:
                # skip the dump's old client-encoding directive
                if line.upper().find('SET CLIENT_ENCODING') > -1:
                    continue
                outputfile.write(line)
    except Exception:
        print('error encoding file')
        raise
def create_item_from_schema(item_name):
    """Create one schema item (table, view, ...) in the new database.

    Looks up the item's definition in the global `schema` dict and feeds it
    to psql. Returns False if the item has no definition in the schema file;
    exits the program if psql fails.

    Bug fixes vs. the original: the `except KeyError` branch was unreachable
    (KeyError is an Exception, and `except Exception` was listed first) and
    its message referenced an undefined name `key`; there was also an
    unreachable `raise` after sys.exit(1).
    """
    try:
        (item_type, body_list) = schema[item_name]
    except KeyError:
        print("Error: cannot create %s. definition not found in %s" % \
              (item_name, schema_file))
        return False
    exit_status = os.system('psql %s %s -qc "%s" > /dev/null 2>&1' % \
          (config['PLC_DB_NAME'], config['PLC_DB_USER'], "".join(body_list)))
    if exit_status:
        print('Error: create %s failed. Check schema.' % item_name)
        sys.exit(1)
def fix_row(row, table_name, table_fields):
    """Apply per-row repairs before loading `row` into the new database.

    interfaces:            convert "Nmbit"/"Nkbit" bwlimit strings to bps ints.
    slice_attribute:       remap obsolete attribute_type_id foreign keys.
    slice_attribute_types: drop (return None) rows with duplicate ids.
    The row is modified in place and returned; None means "skip this row".
    """
    if table_name in ['interfaces']:
        bwlimit_index = table_fields.index('bwlimit')
        bwlimit = row[bwlimit_index]
        if isinstance(bwlimit, int):
            pass  # already numeric, nothing to convert
        elif bwlimit.find('mbit') > -1:
            row[bwlimit_index] = int(bwlimit.split('mbit')[0]) * 1000000
        elif bwlimit.find('kbit') > -1:
            row[bwlimit_index] = int(bwlimit.split('kbit')[0]) * 1000
    elif table_name in ['slice_attribute']:
        # remap invalid foreign keys onto their surviving attribute types
        attribute_type_index = table_fields.index('attribute_type_id')
        remap = {10004: 10016,
                 10006: 10017,
                 10031: 10037, 10033: 10037,
                 10034: 10036, 10035: 10036}
        if row[attribute_type_index] in remap:
            row[attribute_type_index] = remap[row[attribute_type_index]]
    elif table_name in ['slice_attribute_types']:
        # these duplicate/redundant type ids are dropped entirely
        type_id_index = table_fields.index('attribute_type_id')
        if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]:
            return None
    return row
def fix_table(table, table_name, table_fields):
    """Apply table-level repairs before loading into the new database.

    For slice_attribute_types, removes rows whose attribute_type_id is one
    of the duplicate/redundant ids. The table is modified in place and also
    returned.

    Bug fix: the original removed elements from `table` while iterating it,
    which skips the element following each removal; we now iterate a
    snapshot copy.
    """
    if table_name in ['slice_attribute_types']:
        type_id_index = table_fields.index('attribute_type_id')
        obsolete_ids = (10004, 10006, 10031, 10033, 10034, 10035)
        for row in list(table):
            if row[type_id_index] in obsolete_ids:
                table.remove(row)
    return table
def remove_temp_tables():
    """Delete every temp data file recorded in the global temp_tables map.

    Any OSError propagates to the caller, exactly as before (the original
    wrapped the loop in a pointless `try/except: raise`, removed here).
    """
    for temp_file_name in temp_tables.values():
        os.remove(temp_file_name)
def generate_temp_table(table_name, db):
    """Dump upgraded data for *table_name* into a pg COPY-format temp file.

    Reads the upgrade directions for the table from the global
    ``upgrade_config`` ("new_field:old_table.old_field[:join|:where]..."),
    selects the old data over the given db connection, fixes each row via
    ``fix_row()`` and writes a tab-separated file suitable for
    ``COPY ... FROM stdin``.  The file path is recorded in the global
    ``temp_tables``.  Returns False when no upgrade definition exists for
    the table; exits the program on a malformed definition.
    """
    cursor = db.cursor()
    try:
        # get upgrade directions
        table_def = upgrade_config[table_name].replace('(', '').replace(')', '').split(',')
        table_fields, old_fields, joins, wheres = [], [], set(), set()
        for field in table_def:
            field_parts = field.strip().split(':')
            table_fields.append(field_parts[0])
            old_fields.append(field_parts[1])
            if field_parts[2:]:
                # entries containing '=' are WHERE clauses, the rest joins
                joins.update(x for x in field_parts[2:] if x.find('=') == -1)
                wheres.update(x for x in field_parts[2:] if x.find('=') > -1)
        # get indices of fields that cannot be null in the new schema
        (item_type, body_list) = schema[table_name]
        not_null_indices = []
        for field in table_fields:
            for body_line in body_list:
                if body_line.find(field) > -1 and \
                   body_line.upper().find("NOT NULL") > -1:
                    not_null_indices.append(table_fields.index(field))
        # get index (or indices) of the primary key
        primary_key_indices = []
        for body_line in body_list:
            if body_line.find("PRIMARY KEY") > -1:
                for field in table_fields:
                    if body_line.find(" " + field + " ") > -1:
                        primary_key_indices.append(table_fields.index(field))
        # get old data
        get_old_data = "SELECT DISTINCT %s FROM %s" % \
            (", ".join(old_fields), old_fields[0].split(".")[0])
        for join in joins:
            get_old_data = get_old_data + " INNER JOIN %s USING (%s) " % \
                (join.split('.')[0], join.split('.')[1])
        if wheres:
            get_old_data = get_old_data + " WHERE "
            for where in wheres:
                get_old_data = get_old_data + " %s" % where
        cursor.execute(get_old_data)
        rows = cursor.fetchall()
        # write data to a temp file; `with` guarantees the handle is closed
        # even when a later row raises (the old code leaked it)
        temp_file_name = '%s/%s.tmp' % (temp_dir, table_name)
        with open(temp_file_name, 'w') as temp_file:
            for row in rows:
                # attempt to make any necessary fixes to data
                row = fix_row(row, table_name, table_fields)
                # do not attempt to write null rows
                if row is None:
                    continue
                # do not attempt to write rows with null primary keys
                if [x for x in primary_key_indices if row[x] is None]:
                    continue
                # NOTE(review): assumes the db driver returns mutable rows
                # (lists) — tuples would raise here, as in the original code.
                for i in range(len(row)):
                    # convert nulls into something pg can understand
                    if row[i] is None:
                        if i in not_null_indices:
                            # XX doesnt work if column is int type
                            row[i] = ""
                        else:
                            # COPY null marker "\N"; the original source
                            # spelled it "\N", a SyntaxError under Python 3
                            row[i] = "\\N"
                    if isinstance(row[i], int) or isinstance(row[i], float):
                        row[i] = str(row[i])
                    # escape whatever can mess up the COPY data format
                    if isinstance(row[i], str):
                        row[i] = row[i].replace('\t', '\\t')
                        row[i] = row[i].replace('\n', '\\n')
                        row[i] = row[i].replace('\r', '\\r')
                data_row = "\t".join(row)
                temp_file.write(data_row + "\n")
            # COPY end-of-data marker "\."; "\." was an invalid escape
            temp_file.write("\\.\n")
        temp_tables[table_name] = temp_file_name
    except KeyError:
        # no upgrade definition for this table: skip it silently
        return False
    except IndexError:
        print("Error: error found in upgrade config file. " \
              "check %s configuration. Aborting " % \
              (table_name))
        sys.exit(1)
    except:
        print("Error: configuration for %s doesnt match db schema. " \
              " Aborting" % (table_name))
        try:
            db.rollback()
        except:
            pass
        raise
# Connect to current db
db = connect()
cursor = db.cursor()
# determine current db version and validate the upgrade config against it
try:
    cursor.execute("SELECT relname from pg_class where relname = 'plc_db_version'")
    rows = cursor.fetchall()
    if not rows:
        print("Warning: current db has no version. Unable to validate config file.")
    else:
        cursor.execute("SELECT version FROM plc_db_version")
        rows = cursor.fetchall()
        if not rows or not rows[0]:
            print("Warning: current db has no version. Unable to validate config file.")
        elif rows[0][0] == db_version_new:
            print("Status: Versions are the same. No upgrade necessary.")
            sys.exit()
        elif not rows[0][0] == db_version_previous:
            # refuse to upgrade from a version the config was not written for
            print("Stauts: DB_VERSION_PREVIOUS in config file (%s) does not" \
                " match current db version %d" % (upgrade_config_file, rows[0][0]))
            sys.exit()
        else:
            print("STATUS: attempting upgrade from %d to %d" % \
                (db_version_previous, db_version_new))
    # check db encoding; non-utf8 databases are dumped, re-encoded and
    # reloaded before the schema upgrade proper
    sql = " SELECT pg_catalog.pg_encoding_to_char(d.encoding)" \
          " FROM pg_catalog.pg_database d " \
          " WHERE d.datname = '%s' " % config['PLC_DB_NAME']
    cursor.execute(sql)
    rows = cursor.fetchall()
    if rows[0][0] not in ['UTF8', 'UNICODE']:
        print("WARNING: db encoding is not utf8. Attempting to encode")
        db.close()
        # generate db dump
        dump_file = '%s/dump.sql' % (temp_dir)
        dump_file_encoded = dump_file + ".utf8"
        dump_cmd = 'pg_dump -i %s -U postgres -f %s > /dev/null 2>&1' % \
                   (config['PLC_DB_NAME'], dump_file)
        if os.system(dump_cmd):
            print("ERROR: during db dump. Exiting.")
            sys.exit(1)
        # encode dump to utf8
        print("Status: encoding database dump")
        encode_utf8(dump_file, dump_file_encoded)
        # archive original db
        archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_sqlascii_archived')
        # create a utf8 database and upload encoded data
        recreate_cmd = 'createdb -U postgres -E UTF8 %s > /dev/null; ' \
                       'psql -a -U %s %s < %s > /dev/null 2>&1;' % \
                       (config['PLC_DB_NAME'], config['PLC_DB_USER'], \
                        config['PLC_DB_NAME'], dump_file_encoded)
        print("Status: recreating database as utf8")
        if os.system(recreate_cmd):
            print("Error: database encoding failed. Aborting")
            sys.exit(1)
        os.remove(dump_file_encoded)
        os.remove(dump_file)
except:
    raise
# reconnect: the encoding step above may have closed/recreated the database
db = connect()
cursor = db.cursor()
# parse the schema user wishes to upgrade to; populates the module-level
# schema / schema_items_ordered / sequences / inserts collections
try:
    file = open(schema_file, 'r')
    index = 0
    lines = file.readlines()
    while index < len(lines):
        line = lines[index]
        # strip sql comments
        if line.find("--") > -1:
            line_parts = line.split("--")
            line = line_parts[0]
        # find all created objects
        if line.startswith("CREATE"):
            line_parts = line.split(" ")
            if line_parts[1:3] == ['OR', 'REPLACE']:
                line_parts = line_parts[2:]
            item_type = line_parts[1]
            item_name = line_parts[2]
            schema_items_ordered.append(item_name)
            if item_type in ['INDEX']:
                schema[item_name] = (item_type, line)
            # functions, tables, views span over multiple lines
            # handle differently than indexes
            elif item_type in ['AGGREGATE', 'TABLE', 'VIEW']:
                fields = [line]
                while index < len(lines):
                    index = index + 1
                    nextline =lines[index]
                    if nextline.find("--") > -1:
                        new_line_parts = nextline.split("--")
                        nextline = new_line_parts[0]
                    # look for any sequences
                    if item_type in ['TABLE'] and nextline.find('serial') > -1:
                        sequences[item_name] = nextline.strip().split()[0]
                    fields.append(nextline)
                    if nextline.find(";") >= 0:
                        break
                schema[item_name] = (item_type, fields)
            else:
                print("Error: unknown type %s" % item_type)
        elif line.startswith("INSERT"):
            inserts.append(line)
        index = index + 1
except:
    raise
print("Status: generating temp tables")
# generate all temp tables (COPY-format data files of the upgraded rows)
for key in schema_items_ordered:
    (type, body_list) = schema[key]
    if type == 'TABLE':
        generate_temp_table(key, db)
# disconnect from current database and archive it
cursor.close()
db.close()
print("Status: archiving database")
archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived')
os.system('createdb -U postgres -E UTF8 %s > /dev/null; ' % config['PLC_DB_NAME'])
print("Status: upgrading database")
# attempt to create and load all items from schema into temp db;
# on any failure the archived database is restored and the error re-raised
try:
    for key in schema_items_ordered:
        (type, body_list) = schema[key]
        create_item_from_schema(key)
        if type == 'TABLE':
            if key in upgrade_config:
                # attempt to populate with temp table data
                table_def = upgrade_config[key].replace('(', '').replace(')', '').split(',')
                table_fields = [field.strip().split(':')[0] for field in table_def]
                insert_cmd = "psql %s %s -c " \
                             " 'COPY %s (%s) FROM stdin;' < %s " % \
                             (config['PLC_DB_NAME'], config['PLC_DB_USER'], key,
                              ", ".join(table_fields), temp_tables[key] )
                exit_status = os.system(insert_cmd)
                if exit_status:
                    print("Error: upgrade %s failed" % key)
                    sys.exit(1)
                # update the primary key sequence
                if key in sequences:
                    sequence = key +"_"+ sequences[key] +"_seq"
                    update_seq = "psql %s %s -c " \
                                 " \"select setval('%s', max(%s)) FROM %s;\" > /dev/null" % \
                                 (config['PLC_DB_NAME'], config['PLC_DB_USER'], sequence,
                                  sequences[key], key)
                    exit_status = os.system(update_seq)
                    if exit_status:
                        print("Error: sequence %s update failed" % sequence)
                        sys.exit(1)
            else:
                # check if there are any insert stmts in schema for this table
                print("Warning: %s has no temp data file. Unable to populate with old data" % key)
                for insert_stmt in inserts:
                    if insert_stmt.find(key) > -1:
                        insert_cmd = 'psql %s postgres -qc "%s;" > /dev/null 2>&1' % \
                                     (config['PLC_DB_NAME'], insert_stmt)
                        os.system(insert_cmd)
except:
    print("Error: failed to populate db. Unarchiving original database and aborting")
    undo_command = "dropdb -U postgres %s > /dev/null; psql template1 postgres -qc" \
                   " 'ALTER DATABASE %s RENAME TO %s;'; > /dev/null" % \
                   (config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived', config['PLC_DB_NAME'])
    os.system(undo_command)
    #remove_temp_tables()
    raise
#remove_temp_tables()
print("upgrade complete")
"""Support for user- and CDC-based flu info sensors from Flu Near You."""
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_STATE,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.core import callback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import CATEGORY_CDC_REPORT, CATEGORY_USER_REPORT, DATA_COORDINATOR, DOMAIN
# Extra state attribute keys reported by the sensors below.
ATTR_CITY = "city"
ATTR_REPORTED_DATE = "reported_date"
ATTR_REPORTED_LATITUDE = "reported_latitude"
ATTR_REPORTED_LONGITUDE = "reported_longitude"
ATTR_STATE_REPORTS_LAST_WEEK = "state_reports_last_week"
ATTR_STATE_REPORTS_THIS_WEEK = "state_reports_this_week"
ATTR_ZIP_CODE = "zip_code"
DEFAULT_ATTRIBUTION = "Data provided by Flu Near You"
# Sensor type identifiers: the CDC levels come from the CDC report payload,
# the user types from the crowd-sourced report payload.
SENSOR_TYPE_CDC_LEVEL = "level"
SENSOR_TYPE_CDC_LEVEL2 = "level2"
SENSOR_TYPE_USER_CHICK = "chick"
SENSOR_TYPE_USER_DENGUE = "dengue"
SENSOR_TYPE_USER_FLU = "flu"
SENSOR_TYPE_USER_LEPTO = "lepto"
SENSOR_TYPE_USER_NO_SYMPTOMS = "none"
SENSOR_TYPE_USER_SYMPTOMS = "symptoms"
SENSOR_TYPE_USER_TOTAL = "total"
# (sensor_type, name, icon, unit) tuples consumed by async_setup_entry.
CDC_SENSORS = [
    (SENSOR_TYPE_CDC_LEVEL, "CDC Level", "mdi:biohazard", None),
    (SENSOR_TYPE_CDC_LEVEL2, "CDC Level 2", "mdi:biohazard", None),
]
USER_SENSORS = [
    (SENSOR_TYPE_USER_CHICK, "Avian Flu Symptoms", "mdi:alert", "reports"),
    (SENSOR_TYPE_USER_DENGUE, "Dengue Fever Symptoms", "mdi:alert", "reports"),
    (SENSOR_TYPE_USER_FLU, "Flu Symptoms", "mdi:alert", "reports"),
    (SENSOR_TYPE_USER_LEPTO, "Leptospirosis Symptoms", "mdi:alert", "reports"),
    (SENSOR_TYPE_USER_NO_SYMPTOMS, "No Symptoms", "mdi:alert", "reports"),
    (SENSOR_TYPE_USER_SYMPTOMS, "Flu-like Symptoms", "mdi:alert", "reports"),
    (SENSOR_TYPE_USER_TOTAL, "Total Symptoms", "mdi:alert", "reports"),
]
# Maps a local sensor type to the key used in the state-level payload when
# the names differ.
EXTENDED_SENSOR_TYPE_MAPPING = {
    SENSOR_TYPE_USER_FLU: "ili",
    SENSOR_TYPE_USER_NO_SYMPTOMS: "no_symptoms",
    SENSOR_TYPE_USER_TOTAL: "total_surveys",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Flu Near You sensors based on a config entry."""
    coordinators = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id]
    cdc_coordinator = coordinators[CATEGORY_CDC_REPORT]
    user_coordinator = coordinators[CATEGORY_USER_REPORT]
    # One entity per (sensor_type, name, icon, unit) description, CDC
    # sensors first, then the user-report sensors.
    entities = [
        CdcSensor(cdc_coordinator, config_entry, sensor_type, name, icon, unit)
        for sensor_type, name, icon, unit in CDC_SENSORS
    ]
    entities.extend(
        UserSensor(user_coordinator, config_entry, sensor_type, name, icon, unit)
        for sensor_type, name, icon, unit in USER_SENSORS
    )
    async_add_entities(entities)
class FluNearYouSensor(CoordinatorEntity):
    """Define a base Flu Near You sensor.

    Subclasses must implement :meth:`update_from_latest_data`, which reads
    ``self.coordinator.data`` and fills ``self._state`` / ``self._attrs``.
    """
    def __init__(self, coordinator, config_entry, sensor_type, name, icon, unit):
        """Initialize the sensor."""
        super().__init__(coordinator)
        # Extra state attributes, pre-seeded with the attribution string.
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        self._config_entry = config_entry
        self._icon = icon
        self._name = name
        # Key into the coordinator payload (see SENSOR_TYPE_* constants).
        self._sensor_type = sensor_type
        self._state = None
        self._unit = unit
    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs
    @property
    def icon(self):
        """Return the icon."""
        return self._icon
    @property
    def name(self):
        """Return the name."""
        return self._name
    @property
    def state(self):
        """Return the state."""
        return self._state
    @property
    def unique_id(self):
        """Return a unique, Home Assistant friendly identifier for this entity."""
        # "<lat>,<lon>_<sensor_type>" — stable across restarts for one entry.
        return (
            f"{self._config_entry.data[CONF_LATITUDE]},"
            f"{self._config_entry.data[CONF_LONGITUDE]}_{self._sensor_type}"
        )
    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        self.update_from_latest_data()
        self.async_write_ha_state()
    async def async_added_to_hass(self):
        """Register callbacks."""
        await super().async_added_to_hass()
        # Populate state immediately from whatever the coordinator already has.
        self.update_from_latest_data()
    @callback
    def update_from_latest_data(self):
        """Update the sensor."""
        raise NotImplementedError
class CdcSensor(FluNearYouSensor):
    """Define a sensor for CDC reports."""

    @callback
    def update_from_latest_data(self):
        """Update the sensor."""
        # The CDC payload is flat: pick the report date and state name into
        # the attributes and the level value for this sensor type as state.
        data = self.coordinator.data
        self._attrs[ATTR_REPORTED_DATE] = data["week_date"]
        self._attrs[ATTR_STATE] = data["name"]
        self._state = data[self._sensor_type]
class UserSensor(FluNearYouSensor):
    """Define a sensor for user reports."""

    @callback
    def update_from_latest_data(self):
        """Update the sensor.

        Fills location attributes from the ``local`` payload, state-level
        report totals (when the state payload carries this sensor type) and
        the sensor state from the ``local`` payload.
        """
        self._attrs.update(
            {
                ATTR_CITY: self.coordinator.data["local"]["city"].split("(")[0],
                ATTR_REPORTED_LATITUDE: self.coordinator.data["local"]["latitude"],
                ATTR_REPORTED_LONGITUDE: self.coordinator.data["local"]["longitude"],
                ATTR_STATE: self.coordinator.data["state"]["name"],
                ATTR_ZIP_CODE: self.coordinator.data["local"]["zip"],
            }
        )

        if self._sensor_type in self.coordinator.data["state"]["data"]:
            states_key = self._sensor_type
        elif self._sensor_type in EXTENDED_SENSOR_TYPE_MAPPING:
            states_key = EXTENDED_SENSOR_TYPE_MAPPING[self._sensor_type]
        else:
            # BUGFIX: previously states_key was referenced while unbound
            # (NameError) for sensor types present in neither the state data
            # nor the extended mapping (e.g. chick/dengue/lepto/symptoms).
            # Skip the state-level attributes in that case.
            states_key = None

        if states_key is not None:
            self._attrs[ATTR_STATE_REPORTS_THIS_WEEK] = self.coordinator.data[
                "state"
            ]["data"][states_key]
            self._attrs[ATTR_STATE_REPORTS_LAST_WEEK] = self.coordinator.data[
                "state"
            ]["last_week_data"][states_key]

        if self._sensor_type == SENSOR_TYPE_USER_TOTAL:
            # "total" is the sum of all individual symptom report counts.
            self._state = sum(
                v
                for k, v in self.coordinator.data["local"].items()
                if k
                in (
                    SENSOR_TYPE_USER_CHICK,
                    SENSOR_TYPE_USER_DENGUE,
                    SENSOR_TYPE_USER_FLU,
                    SENSOR_TYPE_USER_LEPTO,
                    SENSOR_TYPE_USER_SYMPTOMS,
                )
            )
        else:
            self._state = self.coordinator.data["local"][self._sensor_type]
| nilq/baby-python | python |
# License: BSD 3 clause
import unittest
from tick.solver import SGD
from tick.solver.tests import TestSolver
class SGDTest(object):
    """Mixin of SGD solver tests; mixed into the TestSolver subclasses below.

    Relies on helpers provided by tick's TestSolver base class
    (check_solver, _test_solver_sparse_and_dense_consistency,
    _test_solver_astype_consistency, sto_seed).
    """
    def test_solver_sgd(self):
        """Check the SGD solver for Logistic Regression with Ridge
        penalization.
        """
        solver = SGD(max_iter=100, verbose=False, seed=TestSolver.sto_seed,
                     step=200)
        self.check_solver(solver, fit_intercept=True, model="logreg",
                          decimal=0)
    def test_sgd_sparse_and_dense_consistency(self):
        """Check that SGD can run all glm models and is consistent with
        sparsity.
        """
        def create_solver():
            return SGD(max_iter=1, verbose=False, step=1e-5,
                       seed=TestSolver.sto_seed)
        self._test_solver_sparse_and_dense_consistency(create_solver)
    def test_sgd_dtype_can_change(self):
        """Check the SGD astype method.
        """
        def create_solver():
            return SGD(max_iter=100, verbose=False, step=1e-1,
                       seed=TestSolver.sto_seed)
        self._test_solver_astype_consistency(create_solver)
class SGDTestFloat32(TestSolver, SGDTest):
    # Run the SGDTest suite with float32 model/solver data.
    def __init__(self, *args, **kwargs):
        TestSolver.__init__(self, *args, dtype="float32", **kwargs)
class SGDTestFloat64(TestSolver, SGDTest):
    # Run the SGDTest suite with float64 model/solver data.
    def __init__(self, *args, **kwargs):
        TestSolver.__init__(self, *args, dtype="float64", **kwargs)
if __name__ == '__main__':
    unittest.main()
| nilq/baby-python | python |
"""
Tests for the `kpal.kmer` module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future import standard_library
from future.builtins import str, zip
import itertools
from io import open, StringIO
from Bio import Seq
import numpy as np
from kpal import kmer
import utils
with standard_library.hooks():
from collections import Counter
class TestKmer(utils.TestEnvironment):
    """Command-line level tests for :mod:`kpal.kmer`.

    Each test builds profile/FASTA fixtures through the TestEnvironment
    helpers and checks a command's output file or stream.
    """
    def test_main_info(self, capsys):
        # `kmer info` via main() should print the profile summary to stdout.
        # For the `capsys` fixture, see:
        # http://pytest.org/latest/capture.html
        counts = utils.counts(utils.SEQUENCES, 8)
        filename = self.profile(counts, 8, 'a')
        kmer.main(['info', filename])
        out, err = capsys.readouterr()
        expected = 'File format version: 1.0.0\n'
        expected += 'Produced by: kMer unit tests\n\n'
        expected += 'Profile: a\n'
        expected += '- k-mer length: 8 (%d k-mers)\n' % (4**8)
        expected += '- Zero counts: %i\n' % (4**8 - len(counts))
        expected += '- Non-zero counts: %i\n' % len(counts)
        expected += '- Sum of counts: %i\n' % sum(counts.values())
        expected += '- Mean of counts: %.3f\n' % np.mean([0] * (4**8 - len(counts)) + list(counts.values()))
        expected += '- Median of counts: %.3f\n' % np.median([0] * (4**8 - len(counts)) + list(counts.values()))
        expected += '- Standard deviation of counts: %.3f\n' % np.std([0] * (4**8 - len(counts)) + list(counts.values()))
        assert out == expected
    def test_convert(self):
        # An old-format profile converts into the current profile format.
        counts = utils.counts(utils.SEQUENCES, 8)
        filename = self.empty()
        with open(self.profile_old_format(counts, 8)) as handle:
            with utils.open_profile(filename, 'w') as profile_handle:
                kmer.convert([handle], profile_handle)
        utils.test_profile_file(filename, counts, 8)
    def test_cat(self):
        # Concatenating two profile files keeps both profiles by name.
        counts_a = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_b = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts_a, 8, name='a')) as handle_a:
            with utils.open_profile(self.profile(counts_b, 8, name='b')) as handle_b:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.cat([handle_a, handle_b], profile_handle)
        utils.test_profile_file(filename, counts_a, 8, name='a')
        utils.test_profile_file(filename, counts_b, 8, name='b')
    def test_cat_prefixes(self):
        # Prefixes disambiguate equally-named source profiles on cat.
        counts_a = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_b = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts_a, 8, name='X')) as handle_a:
            with utils.open_profile(self.profile(counts_b, 8, name='X')) as handle_b:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.cat([handle_a, handle_b], profile_handle, prefixes=['a_', 'b_'])
        utils.test_profile_file(filename, counts_a, 8, name='a_X')
        utils.test_profile_file(filename, counts_b, 8, name='b_X')
    def test_count(self):
        # Counting 8-mers in a FASTA file matches the reference counter.
        counts = utils.counts(utils.SEQUENCES, 8)
        filename = self.empty()
        with open(self.fasta(utils.SEQUENCES)) as fasta_handle:
            with utils.open_profile(filename, 'w') as profile_handle:
                kmer.count([fasta_handle], profile_handle, 8)
        utils.test_profile_file(filename, counts, 8)
    def test_count_multi(self):
        # Counting two FASTA files at once yields one named profile each.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with open(self.fasta(utils.SEQUENCES_LEFT)) as handle_left:
            with open(self.fasta(utils.SEQUENCES_RIGHT)) as handle_right:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.count([handle_left, handle_right], profile_handle, 8, names=['a', 'b'])
        utils.test_profile_file(filename, counts_left, 8, name='a')
        utils.test_profile_file(filename, counts_right, 8, name='b')
    def test_count_by_record(self):
        # With by_record=True, every FASTA record gets its own profile.
        counts_by_record = [utils.counts(record, 8) for record in utils.SEQUENCES]
        names = [str(i) for i, _ in enumerate(counts_by_record)]
        filename = self.empty()
        with open(self.fasta(utils.SEQUENCES, names=names)) as fasta_handle:
            with utils.open_profile(filename, 'w') as profile_handle:
                kmer.count([fasta_handle], profile_handle, 8, by_record=True)
        for name, counts in zip(names, counts_by_record):
            utils.test_profile_file(filename, counts, 8, name=name)
    def test_count_multi_by_record(self):
        # Multiple files with by_record=True prefix record names per file.
        counts_by_record_left = [utils.counts(record, 8) for record in utils.SEQUENCES_LEFT]
        counts_by_record_right = [utils.counts(record, 8) for record in utils.SEQUENCES_RIGHT]
        names_left = [str(i) for i, _ in enumerate(counts_by_record_left)]
        names_right = [str(i) for i, _ in enumerate(counts_by_record_right)]
        filename = self.empty()
        with open(self.fasta(utils.SEQUENCES_LEFT, names=names_left)) as handle_left:
            with open(self.fasta(utils.SEQUENCES_RIGHT, names=names_right)) as handle_right:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.count([handle_left, handle_right], profile_handle, 8, names=['a', 'b'], by_record=True)
        for name, counts in zip(names_left, counts_by_record_left):
            utils.test_profile_file(filename, counts, 8, name='a_' + name)
        for name, counts in zip(names_right, counts_by_record_right):
            utils.test_profile_file(filename, counts, 8, name='b_' + name)
    def test_merge(self):
        # The default merger adds the two profiles element-wise.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.merge(handle_left, handle_right, profile_handle)
        utils.test_profile_file(filename, counts_left + counts_right, 8)
    def test_merge_xor(self):
        # The 'xor' merger keeps only k-mers present in exactly one profile.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.merge(handle_left, handle_right, profile_handle, merger='xor')
        counts_xor = counts_left + counts_right
        for s in set(counts_left) & set(counts_right):
            del counts_xor[s]
        utils.test_profile_file(filename, counts_xor, 8)
    def test_merge_custom_expr(self):
        # A custom merger expression over arrays `left`/`right` mimics xor.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.merge(handle_left, handle_right, profile_handle, custom_merger='(left + right) * np.logical_xor(left, right)')
        counts_xor = counts_left + counts_right
        for s in set(counts_left) & set(counts_right):
            del counts_xor[s]
        utils.test_profile_file(filename, counts_xor, 8)
    def test_merge_custom_name(self):
        # A dotted callable name can also be used as a custom merger.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
                with utils.open_profile(filename, 'w') as profile_handle:
                    kmer.merge(handle_left, handle_right, profile_handle, custom_merger='numpy.multiply')
        counts_mult = Counter(dict((s, counts_left[s] * counts_right[s])
                                   for s in set(counts_left) & set(counts_right)))
        utils.test_profile_file(filename, counts_mult, 8)
    def test_balance(self):
        # Balancing adds the reverse-complement counts to the profile.
        counts = utils.counts(utils.SEQUENCES, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts, 8)) as input_handle:
            with utils.open_profile(filename, 'w') as output_handle:
                kmer.balance(input_handle, output_handle)
        counts.update(dict((utils.reverse_complement(s), c) for s, c in counts.items()))
        utils.test_profile_file(filename, counts, 8)
    def test_get_balance(self):
        # The balance statistic is written as "<name> <value>".
        counts = utils.counts(utils.SEQUENCES, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts, 8)) as input_handle:
            kmer.get_balance(input_handle, out, precision=3)
        assert out.getvalue() == '1 0.669\n'
    def test_get_stats(self):
        # Mean/std of the counts are reported per profile.
        counts = utils.counts(utils.SEQUENCES, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts, 8)) as input_handle:
            kmer.get_stats(input_handle, out)
        name, mean, std = out.getvalue().strip().split()
        assert name == '1'
        assert mean == '%.10f' % np.mean(utils.as_array(counts, 8))
        assert std == '%.10f' % np.std(utils.as_array(counts, 8))
    def test_distribution(self):
        # Distribution lines are "<profile> <count> <frequency>".
        counts = utils.counts(utils.SEQUENCES, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts, 8)) as input_handle:
            kmer.distribution(input_handle, out)
        counter = Counter(utils.as_array(counts, 8))
        assert out.getvalue() == '\n'.join('1 %i %i' % x
                                           for x in sorted(counter.items())) + '\n'
    def test_info(self):
        # Same summary as test_main_info, but via the API with a stream.
        counts = utils.counts(utils.SEQUENCES, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts, 8, 'a')) as input_handle:
            kmer.info(input_handle, out)
        expected = 'File format version: 1.0.0\n'
        expected += 'Produced by: kMer unit tests\n\n'
        expected += 'Profile: a\n'
        expected += '- k-mer length: 8 (%d k-mers)\n' % (4**8)
        expected += '- Zero counts: %i\n' % (4**8 - len(counts))
        expected += '- Non-zero counts: %i\n' % len(counts)
        expected += '- Sum of counts: %i\n' % sum(counts.values())
        expected += '- Mean of counts: %.3f\n' % np.mean([0] * (4**8 - len(counts)) + list(counts.values()))
        expected += '- Median of counts: %.3f\n' % np.median([0] * (4**8 - len(counts)) + list(counts.values()))
        expected += '- Standard deviation of counts: %.3f\n' % np.std([0] * (4**8 - len(counts)) + list(counts.values()))
        assert out.getvalue() == expected
    def test_get_count(self):
        # Retrieve the count for the single most common k-mer.
        counts = utils.counts(utils.SEQUENCES, 8)
        word, count = counts.most_common(1)[0]
        out = StringIO()
        with utils.open_profile(self.profile(counts, 8, 'a')) as input_handle:
            kmer.get_count(input_handle, out, word)
        assert out.getvalue() == 'a %d\n' % count
    def test_positive(self):
        # `positive` keeps, in each profile, only k-mers seen in the other.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename_left = self.empty()
        filename_right = self.empty()
        with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
                with utils.open_profile(filename_left, 'w') as out_left:
                    with utils.open_profile(filename_right, 'w') as out_right:
                        kmer.positive(handle_left, handle_right, out_left, out_right)
        utils.test_profile_file(filename_left, Counter(s for s in counts_left.elements()
                                                       if s in counts_right), 8)
        utils.test_profile_file(filename_right, Counter(s for s in counts_right.elements()
                                                        if s in counts_left), 8)
    def test_scale(self):
        # `scale` multiplies the smaller profile up to the larger total.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        filename_left = self.empty()
        filename_right = self.empty()
        with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
                with utils.open_profile(filename_left, 'w') as out_left:
                    with utils.open_profile(filename_right, 'w') as out_right:
                        kmer.scale(handle_left, handle_right, out_left, out_right)
        if sum(counts_left.values()) < sum(counts_right.values()):
            scale_left = sum(counts_right.values()) / sum(counts_left.values())
            scale_right = 1.0
        else:
            scale_left = 1.0
            scale_right = sum(counts_left.values()) / sum(counts_right.values())
        for s in counts_left:
            counts_left[s] *= scale_left
        for s in counts_right:
            counts_right[s] *= scale_right
        utils.test_profile_file(filename_left, counts_left, 8)
        utils.test_profile_file(filename_right, counts_right, 8)
    def test_shrink(self):
        # Shrinking by one merges counts of k-mers sharing a 7-mer prefix.
        counts = utils.counts(utils.SEQUENCES, 8)
        filename = self.empty()
        with utils.open_profile(self.profile(counts, 8)) as input_handle:
            with utils.open_profile(filename, 'w') as output_handle:
                kmer.shrink(input_handle, output_handle, 1)
        counts = Counter(dict((t, sum(counts[u] for u in counts
                                      if u.startswith(t)))
                              for t in set(s[:-1] for s in counts)))
        utils.test_profile_file(filename, counts, 7)
    def test_shuffle(self):
        # Expected values pinned for np.random.seed(100).
        # See test_klib.profile_shuffle
        counts = utils.counts(utils.SEQUENCES, 2)
        filename = self.empty()
        with utils.open_profile(self.profile(counts, 2)) as input_handle:
            with utils.open_profile(filename, 'w') as output_handle:
                np.random.seed(100)
                kmer.shuffle(input_handle, output_handle)
        counts = dict(zip([''.join(s) for s in itertools.product('ACGT', repeat=2)],
                          [13, 7, 6, 18, 12, 1, 13, 17, 16, 12, 23, 27, 24, 17, 18, 12]))
        utils.test_profile_file(filename, counts, 2)
    def test_smooth(self):
        # Dynamic smoothing with the 'min' summary collapses sub-profiles.
        # See test_kdistlib.test_ProfileDistance_dynamic_smooth
        counts_left = Counter(['AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TG', 'TT'])
        counts_right = Counter(['AC', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'])
        filename_left = self.empty()
        filename_right = self.empty()
        with utils.open_profile(self.profile(counts_left, 2)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 2)) as handle_right:
                with utils.open_profile(filename_left, 'w') as out_left:
                    with utils.open_profile(filename_right, 'w') as out_right:
                        kmer.smooth(handle_left, handle_right, out_left, out_right, summary='min')
        counts_left = Counter(['AA', 'AA', 'AA', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TA', 'TA'])
        counts_right = Counter(['AA', 'AA', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TA', 'TA', 'TA'])
        utils.test_profile_file(filename_left, counts_left, 2)
        utils.test_profile_file(filename_right, counts_right, 2)
    def test_smooth_custom_expr(self):
        # Smoothing accepts a custom summary expression (smoke test only).
        # See test_kdistlib.test_ProfileDistance_dynamic_smooth
        counts_left = Counter(['AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TG', 'TT'])
        counts_right = Counter(['AC', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'])
        filename_left = self.empty()
        filename_right = self.empty()
        with utils.open_profile(self.profile(counts_left, 2)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 2)) as handle_right:
                with utils.open_profile(filename_left, 'w') as out_left:
                    with utils.open_profile(filename_right, 'w') as out_right:
                        kmer.smooth(handle_left, handle_right, out_left, out_right, custom_summary='np.max(values)')
    def test_smooth_custom_name(self):
        # Smoothing accepts a dotted callable name as summary (smoke test).
        # See test_kdistlib.test_ProfileDistance_dynamic_smooth
        counts_left = Counter(['AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TG', 'TT'])
        counts_right = Counter(['AC', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'])
        filename_left = self.empty()
        filename_right = self.empty()
        with utils.open_profile(self.profile(counts_left, 2)) as handle_left:
            with utils.open_profile(self.profile(counts_right, 2)) as handle_right:
                with utils.open_profile(filename_left, 'w') as out_left:
                    with utils.open_profile(filename_right, 'w') as out_right:
                        kmer.smooth(handle_left, handle_right, out_left, out_right, custom_summary='numpy.max')
    def test_distance(self):
        # Pairwise distance output format is "<name> <name> <distance>".
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out)
        assert out.getvalue() == 'left right %.10f\n' % 0.4626209323
    def test_distance_smooth(self):
        # Distance with dynamic smoothing (default summary).
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out, do_smooth=True, precision=3)
        assert out.getvalue() == 'left right 0.077\n'
    def test_distance_smooth_average(self):
        # Distance with smoothing using the 'average' summary.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out, do_smooth=True,
                              precision=3, summary='average')
        assert out.getvalue() == 'left right 0.474\n'
    def test_distance_smooth_expr(self):
        # Smoothing summary given as a custom expression.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out, do_smooth=True,
                              precision=3, custom_summary='np.max(values)')
        assert out.getvalue() == 'left right 0.474\n'
    def test_distance_smooth_name(self):
        # Smoothing summary given as a dotted callable name.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out, do_smooth=True,
                              precision=3, custom_summary='numpy.max')
        assert out.getvalue() == 'left right 0.474\n'
    def test_distance_pairwise_expr(self):
        # Pairwise distance function given as a custom expression.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out, precision=3,
                              custom_pairwise='abs(left - right) / (left + right + 1000)')
        assert out.getvalue() == 'left right 0.001\n'
    def test_distance_pairwise_name(self):
        # Pairwise distance function given as a dotted callable name.
        counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
        counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
        out = StringIO()
        with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
            with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
                kmer.distance(handle_left, handle_right, out, precision=3,
                              custom_pairwise='numpy.multiply')
        assert out.getvalue() == 'left right 0.084\n'
def test_distance_matrix(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, precision=3)
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.463', '0.000 0.463']
def test_distance_matrix_smooth(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True, precision=3)
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.077', '0.000 0.077']
def test_distance_matrix_smooth_average(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True,
summary='average', precision=3)
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.474', '0.000 0.474']
def test_distance_matrix_smooth_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True, precision=3,
custom_summary='np.max(values)')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.474', '0.000 0.474']
def test_distance_matrix_smooth_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True, precision=3,
custom_summary='numpy.max')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.474', '0.000 0.474']
def test_distance_matrix_pairwise_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, precision=3,
custom_pairwise='abs(left - right) / (left + right + 1000)')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.001', '0.000 0.001']
def test_distance_matrix_pairwise_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, precision=3,
custom_pairwise='numpy.multiply')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.084', '1.206 0.084']
| nilq/baby-python | python |
# Module metadata: author tag and the names exported by ``from ... import *``.
__author__ = 'wei'
# NOTE(review): assumes a sibling generated-protobuf module ``gt_req_pb2``
# exists in this package -- confirm before relying on star imports.
__all__=["gt_req_pb2" ]
import os
import toml
from test_common import make_source_dic
from rcwa.tmm import tmm_
def make_layer_dic(epsilon, mu, thickness):
    """Bundle one layer's material parameters into the toml layer schema."""
    return dict(epsilon=epsilon, mu=mu, thickness=thickness)
def test_benchmark():
    '''Test case from Computational Electromagnetics Course Assignment by Raymond Rumpf'''
    # Start from a clean slate; a stale output.toml would mask failures.
    try:
        os.remove('output.toml')
    except FileNotFoundError:
        pass

    input_toml = {
        'layer': [make_layer_dic(2, 1, 0.25), make_layer_dic(1, 3, 0.5)],
        'source': make_source_dic(1, 57, 23, [1, 0], [0, 1]),
        'superstrate': {'mu': 1.2, 'epsilon': 1.4},
        'substrate': {'mu': 1.6, 'epsilon': 1.8},
    }
    tmm_(input_toml)

    result = toml.load('output.toml')
    assert result['R']['00'] == 0.4403
    assert result['T']['00'] == 0.5597
    assert result['R_T']['00'] == 1
| nilq/baby-python | python |
from bs4 import BeautifulSoup
# Parse the locally saved wiki page once at import time.
with open('cooking.html') as f:
    body = f.read()

soup = BeautifulSoup(body, 'lxml')
def rows(soup):
    """Yield row elements of the table following the 'Recipes' heading.

    Each iteration steps two siblings forward: BeautifulSoup interleaves
    whitespace text nodes between <tr> tags, so advancing twice moves from
    one row past its trailing text node to the next row.
    NOTE(review): the first yield is two siblings past the initial <tr> --
    presumably skipping the header row; confirm against the page structure.
    """
    item = soup.find(id='Recipes').find_next('table').tr
    while item:
        if item:
            item = item.next_sibling
        if item:
            item = item.next_sibling
        if item:
            yield item
def counts(text):
    """Yield (name, count) pairs parsed from 'Name (n)' runs in *text*.

    Non-breaking spaces inside names are normalized to plain spaces.
    """
    cursor = 0
    while True:
        close = text.find(')', cursor)
        if close == -1:
            return
        paren = text.find('(', cursor, close)
        label = text[cursor:paren].strip().replace(u'\xa0', ' ')
        yield label, int(text[paren + 1:close])
        cursor = close + 1
def edges(item):
    """Yield (recipe, ingredient, count) triples for one table row."""
    cells = item.find_all('td')
    recipe = cells[1].text.strip()
    for ingredient, count in counts(cells[3].text):
        yield recipe, ingredient, count
# Character/substring normalization table.
# NOTE(review): defined but never referenced in this script -- possibly
# leftover from an earlier cleaning step.
mappings = (
    (u'\xa0', ' '),
    (u' ', ';'),
    (u'(', None),
    (u')', None),
)
# Emit one tab-separated edge (recipe, ingredient, count) per line.
for item in rows(soup):
    for a, b, c in edges(item):
        print('{}\t{}\t{}'.format(a, b, c))
| nilq/baby-python | python |
from collections import OrderedDict
from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware, SharedDataMiddleware
import config
from ext import sse
from views import home, json_api
def create_app():
    """Build and configure the Flask application.

    Registers the home and SSE blueprints, mounts the JSON API under /j via
    a WSGI dispatcher, and serves uploads from UPLOAD_FOLDER at /uploads.
    """
    flask_app = Flask(__name__)
    flask_app.config.from_object(config)

    flask_app.register_blueprint(home.bp)
    flask_app.register_blueprint(sse, url_prefix='/stream')

    # Mount the JSON API as a separate WSGI application under /j.
    flask_app.wsgi_app = DispatcherMiddleware(flask_app.wsgi_app, OrderedDict((
        ('/j', json_api),
    )))

    # Build-only rule so url_for('uploaded_file', ...) works; the middleware
    # below actually serves the files.
    flask_app.add_url_rule('/uploads/<filename>', 'uploaded_file',
                           build_only=True)
    flask_app.wsgi_app = SharedDataMiddleware(flask_app.wsgi_app, {
        '/uploads': flask_app.config['UPLOAD_FOLDER']
    })
    return flask_app
# WSGI application instance, used by servers and the __main__ runner below.
app = create_app()

# For local test
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every response (local testing only)."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
        'Access-Control-Allow-Methods': 'GET,PUT,POST,DELETE',
    }
    for name, value in cors_headers.items():
        response.headers.add(name, value)
    return response
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8100, debug=app.debug)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import sys
import glob
import codecs
args = sys.argv

# FilePath: recursively grep every .txt file under the product directory.
product_path_name = args[1]
grep_file_name = product_path_name + "\**\*.txt"
result_file_name = "ResultGrep.txt"
hit_word = "TODO"

# recursive=True makes ** descend into subdirectories.
list_up = glob.glob(grep_file_name, recursive=True)

return_code = 0
with codecs.open(result_file_name, "w", "utf-8") as result_open:
    for path_name in list_up:
        with open(path_name, encoding="utf8", errors='ignore') as f:
            # Read the file and strip trailing newlines before matching.
            code = f.readlines()
        code_cut_new_line = [line.strip() for line in code]
        # Lines containing the search word.
        list_hit_line = [line for line in code_cut_new_line if hit_word in line]
        if list_hit_line:
            # BUG FIX: the original called ``path_name.join("\r\n")``, which
            # (by str.join semantics) writes "\r" + path_name + "\n".  Write
            # the file name followed by a CRLF instead.
            result_open.write(path_name + "\r\n")
            return_code = 1
            # Write each matching line.
            for line in list_hit_line:
                result_open.write(line)
                result_open.write("\r\n")

# Exit status 1 signals that at least one hit was found.
sys.exit(return_code)
| nilq/baby-python | python |
from osgeo import gdal
import os
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import remove_small_objects, watershed
import tqdm
def rlencode(x, dropna=False):
    """
    Run length encoding.
    Based on http://stackoverflow.com/a/32681075, which is based on the rle
    function from R.

    Parameters
    ----------
    x : 1D array_like
        Input array to encode
    dropna: bool, optional
        Drop all runs of NaNs.

    Returns
    -------
    start positions, run lengths, run values
    """
    x = np.asarray(x)
    n = len(x)
    if n == 0:
        return (np.array([], dtype=int),
                np.array([], dtype=int),
                np.array([], dtype=x.dtype))

    # A run starts at index 0 and wherever the value changes
    # (NaN is treated as equal to NaN here).
    change = ~np.isclose(x[1:], x[:-1], equal_nan=True)
    starts = np.r_[0, np.flatnonzero(change) + 1]
    lengths = np.diff(np.r_[starts, n])
    values = x[starts]

    if dropna:
        keep = ~np.isnan(values)
        starts, lengths, values = starts[keep], lengths[keep], values[keep]
    return starts, lengths, values
def rldecode(starts, lengths, values, minlength=None):
    """
    Decode a run-length encoding of a 1D array.

    Parameters
    ----------
    starts, lengths, values : 1D array_like
        The run-length encoding.
    minlength : int, optional
        Minimum length of the output array.

    Returns
    -------
    1D array. Missing data will be filled with NaNs.
    """
    starts, lengths, values = map(np.asarray, (starts, lengths, values))
    ends = starts + lengths
    size = ends[-1]
    if minlength is not None:
        size = max(minlength, size)
    # Positions not covered by any run stay NaN.
    out = np.full(size, np.nan)
    for begin, end, value in zip(starts, ends, values):
        out[begin:end] = value
    return out
def rle_to_string(rle):
    """Serialize an rlencode() result as 'value,length,value,length,...'."""
    starts, lengths, values = rle
    parts = []
    for length, value in zip(lengths, values):
        parts.append(str(value))
        parts.append(str(length))
    return ",".join(parts)
def my_watershed(mask1, mask2):
    """Watershed mask1 using connected components of mask2 as seed markers."""
    seed_labels = ndi.label(mask2, output=np.uint32)[0]
    return watershed(mask1, seed_labels, mask=mask1, watershed_line=True)
def make_submission(prediction_dir, data_dir, submission_file):
    """Convert per-tile probability rasters into an RLE submission file.

    For each prediction raster: mask out DSM nodata pixels, build
    high-confidence seeds (prob > threshold + 0.4, small objects removed)
    and the building extent (prob > threshold), watershed the seeds over
    the extent, and write one run-length-encoded record per tile.
    """
    # 8881 - 0.3 / +0.4 / 100 / 120 test 8935
    threshold = 0.3
    strings = []
    predictions = list(sorted(os.listdir(prediction_dir)))
    for f in tqdm.tqdm(predictions):
        if 'xml' in f:
            continue
        # Matching DSM raster supplies the nodata mask for this tile.
        dsm_ds = gdal.Open(os.path.join(data_dir, f.replace('RGB', 'DSM')), gdal.GA_ReadOnly)
        band_dsm = dsm_ds.GetRasterBand(1)
        nodata = band_dsm.GetNoDataValue()
        dsm = band_dsm.ReadAsArray()
        tile_id = f.split('_RGB.tif')[0]
        mask_ds = gdal.Open(os.path.join(prediction_dir, f))
        mask_img = mask_ds.ReadAsArray()
        # Zero out pixels with no elevation data.
        mask_img[dsm == nodata] = 0
        # High-confidence seeds: probability > threshold + 0.4, despeckled.
        # BUG FIX: astype(np.bool) -> astype(bool); the np.bool alias was
        # deprecated and removed in NumPy >= 1.24.
        img_copy = np.copy(mask_img)
        img_copy[mask_img <= threshold + 0.4] = 0
        img_copy[mask_img > threshold + 0.4] = 1
        img_copy = img_copy.astype(bool)
        img_copy = remove_small_objects(img_copy, 100).astype(np.uint8)
        # Building extent: probability > threshold, despeckled.
        mask_img[mask_img <= threshold] = 0
        mask_img[mask_img > threshold] = 1
        mask_img = mask_img.astype(bool)
        mask_img = remove_small_objects(mask_img, 120).astype(np.uint8)
        labeled_array = my_watershed(mask_img, img_copy)
        # labeled_array = remove_on_boundary(labeled_array)
        rle_str = rle_to_string(rlencode(labeled_array.flatten()))
        s = "{tile_id}\n2048,2048\n{rle}\n".format(tile_id=tile_id, rle=rle_str)
        strings.append(s)
    # Context manager guarantees the file is closed even on error.
    with open(submission_file, "w") as f_submit:
        f_submit.writelines(strings)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 17:39:25 2018
Good morning! Here's your coding interview problem for today.
This problem was asked by Amazon.
Given a N by M matrix of numbers, print out the matrix in a clockwise spiral.
For example, given the following matrix:
[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]]
You should print out the following:
1
2
3
4
5
10
15
20
19
18
17
16
11
6
7
8
9
14
13
12
"""
import numpy as np

# BUG FIX: the original ``from random import random_integers`` raises
# ImportError -- the stdlib ``random`` module has no such function; the call
# below lives in ``np.random``.  ``np.random.random_integers`` itself is
# deprecated, so draw the same inclusive 0..20 range with randint's
# exclusive upper bound.
x = np.random.randint(0, 21, (3, 4))
def unroll(x):
    """Return the elements of 2-D array *x* in clockwise spiral order."""
    print(x)
    spiral = []
    while x.shape[0] * x.shape[1] > 0:
        # top row, left-to-right
        try:
            spiral.extend(x[0, :])
            x = x[1:, :]
        except Exception:
            break
        # right column, top-to-bottom
        try:
            spiral.extend(x[:, -1])
            x = x[:, :-1]
        except Exception:
            break
        # bottom row, right-to-left
        try:
            spiral.extend(list(reversed(x[-1, :])))
            x = x[:-1, :]
        except Exception:
            break
        # left column, bottom-to-top
        try:
            spiral.extend(list(reversed(x[:, 0])))
            x = x[:, 1:]
        except Exception:
            break
    return spiral
# Demo run on the random matrix generated above.
unroll(x)
'''
unroll(x)
[[ 7 1 20 18]
[ 0 8 3 13]
[14 11 13 10]]
Out[116]: [7, 1, 20, 18, 13, 10, 13, 11, 14, 0, 8, 3]
''' | nilq/baby-python | python |
from flask import request, render_template, redirect, flash, Blueprint, session, current_app
from ..config import CLIENT_ID, CALLBACK_URL
from bs4 import BeautifulSoup
import requests
import hashlib
import base64
import string
import random
# Blueprint holding all authentication routes; registered by the app factory.
auth = Blueprint('auth', __name__)
@auth.route("/callback")
def indieauth_callback():
    """Finish the IndieAuth flow: verify state, redeem the code, store session."""
    code = request.args.get("code")
    state = request.args.get("state")

    # The state must round-trip unchanged, otherwise treat it as forged.
    if state != session.get("state"):
        flash("Your authentication failed. Please try again.")
        return redirect("/")

    payload = {
        "code": code,
        "redirect_uri": CALLBACK_URL,
        "client_id": CLIENT_ID,
        "grant_type": "authorization_code",
        "code_verifier": session["code_verifier"],
    }
    r = requests.post(session.get("token_endpoint"), data=payload,
                      headers={"Accept": "application/json"})

    if r.status_code != 200:
        flash("There was an error with your token endpoint server.")
        return redirect("/login")

    # remove code verifier from session because the authentication flow has finished
    session.pop("code_verifier")

    token = r.json()
    if token.get("me").strip("/") != current_app.config["ME"].strip("/"):
        flash("Your domain is not allowed to access this website.")
        return redirect("/login")

    session["me"] = token.get("me")
    session["access_token"] = token.get("access_token")
    return redirect("/")
@auth.route("/logout")
def logout():
    """Clear the IndieAuth session and return to the home page."""
    # BUG FIX: pop with a default so logging out twice (or with an expired
    # session) does not raise KeyError.
    session.pop("me", None)
    session.pop("access_token", None)
    return redirect("/home")
@auth.route("/discover", methods=["POST"])
def discover_auth_endpoint():
    """Discover the user's IndieAuth endpoints and start the PKCE flow."""
    domain = request.form.get("indie_auth_url")
    r = requests.get(domain)
    soup = BeautifulSoup(r.text, "html.parser")

    authorization_endpoint = soup.find("link", rel="authorization_endpoint")
    if authorization_endpoint is None:
        # BUG FIX: user-facing typo "ndpoint" -> "endpoint".
        flash("An IndieAuth authorization endpoint could not be found on your website.")
        return redirect("/login")
    if not authorization_endpoint.get("href").startswith("https://") and not authorization_endpoint.get("href").startswith("http://"):
        flash("Your IndieAuth authorization endpoint published on your site must be a full HTTP URL.")
        return redirect("/login")

    token_endpoint = soup.find("link", rel="token_endpoint")
    if token_endpoint is None:
        flash("An IndieAuth token endpoint could not be found on your website.")
        return redirect("/login")
    if not token_endpoint.get("href").startswith("https://") and not token_endpoint.get("href").startswith("http://"):
        flash("Your IndieAuth token endpoint published on your site must be a full HTTP URL.")
        return redirect("/login")

    auth_endpoint = authorization_endpoint["href"]

    # SECURITY NOTE: ``random`` is not cryptographically secure; the verifier
    # and state should ideally come from the ``secrets`` module.
    random_code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
    session["code_verifier"] = random_code
    session["authorization_endpoint"] = auth_endpoint
    session["token_endpoint"] = token_endpoint["href"]

    # BUG FIX (RFC 7636, S256): the code challenge is the *base64url* encoding
    # (padding stripped) of the raw SHA-256 digest of the verifier -- not a
    # plain base64 of the hex digest.
    digest = hashlib.sha256(random_code.encode('utf-8')).digest()
    code_challenge = base64.urlsafe_b64encode(digest).decode('utf-8').rstrip('=')

    state = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
    session["state"] = state

    return redirect(
        auth_endpoint +
        "?client_id=" + CLIENT_ID +
        "&redirect_uri=" + CALLBACK_URL +
        "&scope=profile&response_type=code&code_challenge=" + code_challenge +
        "&code_challenge_method=S256&state=" + state
    )
@auth.route("/login", methods=["GET", "POST"])
def login():
    """Render the login page where the user enters their domain."""
    return render_template("user/auth.html", title="James' Wiki Dashboard Login")
import datetime
import os
# from heavy import special_commit
def modify():
    """Flip zero.md between '0' and '1' -- a trivial change to commit.

    Reads the current value, then rewrites the file with the opposite digit.
    """
    # Context managers guarantee the handles are closed even on error.
    with open('zero.md', 'r') as file:
        flag = int(file.readline()) == 0
    with open('zero.md', 'w+') as file:
        file.write('1' if flag else '0')
def commit():
    """Commit all tracked changes with a fixed message, discarding git output."""
    os.system('git commit -a -m test_github_streak > /dev/null 2>&1')
def set_sys_time(year, month, day):
    """Set the OS clock to the given date via ``date -s`` (Unix-only; needs root)."""
    os.system('date -s %04d%02d%02d' % (year, month, day))
def trick_commit(year, month, day):
    """Create one commit that appears authored on the given date.

    Works by winding the system clock to that date before committing.
    """
    set_sys_time(year, month, day)
    modify()
    commit()
def daily_commit(start_date, end_date):
    """Make one fake-dated commit per day from start_date to end_date, inclusive."""
    total_days = (end_date - start_date).days + 1
    for offset in range(total_days):
        day = start_date + datetime.timedelta(days=offset)
        trick_commit(day.year, day.month, day.day)
if __name__ == '__main__':
    # Backfill commits for the given date range (inclusive).
    daily_commit(datetime.date(2020, 9, 20), datetime.date(2020, 11, 9))
"""
Additional Activation functions not yet present in tensorflow
Creation Date: April 2020
Creator: GranScudetto
"""
import tensorflow as tf
def mish_activation(x):
    """
    Mish activation function
    as described in:
    "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
    https://arxiv.org/abs/1908.08681

    formula: mish(x) = x * tanh(ln(1 + exp(x)))
                     = x * tanh(softplus(x))
    """
    softplus = tf.math.softplus(x)
    return x * tf.math.tanh(softplus)
def swish_activation(x):
    """
    Swish activation function (currently only in tf-nightly)
    as described in:
    "Searching for Activation Functions"
    https://arxiv.org/abs/1710.05941

    formula: swish(x) = x * sigmoid(x)
    """
    gate = tf.math.sigmoid(x)
    return x * gate
# Register the activations with Keras so they can be referenced by name.
# NOTE(review): this maps the single key 'custom_activation' to a *tuple* of
# two Activation layers, which is unusual -- Keras custom objects normally map
# one name to one callable. Confirm this is intended rather than two separate
# entries (e.g. 'mish' and 'swish').
tf.keras.utils.get_custom_objects().update(
    {'custom_activation': (tf.keras.layers.Activation(mish_activation),
                           tf.keras.layers.Activation(swish_activation))
     }
)
| nilq/baby-python | python |
# coding: utf-8
from __future__ import absolute_import
import unittest
from unittest import mock
from swagger_server.test import BaseTestCase
from swagger_server.wml_util import get_wml_credentials
from swagger_server.test_mocked.util import mock_wml_env, MOCKED_CREDENTIALS_VARS
class TestWMLUtil(BaseTestCase, unittest.TestCase):
    """WML util integration test stubs"""

    @mock_wml_env()
    @mock.patch("swagger_server.wml_util.requests.request")
    def test_get_wml_credentials(self, mock_request):
        """Test case for get_wml_credentials

        Get WML credentials
        """
        # Stub the IAM token exchange so no real HTTP request is made.
        mock_request.return_value.json.return_value = {
            "access_token": "token",
            "refresh_token": "refresh_token",
            "token_type": "Bearer",
            "expires_in": 3600,
            "expiration": 1598543068,
            "scope": "ibm openid"
        }
        # Expected string form of the credentials built from the mocked
        # environment variables.
        expected = ("{'token': 'token', 'space_id': '" + MOCKED_CREDENTIALS_VARS['WML_SPACE_ID'] + "', 'url': '" + MOCKED_CREDENTIALS_VARS['WML_URL'] + "'}")
        response = get_wml_credentials()
        # NOTE(review): isinstance(x, object) is always true; this assertion
        # cannot fail.
        assert isinstance(response, object)
        assert str(response) == expected, 'response is not matching expected response'
        mock_request.assert_called_once_with("POST", 'https://iam.cloud.ibm.com/identity/token', data='grant_type=urn%3Aibm%3Aparams%3Aoauth%3Agrant-type%3Aapikey&apikey=apikey', headers=mock.ANY)
"""
Code to represent a dataset release.
"""
from enum import Enum
import json
import copy
from dataclasses import dataclass
from typing import Dict, List, Tuple
####################
# Utility functions and enums.
def load_jsonl(fname):
    """Parse a JSON-lines file into a list of Python objects."""
    with open(fname) as handle:
        return [json.loads(line) for line in handle]
class Label(Enum):
    """Stance of a piece of evidence toward a claim."""
    SUPPORTS = 1
    NEI = 0  # not enough info
    REFUTES = -1
def make_label(label_str, allow_NEI=True):
    """Map a dataset label string to a Label, optionally rejecting NEI."""
    mapping = {"SUPPORT": Label.SUPPORTS,
               "NOT_ENOUGH_INFO": Label.NEI,
               "CONTRADICT": Label.REFUTES}
    label = mapping[label_str]
    if label is Label.NEI and not allow_NEI:
        raise ValueError("An NEI was given.")
    return label
####################
# Representations for the corpus and abstracts.
@dataclass(repr=False, frozen=True)
class Document:
    """A single abstract: its id, title, and sentences."""
    id: str
    title: str
    sentences: Tuple[str]

    def __repr__(self):
        # Uppercased title followed by one bulleted line per sentence.
        return self.title.upper() + "\n" + "\n".join(["- " + entry for entry in self.sentences])

    def __lt__(self, other):
        # Order documents alphabetically by title.
        return self.title.__lt__(other.title)

    def dump(self):
        """Serialize this document to a JSON string."""
        # NOTE(review): ``is_structured`` is not defined on this class (or
        # anywhere in this file); calling dump() would raise AttributeError
        # unless the method is provided elsewhere -- confirm.
        res = {"doc_id": self.id,
               "title": self.title,
               "abstract": self.sentences,
               "structured": self.is_structured()}
        return json.dumps(res)
@dataclass(repr=False, frozen=True)
class Corpus:
    """
    A Corpus is just a collection of `Document` objects, with methods to look up
    a single document.
    """
    documents: List[Document]

    def __repr__(self):
        return f"Corpus of {len(self.documents)} documents."

    def __getitem__(self, i):
        "Get document by index in list."
        return self.documents[i]

    def get_document(self, doc_id):
        "Get document by ID."
        matches = [doc for doc in self.documents if doc.id == doc_id]
        assert len(matches) == 1
        return matches[0]

    @classmethod
    def from_jsonl(cls, corpus_file):
        "Build a Corpus from a JSON-lines corpus file."
        entries = load_jsonl(corpus_file)
        docs = [Document(entry["doc_id"], entry["title"], entry["abstract"])
                for entry in entries]
        return cls(docs)
####################
# Gold dataset.
class GoldDataset:
    """
    Class to represent a gold dataset, include corpus and claims.
    """

    def __init__(self, corpus_file, data_file):
        self.corpus = Corpus.from_jsonl(corpus_file)
        self.claims = self._read_claims(data_file)

    def __repr__(self):
        return f"{self.corpus.__repr__()} {len(self.claims)} claims."

    def __getitem__(self, i):
        return self.claims[i]

    def _read_claims(self, data_file):
        "Read claims from file."
        claims = []
        for raw in load_jsonl(data_file):
            entry = copy.deepcopy(raw)
            entry["release"] = self
            # Resolve cited document ids into Document objects.
            entry["cited_docs"] = [self.corpus.get_document(doc_id)
                                   for doc_id in entry["cited_doc_ids"]]
            assert len(entry["cited_docs"]) == len(entry["cited_doc_ids"])
            del entry["cited_doc_ids"]
            claims.append(Claim(**entry))
        return sorted(claims, key=lambda claim: claim.id)

    def get_claim(self, example_id):
        "Get a single claim by ID."
        matches = [claim for claim in self.claims if claim.id == example_id]
        assert len(matches) == 1
        return matches[0]
@dataclass
class EvidenceAbstract:
    "A single evidence abstract."
    id: int  # doc_id of the evidence abstract
    label: Label  # stance of this abstract toward the claim
    rationales: List[List[int]]  # groups of rationale sentence indices
@dataclass(repr=False)
class Claim:
    """
    Class representing a single claim, with a pointer back to the dataset.
    """
    id: int
    claim: str
    evidence: Dict[int, EvidenceAbstract]
    cited_docs: List[Document]
    release: GoldDataset

    def __post_init__(self):
        # Normalize the raw evidence dict into EvidenceAbstract objects.
        self.evidence = self._format_evidence(self.evidence)

    @staticmethod
    def _format_evidence(evidence_dict):
        # This function is needed because the data schema is designed so that
        # each rationale can have its own support label. But, in the dataset,
        # all rationales for a given claim / abstract pair all have the same
        # label. So, we store the label at the "abstract level" rather than the
        # "rationale level".
        res = {}
        for doc_id, rationales in evidence_dict.items():
            doc_id = int(doc_id)
            labels = [x["label"] for x in rationales]
            if len(set(labels)) > 1:
                msg = ("In this SciFact release, each claim / abstract pair "
                       "should only have one label.")
                raise Exception(msg)
            label = make_label(labels[0])
            rationale_sents = [x["sentences"] for x in rationales]
            this_abstract = EvidenceAbstract(doc_id, label, rationale_sents)
            res[doc_id] = this_abstract
        return res

    def __repr__(self):
        msg = f"Example {self.id}: {self.claim}"
        return msg

    def pretty_print(self, evidence_doc_id=None, file=None):
        "Pretty-print the claim, together with all evidence."
        msg = self.__repr__()
        print(msg, file=file)
        # Print the evidence
        print("\nEvidence sets:", file=file)
        for doc_id, evidence in self.evidence.items():
            # If asked for a specific evidence doc, only show that one.
            if evidence_doc_id is not None and doc_id != evidence_doc_id:
                continue
            print("\n" + 20 * "#" + "\n", file=file)
            ev_doc = self.release.corpus.get_document(doc_id)
            print(f"{doc_id}: {evidence.label.name}", file=file)
            for i, sents in enumerate(evidence.rationales):
                print(f"Set {i}:", file=file)
                # Keep only the sentences whose index is in this rationale set.
                kept = [sent for i, sent in enumerate(ev_doc.sentences) if i in sents]
                for entry in kept:
                    print(f"\t- {entry}", file=file)
####################
# Predicted dataset.
class PredictedDataset:
    """
    Class to handle predictions, with a pointer back to the gold data.
    """

    def __init__(self, gold, prediction_file):
        """
        Takes a GoldDataset, as well as files with rationale and label
        predictions.
        """
        self.gold = gold
        self.predictions = self._read_predictions(prediction_file)

    def __getitem__(self, i):
        return self.predictions[i]

    def __repr__(self):
        return f"Predictions for {len(self.predictions)} claims."

    def _read_predictions(self, prediction_file):
        return [self._parse_prediction(entry)
                for entry in load_jsonl(prediction_file)]

    def _parse_prediction(self, pred_dict):
        claim_id = pred_dict["id"]
        # Predictions should never be NEI; there should only be predictions for
        # the abstracts that contain evidence.
        parsed = {}
        for key, this_prediction in pred_dict["evidence"].items():
            parsed[int(key)] = PredictedAbstract(
                int(key),
                make_label(this_prediction["label"], allow_NEI=False),
                this_prediction["sentences"])
        return ClaimPredictions(claim_id, parsed, self.gold.get_claim(claim_id))
@dataclass
class PredictedAbstract:
    """A predicted label plus a flat list of rationale sentence indices."""
    # For predictions, we have a single list of rationale sentences instead of a
    # list of separate rationales (see paper for details).
    abstract_id: int
    label: Label
    rationale: List
@dataclass
class ClaimPredictions:
    """Model predictions for one claim, keyed by abstract id."""
    claim_id: int
    predictions: Dict[int, PredictedAbstract]
    gold: Claim = None  # For backward compatibility, default this to None.

    def __repr__(self):
        msg = f"Predictions for {self.claim_id}: {self.gold.claim}"
        return msg

    def pretty_print(self, evidence_doc_id=None, file=None):
        """Print the claim and each predicted abstract with its rationale."""
        msg = self.__repr__()
        print(msg, file=file)
        # Print the evidence
        print("\nEvidence sets:", file=file)
        for doc_id, prediction in self.predictions.items():
            # If asked for a specific evidence doc, only show that one.
            if evidence_doc_id is not None and doc_id != evidence_doc_id:
                continue
            print("\n" + 20 * "#" + "\n", file=file)
            ev_doc = self.gold.release.corpus.get_document(doc_id)
            print(f"{doc_id}: {prediction.label.name}", file=file)
            # Print the predicted rationale.
            sents = prediction.rationale
            kept = [sent for i, sent in enumerate(ev_doc.sentences) if i in sents]
            for entry in kept:
                print(f"\t- {entry}", file=file)
| nilq/baby-python | python |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
from .forms import CustomUserChangeForm,CustomUserCreationForm
class UserAdmin(UserAdmin):
    """Admin configuration for the custom User model."""
    # NOTE(review): this class intentionally(?) shadows the imported Django
    # UserAdmin it subclasses -- it works because the base name is resolved at
    # class-creation time, but a distinct name (e.g. CustomUserAdmin) would be
    # clearer. Renaming would break any external importers, so left as-is.
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = User
    # Prepend a 'User Profile' section to the stock UserAdmin fieldsets.
    fieldsets = (
        ('User Profile', {'fields': ('name',)}),
    ) + UserAdmin.fieldsets
    list_display = ('username','name','is_superuser')
    search_fields = ['name',]

admin.site.register(User,UserAdmin)
import ptypes, math, logging
from ptypes import *
from .primitives import *
# FLV is a big-endian format; make it the module-wide default for ptypes.
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)

### primitives

## float types
class FLOAT16(pfloat.half): pass
class FLOAT(pfloat.single): pass
class DOUBLE(pfloat.double): pass

## int types (SI24/UI24 are 3-byte integers, used by FLV for e.g. offsets)
class SI8(pint.int8_t): pass
class SI16(pint.int16_t): pass
class SI24(pint.int_t): length = 3
class SI32(pint.int32_t): pass
class SI64(pint.int64_t): pass
class UI8(pint.int8_t): pass
class UI16(pint.int16_t): pass
class UI24(pint.int_t): length = 3
class UI32(pint.int32_t): pass
class UI64(pint.int64_t): pass
# Rebind the listed int types as explicitly big-endian wrappers.
# NOTE(review): SI24, UI24, and SI64 are not included in this rebinding --
# confirm whether that is intentional.
(SI8, UI8, SI16, UI16, SI32, UI32, UI64) = ( pint.bigendian(x) for x in (SI8,UI8,SI16,UI16,SI32,UI32,UI64) )

## fixed-point types (length bytes, 'fractional' fractional bits)
class SI8_8(pfloat.sfixed_t): length,fractional = 2,8
class SI16_16(pfloat.sfixed_t): length,fractional = 4,16
class UI8_8(pfloat.ufixed_t): length,fractional = 2,8
class UI16_16(pfloat.ufixed_t): length,fractional = 4,16
#### Tags
# Registries mapping FLV tag type (8=audio, 9=video, 18=script) to the
# corresponding header / body structures.
class TagHeader(ptype.definition): cache = {}
class TagBody(ptype.definition): cache = {}
### AUDIODATA
@TagHeader.define
class AudioTagHeader(pbinary.struct):
    """Tag header for AUDIODATA (FLV tag type 8)."""
    type = 8
    _fields_ = [
        (4,'SoundFormat'),
        (2,'SoundRate'),
        (1,'SoundSize'),
        (1,'SoundType'),
        # AACPacketType byte is only present when SoundFormat == 10 (AAC).
        (lambda s: 8 if s['SoundFormat'] == 10 else 0,'AACPacketType'),
    ]
# FIXME
@TagBody.define
class AudioTagBody(pstruct.type):
    """Tag body for AUDIODATA: payload type chosen by the header's SoundFormat."""
    type = 8
    def __Data(self):
        # NOTE(review): FLVTAG is not defined in this file -- presumably it
        # lives in another module of this package; confirm.
        h = self.getparent(FLVTAG)['TagHeader'].li
        return AudioPacketData.lookup(h['SoundFormat'])
    _fields_ = [(__Data, 'Data')]
## audio packet data
# Registry of audio payload structures keyed by SoundFormat.
class AudioPacketData(ptype.definition): cache = {}

@AudioPacketData.define
class AACAUDIODATA(pstruct.type):
    """AAC payload: AudioSpecificConfig for sequence headers (AACPacketType 0),
    otherwise an opaque block."""
    type = 10
    _fields_ = [(lambda s: AudioSpecificConfig if s.getparent(FLVTAG)['TagHeader'].li['AACPacketType'] == 0 else ptype.block, 'Data')]
### VIDEODATA
@TagHeader.define
class VideoTagHeader(pstruct.type):
    """Tag header for VIDEODATA (FLV tag type 9): frame type, codec id, codec header."""
    type = 9
    class Type(pbinary.struct):
        _fields_ = [(4, 'FrameType'), (4, 'CodecID')]
        def summary(self):
            return 'FrameType:{:d} CodecId:{:d}'.format(self['FrameType'], self['CodecID'])
    def __Header(self):
        # Select the codec-specific header; unknown codecs fall back to the
        # registry default (an empty struct).
        t = self['Type'].li
        return VideoPacketHeader.withdefault(t['CodecID'], type=t['CodecID'])
    _fields_ = [
        (Type, 'Type'),
        (__Header, 'Header'),
    ]
    def summary(self):
        h = self['Type']
        # NOTE(review): the format string has two placeholders but three
        # arguments -- self['Header'].summary() (the third) is silently
        # dropped and the classname is rendered instead. Likely a bug;
        # confirm the intended format before changing.
        return 'Type{{{:s}}} {:s}'.format(h.summary(), self['Header'].classname(), self['Header'].summary() or repr(''))
# FIXME
@TagBody.define
class VideoTagBody(pstruct.type):
    """Tag body for VIDEODATA: payload chosen by codec id."""
    type = 9
    def __Data(self):
        # NOTE(review): StreamTag is not defined in this file -- presumably
        # it lives in another module of this package; confirm.
        h = self.getparent(StreamTag)['Header'].li
        t = h['Type']
        if t['FrameType'] == 5:
            # Video info/command frame: payload is a single byte.
            return UI8
        return VideoPacketData.lookup(t['CodecId'])
    _fields_ = [(__Data,'Data')]
## video packet header
class VideoPacketHeader(ptype.definition):
    """Registry of codec-specific video headers; unknown codecs map to an empty struct."""
    cache = {}
    class unknown(pstruct.type): _fields_ = []
    default = unknown
@VideoPacketHeader.define
class AVCVIDEOPACKETHEADER(pstruct.type):
    """Codec-specific header for AVC/H.264 video (CodecID 7)."""
    type = 7
    class AVCPacketType(pint.enum, UI8):
        _values_ = [
            (0, 'AVC sequence header'),
            (1, 'AVC NALU'),
            (2, 'AVC end-of-sequence header'),
        ]
    _fields_ = [
        (AVCPacketType, 'AVCPacketType'),
        # Signed 24-bit composition time offset.
        (SI24, 'CompositionTime'),
    ]
## video packet data
# Registry of video payload structures keyed by CodecID.
class VideoPacketData(ptype.definition): cache = {}
@VideoPacketData.define
class H263VIDEOPACKET(pbinary.struct):
    """Sorenson H.263"""
    type = 2
    def __Custom(self):
        # CustomWidth/CustomHeight fields are only present (8 or 16 bits wide)
        # for PictureSize values 0 and 1; otherwise they occupy zero bits.
        t = self['PictureSize']
        if t == 0:
            return 8
        elif t == 1:
            return 16
        return 0
    class ExtraInformation(pbinary.terminatedarray):
        # Sequence of (flag, optional byte) pairs, terminated by a zero flag.
        class _object_(pbinary.struct):
            _fields_ = [
                (1, 'Flag'),
                (lambda s: s['Flag'] and 8 or 0, 'Data'),
            ]
        def isTerminator(self, value):
            # NOTE(review): reads self['Flag'] (a lookup on the array) rather
            # than value['Flag'] -- confirm against the ptypes
            # terminatedarray API.
            return self['Flag'] == 0
    class MACROBLOCK(pbinary.struct):
        class BLOCKDATA(ptype.block):
            # FIXME: Look up H.263 ieee spec
            pass
        _fields_ = [
            (1, 'CodecMacroBlockFlag'),
            # ...
            (ptype.block, 'MacroBlockType'), # H.263 5.3.2
            (ptype.block, 'BlockPattern'), # H.263 5.3.5
            (2, 'QuantizerInformation'), # H.263 5.3.6
            (2, 'MotionVectorData'), # H.263 5.3.7
            (6, 'ExtraMotionVectorData'), # H.263 5.3.8
            (dyn.array(BLOCKDATA, 6), 'BlockData'),
        ]
    _fields_ = [
        (17, 'PictureStartCode'),
        (5, 'Version'),
        (8, 'TemporalReference'),
        (3, 'PictureSize'),
        (__Custom, 'CustomWidth'),
        (__Custom, 'CustomHeight'),
        (2, 'PictureType'),
        (1, 'DeblockingFlag'),
        (5, 'Quantizer'),
        (ExtraInformation, 'ExtraInformation'),
        (MACROBLOCK, 'Macroblock'),
    ]
@VideoPacketData.define
class SCREENVIDEOPACKET(pstruct.type):
    """Screen video"""
    type = 3
    class IMAGEBLOCK(pstruct.type):
        # Length-prefixed block of compressed image data.
        _fields_ = [
            (pint.bigendian(UI16), 'DataSize'), # UB[16], but whatever
            (lambda s: dyn.block(s['DataSize'].li.int()), 'Data'),
        ]
    def __ImageBlocks(self):
        # One IMAGEBLOCK per cell of the (block-width x block-height) grid.
        w,h = self['Width'],self['Height']
        blocks_w = math.ceil(w['Image'] / float(w['Block']))
        blocks_h = math.ceil(h['Image'] / float(h['Block']))
        count = blocks_w * blocks_h
        return dyn.array(self.IMAGEBLOCK, math.trunc(count))
    class Dim(pbinary.struct):
        # 4-bit block size packed together with a 12-bit image dimension.
        _fields_ = [(4,'Block'),(12,'Image')]
    _fields_ = [
        (Dim, 'Width'),
        (Dim, 'Height'),
        (__ImageBlocks, 'ImageBlocks'),
    ]
@VideoPacketData.define
class VP6FLVVIDEOPACKET(ptype.block):
    """On2 VP6"""
    # NOTE(review): subclasses ptype.block yet declares _fields_ like a
    # pstruct.type -- confirm the intended base class.
    type = 4
    class Adjustment(pbinary.struct):
        # Pixel adjustments subtracted from the encoded frame dimensions.
        _fields_ = [(4, 'Horizontal'),(4,'Vertical')]
    _fields_ = [
        (Adjustment, 'Adjustment'),
        # Remaining bytes of the tag after the adjustment nibble pair.
        (lambda s: dyn.block(s.getparent(StreamTag).DataSize() - s['Adjustment'].li.size()), 'Data'),
    ]
@VideoPacketData.define
class VP6FLVALPHAVIDEOPACKET(pstruct.type):
    """On2 VP6 with alpha channel"""
    type = 5
    def __AlphaData(self):
        # NOTE(review): defined but unused -- the AlphaData field below uses
        # an inline lambda instead; confirm this helper can be removed.
        return ptype.undefined
    def __Data(self):
        # Color data runs up to OffsetToAlpha; when the offset is
        # inconsistent, fall back to "rest of tag minus the fixed fields".
        streamtag = self.getparent(StreamTag)
        sz = streamtag.DataSize()
        ofs = self['OffsetToAlpha'].li.int()
        if ofs + self['Adjustment'].li.size() >= sz:
            logging.warning('OffsetToAlpha incorrect : %x', self.getoffset())
            return dyn.block(sz - self['Adjustment'].size() - self['OffsetToAlpha'].size())
        return dyn.block(ofs)
    _fields_ = [
        (VP6FLVVIDEOPACKET.Adjustment, 'Adjustment'),
        (UI24, 'OffsetToAlpha'),
        # (lambda s: dyn.block(s['OffsetToAlpha'].li.int()), 'Data'),
        (__Data, 'Data'),
        # Alpha plane: whatever remains of the tag after the fields above.
        (lambda s: dyn.block(s.getparent(StreamTag).DataSize() - (s['Adjustment'].li.size()+s['OffsetToAlpha'].li.size()+s['Data'].li.size())), 'AlphaData'),
    ]
@VideoPacketData.define
class SCREENV2VIDEOPACKET(pstruct.type):
    """Screen video version 2"""
    type = 6
    class Flags(pbinary.struct):
        _fields_ = [
            (6, 'Reserved'),
            (1, 'HasIFrameImage'),
            (1, 'HasPaletteInfo'),
        ]
    class IMAGEBLOCKV2(pstruct.type):
        class IMAGEFORMAT(pbinary.struct):
            _fields_ = [
                (3, 'Reserved'),
                (2, 'ColorDepth'),
                (1, 'HasDiffBlocks'),
                (1, 'ZlibPrimeCompressCurrent'),
                (1, 'ZlibPrimeCompressPrevious'),
            ]
        class IMAGEDIFFPOSITION(pstruct.type):
            _fields_ = [(UI8,n) for n in ('RowStart','Height')]
        class IMAGEPRIMEPOSITION(pbinary.struct):
            # NOTE(review): pbinary.struct fields normally take bit counts,
            # but UI8 (a pint type) is used here -- confirm against ptypes.
            _fields_ = [(UI8,n) for n in ('Block column','Block row')]
        def __ImageBlockHeader(self):
            # FIXME: since this field depends on 2 separate flags...which one should get prio?
            fmt = self['Format'].li
            if fmt['HasDiffBlocks']:
                return self.IMAGEDIFFPOSITION
            elif fmt['ZlibPrimeCompressCurrent']:
                return self.IMAGEPRIMEPOSITION
            return ptype.undefined
        _fields_ = [
            (pint.bigendian(UI16), 'DataSize'), # UB[16], but whatever
            (IMAGEFORMAT, 'Format'),
            (__ImageBlockHeader, 'ImageBlockHeader'),
            (lambda s: dyn.block(s['DataSize'].li.int()), 'Data'),
        ]
    def __ImageBlocks(self):
        # One IMAGEBLOCKV2 per cell of the (block-width x block-height) grid.
        w,h = self['Width'],self['Height']
        blocks_w = math.ceil(w['Image'] / float(w['Block']))
        blocks_h = math.ceil(h['Image'] / float(h['Block']))
        count = blocks_w * blocks_h
        return dyn.array(self.IMAGEBLOCKV2, math.trunc(count))
    def __IFrameImage(self):
        # NOTE(review): identical to __ImageBlocks and does not consult
        # Flags.HasIFrameImage -- confirm this is intended.
        w,h = self['Width'],self['Height']
        blocks_w = math.ceil(w['Image'] / float(w['Block']))
        blocks_h = math.ceil(h['Image'] / float(h['Block']))
        count = blocks_w * blocks_h
        return dyn.array(self.IMAGEBLOCKV2, math.trunc(count))
    _fields_ = [
        (SCREENVIDEOPACKET.Dim, 'Width'),
        (SCREENVIDEOPACKET.Dim, 'Height'),
        (Flags, 'Flags'),
        (lambda s: s['Flags'].li['HasPaletteInfo'] and SCREENVIDEOPACKET.IMAGEBLOCK or ptype.block, 'PaletteInfo'),
        (__ImageBlocks, 'ImageBlocks'),
        (__IFrameImage, 'IFrameImage'),
    ]
@VideoPacketData.define
class AVCVIDEOPACKET(pstruct.type):
    """AVC"""
    type = 7
    def __Data(self):
        # AVCPacketType (from the enclosing tag header) selects the payload:
        # 0 = decoder configuration record, 1 = NAL units, else raw bytes.
        h = self.getparent(StreamTag)['Header']
        t = h['AVCPacketType'].int()
        if t == 0:
            # FIXME: ISO 14496-15, 5.2.4.1
            return AVCDecoderConfigurationRecord
        elif t == 1:
            # FIXME: avcC
            return NALU
        return ptype.block
    _fields_ = [
        (__Data, 'Data')
    ]
### SCRIPTDATA
class SCRIPTDATAVALUE(pstruct.type):
    """AMF0 tagged value: a one-byte type id followed by the payload for that type."""
    def __ScriptDataValue(self):
        # Look up the payload type by id; unknown ids fall back to the registry default.
        t = self['Type'].li.int()
        return SCRIPTDATATYPE.withdefault(t, type=t)
    _fields_ = [
        (UI8,'Type'),
        (__ScriptDataValue, 'Value'),
    ]
    def summary(self):
        return '{:s}({:d})/{:s}'.format(self['Value'].classname(), self['Type'].int(), self['Value'].summary())
    repr = summary
class SCRIPTDATATYPE(ptype.definition): cache = {}  # registry mapping AMF0 type ids to their ptype implementations
class SCRIPTDATASTRING(pstruct.type):
    """AMF0 short string: a 16-bit byte length followed by the string data."""
    _fields_ = [(UI16,'StringLength'),(lambda s:dyn.clone(STRING,length=s['StringLength'].li.int()),'StringData')]
    def summary(self):
        return self['StringData'].summary()
    repr = summary
class SCRIPTDATAOBJECTPROPERTY(pstruct.type):
    """A single AMF0 object property: a string name followed by a typed value."""
    _fields_ = [(SCRIPTDATASTRING,'Name'),(SCRIPTDATAVALUE,'Value')]
    def summary(self):
        # Bug fix: neither SCRIPTDATASTRING nor SCRIPTDATAVALUE defines a
        # .str() method, so the original .str() calls raised AttributeError
        # whenever a property was rendered; both types do provide summary().
        return '{:s}={:s}'.format(self['Name'].summary(), self['Value'].summary())
    repr = summary
# FIXME
@TagBody.define
class ScriptTagBody(pstruct.type):
    """SCRIPTDATA tag body (tag type 18), e.g. the onMetaData tag."""
    type = 18
    # NOTE(review): the FLV spec describes the name as a SCRIPTDATASTRING;
    # here both fields are full tagged values -- presumably intentional since
    # the name is serialized with a type byte; confirm against real files.
    _fields_ = [(SCRIPTDATAVALUE,'Name'),(SCRIPTDATAVALUE,'Value')]
    def summary(self):
        return 'Name:{:s} Value:{:s}'.format(self['Name'].summary(), self['Value'].summary())
    repr = summary
@SCRIPTDATATYPE.define
class DOUBLE(DOUBLE):
    """AMF0 Number (type 0): IEEE-754 double."""
    type = 0
@SCRIPTDATATYPE.define
class UI8(UI8):
    """AMF0 Boolean (type 1): single byte.

    NOTE(review): this shadows the module-level UI8 name for the rest of the
    file; later uses resolve to this subclass -- harmless but worth confirming.
    """
    type = 1
@SCRIPTDATATYPE.define
class SCRIPTDATASTRING(SCRIPTDATASTRING):
    """AMF0 String (type 2)."""
    type = 2
@SCRIPTDATATYPE.define
class SCRIPTDATAOBJECT(parray.terminated):
    """AMF0 Object (type 3): name/value properties terminated by an object-end marker."""
    type = 3
    _object_ = SCRIPTDATAOBJECTPROPERTY
    def isTerminator(self, value):
        # Stop once the property's value decodes to the AMF0 object-end marker (type 9).
        return type(value['Value'].li['Value']) == SCRIPTDATAOBJECTEND
        #return value['PropertyName'].li['StringLength'] == 0 and value['PropertyValue'].li['Type'].int() == SCRIPTDATAOBJECTEND.type
    def summary(self):
        return repr([ x.summary() for x in self ])
    repr = summary
@SCRIPTDATATYPE.define
class UI16(UI16):
    """AMF0 Reference (type 7): 16-bit table index. Shadows the module-level UI16 name."""
    type = 7
@SCRIPTDATATYPE.define
class SCRIPTDATAECMAARRAY(pstruct.type):
    """AMF0 ECMA array (type 8): an approximate length hint followed by object-style properties."""
    type = 8
    _fields_ = [
        (UI32,'EcmaArrayLength'),
        # The length is only a hint; the variables are still terminated by an object-end marker.
        (SCRIPTDATAOBJECT, 'Variables'),
    ]
@SCRIPTDATATYPE.define
class SCRIPTDATAOBJECTEND(ptype.type):
    """AMF0 object-end marker (type 9): zero-length payload."""
    type = 9
@SCRIPTDATATYPE.define
class SCRIPTDATASTRICTARRAY(pstruct.type):
    """AMF0 strict array (type 10): a 32-bit count followed by exactly that many values."""
    type = 10
    _fields_ = [
        (UI32,'StrictArrayLength'),
        # Bug fix: dyn.clone(SCRIPTDATAVALUE, length=n) only attached a 'length'
        # attribute to a single struct instead of decoding n consecutive values;
        # a strict array is n SCRIPTDATAVALUEs, so build a real array.
        (lambda s: dyn.array(SCRIPTDATAVALUE, s['StrictArrayLength'].li.int()), 'StrictArrayValue'),
    ]
    def summary(self):
        return '{!r}'.format([x.summary() for x in self['StrictArrayValue']])
    repr = summary
@SCRIPTDATATYPE.define
class SCRIPTDATADATE(pstruct.type):
    """AMF0 Date (type 11): milliseconds since epoch plus a local-time offset in minutes."""
    type = 11
    _fields_ = [(DOUBLE,'DateTime'),(SI16,'LocalDateTimeOffset')]
    def summary(self):
        # Bug fix: label said 'DataTime' instead of the field's name 'DateTime'.
        return 'DateTime:{:s} LocalDateTimeOffset:{:d}'.format(self['DateTime'].summary(), self['LocalDateTimeOffset'].int())
    repr = summary
@SCRIPTDATATYPE.define
class SCRIPTDATALONGSTRING(pstruct.type):
    """AMF0 long string (type 12): 32-bit byte length followed by the string data."""
    type = 12
    _fields_ = [
        (UI32, 'StringLength'),
        (lambda s: dyn.clone(STRING,length=s['StringLength'].li.int()), 'StringData'),
    ]
    def summary(self):
        return self['StringData'].str()
    repr = summary
### Structures
class StreamTag(pstruct.type):
    """Payload of one FLV tag: a per-type header, optional encryption filter params, and the body."""
    def __Header(self):
        # Header type is selected by the TagType bits of the enclosing FLVTAG.
        base = self.getparent(FLVTAG)
        t = base['Type'].li['TagType']
        return TagHeader.withdefault(t, type=t)
    def __FilterParams(self):
        # FilterParams only exist for encrypted tags (Filter bit set).
        base = self.getparent(FLVTAG)
        return FilterParams if base['Type'].li['Filter'] == 1 else ptype.undefined
    def __Body(self):
        base = self.getparent(FLVTAG)
        t = base['Type'].li['TagType']
        return TagBody.withdefault(t, type=t, length=self.DataSize())
    def DataSize(self):
        """Return the number of payload bytes remaining after Header and FilterParams."""
        base = self.getparent(FLVTAG)
        sz = base['DataSize'].li.int()
        ex = self['Header'].li.size() + self['FilterParams'].li.size()
        return sz - ex
    _fields_ = [
        (__Header, 'Header'),
        (__FilterParams, 'FilterParams'),
        (__Body, 'Body'),
    ]
class EncryptionTagHeader(pstruct.type):
    """Additional header present on encrypted FLV tags: filter count, filter name, and length."""
    _fields_ = [
        (UI8, 'NumFilters'),
        (STRING, 'FilterName'),
        (UI24, 'Length'),
    ]
class EncryptionFilterParams(pstruct.type):
    """Parameters for the 'Encryption' filter: a 16-byte initialization vector."""
    _fields_ = [(dyn.array(UI8,16), 'IV')]
class SelectiveEncryptionFilterParams(pbinary.struct):
    """Parameters for the 'SE' (selective encryption) filter: an encrypted-AU flag and a 16-byte IV."""
    _fields_ = [(1,'EncryptedAU'),(7,'Reserved'),(lambda s: dyn.clone(pbinary.array,length=16,_object_=8),'IV')]
class FilterParams(pstruct.type):
    """Filter parameters for an encrypted tag, chosen by the filter name in the encryption header."""
    def __FilterParams(self):
        # NOTE(review): this walks up to an EncryptionTagHeader ancestor, but
        # within StreamTag the header is a sibling field, not a parent --
        # getparent may raise here; confirm against how encrypted tags parse.
        header = self.getparent(EncryptionTagHeader)
        filtername = header['FilterName'].li.str()
        if filtername == 'Encryption':
            return EncryptionFilterParams
        if filtername == 'SE':
            return SelectiveEncryptionFilterParams
        return ptype.undefined
    _fields_ = [
        (__FilterParams, 'FilterParams'),
    ]
class FLVTAG(pstruct.type):
    """A single FLV tag: type/size/timestamp header followed by the stream payload."""
    class Type(pbinary.struct):
        # Packed type byte: 2 reserved bits, 1 encryption-filter bit, 5-bit tag type.
        _fields_ = [(2,'Reserved'),(1,'Filter'),(5,'TagType')]
        def summary(self):
            return 'TagType:{:d} {:s}Reserved:{:d}'.format(self['TagType'], 'Filtered ' if self['Filter'] else '', self['Reserved'])
    def __Extra(self):
        # Any bytes DataSize claims beyond what Stream consumed are kept as Extra.
        sz = self['DataSize'].li.int()
        ts = self['Stream'].li.size()
        return dyn.block(sz-ts)
    _fields_ = [
        (Type, 'Type'),
        (UI24, 'DataSize'),
        (UI24, 'Timestamp'),
        (UI8, 'TimestampExtended'),
        (UI24, 'StreamID'),
        (StreamTag, 'Stream'),
        (__Extra, 'Extra'),
    ]
### file types
class File(pstruct.type):
    """An entire FLV file: the 9-byte header (plus padding) and the tag body."""
    class Header(pstruct.type):
        """FLV file header: 'FLV' signature, version, audio/video flags, and data offset."""
        class TypeFlags(pbinary.struct):
            _fields_ = [(5,'Reserved(0)'),(1,'Audio'),(1,'Reserved(1)'),(1,'Video')]
            def summary(self):
                res = []
                if self['Audio']: res.append('Audio')
                if self['Video']: res.append('Video')
                if self['Reserved(1)'] or self['Reserved(0)']: res.append('Reserved?')
                return '/'.join(res)
        def __Padding(self):
            # DataOffset points at the first tag; anything past the fixed 9 bytes is padding.
            sz = self['DataOffset'].li.int()
            return dyn.block(sz - 9)
        _fields_ = [
            (dyn.array(UI8,3), 'Signature'),
            (UI8, 'Version'),
            (TypeFlags, 'TypeFlags'),
            (UI32, 'DataOffset'),
            (__Padding, 'Padding'),
        ]
    # NOTE(review): this File.__Padding is never referenced by File._fields_
    # (the Header handles its own padding); it appears to be dead code.
    def __Padding(self):
        h = self['Header'].li
        sz = h['DataOffset'].int()
        return dyn.block(sz - h.size())
    class Body(parray.block):
        """Sequence of (PreviousTagSize, tag) records filling the rest of the file."""
        class _object_(pstruct.type):
            _fields_ = [
                (UI32, 'PreviousTagSize'),
                (FLVTAG, 'Tag'),
            ]
    def __Body(self):
        # The body spans from DataOffset to the end of the source; requires a sized source.
        ex = self['Header'].li['DataOffset'].int()
        return dyn.clone(self.Body, blocksize=lambda s:self.source.size() - ex)
    _fields_ = [
        (Header, 'Header'),
        (__Body, 'Body'),
    ]
if __name__ == '__main__':
    # Smoke test: parse a local FLV file and dump its header and first tag.
    import ptypes,swf.flv as flv
    ptypes.setsource(ptypes.prov.file('c:/users/user/Documents/blah.flv',mode='rb'))
    a = flv.File()
    a = a.l
    print(a['Header']['TypeFlags'])
    print(a['Header'])
    print(a['Header']['Padding'].hexdump())
    print(a['Body'][0]['Tag'])
    # Bug fix: FLVTAG defines no 'TagData' field -- the tag payload lives in
    # the 'Stream' member (see FLVTAG._fields_), so the original lookup failed.
    print(a['Body'][0]['Tag']['Stream'])
| nilq/baby-python | python |
# This should work on python 3.6+
import ahip
URL = "http://httpbin.org/uuid"  # httpbin endpoint that returns a random UUID as JSON
async def main(backend=None):
    """Fetch URL once through an ahip PoolManager and print the status and body.

    backend selects the async backend ahip should use (None = autodetect).
    """
    # NOTE(review): the pool is entered with a plain 'with' even though this
    # coroutine is async -- presumably ahip.PoolManager supports synchronous
    # context management; confirm against the ahip API.
    with ahip.PoolManager(backend=backend) as http:
        print("URL:", URL)
        r = await http.request("GET", URL, preload_content=False)
        print("Status:", r.status)
        print("Data:", await r.read())
# Run the same coroutine under each of the three supported async backends.
print("--- Trio ---")
import trio
trio.run(main)
print("\n--- asyncio (via AnyIO) ---")
import asyncio
# NOTE(review): asyncio.run(main()) is the modern equivalent; the explicit
# event-loop pattern is kept because the script advertises Python 3.6 support.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
print("\n--- Curio (via AnyIO) ---")
import curio
curio.run(main)
| nilq/baby-python | python |
#!/usr/bin/env python
from netmiko import ConnectHandler
def _switch(ip):
    """Return a netmiko connection dict for one lab switch (shared credentials)."""
    return {
        'device_type': 'cisco_ios',
        'ip': ip,
        'username': 'admin',
        'password': 'cisco',
    }

# Same five switches as before; the individual names are kept so any external
# reference to them keeps working.
iosv_l2_SW5 = _switch('192.168.10.100')
iosv_l2_SW1 = _switch('192.168.10.101')
iosv_l2_SW2 = _switch('192.168.10.102')
iosv_l2_SW3 = _switch('192.168.10.103')
iosv_l2_SW4 = _switch('192.168.10.104')

all_devices = [iosv_l2_SW3, iosv_l2_SW4, iosv_l2_SW2, iosv_l2_SW1, iosv_l2_SW5]

# Save the running configuration on every switch.
for device in all_devices:
    net_connect = ConnectHandler(**device)
    try:
        output = net_connect.send_command('wr mem')
        print(output)
    finally:
        # Bug fix: the original never closed the SSH sessions, leaking one
        # connection per device.
        net_connect.disconnect()
| nilq/baby-python | python |
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import SimpleLazyObject
from mediawiki_auth import mediawiki
def get_user(request):
    """Return the Django user for this request, resolved via MediaWiki.

    The result is memoized on the request so the MediaWiki lookup runs at
    most once per request.
    """
    if not hasattr(request, '_cached_user'):
        request._cached_user = mediawiki.get_or_create_django_user(request)
    return request._cached_user
class AuthenticationMiddleware(MiddlewareMixin):
    """Attach a lazily-resolved, MediaWiki-backed user object to every request.

    Mirrors django.contrib.auth.middleware.AuthenticationMiddleware, but the
    user is produced by get_user() above. SimpleLazyObject defers the lookup
    until request.user is first accessed.
    """
    def process_request(self, request):
        # Session middleware must have run already, since the MediaWiki lookup
        # relies on the session.
        assert hasattr(request, 'session'), (
            "The Django authentication middleware requires session middleware "
            "to be installed. Edit your MIDDLEWARE%s setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware' before "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'."
        ) % ("_CLASSES" if settings.MIDDLEWARE is None else "")
        request.user = SimpleLazyObject(lambda: get_user(request))
| nilq/baby-python | python |
"""
Demo module that prints a greeting message to stdout.
"""
def hello_world(message='Hello World'):
    """Display a message on standard output.

    Parameters
    ----------
    message : str, optional
        Text to show; defaults to ``'Hello World'``.
    """
    text = message
    print(text)
| nilq/baby-python | python |
"""
This example shows how EasyNMT can be used for sentence translation
"""
import datetime
from easynmt import EasyNMT
# Sentences to translate; the commented-out entries are alternative test inputs.
sentences = [
    # '薄雾',
    # 'Voici un exemple d\'utilisation d\'EasyNMT.', # 'This is an example how to use EasyNMT.',
    '南瓜人?',
    # 'Cada frase es luego traducida al idioma de destino seleccionado.',
    # 'Each sentences is then translated to your chosen target language.',
    # 'On our website, you can find various translation models.',
    # 'New York City (NYC), often called simply New York, is the most populous city in the United States.',
    # 'PyTorch is an open source machine learning library based on the Torch library, used for applications such as computer vision and natural language processing, primarily developed by Facebook\'s AI Research lab (FAIR).',
    # 'A deep neural network (DNN) is an artificial neural network (ANN) with multiple layers between the input and output layers.'
]
target_lang = 'en'  # target language: English (the old comment wrongly said German)
source_lang = 'zh'  # source language: Chinese
# Alternative models, in increasing size/quality:
# model = EasyNMT('opus-mt')
model = EasyNMT('m2m_100_418M')
# model = EasyNMT('m2m_100_1.2B')
# Wall-clock timestamps bracket the call so translation latency can be eyeballed.
print(datetime.datetime.now())
translations = model.translate(sentences, target_lang=target_lang, source_lang=source_lang, batch_size=8, beam_size=3)
print(translations)
print(datetime.datetime.now())
| nilq/baby-python | python |
from . import argument_magics as _args
from . import data_magics as _data
from .list_magic import L as _LType
from .seq_magic import N as _NType
# Public API of the package: each magic is exposed as a single module-level
# instance created once at import time.
# Argument magics
X_i = _args.X_i()
F = _args.F()
# Sequence type
N = _NType()
# Data magics
L = _LType()
D = _data.D()
S = _data.S()
B = _data.B()
T = _data.T()
| nilq/baby-python | python |
"""
Utility functions for working with actual working times (実績作業時間).
"""
from __future__ import annotations
import datetime
from collections import defaultdict
from typing import Any, Dict, Optional, Tuple
from annoworkapi.utils import datetime_to_str, str_to_datetime
_ActualWorkingHoursDict = Dict[Tuple[datetime.date, str, str], float]
"""実績作業時間の日ごとの情報を格納する辞書
key: (date, workspace_member_id, job_id), value: 実績作業時間
"""
def get_term_start_end_from_date_for_actual_working_time(
    start_date: Optional[str], end_date: Optional[str], tzinfo: Optional[datetime.tzinfo] = None
) -> tuple[Optional[str], Optional[str]]:
    """Convert a start/end date pair into the ``term_start``/``term_end`` query
    parameters passed to the API that fetches actual working times.

    Args:
        start_date: start date (ISO format), or None.
        end_date: end date (ISO format), or None.
        tzinfo: timezone the given dates are interpreted in. If None, the
            system timezone is used.

    Notes:
        The Web API's filtering condition may not be exact.

    Returns:
        ``(term_start, term_end)`` strings for the API; each element is None
        when the corresponding input date was None.
    """
    if tzinfo is None:
        # Fall back to the system timezone.
        tzinfo = datetime.datetime.now().astimezone().tzinfo
    term_start: Optional[str] = None
    if start_date is not None:
        dt_local_start_date = datetime.datetime.fromisoformat(start_date).replace(tzinfo=tzinfo)
        term_start = datetime_to_str(dt_local_start_date)
    term_end: Optional[str] = None
    if end_date is not None:
        dt_local_end_date = datetime.datetime.fromisoformat(end_date).replace(tzinfo=tzinfo)
        # For end_date="2021-01-02", make term_end "2021-01-02T23:59:59.999"
        # (last millisecond of the inclusive end day).
        # WARNING: dictated by the Web API's behavior; may change in the future.
        tmp = dt_local_end_date + datetime.timedelta(days=1) - datetime.timedelta(microseconds=1000)
        term_end = datetime_to_str(tmp)
    return term_start, term_end
def _create_actual_working_hours_dict(actual: dict[str, Any], tzinfo: datetime.tzinfo) -> _ActualWorkingHoursDict:
    """Split one actual-working-time record into per-day working hours.

    A record whose start and end fall on different local days is split at each
    midnight (in *tzinfo*), so every resulting entry is attributed to a single
    date.

    Args:
        actual: one record with 'start_datetime', 'end_datetime',
            'workspace_member_id' and 'job_id' keys.
        tzinfo: timezone used to decide which date each span belongs to.

    Returns:
        Mapping of (date, workspace_member_id, job_id) to working hours.
    """
    results_dict: _ActualWorkingHoursDict = {}
    dt_local_start_datetime = str_to_datetime(actual["start_datetime"]).astimezone(tzinfo)
    dt_local_end_datetime = str_to_datetime(actual["end_datetime"]).astimezone(tzinfo)
    workspace_member_id = actual["workspace_member_id"]
    job_id = actual["job_id"]
    if dt_local_start_datetime.date() == dt_local_end_datetime.date():
        # Whole span falls on one day.
        actual_working_hours = (dt_local_end_datetime - dt_local_start_datetime).total_seconds() / 3600
        results_dict[(dt_local_start_datetime.date(), workspace_member_id, job_id)] = actual_working_hours
    else:
        dt_tmp_local_start_datetime = dt_local_start_datetime
        # Actual working time should never exceed 24 hours, but handle spans
        # longer than that anyway by walking one midnight at a time.
        while dt_tmp_local_start_datetime.date() < dt_local_end_datetime.date():
            dt_next_date = dt_tmp_local_start_datetime.date() + datetime.timedelta(days=1)
            dt_tmp_local_end_datetime = datetime.datetime(
                year=dt_next_date.year, month=dt_next_date.month, day=dt_next_date.day, tzinfo=tzinfo
            )
            actual_working_hours = (dt_tmp_local_end_datetime - dt_tmp_local_start_datetime).total_seconds() / 3600
            results_dict[(dt_tmp_local_start_datetime.date(), workspace_member_id, job_id)] = actual_working_hours
            dt_tmp_local_start_datetime = dt_tmp_local_end_datetime
        # Remainder on the final day.
        actual_working_hours = (dt_local_end_datetime - dt_tmp_local_start_datetime).total_seconds() / 3600
        results_dict[(dt_local_end_datetime.date(), workspace_member_id, job_id)] = actual_working_hours
    return results_dict
def create_actual_working_times_daily(
    actual_working_times: list[dict[str, Any]], tzinfo: Optional[datetime.tzinfo] = None
) -> list[dict[str, Any]]:
    """Aggregate actual working times per (date, job, member) from the raw list
    returned by APIs such as ``getActualWorkingTimes``.

    Args:
        actual_working_times: list of records returned by e.g. the
            ``getActualWorkingTimes`` API.
        tzinfo: timezone used to decide dates. If unspecified, the system
            timezone is used.

    Returns:
        List of dicts aggregated per date, job and member, each with keys:
        * date
        * job_id
        * workspace_member_id
        * actual_working_hours
    """
    results_dict: _ActualWorkingHoursDict = defaultdict(float)
    tmp_tzinfo = tzinfo if tzinfo is not None else datetime.datetime.now().astimezone().tzinfo
    assert tmp_tzinfo is not None
    # Split every record at day boundaries, then sum per (date, member, job).
    for actual in actual_working_times:
        tmp_results = _create_actual_working_hours_dict(actual, tzinfo=tmp_tzinfo)
        for key, value in tmp_results.items():
            results_dict[key] += value
    results_list: list[dict[str, Any]] = []
    for (date, workspace_member_id, job_id), actual_working_hours in results_dict.items():
        # Entries with zero working hours carry no information, so skip them.
        if actual_working_hours > 0:
            results_list.append(
                dict(
                    date=str(date),
                    workspace_member_id=workspace_member_id,
                    job_id=job_id,
                    actual_working_hours=actual_working_hours,
                )
            )
    return results_list
| nilq/baby-python | python |
from __future__ import unicode_literals
from django_markdown.models import MarkdownField
from django.db import models
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from taggit.models import TaggedItemBase
from os.path import join as isfile
from django.conf import settings
import os
#this will store the extra profile details of the user
class UserProfileModel(models.Model):
    """Extended profile (bio, skills, programme, avatar, ...) linked 1:1 to a Django User."""
    user = models.OneToOneField(User)
    user_description = MarkdownField()
    skills = TaggableManager()
    user_type_select = models.CharField(max_length = 50,default = 'None')
    programme = models.CharField(max_length = 15)
    branch = models.CharField(max_length = 100)
    college_year = models.CharField(max_length = 10)
    graduation_year = models.CharField(max_length = 4)
    user_profile_pic = models.FileField(upload_to = 'profile_pics/',blank = True,default = 'profile_pics/avatars/default.png')
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.user.username
class CodehubTopicModel(models.Model):
    """A codehub topic post, optionally carrying a link and an uploaded file."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    topic_heading = models.CharField(max_length = 100)
    topic_detail = MarkdownField()
    topic_link = models.CharField(max_length = 100,blank = True)
    tags = TaggableManager()
    topic_type = models.CharField(max_length = 10)
    file = models.FileField(upload_to = 'uploads/',blank = True)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.topic_heading
    def delete(self,*args,**kwargs):
        # Remove the uploaded file from disk before deleting the row.
        print 'in the delete function of codehub model'
        if self.file:
            file_path = os.path.join(settings.MEDIA_ROOT,self.file.name)
            print file_path
            if os.path.isfile(file_path):
                os.remove(file_path)
        super(CodehubTopicModel,self).delete(*args,**kwargs)
class CodehubTopicCommentModel(models.Model):
    """A user comment attached to a codehub topic."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    topic = models.ForeignKey('CodehubTopicModel')
    comment_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.topic.topic_heading
class CodehubCreateEventModel(models.Model):
    """A codehub event with date, venue and target audience (basic/advanced)."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    event_heading = models.CharField(max_length = 100)
    event_date = models.DateTimeField(null = True,blank = True)
    event_venue = models.CharField(max_length = 100)
    event_description = MarkdownField()
    event_for = models.CharField(max_length = 25)#basic or advanced
    tags = TaggableManager()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.event_heading
class CodehubEventQuestionModel(models.Model):
    """A question asked about a codehub event."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    event = models.ForeignKey(CodehubCreateEventModel)
    question_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class MusicModel(models.Model):
    """An uploaded music track with language and artist metadata."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    music_name = models.CharField(max_length = 100)
    music_file = models.FileField(upload_to = 'music/')
    music_lang = models.CharField(max_length = 20)
    music_artist = models.CharField(max_length = 30)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.music_name
class CodehubQuestionModel(models.Model):
    """A question posted in the codehub section."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    question_heading = models.CharField(max_length = 200)
    question_description = MarkdownField()
    question_link = models.CharField(max_length = 100,blank = True)
    question_tags = TaggableManager()
    question_type = models.CharField(max_length = 20)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.question_heading
class CodehubQuestionCommentModel(models.Model):
    """A comment on a codehub question."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    question = models.ForeignKey(CodehubQuestionModel)
    comment_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.question.question_heading
class BlogPostModel(models.Model):
    """A blog post with optional header image and a simple view counter."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    title = models.CharField(max_length = 200)
    body = MarkdownField()
    tags = TaggableManager()
    image_file = models.FileField(upload_to = 'blog_images/',blank = True)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    views_count = models.CharField(max_length = 15, default = 0)
    def __str__(self):
        return self.title
    def delete(self,*args,**kwargs):
        # Remove the uploaded image from disk before deleting the row.
        print 'In the delete function of the BlogPostModel'
        if self.image_file:
            file_path = os.path.join(settings.MEDIA_ROOT,self.image_file.name)
            if os.path.isfile(file_path):
                os.remove(file_path)
        super(BlogPostModel,self).delete(*args,**kwargs)
class BlogPostCommentModel(models.Model):
    """A comment on a blog post."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    blog_post = models.ForeignKey(BlogPostModel)
    comment_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.comment_text
class CodehubInnovationPostModel(models.Model):
    """An innovation idea post with a vote tally."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    title = models.CharField(max_length = 200)
    description = MarkdownField()
    tags = TaggableManager()
    vote = models.CharField(max_length = 100,default = 0)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.title
class CodehubInnovationCommentModel(models.Model):
    """A comment on an innovation post."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    innovation_post = models.ForeignKey(CodehubInnovationPostModel)
    comment_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.comment_text
class DevhubQuestionModel(models.Model):
    """A question posted in the devhub section."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    question_heading = models.CharField(max_length = 200)
    question_description = MarkdownField()
    question_link = models.CharField(max_length = 100,blank = True)
    question_tags = TaggableManager()
    question_type = models.CharField(max_length = 20)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.question_heading
class DevhubQuestionAnswerModel(models.Model):
    """An answer to a devhub question."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    question = models.ForeignKey(DevhubQuestionModel)
    answer_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class DevhubTopicModel(models.Model):
    """A devhub topic post, optionally carrying a link and an uploaded file."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    topic_heading = models.CharField(max_length = 100)
    topic_detail = MarkdownField()
    topic_link = models.CharField(max_length = 100,blank = True)
    tags = TaggableManager()
    file = models.FileField(upload_to = 'devhub/',blank = True)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.topic_heading
    def delete(self,*args,**kwargs):
        # Remove the uploaded file from disk before deleting the row.
        print 'in the delete function of devhub model'
        if self.file:
            file_path = os.path.join(settings.MEDIA_ROOT,self.file.name)
            print file_path
            if os.path.isfile(file_path):
                os.remove(file_path)
        super(DevhubTopicModel,self).delete(*args,**kwargs)
class DevhubTopicCommentModel(models.Model):
    """A comment attached to a devhub topic."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    topic = models.ForeignKey(DevhubTopicModel)
    comment_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.topic.topic_heading
class DevhubProjectModel(models.Model):
    """A project showcased in devhub. NOTE(review): unlike sibling models it has no created/modified timestamps -- confirm that is intentional."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    project_heading = models.CharField(max_length = 200)
    project_description = MarkdownField()
    project_link = models.CharField(max_length = 100,blank = True)
    tags = TaggableManager()
class FollowUserModel(models.Model):
    """Follower relation: following_user follows followed_user."""
    # following_user = models.CharField(max_length = 10) #user who is following
    following_user = models.ForeignKey(User,related_name = 'following_user')
    followed_user = models.ForeignKey(User,related_name = 'followed_user') #user being followed
    following_user_profile = models.ForeignKey(UserProfileModel,related_name = 'following_user_profile')
    followed_user_profile = models.ForeignKey(UserProfileModel,related_name = 'followed_user_profile')
class ProposeEventModel(models.Model):
    """A user-proposed event that others can vote on and discuss."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    event_heading = models.CharField(max_length = 200)
    event_description = MarkdownField()
    tags = TaggableManager()
    event_type = models.CharField(max_length = 30)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class ProposeEventVoteModel(models.Model):
    """A single user's vote on a proposed event."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    event = models.ForeignKey(ProposeEventModel)
    vote = models.CharField(max_length = 10)
class ProposeEventSuggestionModel(models.Model):
    """A suggestion left on a proposed event."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    event = models.ForeignKey(ProposeEventModel)
    sugg_text = models.CharField(max_length = 500)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
#host_project section starts here
class HostProjectModel(models.Model):
    """A hosted project that other users can ask to join."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    project_name = models.CharField(max_length = 200)
    project_description = MarkdownField()
    skills = TaggableManager()
    project_status = models.CharField(max_length = 15,default = 'active')
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class PingHostProjectModel(models.Model):
    """A join request ("ping") from a user to a hosted project."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    hosted_project = models.ForeignKey(HostProjectModel)
    ping_status = models.CharField(max_length = 20,default = 'waiting')
    created = models.DateTimeField(auto_now_add = True)
class HostProjectQuestionModel(models.Model):
    """A question asked about a hosted project."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    project = models.ForeignKey(HostProjectModel)
    question_text = models.CharField(max_length = 500)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
# NOTE(review): class name has a triple-'s' typo ('Messsage'); renaming would
# require a migration and caller updates, so it is documented rather than fixed.
class MesssageModel(models.Model):
    """A private message between two users; message_status tracks read state as a string flag."""
    sender = models.ForeignKey(User,related_name = 'sender')
    receiver = models.ForeignKey(User,related_name = 'receiver')
    sender_profile = models.ForeignKey(UserProfileModel,related_name = 'sender_profile')
    receiver_profile = models.ForeignKey(UserProfileModel,related_name = 'receiver_profile')
    message_text = models.CharField(max_length = 500)
    message_status = models.CharField(max_length = 5,default = 'False')
    created = models.DateTimeField(auto_now_add = True)
#the info section comes here
class TaggedInfoAddQuery(TaggedItemBase):
    """Custom taggit through-model for TheInfoAddQueryModel tags."""
    content_object = models.ForeignKey('TheInfoAddQueryModel')
class TheInfoAddQueryModel(models.Model):
    """A short info query with tags (uses the custom through-model above)."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    queryText = models.CharField(max_length = 200)
    queryTags = TaggableManager(through = TaggedInfoAddQuery)
    created = models.DateTimeField(auto_now_add = True)
class TheInfoQueryAnswerModel(models.Model):
    """An answer to an info query."""
    info_query = models.ForeignKey(TheInfoAddQueryModel)
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    answer_text = models.CharField(max_length = 200)
class TheInfoQueryAnswerVoteModel(models.Model):
    """A vote on an info-query answer (one row per vote)."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    answer = models.ForeignKey(TheInfoQueryAnswerModel)
class GeneralQuestionModel(models.Model):
    """A general (non-section-specific) question."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    ques_text = MarkdownField()
    ques_tags = TaggableManager()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class GeneralQuestionAnswerModel(models.Model):
    """An answer to a general question."""
    question = models.ForeignKey(GeneralQuestionModel)
    answer_text = MarkdownField()
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class CreateUserGroupModel(models.Model):
    """A user-created group; group_status toggles between 'active' and 'deactive'."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    group_name = models.CharField(max_length = 50)
    group_description = MarkdownField()
    group_tags = TaggableManager()
    group_status = models.CharField(max_length = 15,default = 'active') #other option is deactive
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
#create track of the user request to join a particular group
class GroupUsersInterestTrackModel(models.Model):
    """Tracks a user's request to join a group and its current status."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    group = models.ForeignKey(CreateUserGroupModel)
    request_status = models.CharField(max_length = 15)
    created = models.DateTimeField(auto_now_add = True)
class GroupUserCommentModel(models.Model):
    """A comment posted inside a group."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    group = models.ForeignKey(CreateUserGroupModel)
    comment_text = models.CharField(max_length = 150,blank = False)
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
class DevhubCreateEventModel(models.Model):
    """A devhub event with date, venue and target audience (basic/advanced)."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    event_heading = models.CharField(max_length = 100)
    event_date = models.DateTimeField(null = True,blank = True)
    event_venue = models.CharField(max_length = 100)
    event_description = MarkdownField()
    event_for = models.CharField(max_length = 25)#basic or advanced
    tags = TaggableManager()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.event_heading
class DevhubEventQuestionModel(models.Model):
    """A question asked about a devhub event."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    # NOTE(review): this FK targets CodehubCreateEventModel, not
    # DevhubCreateEventModel -- looks like a copy-paste slip, but changing it
    # requires a schema migration; confirm against the views that use it.
    event = models.ForeignKey(CodehubCreateEventModel)
    question_text = MarkdownField()
    created = models.DateTimeField(auto_now_add = True)
    modified = models.DateTimeField(auto_now = True)
"""
models for storing the user diff accounts
"""
class UserSocialAccountModel(models.Model):
    """A social-network profile link (name + URL) attached to a user."""
    user = models.ForeignKey(User)
    user_profile = models.ForeignKey(UserProfileModel)
    social_profile_name = models.CharField(max_length = 50)
    social_profile_link = models.CharField(max_length = 100)
| nilq/baby-python | python |
import xsimlab as xs
from ..processes.boundary import BorderBoundary
from ..processes.channel import (StreamPowerChannel,
DifferentialStreamPowerChannelTD)
from ..processes.context import FastscapelibContext
from ..processes.flow import DrainageArea, SingleFlowRouter, MultipleFlowRouter
from ..processes.erosion import TotalErosion
from ..processes.grid import RasterGrid2D
from ..processes.hillslope import LinearDiffusion, DifferentialLinearDiffusion
from ..processes.initial import (BareRockSurface,
Escarpment,
FlatSurface,
NoErosionHistory)
from ..processes.main import (Bedrock,
StratigraphicHorizons,
SurfaceTopography,
SurfaceToErode,
TerrainDerivatives,
TotalVerticalMotion,
UniformSedimentLayer)
from ..processes.marine import MarineSedimentTransport, Sea
from ..processes.tectonics import (BlockUplift,
SurfaceAfterTectonics,
TectonicForcing,
TwoBlocksUplift)
# ``bootstrap_model`` has the minimal set of processes required to
# simulate on a 2D uniform grid the evolution of topographic surface
# under the action of tectonic and erosion processes. None of such
# processes are included. It only provides the "skeleton" of a
# landscape evolution model and might be used as a basis to create
# custom models.
bootstrap_model = xs.Model({
    'grid': RasterGrid2D,
    'fs_context': FastscapelibContext,
    'boundary': BorderBoundary,
    'tectonics': TectonicForcing,
    'surf2erode': SurfaceToErode,
    'erosion': TotalErosion,
    'vmotion': TotalVerticalMotion,
    'topography': SurfaceTopography,
})
# ``basic_model`` is a "standard" landscape evolution model that
# includes block uplift, (bedrock) channel erosion using the stream
# power law and hillslope erosion/deposition using linear
# diffusion. Initial topography is a flat surface with random
# perturbations. Flow is routed on the topographic surface using a D8,
# single flow direction algorithm. All erosion processes are computed
# on a topographic surface that is first updated by tectonic forcing
# processes.
basic_model = bootstrap_model.update_processes({
    'uplift': BlockUplift,
    # Overrides bootstrap's SurfaceToErode: erosion acts on the surface
    # already updated by tectonic forcing.
    'surf2erode': SurfaceAfterTectonics,
    'flow': SingleFlowRouter,
    'drainage': DrainageArea,
    'spl': StreamPowerChannel,
    'diffusion': LinearDiffusion,
    'terrain': TerrainDerivatives,
    'init_topography': FlatSurface,
    'init_erosion': NoErosionHistory
})
# ``sediment_model`` is built on top of ``basic_model`` ; it tracks
# the evolution of both the topographic surface and the bedrock,
# separated by a uniform, active layer of sediment. This model uses an
# extended version of the stream-power law that also includes channel
# transport and deposition. Flow is routed using a multiple flow
# direction algorithm. Differential erosion/deposition is enabled for
# both hillslope and channel processes, i.e., distinct values may be
# set for the erosion and transport coefficients (bedrock vs
# soil/sediment).
sediment_model = basic_model.update_processes({
    'bedrock': Bedrock,
    'active_layer': UniformSedimentLayer,
    'init_bedrock': BareRockSurface,
    # Overrides basic_model's single-direction router.
    'flow': MultipleFlowRouter,
    # Stream-power law with transport/deposition and differential
    # (bedrock vs. sediment) coefficients.
    'spl': DifferentialStreamPowerChannelTD,
    'diffusion': DifferentialLinearDiffusion
})
# ``marine_model`` simulates the erosion, transport and deposition of
# bedrock or sediment in both continental and submarine
# environments. It is built on top of ``sediment_model`` to which it
# adds a process for sediment transport, deposition and compaction in
# the submarine domain (under sea level). The processes for the
# initial topography and uplift both allow easy set-up of the two land
# vs. marine environments. An additional process keeps track of a
# fixed number of stratigraphic horizons over time.
marine_model = sediment_model.update_processes({
    # Escarpment + TwoBlocksUplift ease setting up distinct land and
    # marine domains.
    'init_topography': Escarpment,
    'uplift': TwoBlocksUplift,
    'sea': Sea,
    'marine': MarineSedimentTransport,
    'strati': StratigraphicHorizons
})
| nilq/baby-python | python |
"""
Effects classes
added to show because they track themselves over time
have one or more targets that they can apply the effect to in unison
change some attribute over time - generally using envelopes
"""
import random
from birdfish.envelope import (Envelope, EnvelopeSegment,
ColorEnvelope)
from birdfish.lights import BaseLightElement, LightElement
from birdfish import tween
# TODO There should probably be a base element - then BaseData or BaseLight
# element
class BaseEffect(BaseLightElement):
    """Base class for effects: tracks a trigger state and applies
    itself to one or more target elements in unison (see module
    docstring).
    """
    def __init__(self, *args, **kwargs):
        super(BaseEffect, self).__init__(*args, **kwargs)
        # Elements this effect acts on when update() is given none.
        self.targets = kwargs.get('targets', [])
        # TODO should triggered default be True or False?
        triggered = kwargs.get('triggered', True)
        if triggered:
            self.trigger_state = 0
        else:
            self.trigger_state = 1
        # Optional list of ADSR segment labels; when non-empty, only
        # targets currently in one of those segments are affected.
        self.envelope_filters = []
    def filter_targets(self, targets):
        """
        subclasses can override to provide some behavior that limits
        the effect only to some targets, or targets in some state
        """
        # TODO may need to rethink to make it easier to add filters
        # and/or reuse this adsr stuff
        if targets and self.envelope_filters:
            filtered_targets = []
            for target in targets:
                if hasattr(target, 'adsr_envelope'):
                    label = target.adsr_envelope.get_current_segment().label
                    if label in self.envelope_filters:
                        filtered_targets.append(target)
            return filtered_targets
        else:
            return targets
    def get_targets(self, targets):
        """Normalize *targets* (None or a single LightElement allowed),
        filter them, and remember the result on ``self.targets``.
        """
        if not targets:
            targets = self.targets
        elif isinstance(targets, LightElement):
            targets = [targets]
        # set self.targets for use by _off_trigger or other
        # methods outside the update call
        self.targets = self.filter_targets(targets)
        return self.targets
    def trigger(self, intensity, **kwargs):
        """Dispatch to _on_trigger/_off_trigger based on *intensity*
        (nonzero -> on, zero/falsy -> off)."""
        if intensity:
            self.trigger_state = 1
            self._on_trigger(intensity, **kwargs)
        else:
            self.trigger_state = 0
            self._off_trigger(intensity, **kwargs)
    def _off_trigger(self, intensity, **kwargs):
        # Since effects can act on lights during release - after off-trigger
        # they may be responsible for turning element intensity off.
        # NOTE(review): assumes BaseLightElement defines a no-argument
        # _off_trigger() -- confirm.
        super(BaseEffect, self)._off_trigger()
        for element in self.targets:
            element.set_intensity(0)
    def update(self, show, targets=None):
        # Subclasses must implement their per-frame behavior.
        raise NotImplementedError
class EnvelopeMap(BaseEffect, Envelope):
    """Effect that maps an Envelope's current value onto an arbitrary
    attribute (*attr*) of each target.
    """
    def __init__(self, attr, *args, **kwargs):
        # Both bases are initialized explicitly (no cooperative super).
        BaseEffect.__init__(self, *args, **kwargs)
        Envelope.__init__(self, *args, **kwargs)
        # Name of the target attribute driven by the envelope value.
        self.attr = attr
    def _off_trigger(self, intensity, **kwargs):
        # Rewind the envelope so the next trigger starts from the top.
        self.reset()
    def update(self, show, targets=None):
        if self.trigger_state:
            targets = self.get_targets(targets)
            # Advance the envelope at most once per show timecode.
            # NOTE(review): if update() runs twice within the same
            # timecode, ``val`` is unbound in the loop below
            # (UnboundLocalError) -- confirm update() is called only
            # once per frame.
            if self.last_update != show.timecode:
                val = Envelope.update(self, show.time_delta)
                self.last_update = show.timecode
            for target in targets:
                setattr(target, self.attr, val)
class ColorShift(BaseEffect, ColorEnvelope):
    """Effect that animates hue/saturation/intensity of its targets
    from a ColorEnvelope, once per show timecode.
    """
    # TODO notes:
    # how does it handle the existing color of an element
    # can I handle explicit start color, or take current color and shift both
    # can we reset the color to the original?
    #
    def __init__(self, shift_amount=0, target=0, **kwargs):
        # NOTE(review): ``shift_amount`` and ``target`` are accepted but
        # never used below -- confirm whether they are vestigial.
        # super() follows the MRO (initializes BaseEffect); the second
        # base is then initialized explicitly.
        super(ColorShift, self).__init__(**kwargs)
        ColorEnvelope.__init__(self, **kwargs)
        # Most recently computed color components.
        self.hue = 0
        self.saturation = 0
        self.intensity = 1
    def _on_trigger(self, intensity, **kwargs):
        # Restart the color envelope on every on-trigger.
        self.reset()
    def update(self, show, targets=None):
        if self.trigger_state:
            targets = self.get_targets(targets)
            # TODO need to make this anti duplicate calling logic
            # more effects generic - maybe effects specific stuff goes
            # in a render method
            if self.last_update != show.timecode:
                self.hue, self.saturation, self.intensity = self._color_update(
                    show.time_delta)
                self.last_update = show.timecode
            for target in targets:
                # A component of None means "leave that channel as-is".
                if self.hue is not None:
                    target.hue = self.hue
                if self.saturation is not None:
                    target.saturation = self.saturation
                if self.intensity is not None:
                    target.set_intensity(self.intensity)
class Twinkle(BaseEffect):
    """Randomized flicker: each cycle picks a random "on" duration,
    "off" duration and intensity within the configured bounds, then
    alternates between driving targets on (at that intensity) and off.

    Targets are driven either through their ``trigger()`` method
    (``use_trigger=True``, the default) or by setting intensity
    directly.
    """
    def __init__(self, frequency=2, **kwargs):
        # ``frequency`` is accepted for interface compatibility but is
        # unused; cycle timing comes from the on/off min/max bounds.
        super(Twinkle, self).__init__(**kwargs)
        self.on_min = .01
        self.on_max = 1
        self.off_min = .8
        self.off_max = 1.3
        self.intensity_min = .3
        self.intensity_max = 1
        self.blinkon = True
        self.cycle_elapsed = 0
        self.last_changed = None
        # self.mode = 'darken'
        self.mode = 'modes-disabled'
        self.use_trigger = kwargs.get('use_trigger', True)
        # the parameters of current cycle
        self.on_dur = self.off_dur = self.intensity = 0
        self.durations = {True: self.on_dur, False: self.off_dur}
    def setup_cycle(self):
        """Randomize durations and intensity for the next on/off cycle."""
        self.on_dur = self.on_min + random.random() * (
            self.on_max - self.on_min)
        self.off_dur = self.off_min + random.random() * (
            self.off_max - self.off_min)
        self.intensity = self.intensity_min + random.random() * (
            self.intensity_max - self.intensity_min)
        self.durations = {True: self.on_dur, False: self.off_dur}
    def update(self, show, targets=None):
        # note, currently can not easily assign a twinkle to an elements
        # effects array - must add it to the show directly as it uses the
        # trigger method this is true of any effect that uses trigger method of
        # elements for rendering the effect - basically an effect can not be
        # piggy-backed on an elements trigger, if it is to use trigger to
        # cause/manage the effect perhaps an effect should always manipulate
        # the lower level attributes instead of using a trigger
        if not self.trigger_state:
            return
        targets = self.get_targets(targets)
        self.cycle_elapsed += show.time_delta
        if self.cycle_elapsed <= self.durations[self.blinkon]:
            return
        # current cycle complete - flip phase below
        if self.blinkon:
            # switch targets off and pick parameters for the next cycle
            for t in targets:
                if self.use_trigger:
                    t.trigger(0)
                else:
                    t.set_intensity(0)
            self.setup_cycle()
        else:
            for t in targets:
                if self.mode == 'darken':
                    value = min(t.intensity, self.intensity)
                elif self.mode == 'lighten':
                    value = max(t.intensity, self.intensity)
                else:
                    # modes currently disabled
                    value = self.intensity
                if self.use_trigger:
                    t.trigger(value)
                else:
                    t.set_intensity(value)
        self.blinkon = not self.blinkon
        self.cycle_elapsed = 0
    def _off_trigger(self, intensity=0, **kwargs):
        # BUGFIX: BaseEffect.trigger() calls _off_trigger(intensity,
        # **kwargs); the previous zero-argument signature raised
        # TypeError on every off-trigger.  Also, the non-trigger branch
        # referenced an undefined local ``targets`` (NameError); it now
        # uses self.targets like the trigger branch.
        # only works for explicit effect targets
        for t in self.targets:
            if self.use_trigger:
                t.trigger(0)
            else:
                t.set_intensity(0)
        # NOTE(review): re-arming trigger_state here keeps the effect
        # running after an off trigger (BaseEffect.trigger just set it
        # to 0) -- looks intentional, but confirm.
        self.trigger_state = 1
class Blink(BaseEffect):
    """Square-wave blink: during the "off" half of each period the
    targets' intensity is forced to zero; during the "on" half they
    are left untouched.
    """
    def __init__(self, frequency=2, **kwargs):
        super(Blink, self).__init__(**kwargs)
        self.blinkon = True
        self.last_changed = None
        # Assigning through the property also derives period_duration.
        self.frequency = frequency
    def update(self, show, targets=None):
        """Advance the phase and darken targets while in "off"."""
        targets = self.get_targets(targets)
        if not self.last_changed:
            # First tick: just record the time base.
            self.last_changed = show.timecode
            return
        if show.timecode - self.last_changed > self.period_duration:
            self.blinkon = not self.blinkon
            self.last_changed = show.timecode
        if self.blinkon:
            return
        # Intensity is only modified during the "off" phase.
        for target in targets:
            target.set_intensity(0)
    @property
    def frequency(self):
        """Blink rate in full on/off cycles per second."""
        return self._frequency
    @frequency.setter
    def frequency(self, value):
        self._frequency = value
        # Each on or off phase lasts half a full period.
        self.period_duration = 1.0 / (2 * value)
class Pulser(BaseEffect):
    """Periodic fade in/out: a looping two-segment envelope whose
    value scales each target's current intensity.
    """
    # TODO need to implement trigger here - otherwise effects will run
    # "in the background" all the time,and may not be synced to
    # elements as desired.
    #
    def __init__(self, frequency=1, on_shape=tween.LINEAR,
                 off_shape=tween.LINEAR, **kwargs):
        super(Pulser, self).__init__(**kwargs)
        # Half a cycle rising, half falling.
        period_duration = 1.0 / (2 * frequency)
        on_flash = EnvelopeSegment(start=0, change=1, tween=on_shape,
                                   duration=period_duration)
        off_flash = EnvelopeSegment(start=1, change=-1, tween=off_shape,
                                    duration=period_duration)
        # loop=-1: repeat indefinitely.
        self.envelope = Envelope(loop=-1)
        self.envelope.segments = [on_flash, off_flash]
    def update(self, show, targets=None):
        if self.trigger_state:
            targets = self.get_targets(targets)
            val = self.envelope.update(show.time_delta)
            for target in targets:
                # Scales (multiplies) the target's existing intensity.
                target.set_intensity(val * target.intensity)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
#
# python-json-patch - An implementation of the JSON Patch format
# https://github.com/stefankoegl/python-json-patch
#
# Copyright (c) 2011 Stefan Kögl <stefan@skoegl.net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" Apply JSON-Patches (RFC 6902) """
from __future__ import unicode_literals
import collections
import copy
import functools
import json
import sys
from jsonpointer import JsonPointer, JsonPointerException
# Sentinel indices into DiffBuilder's per-kind value stores
# (see DiffBuilder.index_storage / index_storage2 below).
_ST_ADD = 0
_ST_REMOVE = 1
try:
    from collections.abc import MutableMapping, MutableSequence
except ImportError:
    # Python 2 fallback: the ABCs live in ``collections`` and text
    # is ``unicode``.
    from collections import MutableMapping, MutableSequence
    str = unicode
# Will be parsed by setup.py to determine package metadata
__author__ = 'Stefan Kögl <stefan@skoegl.net>'
__version__ = '1.24'
__website__ = 'https://github.com/stefankoegl/python-json-patch'
__license__ = 'Modified BSD License'
# pylint: disable=E0611,W0404
if sys.version_info >= (3, 0):
    basestring = (bytes, str)  # pylint: disable=C0103,W0622
class JsonPatchException(Exception):
    """Base Json Patch exception"""
class InvalidJsonPatch(JsonPatchException):
    """ Raised if an invalid JSON Patch is created """
class JsonPatchConflict(JsonPatchException):
    """Raised if patch could not be applied due to conflict situation such as:
    - attempt to add object key when it already exists;
    - attempt to operate with nonexistence object key;
    - attempt to insert value to array at position beyond its size;
    - etc.
    """
# Also subclasses AssertionError so a failing "test" op can be caught
# like an assertion.
class JsonPatchTestFailed(JsonPatchException, AssertionError):
    """ A Test operation failed """
def multidict(ordered_pairs):
    """Convert duplicate keys values to lists.

    Used as ``object_pairs_hook`` for :func:`json.loads`: a key that
    appears once maps to its value; a repeated key maps to the list of
    all its values, in document order.
    """
    grouped = collections.defaultdict(list)
    for key, value in ordered_pairs:
        grouped[key].append(value)
    # Unwrap singleton lists so unique keys keep their plain value.
    return {key: vals[0] if len(vals) == 1 else vals
            for key, vals in grouped.items()}
# The "object_pairs_hook" parameter is used to handle duplicate keys when
# loading a JSON object.
_jsonloads = functools.partial(json.loads, object_pairs_hook=multidict)
def apply_patch(doc, patch, in_place=False):
    """Apply list of patches to specified json document.
    :param doc: Document object.
    :type doc: dict
    :param patch: JSON patch as list of dicts or raw JSON-encoded string.
    :type patch: list or str
    :param in_place: While :const:`True` patch will modify target document.
                     By default patch will be applied to document copy.
    :type in_place: bool
    :return: Patched document object.
    :rtype: dict
    >>> doc = {'foo': 'bar'}
    >>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}]
    >>> other = apply_patch(doc, patch)
    >>> doc is not other
    True
    >>> other == {'foo': 'bar', 'baz': 'qux'}
    True
    >>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}]
    >>> apply_patch(doc, patch, in_place=True) == {'foo': 'bar', 'baz': 'qux'}
    True
    >>> doc == other
    True
    """
    # A raw string is parsed first; anything else is assumed to already
    # be a list of operation dicts.
    if isinstance(patch, basestring):
        return JsonPatch.from_string(patch).apply(doc, in_place)
    return JsonPatch(patch).apply(doc, in_place)
def make_patch(src, dst):
    """Generates patch by comparing two document objects. Actually is
    a proxy to :meth:`JsonPatch.from_diff` method.
    :param src: Data source document object.
    :type src: dict
    :param dst: Data source document object.
    :type dst: dict
    :return: :class:`JsonPatch` instance that transforms `src` into `dst`.
    >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
    >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
    >>> patch = make_patch(src, dst)
    >>> new = patch.apply(src)
    >>> new == dst
    True
    """
    return JsonPatch.from_diff(src, dst)
class JsonPatch(object):
    """A JSON Patch is a list of Patch Operations.
    >>> patch = JsonPatch([
    ...     {'op': 'add', 'path': '/foo', 'value': 'bar'},
    ...     {'op': 'add', 'path': '/baz', 'value': [1, 2, 3]},
    ...     {'op': 'remove', 'path': '/baz/1'},
    ...     {'op': 'test', 'path': '/baz', 'value': [1, 3]},
    ...     {'op': 'replace', 'path': '/baz/0', 'value': 42},
    ...     {'op': 'remove', 'path': '/baz/1'},
    ... ])
    >>> doc = {}
    >>> result = patch.apply(doc)
    >>> expected = {'foo': 'bar', 'baz': [42]}
    >>> result == expected
    True
    JsonPatch object is iterable, so you can easily access each patch
    statement in a loop:
    >>> lpatch = list(patch)
    >>> expected = {'op': 'add', 'path': '/foo', 'value': 'bar'}
    >>> lpatch[0] == expected
    True
    >>> lpatch == patch.patch
    True
    Also JsonPatch could be converted directly to :class:`bool` if it contains
    any operation statements:
    >>> bool(patch)
    True
    >>> bool(JsonPatch([]))
    False
    This behavior is very handy with :func:`make_patch` to write more readable
    code:
    >>> old = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
    >>> new = {'baz': 'qux', 'numbers': [1, 4, 7]}
    >>> patch = make_patch(old, new)
    >>> if patch:
    ...     # document have changed, do something useful
    ...     patch.apply(old)    #doctest: +ELLIPSIS
    {...}
    """
    def __init__(self, patch):
        # Raw list of operation dicts, kept as-is so ``self.patch``
        # round-trips exactly what the caller supplied.
        self.patch = patch
        # Registry mapping RFC 6902 op names to implementation classes
        # (built per instance).
        self.operations = {
            'remove': RemoveOperation,
            'add': AddOperation,
            'replace': ReplaceOperation,
            'move': MoveOperation,
            'test': TestOperation,
            'copy': CopyOperation,
        }
    def __str__(self):
        """str(self) -> self.to_string()"""
        return self.to_string()
    def __bool__(self):
        return bool(self.patch)
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
    def __iter__(self):
        return iter(self.patch)
    def __hash__(self):
        return hash(tuple(self._ops))
    def __eq__(self, other):
        if not isinstance(other, JsonPatch):
            return False
        return self._ops == other._ops
    def __ne__(self, other):
        return not(self == other)
    @classmethod
    def from_string(cls, patch_str):
        """Creates JsonPatch instance from string source.
        :param patch_str: JSON patch as raw string.
        :type patch_str: str
        :return: :class:`JsonPatch` instance.
        """
        patch = _jsonloads(patch_str)
        return cls(patch)
    @classmethod
    def from_diff(cls, src, dst, optimization=True):
        """Creates JsonPatch instance based on comparison of two document
        objects. Json patch would be created for `src` argument against `dst`
        one.
        :param src: Data source document object.
        :type src: dict
        :param dst: Data source document object.
        :type dst: dict
        :param optimization: Accepted for backward compatibility but
            currently unused by this implementation.
        :return: :class:`JsonPatch` instance.
        >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
        >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
        >>> patch = JsonPatch.from_diff(src, dst)
        >>> new = patch.apply(src)
        >>> new == dst
        True
        """
        builder = DiffBuilder()
        # Start the comparison at the document root (path '', key None).
        builder._compare_values('', None, src, dst)
        ops = list(builder.execute())
        return cls(ops)
    def to_string(self):
        """Returns patch set as JSON string."""
        return json.dumps(self.patch)
    @property
    def _ops(self):
        # The raw dicts materialized as PatchOperation instances.
        return tuple(map(self._get_operation, self.patch))
    def apply(self, obj, in_place=False):
        """Applies the patch to a given object.
        :param obj: Document object.
        :type obj: dict
        :param in_place: Tweaks the way how patch would be applied - directly to
                         specified `obj` or to its copy.
        :type in_place: bool
        :return: Modified `obj`.
        """
        if not in_place:
            obj = copy.deepcopy(obj)
        for operation in self._ops:
            obj = operation.apply(obj)
        return obj
    def _get_operation(self, operation):
        # Validate one raw operation dict and wrap it in its class.
        if 'op' not in operation:
            raise InvalidJsonPatch("Operation does not contain 'op' member")
        op = operation['op']
        if not isinstance(op, basestring):
            raise InvalidJsonPatch("Operation must be a string")
        if op not in self.operations:
            raise InvalidJsonPatch("Unknown operation {0!r}".format(op))
        cls = self.operations[op]
        return cls(operation)
class PatchOperation(object):
    """A single operation inside a JSON Patch."""
    def __init__(self, operation):
        """Validate and store one raw operation dict.

        :param operation: dict with at least a 'path' member, given as
            a JSON pointer string or a :class:`JsonPointer`.
        :raises InvalidJsonPatch: if 'path' is missing or malformed.
        """
        # Idiom fix: use the ``in`` operator instead of calling the
        # dunder ``__contains__`` directly.
        if 'path' not in operation:
            raise InvalidJsonPatch("Operation must have a 'path' member")
        if isinstance(operation['path'], JsonPointer):
            self.location = operation['path'].path
            self.pointer = operation['path']
        else:
            self.location = operation['path']
            try:
                self.pointer = JsonPointer(self.location)
            except TypeError:
                raise InvalidJsonPatch("Invalid 'path'")
        self.operation = operation
    def apply(self, obj):
        """Abstract method that applies a patch operation to the specified object."""
        raise NotImplementedError('should implement the patch operation.')
    def __hash__(self):
        return hash(frozenset(self.operation.items()))
    def __eq__(self, other):
        if not isinstance(other, PatchOperation):
            return False
        return self.operation == other.operation
    def __ne__(self, other):
        return not(self == other)
    @property
    def path(self):
        # Pointer path without its final token (the parent container).
        return '/'.join(self.pointer.parts[:-1])
    @property
    def key(self):
        # Final pointer token, as int when it addresses an array index.
        try:
            return int(self.pointer.parts[-1])
        except ValueError:
            return self.pointer.parts[-1]
    @key.setter
    def key(self, value):
        # Keep pointer, location string and raw dict in sync.
        self.pointer.parts[-1] = str(value)
        self.location = self.pointer.path
        self.operation['path'] = self.location
class RemoveOperation(PatchOperation):
    """Removes an object property or an array element."""
    def apply(self, obj):
        subobj, part = self.pointer.to_last(obj)
        try:
            del subobj[part]
        except (KeyError, IndexError) as ex:
            msg = "can't remove a non-existent object '{0}'".format(part)
            raise JsonPatchConflict(msg)
        return obj
    def _on_undo_remove(self, path, key):
        # Called by DiffBuilder when a pending remove at *path*/*key* is
        # undone (paired into a move): shift our own array index and
        # return the possibly-adjusted key for the caller.
        if self.path == path:
            if self.key >= key:
                self.key += 1
            else:
                key -= 1
        return key
    def _on_undo_add(self, path, key):
        # Same as above, for an undone add at *path*/*key*.
        if self.path == path:
            if self.key > key:
                self.key -= 1
            else:
                key -= 1
        return key
class AddOperation(PatchOperation):
    """Adds an object property or an array element."""
    def apply(self, obj):
        try:
            value = self.operation["value"]
        except KeyError as ex:
            raise InvalidJsonPatch(
                "The operation does not contain a 'value' member")
        subobj, part = self.pointer.to_last(obj)
        if isinstance(subobj, MutableSequence):
            if part == '-':
                # RFC 6901: '-' addresses the position past the end.
                subobj.append(value)  # pylint: disable=E1103
            # NOTE(review): assumes JsonPointer.to_last() has coerced
            # array-index tokens to int; a str part would make this
            # comparison raise TypeError on Python 3 -- confirm.
            elif part > len(subobj) or part < 0:
                raise JsonPatchConflict("can't insert outside of list")
            else:
                subobj.insert(part, value)  # pylint: disable=E1103
        elif isinstance(subobj, MutableMapping):
            if part is None:
                obj = value  # we're replacing the root
            else:
                subobj[part] = value
        else:
            if part is None:
                raise TypeError("invalid document type {0}".format(type(subobj)))
            else:
                raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part))
        return obj
    def _on_undo_remove(self, path, key):
        # Index-fixup hook used by DiffBuilder when pairing operations.
        if self.path == path:
            if self.key > key:
                self.key += 1
            else:
                key += 1
        return key
    def _on_undo_add(self, path, key):
        if self.path == path:
            if self.key > key:
                self.key -= 1
            else:
                key += 1
        return key
class ReplaceOperation(PatchOperation):
    """Replaces an object property or an array element by a new value."""
    def apply(self, obj):
        try:
            value = self.operation["value"]
        except KeyError as ex:
            raise InvalidJsonPatch(
                "The operation does not contain a 'value' member")
        subobj, part = self.pointer.to_last(obj)
        if part is None:
            # Replacing the root: the new value becomes the document.
            return value
        if part == "-":
            raise InvalidJsonPatch("'path' with '-' can't be applied to 'replace' operation")
        if isinstance(subobj, MutableSequence):
            if part >= len(subobj) or part < 0:
                raise JsonPatchConflict("can't replace outside of list")
        elif isinstance(subobj, MutableMapping):
            if part not in subobj:
                msg = "can't replace a non-existent object '{0}'".format(part)
                raise JsonPatchConflict(msg)
        else:
            # NOTE: ``part is None`` was already handled above, so this
            # inner branch is effectively unreachable.
            if part is None:
                raise TypeError("invalid document type {0}".format(type(subobj)))
            else:
                raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part))
        subobj[part] = value
        return obj
    def _on_undo_remove(self, path, key):
        # Replace never shifts sibling array indices.
        return key
    def _on_undo_add(self, path, key):
        return key
class MoveOperation(PatchOperation):
    """Moves an object property or an array element to a new location."""
    def apply(self, obj):
        """Apply the move as a remove-from-'from' followed by an
        add-at-'path'.

        :raises InvalidJsonPatch: if the 'from' member is missing.
        :raises JsonPatchConflict: if the source does not exist or the
            target lies inside the moved value itself.
        """
        try:
            if isinstance(self.operation['from'], JsonPointer):
                from_ptr = self.operation['from']
            else:
                from_ptr = JsonPointer(self.operation['from'])
        except KeyError:
            raise InvalidJsonPatch(
                "The operation does not contain a 'from' member")
        subobj, part = from_ptr.to_last(obj)
        try:
            value = subobj[part]
        except (KeyError, IndexError) as ex:
            raise JsonPatchConflict(str(ex))
        # If source and target are equal, this is a no-op
        if self.pointer == from_ptr:
            return obj
        if isinstance(subobj, MutableMapping) and \
                self.pointer.contains(from_ptr):
            raise JsonPatchConflict('Cannot move values into their own children')
        obj = RemoveOperation({
            'op': 'remove',
            'path': self.operation['from']
        }).apply(obj)
        obj = AddOperation({
            'op': 'add',
            'path': self.location,
            'value': value
        }).apply(obj)
        return obj
    @property
    def from_path(self):
        # Parent path of the 'from' pointer.
        from_ptr = JsonPointer(self.operation['from'])
        return '/'.join(from_ptr.parts[:-1])
    @property
    def from_key(self):
        # Final 'from' token, as int for array indices.  BUGFIX: mirror
        # the base-class ``key`` property -- int() raises ValueError on
        # non-numeric tokens, but only TypeError was caught here, so a
        # move from an object member (e.g. '/a/foo') crashed.
        from_ptr = JsonPointer(self.operation['from'])
        try:
            return int(from_ptr.parts[-1])
        except (TypeError, ValueError):
            return from_ptr.parts[-1]
    @from_key.setter
    def from_key(self, value):
        # Keep the pointer and the raw dict in sync.
        from_ptr = JsonPointer(self.operation['from'])
        from_ptr.parts[-1] = str(value)
        self.operation['from'] = from_ptr.path
    def _on_undo_remove(self, path, key):
        # Index-fixup hook used by DiffBuilder: adjust both endpoints.
        if self.from_path == path:
            if self.from_key >= key:
                self.from_key += 1
            else:
                key -= 1
        if self.path == path:
            if self.key > key:
                self.key += 1
            else:
                key += 1
        return key
    def _on_undo_add(self, path, key):
        if self.from_path == path:
            if self.from_key > key:
                self.from_key -= 1
            else:
                key -= 1
        if self.path == path:
            if self.key > key:
                self.key -= 1
            else:
                key += 1
        return key
class TestOperation(PatchOperation):
    """Test value by specified location."""
    def apply(self, obj):
        try:
            subobj, part = self.pointer.to_last(obj)
            if part is None:
                # Pointer addresses the document root.
                val = subobj
            else:
                val = self.pointer.walk(subobj, part)
        except JsonPointerException as ex:
            raise JsonPatchTestFailed(str(ex))
        try:
            value = self.operation['value']
        except KeyError as ex:
            raise InvalidJsonPatch(
                "The operation does not contain a 'value' member")
        if val != value:
            msg = '{0} ({1}) is not equal to tested value {2} ({3})'
            raise JsonPatchTestFailed(msg.format(val, type(val),
                                                 value, type(value)))
        return obj
class CopyOperation(PatchOperation):
    """ Copies an object property or an array element to a new location """
    def apply(self, obj):
        try:
            from_ptr = JsonPointer(self.operation['from'])
        except KeyError as ex:
            raise InvalidJsonPatch(
                "The operation does not contain a 'from' member")
        subobj, part = from_ptr.to_last(obj)
        try:
            # Deep copy so later mutations of the source don't alias
            # the copied value.
            value = copy.deepcopy(subobj[part])
        except (KeyError, IndexError) as ex:
            raise JsonPatchConflict(str(ex))
        # Reuse AddOperation to insert the copy at the target location.
        obj = AddOperation({
            'op': 'add',
            'path': self.location,
            'value': value
        }).apply(obj)
        return obj
class DiffBuilder(object):
    """Builds the operation list for :meth:`JsonPatch.from_diff` by
    comparing two documents.
    Pending operations live in a circular doubly-linked list whose
    nodes are ``[prev, next, op]``; this lets matching add/remove
    pairs be rewritten as ``move``/``replace`` while the diff is
    being discovered.
    """
    def __init__(self):
        # value -> list of linked-list nodes, one dict per kind
        # (_ST_ADD / _ST_REMOVE).  Unhashable values fall back to the
        # (value, node) pair lists in index_storage2.
        self.index_storage = [{}, {}]
        self.index_storage2 = [[], []]
        self.__root = root = []
        # Sentinel node: prev and next initially point at itself.
        root[:] = [root, root, None]
    def store_index(self, value, index, st):
        # Remember node *index* under *value* so a later opposite
        # operation on an equal value can be paired into a move.
        try:
            storage = self.index_storage[st]
            stored = storage.get(value)
            if stored is None:
                storage[value] = [index]
            else:
                storage[value].append(index)
        except TypeError:
            # Unhashable value: linear-scan fallback storage.
            self.index_storage2[st].append((value, index))
    def take_index(self, value, st):
        # Pop and return the most recently stored node for *value*,
        # or None if there is none.
        try:
            stored = self.index_storage[st].get(value)
            if stored:
                return stored.pop()
        except TypeError:
            storage = self.index_storage2[st]
            for i in range(len(storage)-1, -1, -1):
                if storage[i][0] == value:
                    return storage.pop(i)[1]
    def insert(self, op):
        # Append *op* at the tail of the circular list; return its node.
        root = self.__root
        last = root[0]
        last[1] = root[0] = [last, root, op]
        return root[0]
    def remove(self, index):
        # Unlink the node and clear it in place.
        link_prev, link_next, _ = index
        link_prev[1] = link_next
        link_next[0] = link_prev
        index[:] = []
    def iter_from(self, start):
        # Yield the ops stored after node *start*, in list order.
        root = self.__root
        curr = start[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __iter__(self):
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def execute(self):
        # Emit the raw operation dicts, fusing an adjacent remove+add
        # at the same location into a single replace.
        root = self.__root
        curr = root[1]
        while curr is not root:
            if curr[1] is not root:
                op_first, op_second = curr[2], curr[1][2]
                if op_first.location == op_second.location and \
                        type(op_first) == RemoveOperation and \
                        type(op_second) == AddOperation:
                    yield ReplaceOperation({
                        'op': 'replace',
                        'path': op_second.location,
                        'value': op_second.operation['value'],
                    }).operation
                    curr = curr[1][1]
                    continue
            yield curr[2].operation
            curr = curr[1]
    def _item_added(self, path, key, item):
        # If an equal value was removed earlier, rewrite the pair as a
        # move (unless it lands back at the same location).
        index = self.take_index(item, _ST_REMOVE)
        if index is not None:
            op = index[2]
            # Replay index fixups on the ops queued after the undone one.
            if type(op.key) == int and type(key) == int:
                for v in self.iter_from(index):
                    op.key = v._on_undo_remove(op.path, op.key)
            self.remove(index)
            if op.location != _path_join(path, key):
                new_op = MoveOperation({
                    'op': 'move',
                    'from': op.location,
                    'path': _path_join(path, key),
                })
                self.insert(new_op)
        else:
            new_op = AddOperation({
                'op': 'add',
                'path': _path_join(path, key),
                'value': item,
            })
            new_index = self.insert(new_op)
            self.store_index(item, new_index, _ST_ADD)
    def _item_removed(self, path, key, item):
        new_op = RemoveOperation({
            'op': 'remove',
            'path': _path_join(path, key),
        })
        index = self.take_index(item, _ST_ADD)
        new_index = self.insert(new_op)
        if index is not None:
            # The same value was added earlier -> fuse into a move.
            op = index[2]
            if type(op.key) == int:
                for v in self.iter_from(index):
                    op.key = v._on_undo_add(op.path, op.key)
            self.remove(index)
            if new_op.location != op.location:
                new_op = MoveOperation({
                    'op': 'move',
                    'from': new_op.location,
                    'path': op.location,
                })
                new_index[2] = new_op
            else:
                # Add then remove of the same value at the same spot:
                # the two cancel out entirely.
                self.remove(new_index)
        else:
            self.store_index(item, new_index, _ST_REMOVE)
    def _item_replaced(self, path, key, item):
        self.insert(ReplaceOperation({
            'op': 'replace',
            'path': _path_join(path, key),
            'value': item,
        }))
    def _compare_dicts(self, path, src, dst):
        src_keys = set(src.keys())
        dst_keys = set(dst.keys())
        added_keys = dst_keys - src_keys
        removed_keys = src_keys - dst_keys
        for key in removed_keys:
            self._item_removed(path, str(key), src[key])
        for key in added_keys:
            self._item_added(path, str(key), dst[key])
        # Keys present in both documents: recurse on their values.
        for key in src_keys & dst_keys:
            self._compare_values(path, key, src[key], dst[key])
    def _compare_lists(self, path, src, dst):
        len_src, len_dst = len(src), len(dst)
        max_len = max(len_src, len_dst)
        min_len = min(len_src, len_dst)
        for key in range(max_len):
            if key < min_len:
                old, new = src[key], dst[key]
                if old == new:
                    continue
                elif isinstance(old, MutableMapping) and \
                        isinstance(new, MutableMapping):
                    self._compare_dicts(_path_join(path, key), old, new)
                elif isinstance(old, MutableSequence) and \
                        isinstance(new, MutableSequence):
                    self._compare_lists(_path_join(path, key), old, new)
                else:
                    # Scalar or mixed-type change: remove+add, which may
                    # be fused into a move/replace elsewhere.
                    self._item_removed(path, key, old)
                    self._item_added(path, key, new)
            elif len_src > len_dst:
                # Source is longer: trailing items are removed (always
                # at index len_dst, since earlier removals shift left).
                self._item_removed(path, len_dst, src[key])
            else:
                # Destination is longer: trailing items are added.
                self._item_added(path, key, dst[key])
    def _compare_values(self, path, key, src, dst):
        if isinstance(src, MutableMapping) and \
                isinstance(dst, MutableMapping):
            self._compare_dicts(_path_join(path, key), src, dst)
        elif isinstance(src, MutableSequence) and \
                isinstance(dst, MutableSequence):
            self._compare_lists(_path_join(path, key), src, dst)
        # To ensure we catch changes to JSON, we can't rely on a simple
        # src == dst, because it would not recognize the difference between
        # 1 and True, among other things. Using json.dumps is the most
        # fool-proof way to ensure we catch type changes that matter to JSON
        # and ignore those that don't. The performance of this could be
        # improved by doing more direct type checks, but we'd need to be
        # careful to accept type changes that don't matter when JSONified.
        elif json.dumps(src) == json.dumps(dst):
            return
        else:
            self._item_replaced(path, key, dst)
def _path_join(path, key):
if key is None:
return path
return path + '/' + str(key).replace('~', '~0').replace('/', '~1')
| nilq/baby-python | python |
import argparse
import os
class Parameters():
    """Container for all training/evaluation hyper-parameters.

    Every setting is exposed as a plain instance attribute so existing
    callers can keep reading e.g. ``params.ways`` or ``params.shots``.
    """
    def __init__(self):
        # Training settings, assigned in one pass for easy scanning.
        settings = dict(
            LR=0.001,                    # base learning rate
            clsLR=0.001,                 # classifier learning rate
            batch_size=30,
            nthreads=8,                  # data-loader workers
            tensorname='IDeMNet',
            ways=5,                      # N-way classification
            shots=5,                     # K-shot support examples
            test_num=15,
            augnum=5,                    # augmentations per sample
            data='miniImageEmbedding',
            network='None',
            gallery_img=30,
            stepSize=10,
            patch_size=3,
            epoch=600,
            trainways=5,
            fixScale=0,
            GNet='none',
            train_from_scratch=True,
            fix_deform=True,
            fix_emb=True,
            chooseNum=15,
        )
        for name, value in settings.items():
            setattr(self, name, value)
from posixpath import join
import threading
from civis.response import PaginatedResponse, convert_response_data_type
def tostr_urljoin(*x):
    """POSIX-join the given parts after converting each one to str."""
    parts = [str(part) for part in x]
    return join(*parts)
class CivisJobFailure(Exception):
    """Raised when a platform job finishes unsuccessfully.

    Keeps both the human-readable message and (optionally) the raw
    response so callers can inspect the failure.
    """
    def __init__(self, err_msg, response=None):
        self.error_message = err_msg
        self.response = response

    def __str__(self):
        # Present just the message, not the stored response.
        return self.error_message
class CivisAPIError(Exception):
    """Error reported by the API for a failed HTTP response."""
    def __init__(self, response):
        if response.content:
            # The API returned a JSON error body; surface its description.
            self.error_message = response.json()["errorDescription"]
        else:
            # No body (e.g. a 502 from a proxy): fall back to the reason.
            self.error_message = response.reason
        self.status_code = response.status_code
        self._response = response

    def __str__(self):
        if self.status_code:
            return "({}) {}".format(self.status_code, self.error_message)
        return self.error_message
class EmptyResultError(Exception):
    """Raised when an API call that should return data returns nothing."""
    pass
class CivisAPIKeyError(Exception):
    """Raised on HTTP 401 responses, i.e. a missing or invalid API key."""
    pass
class Endpoint:
    """Base class for Civis API endpoint wrappers.

    Holds a shared requests-style session and translates HTTP responses
    into return values or the exception types defined above.
    """

    _base_url = "https://api.civisanalytics.com/"
    # Class-level lock: serializes requests issued by all Endpoint
    # instances across threads.
    _lock = threading.Lock()

    def __init__(self, session, return_type='civis'):
        # `session` is a requests-compatible session; `return_type`
        # controls how convert_response_data_type shapes the result.
        self._session = session
        self._return_type = return_type

    def _build_path(self, path):
        """Return the absolute URL for *path* (base URL if path is falsy)."""
        if not path:
            return self._base_url
        return tostr_urljoin(self._base_url, path.strip("/"))

    def _make_request(self, method, path=None, params=None, data=None,
                      **kwargs):
        """Issue one HTTP request and map error statuses to exceptions.

        Returns None for 204/205 (no content); raises CivisAPIKeyError on
        401 and CivisAPIError on any other non-OK status.
        """
        url = self._build_path(path)
        with self._lock:
            response = self._session.request(method, url, json=data,
                                             params=params, **kwargs)
        # NOTE(review): in the mangled source it is ambiguous whether the
        # status checks below run inside or outside the lock; placed
        # outside so the lock guards only the request itself — confirm.
        if response.status_code in [204, 205]:
            return

        if response.status_code == 401:
            auth_error = response.headers["www-authenticate"]
            raise CivisAPIKeyError(auth_error) from CivisAPIError(response)

        if not response.ok:
            raise CivisAPIError(response)

        return response

    def _call_api(self, method, path=None, params=None, data=None, **kwargs):
        """Make a request; return a PaginatedResponse when iterator=True."""
        iterator = kwargs.pop('iterator', False)

        if iterator:
            return PaginatedResponse(path, params, self)
        else:
            resp = self._make_request(method, path, params, data, **kwargs)
            resp = convert_response_data_type(resp,
                                              return_type=self._return_type)
            return resp
| nilq/baby-python | python |
class InstantTest:
    """Empty placeholder class; defines no attributes or behavior."""
    pass
| nilq/baby-python | python |
import os
import numpy as np
from PIL import Image
import cv2
import pickle
# Build an LBPH face-recognizer training set from images/<person-name>/*.jpg|png.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")

face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()

current_id = 0
label_ids = {}   # person label -> numeric id
y_labels = []    # numeric id per training ROI
x_train = []     # grayscale face ROIs

for root, dirs, files in os.walk(image_dir):
    for file in files:
        if file.endswith("png") or file.endswith("jpg") or file.endswith("JPG"):
            path = os.path.join(root, file)
            # Directory name is the person's label.
            label = os.path.basename(root).replace(" ", "-").lower()
            if label not in label_ids:
                label_ids[label] = current_id
                current_id += 1
            id_ = label_ids[label]

            pil_image = Image.open(path).convert("L")  # grayscale
            size = (550, 550)
            final_image = pil_image.resize(size, Image.ANTIALIAS)
            # BUG FIX: train on the resized image. The original resized
            # into final_image but then converted the unresized pil_image.
            image_array = np.array(final_image, "uint8")
            faces = face_cascade.detectMultiScale(image_array)

            for (x, y, w, h) in faces:
                # BUG FIX: the column slice must span the width (x+w);
                # the original used x+h, distorting non-square detections.
                roi = image_array[y: y + h, x: x + w]
                x_train.append(roi)
                y_labels.append(id_)

# Persist the label mapping so the prediction script can invert ids.
with open("labels.pickle", 'wb') as f:
    pickle.dump(label_ids, f)

recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainer.yml")
from rest_framework import status
from .base_test import BaseTestCase
class TestProfile(BaseTestCase):
    """Test the User profile GET responses"""
    all_profiles_url = 'http://127.0.0.1:8000/api/profiles/'
    my_profile_url = 'http://127.0.0.1:8000/api/profiles/jane'

    def test_get_all_profiles_without_account_activation(self):
        """An authenticated but inactive account may not list profiles."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.get(self.all_profiles_url)
        self.assertNotEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn('Your account is inactive', str(response.data))

    def test_get_all_profiles_without_login2(self):
        """An anonymous request to the profile list is forbidden."""
        # BUG FIX: this referenced self.profile_url, which is never
        # defined on this class; the list endpoint is all_profiles_url.
        response = self.client.get(self.all_profiles_url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_cannot_update_my_profiles_without_login(self):
        """An inactive account may not update its own profile."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.put(self.my_profile_url)
        self.assertNotEqual(response.status_code, status.HTTP_200_OK)

    def test_cannot_update_my_profiles_without_login2(self):
        """An anonymous profile update is forbidden."""
        response = self.client.put(self.my_profile_url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
import lambdser
import multiprocessing as mp
def make_proxy(para, *funcs):
    """Serialize each callable with lambdser so it can cross a process boundary.

    Returns a (para, serialized_funcs) tuple suitable as Process args.
    """
    return para, [lambdser.dumps(f) for f in funcs]
def processor(*ser):
    """Worker entry point: rebuild the serialized callables and apply each.

    *ser* is the (para, serialized_funcs) pair produced by make_proxy,
    received positionally because Process(args=...) unpacks it.
    """
    # unzip the proxy and to the work
    para, funcs = ser
    funcs = [lambdser.loads(ser) for ser in funcs]
    res = None
    for f in funcs:
        res = f(para)
        # NOTE(review): indentation was lost in this source; the print may
        # originally have been after the loop (final result only) — confirm.
        print(res)
    return res
def do_stuff():
    """Spawn a child process that runs a lambdser-serialized closure."""
    two = "2"
    # The lambda closes over `two`; lambdser.dumps must capture the closure
    # for the child process to evaluate "4" + "2".
    ser = make_proxy("4", lambda x: x + two)
    mp.Process(target=processor, args=ser).start()


if __name__ == "__main__":
    do_stuff()
| nilq/baby-python | python |
from numbers import Number
import torch
from torch.distributions import constraints, Gamma, MultivariateNormal
from torch.distributions.multivariate_normal import _batch_mv, _batch_mahalanobis
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, _standard_normal
from scipy import stats
import math
__all__ = ('GeneralizedNormal', 'DoubleGamma', 'MultivariateT')
class GeneralizedNormal(Distribution):
    r"""
    Creates a Generalized Normal distribution parameterized by :attr:`loc`, :attr:`scale`, and :attr:`beta`.

    Example::

        >>> m = GeneralizedNormal(torch.tensor([0.0]), torch.tensor([1.0]), torch.tensor(0.5))
        >>> m.sample()  # GeneralizedNormal distributed with loc=0, scale=1, beta=0.5
        tensor([ 0.1337])

    Args:
        loc (float or Tensor): mean of the distribution
        scale (float or Tensor): scale of the distribution
        beta (float or Tensor): shape parameter of the distribution
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive, 'beta': constraints.positive}
    support = constraints.real
    has_rsample = False

    @property
    def mean(self):
        return self.loc

    @property
    def variance(self):
        # Var = scale^2 * Gamma(3/beta) / Gamma(1/beta), computed in log space.
        return self.scale.pow(2) * (torch.lgamma(3/self.beta) - torch.lgamma(1/self.beta)).exp()

    @property
    def stddev(self):
        # BUG FIX: `variance` is a property, so the original
        # `self.variance()**0.5` raised TypeError (Tensor is not callable).
        return self.variance**0.5

    def __init__(self, loc, scale, beta, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        (self.beta,) = broadcast_all(beta)
        # scipy handles sampling and the CDF; torch handles log_prob.
        self.scipy_dist = stats.gennorm(loc=self.loc.cpu().detach().numpy(),
                                        scale=self.scale.cpu().detach().numpy(),
                                        beta=self.beta.cpu().detach().numpy())
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(GeneralizedNormal, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(GeneralizedNormal, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        # BUG FIX: the original dropped `beta` and `scipy_dist`, so
        # log_prob/variance/sample crashed on expanded instances.
        new.beta = self.beta.expand(batch_shape)
        new.scipy_dist = stats.gennorm(loc=new.loc.cpu().detach().numpy(),
                                       scale=new.scale.cpu().detach().numpy(),
                                       beta=new.beta.cpu().detach().numpy())
        super(GeneralizedNormal, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def sample(self, sample_shape=torch.Size()):
        sample_shape = sample_shape + self.loc.size()
        return torch.tensor(self.scipy_dist.rvs(
            list(sample_shape),
            random_state=torch.randint(2**32, ()).item()),  # Make deterministic if torch is seeded
            dtype=self.loc.dtype, device=self.loc.device)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log pdf = -log(2*scale) - lgamma(1/beta) + log(beta) - (|x-loc|/scale)^beta
        return (-torch.log(2 * self.scale) - torch.lgamma(1/self.beta) + torch.log(self.beta)
                - torch.pow((torch.abs(value - self.loc) / self.scale), self.beta))

    def cdf(self, value):
        if isinstance(value, torch.Tensor):
            value = value.numpy()
        return torch.tensor(self.scipy_dist.cdf(value),
                            dtype=self.loc.dtype, device=self.loc.device)

    def icdf(self, value):
        raise NotImplementedError

    def entropy(self):
        return (1/self.beta) - torch.log(self.beta) + torch.log(2*self.scale) + torch.lgamma(1/self.beta)
class DoubleGamma(Gamma):
    """Gamma distribution reflected symmetrically about zero.

    Samples are Gamma draws with a uniformly random sign; the density is
    half the Gamma density of |x|.
    """
    # Symmetric about zero by construction.
    mean = 0.

    @property
    def variance(self):
        # With mean 0, Var = E[X^2] of the underlying Gamma:
        # k(k+1)/rate^2 for concentration k.
        return self.concentration * (1 + self.concentration) / self.rate.pow(2)

    def rsample(self, sample_shape=torch.Size()):
        x = super().rsample(sample_shape)
        # Random sign in {-1, +1}: randint in {0,1}, then *2 - 1 (in place).
        sign = torch.randint(0, 2, x.size(), device=x.device, dtype=x.dtype).mul_(2).sub_(1)
        return x*sign

    def log_prob(self, value):
        # Each |value| corresponds to two signed outcomes, so halve the density.
        return super().log_prob(value.abs()) - math.log(2)

    # Not implemented for the reflected distribution.
    entropy = NotImplemented
    _log_normalizer = NotImplemented
class MultivariateT(MultivariateNormal):
    """
    Multivariate Student-t distribution, using hierarchical Gamma sampling.
    (see https://arxiv.org/abs/1402.4306)

    We only allow degrees of freedom > 2 for now,
    because otherwise the covariance is undefined.

    Uses the parameterization from Shah et al. 2014, which makes it covariance
    equal to the covariance matrix.
    """
    arg_constraints = {'df': constraints.positive,
                       'loc': constraints.real_vector,
                       'covariance_matrix': constraints.positive_definite,
                       'precision_matrix': constraints.positive_definite,
                       'scale_tril': constraints.lower_cholesky}
    support = constraints.real
    has_rsample = True
    expand = NotImplemented

    def __init__(self,
                 event_shape: torch.Size,
                 df=3.,
                 loc=0.,
                 covariance_matrix=None,
                 precision_matrix=None,
                 scale_tril=None,
                 validate_args=None):
        # Let MultivariateNormal set up loc and the scale factorization.
        super().__init__(loc=loc,
                         covariance_matrix=covariance_matrix,
                         precision_matrix=precision_matrix,
                         scale_tril=scale_tril,
                         validate_args=validate_args)
        # self._event_shape is inferred from the mean vector and covariance matrix.
        old_event_shape = self._event_shape
        if not len(event_shape) >= len(old_event_shape):
            raise NotImplementedError("non-elliptical MVT not in this class")
        assert len(event_shape) >= 1
        # The requested event shape must end with the MVN-inferred one.
        assert event_shape[-len(old_event_shape):] == old_event_shape

        # Cut dimensions from the end of `batch_shape` so the `total_shape` is
        # the same
        total_shape = list(self._batch_shape) + list(self._event_shape)
        self._batch_shape = torch.Size(total_shape[:-len(event_shape)])
        self._event_shape = torch.Size(event_shape)

        # One df (and hence one Gamma mixing variable) per batch element.
        self.df, _ = broadcast_all(df, torch.ones(self._batch_shape))
        self.gamma = Gamma(concentration=self.df/2., rate=1/2)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        # Hierarchical construction: scale a Gaussian draw by
        # sqrt((df-2)/chi2) so the covariance equals covariance_matrix.
        r_inv = self.gamma.rsample(sample_shape=sample_shape)
        scale = ((self.df-2) / r_inv).sqrt()
        # We want 1 gamma for every `event` only. The size of self.df and this
        # `.view` provide that
        scale = scale.view(scale.size() + torch.Size([1] * len(self._event_shape)))
        return self.loc + scale * _batch_mv(self._unbroadcasted_scale_tril, eps)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        diff = value - self.loc
        # Squared Mahalanobis distance under the Cholesky factor.
        M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
        n_dim = len(self._event_shape)
        # p = total event dimensionality.
        p = diff.size()[-n_dim:].numel()
        if n_dim > 1:
            M = M.sum(tuple(range(-n_dim+1, 0)))

        # log|Sigma|^(1/2) from the Cholesky diagonal, rescaled when the
        # event shape extends beyond the factorized dimensions.
        log_diag = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log()
        if n_dim > log_diag.dim():
            half_log_det = log_diag.sum() * (p / log_diag.numel())
        else:
            half_log_det = log_diag.sum(tuple(range(-n_dim, 0))) * (
                p / log_diag.size()[-n_dim:].numel())

        # Shah et al. parameterization: lambda = df - 2.
        lambda_ = self.df - 2.
        lp = torch.lgamma((p+self.df)/2.) \
            - ((p/2.) * torch.log(math.pi * lambda_)) \
            - torch.lgamma(self.df / 2.) \
            - half_log_det \
            - ((self.df+p)/2.) * torch.log(1 + M/lambda_)
        return lp
| nilq/baby-python | python |
import json
from pathlib import Path
from typing import Tuple
from segmantic.seg import dataset
def dataset_mockup(root_path: Path, size: int = 3) -> Tuple[Path, Path]:
    """Create `size` paired empty image/label files under *root_path*.

    Returns the (image_dir, labels_dir) pair of newly created directories.
    """
    image_dir = root_path / "image"
    labels_dir = root_path / "label"
    for directory in (image_dir, labels_dir):
        directory.mkdir()
    for idx in range(size):
        for directory in (image_dir, labels_dir):
            (directory / f"img-{idx}.nii.gz").touch()
    return image_dir, labels_dir
def test_PairedDataSet(tmp_path: Path):
    """PairedDataSet splits 3 pairs 2/1 at valid_split=0.2 and 3/0 at 0."""
    image_dir, labels_dir = dataset_mockup(root_path=tmp_path, size=3)

    # A 0.2 validation split of 3 files yields 1 validation file.
    ds = dataset.PairedDataSet(
        image_dir=image_dir, labels_dir=labels_dir, valid_split=0.2
    )
    assert len(ds.training_files()) == 2
    assert len(ds.validation_files()) == 1
    ds.check_matching_filenames()

    # valid_split=0 keeps everything in the training set.
    ds = dataset.PairedDataSet(
        image_dir=image_dir, labels_dir=labels_dir, valid_split=0
    )
    assert len(ds.training_files()) == 3
    assert len(ds.validation_files()) == 0
    ds.check_matching_filenames()
def test_load_from_json(tmp_path: Path):
    """A dataset JSON with glob patterns loads, dumps, and round-trips."""
    image_dir, labels_dir = dataset_mockup(root_path=tmp_path, size=3)

    # Minimal dataset descriptor: one training entry with glob patterns
    # relative to the JSON file's directory.
    dataset_file = tmp_path / "dataset.json"
    dataset_file.write_text(
        json.dumps(
            {
                "training": [
                    {
                        "image": f"{image_dir.name}/*.nii.gz",
                        "label": f"{labels_dir.name}/*.nii.gz",
                    }
                ]
            }
        )
    )

    ds = dataset.PairedDataSet.load_from_json(dataset_file, valid_split=0.2)
    assert len(ds.training_files()) == 2
    assert len(ds.validation_files()) == 1
    ds.check_matching_filenames()

    # now dump and try to re-load
    dataset_file2 = tmp_path / "dataset_dump.json"
    dataset_file2.write_text(ds.dump_dataset())

    ds = dataset.PairedDataSet.load_from_json(dataset_file2, valid_split=0.2)
    assert len(ds.training_files()) == 2
    assert len(ds.validation_files()) == 1
    ds.check_matching_filenames()
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Created on Mon Apr 10 17:41:24 2017
# DEPENDENCIES:
import numpy as np
import random
# FUNCTION THAT CREATES GAUSSIAN MULTIVARIATE 2D DATASETS, D = features, N = observations
def create_multivariate_Gauss_2D_dataset(mean, sigma, N_observations):
np.random.seed(444445) # Seeding for consistency and reproducibility seed>100000 prefereably,
MEAN_2D = np.array([mean,mean])
I_2D = np.matrix(np.eye(2)) # Creating m1,aka MEAN1 as an np.array
COV_MATRIX_2D = sigma*I_2D # Could use np.array as well instead of eye, np.array([[1,0,0],[0,1,0],[0,0,1]])
SAMPLE_SET = np.random.multivariate_normal(MEAN_2D,COV_MATRIX_2D , N_observations).T
#print("MEAN_2D:\n", MEAN_2D); print("\nCOV_MATRIX_2D:\n", COV_MATRIX_2D); print("\nI_2D:\n", I_2D) ; print("\nSAMPLE_SET.shape:", SAMPLE_SET.shape)
return(SAMPLE_SET)
#%%
# Calling create_multivariate_Gauss_2D_dataset function with desired parameters:
# Calling create_multivariate_Gauss_2D_dataset function with desired parameters:
SAMPLE_SET_220 = (create_multivariate_Gauss_2D_dataset(1,0.5,220))
SAMPLE_SET_280 = (create_multivariate_Gauss_2D_dataset(-1,0.75,280))
# Merge into one unified unlabeled dataset (2 x 500, features x observations):
DATASET = np.concatenate((SAMPLE_SET_220, SAMPLE_SET_280), axis=1)
#%%
# CODE BLOCK FOR PLOTTING UNIFIED DATASET, NO LABELS:
from matplotlib import pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#from mpl_toolkits.mplot3d import proj3d
from matplotlib import style
style.use('bmh')
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
#plt.rcParams['legend.fontsize'] = 7
# Scatter each subset with its own color so the two Gaussians are visible.
ax.plot(SAMPLE_SET_220 [0,:], SAMPLE_SET_220 [1,:], '.', markersize=8, color='yellow', alpha=0.567, label='SUBSET 220')
ax.plot(SAMPLE_SET_280 [0,:], SAMPLE_SET_280 [1,:], '.', markersize=8, color='teal', alpha=0.567, label='SUBSET 280')
plt.title('DATA POINTS OF THE TWO SUBSETS')
ax.legend(loc='lower left')
plt.show()
## for the maxiters_counter, upon loop completion do: maxiters_counter -=1
#def K_MEANS(X, k, maxiters):#maxiters_counter = maxiters
# Foolproofing iteration through dataset; for i in x_vectors take sample, observation (D,) array AND NOT feature (N,) array!
#%%
# Temporarily dumped here:
def K_means(DATASET, k, maxiters):
    """Lloyd's K-means on a (features x observations) array.

    Iterates assignment/update steps until the centroids stop moving
    (np.allclose) or *maxiters* iterations elapse; on convergence it
    plots the clustering and returns (SSSE, labels, centroids, None).
    """
    # Iterate observations, not features: each j is one (2,) sample.
    X_vectors = [j for j in DATASET.T]
    # Initial centroids: k samples drawn (with possible repeats) from the data.
    random_k_centroids_list = [random.choice(X_vectors) for k in range(0, k)]
    iter_counter = 0
    # Init just once and outside while
    centroids_list = random_k_centroids_list
    SSSE = 0  # Sum of Sum Standard Errors of k clusters
    # NOTE(review): SSSE accumulates across iterations rather than being
    # reset per iteration — confirm this is intended.
    while iter_counter != maxiters:  # Converge or stop it!
        # y[i] is the cluster label assigned to observation i this iteration.
        y = []
        # One empty member-list per cluster label.
        clusters_dict = {}
        for i in range(0, len(centroids_list)):
            clusters_dict[i] = []
        # Assignment step: nearest centroid by Euclidean distance.
        for j in X_vectors:
            distances = [np.linalg.norm(j - c) for c in centroids_list]
            label = distances.index(min(distances))
            clusters_dict[label].append(j)
            y.append(label)
            SSSE += distances[label]  # distortion measure, Bishop 9.1?
        for i in range(0, k):
            print("centroid_"+str(i), ": ", (centroids_list)[i].T)
        # Update step: each new centroid is the mean of its members.
        centroids_from_mean = []
        for u in range(0, k):
            try:
                centroids_from_mean.append(sum(clusters_dict[u])/len(clusters_dict[u]))
            except:
                # Empty cluster: fall back to a zero vector of matching shape.
                # NOTE(review): clusters_dict[u][0] also raises on an empty
                # cluster (IndexError inside except) — confirm.
                centroids_from_mean.append(0*clusters_dict[u][0])
            print("cluster_"+str(u), ": ", len(clusters_dict[u]))
            print("cluster_"+str(u), "mean: ", sum(clusters_dict[u])/len(clusters_dict[u]))
        print("\n\ncentroids_from_mean:", centroids_from_mean)
        print("\n\ncentroids_list:", centroids_list)
        print("len(y)", len(y))
        # Convergence test: centroids unchanged (within np.allclose tolerance).
        if np.allclose(np.matrix(centroids_list), np.matrix(centroids_from_mean)) == False:
            centroids_list = centroids_from_mean
            iter_counter += 1
            print("iteration:", iter_counter)
        else:
            # Converged: plot members per cluster plus the final centroids.
            from matplotlib import style
            style.use('bmh')
            colors = ["teal", "coral", "yellow", "#37BC61", "pink", "#CC99CC", "teal", 'coral']
            for cluster in clusters_dict:
                color = colors[cluster]
                for vector in np.asarray(clusters_dict[cluster]):
                    plt.scatter(vector[0], vector[1], marker="o", color=color, s=2, linewidths=4, alpha=0.876)
            for centroid in range(0, len(centroids_from_mean)):
                plt.scatter(centroids_from_mean[centroid][0], centroids_from_mean[centroid][1], marker="x", color="black", s=100, linewidths=4)
            plt.title("Clustering (K-means) with k = "+str(k)+" and SSSE = "+str(int(SSSE)))
            plt.savefig("clustering_Kmeans_with_k_eq_"+str(k)+"_cristina_"+str(int(SSSE))+".png", dpi=300)
            return(SSSE, y, centroids_from_mean, plt.show())
            break  # unreachable: follows the return
#==============================================================================
# #%%
#==============================================================================
# print("\n\ntype(SAMPLE_SET_220)", type(SAMPLE_SET_220))
# print("\n\nSAMPLE_SET_220.shape:", SAMPLE_SET_220.shape)
# print("type(clusters_dict[0])",type(clusters_dict[0]))
# print("\n\ntype(np.asarray(clusters_dict[0]))", type(np.asarray(clusters_dict[0])))
# print("\n\nnp.asarray(clusters_dict[0])", np.asarray(clusters_dict[0]).shape)
#==============================================================================
#==============================================================================
# RUN FOR REPS:
# clusterings = []
# for k in range(1,10):
# clusterings.append(K_means(DATASET,5, 100))
# #
#==============================================================================
#==============================================================================
#clustering_0 = K_means(DATASET,4, 100)
#%%
# CAUTION!! BUILT-INS KICK IN :
#%% elbow plot: Distortion - Number of Clusters
#==============================================================================
# FIND OUT HOW MANY k YOU SHOULD USE FOR THE CLUSTERING, "Elbow Method"
#==============================================================================
#==============================================================================
# from sklearn.cluster import KMeans
# import matplotlib.pyplot as plt
# distortions = [] # Distortion, the Sum of Squared errors within a cluster.
# for i in range(1, 11): # Let's test the performance of clusterings with different k, kE[1,11]
# km = KMeans(n_clusters=i,
# init='k-means++',
# n_init=10,
# max_iter=300,
# random_state=0)
# km.fit(DATASET.T) # sklearn wants the data .T if you have them Features x Observations
# distortions.append(km.inertia_)
# plt.plot(range(1,11), distortions, marker='o', color = "coral")
# plt.xlabel('Number of clusters')
# plt.ylabel('Distortion')
# plt.title("Elbow Curve Method: Choose Optimal Number of Centroids", fontsize = 10) # color = "teal")
#
# plt.show()
#==============================================================================
#==============================================================================
# #%%
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=3,
# init='k-means++',
# n_init=10,
# max_iter=300,
# tol=1e-04,
# random_state=0)
# y_km = km.fit_predict(DATASET.T)
#
#
#
# import numpy as np
# from matplotlib import cm
# from sklearn.metrics import silhouette_samples
# cluster_labels = np.unique(y_km)
# n_clusters = cluster_labels.shape[0]
# silhouette_vals = silhouette_samples(DATASET.T, y_km, metric='euclidean')
#
# y_ax_lower, y_ax_upper = 0, 0
# yticks = []
#
#
# colors = [ "teal","coral", "yellow", "#37BC61", "pink","#CC99CC","teal", 'coral']
# for i, c in enumerate(cluster_labels):
# c_silhouette_vals = silhouette_vals[y_km == c]
# c_silhouette_vals.sort()
# y_ax_upper += len(c_silhouette_vals)
# color = colors[i]
#
# plt.barh(range(y_ax_lower, y_ax_upper),
# c_silhouette_vals,
# height=1.0,
# edgecolor='none',
# color=color)
#
# yticks.append((y_ax_lower + y_ax_upper) / 2)
# y_ax_lower += len(c_silhouette_vals)
#
# silhouette_avg = np.mean(silhouette_vals)
# plt.axvline(silhouette_avg, color="red", linestyle="--")
#
# plt.yticks(yticks, cluster_labels + 1)
# plt.ylabel('Cluster')
# plt.xlabel('Silhouette coefficient')
# plt.title("Silhouette coefficient plot for k = 3")
# plt.savefig("silh_coeff_k_eq3"+".png", dpi=300)
# plt.show()
#==============================================================================
#%%
#%%
#==============================================================================
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=2,
# init='k-means++',
# n_init=10,
# max_iter=300,
# tol=1e-04,
# random_state=0)
# y_km = km.fit_predict(DATASET.T)
#
#==============================================================================
#==============================================================================
#
# import numpy as np
# from matplotlib import cm
# from sklearn.metrics import silhouette_samples
# cluster_labels = np.unique(y_km)
# n_clusters = cluster_labels.shape[0]
# silhouette_vals = silhouette_samples(DATASET.T, y_km, metric='euclidean')
#
# y_ax_lower, y_ax_upper = 0, 0
# yticks = []
#
#
# colors = [ "teal","coral", "yellow", "#37BC61", "pink","#CC99CC","teal", 'coral']
# for i, c in enumerate(cluster_labels):
# c_silhouette_vals = silhouette_vals[y_km == c]
# c_silhouette_vals.sort()
# y_ax_upper += len(c_silhouette_vals)
# color = colors[i]
#
# plt.barh(range(y_ax_lower, y_ax_upper),
# c_silhouette_vals,
# height=1.0,
# edgecolor='none',
# color=color)
#
# yticks.append((y_ax_lower + y_ax_upper) / 2)
# y_ax_lower += len(c_silhouette_vals)
#
# silhouette_avg = np.mean(silhouette_vals)
# plt.axvline(silhouette_avg, color="red", linestyle="--")
#
# plt.yticks(yticks, cluster_labels + 1)
# plt.ylabel('Cluster')
# plt.xlabel('Silhouette coefficient')
# plt.title("Silhouette coefficient plot for k = 2")
# plt.savefig("silh_coeff_k_eq2"+".png", dpi=300)
# plt.show()
#
#==============================================================================
| nilq/baby-python | python |
# Generated by Django 3.1.7 on 2021-03-17 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter PartitionFormulla.input / input_unit defaults and labels."""

    dependencies = [
        ('backoffice', '0027_auto_20210317_1314'),
    ]

    operations = [
        # Default input quantity becomes 1.
        migrations.AlterField(
            model_name='partitionformulla',
            name='input',
            field=models.IntegerField(default=1, verbose_name='Quantité en entrée'),
        ),
        # Input unit becomes a free-text field defaulting to empty.
        migrations.AlterField(
            model_name='partitionformulla',
            name='input_unit',
            field=models.CharField(default='', max_length=100, verbose_name='Unité de mesure en entrée'),
        ),
    ]
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from users.models import UserProfile
# Create your models here.
class Tab(models.Model):
    """Forum tab: a top-level grouping that owns a set of Node objects."""
    name = models.CharField(max_length=50, verbose_name='标签名称')
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')

    class Meta:
        verbose_name = '标签'
        verbose_name_plural = verbose_name

    def getNodes(self):
        """Return all Node rows that belong to this tab."""
        return Node.objects.filter(tab=self)

    def __unicode__(self):
        # Python 2 style string representation (file uses __unicode__ throughout).
        return self.name
class Node(models.Model):
    """Forum node (sub-category) under a Tab; topics are posted to a node."""
    name = models.CharField(max_length=50, verbose_name='节点名称', unique=True)
    tab = models.ForeignKey(Tab, verbose_name='所属标签', null=True)
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
    desc = models.CharField(default='', max_length=200, verbose_name='描述')
    # Uploaded node icon; falls back to a bundled default image.
    image = models.ImageField(max_length=200, upload_to='image/%Y/%m', null=True, default='image/default/node.png',
                              verbose_name='节点图片')

    class Meta:
        verbose_name = '论坛节点'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.name
class Topic(models.Model):
    """A discussion thread posted by a user under a Node."""
    title = models.CharField(max_length=100, verbose_name='标题')
    content = models.TextField(verbose_name='内容')
    node = models.ForeignKey(Node, verbose_name='节点', null=True)
    created_by = models.ForeignKey(UserProfile, verbose_name='创建者')
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
    # Set when the topic is edited; null until first modification.
    modify_time = models.DateTimeField(verbose_name='修改时间', blank=True, null=True)
    click_nums = models.IntegerField(default=0, verbose_name='点击数')
    # last_reply_user = models.CharField(max_length=50, verbose_name='最新回复用户名', null=True, default='')

    class Meta:
        verbose_name = '主题'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.title
class Reply(models.Model):
    """A numbered reply ("floor") within a Topic."""
    content = models.TextField(verbose_name='内容')
    created_by = models.ForeignKey(UserProfile, verbose_name='创建者')
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
    modify_time = models.DateTimeField(verbose_name='修改时间', blank=True, null=True)
    topic = models.ForeignKey(Topic, verbose_name='所属主题')
    # 1-based position of this reply within its topic.
    seq_num = models.IntegerField(verbose_name='序号')

    class Meta:
        verbose_name = '主题回复'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.topic.title[:20] + str(self.seq_num) + 'L 回复'
| nilq/baby-python | python |
import unittest
import requests
from pyalt.api.objects import AltObject
class TestAPIObjects(unittest.TestCase):
    """AltObject construction round-trips live API responses.

    NOTE(review): setUp performs real network requests; these tests fail
    offline or if the remote API changes — consider recorded fixtures.
    """
    def setUp(self):
        url_fmt = "https://online-shkola.com.ua/api/v2/users/1269/thematic/subject/{}"
        # Fetch a handful of subject payloads to compare against.
        self.responses = {
            requests.get(url_fmt.format(n))
            for n in (3, 4, 6)
        }

    def _verify(self, src, dest):
        """Recursively assert that *dest* mirrors the JSON structure *src*.

        Lists are compared element-wise, dict keys become attributes on
        *dest*, and scalars are compared directly.
        """
        if isinstance(src, list):
            for src_item, dest_item in zip(src, dest):
                self._verify(src_item, dest_item)
            return
        if isinstance(src, dict):
            for key, src_value in src.items():
                dest_value = getattr(dest, key)
                self._verify(src_value, dest_value)
            return
        self.assertEqual(src, dest)

    def test__from_json(self):
        # Raw bytes -> AltObject must mirror the parsed JSON.
        for response in self.responses:
            self._verify(
                response.json(),
                AltObject.from_json(response.content)
            )

    def test__from_request(self):
        # Response object -> AltObject must mirror the parsed JSON.
        for response in self.responses:
            self._verify(
                response.json(),
                AltObject.from_response(response),
            )
| nilq/baby-python | python |
import logging
import re
from collections import OrderedDict
from io import StringIO
import numpy as np
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register
from .._mesh import CellBlock, Mesh
# Regex grammar for WKT TIN parsing, built bottom-up:
# a float, a point of 3-or-4 floats, a 4-point triangle ring, and the TIN.
float_pattern = r"[+-]?(?:\d+\.?\d*|\d*\.?\d+)"
float_re = re.compile(float_pattern)
# A point: three floats with an optional fourth (e.g. a Z/M coordinate).
point_pattern = r"{0}\s+{0}\s+{0}(?:\s+{0})?".format(float_pattern)
point_re = re.compile(point_pattern)
# A triangle: double-parenthesized ring of exactly four comma-separated
# points (the fourth closes the ring back to the first).
triangle_pattern = r"\(\s*\(\s*({})\s*\)\s*\)".format(
    r"\s*,\s*".join(point_pattern for _ in range(4))
)
triangle_re = re.compile(triangle_pattern)
# The full TIN: "TIN ( triangle, triangle, ... )".
tin_pattern = fr"TIN\s*\((?:\s*{triangle_pattern}\s*,?)*\s*\)"
tin_re = re.compile(tin_pattern)
def read_str(s):
    """Parse a WKT TIN string into a triangle Mesh.

    Points are deduplicated (insertion-ordered) so shared vertices map to
    one index. Raises ReadError on malformed input and ValueError when a
    triangle ring is not closed.
    """
    s = s.strip()
    tin_match = tin_re.match(s)
    if tin_match is None:
        raise ReadError("Invalid WKT TIN")
    # point tuple -> vertex index, in first-seen order.
    point_idxs = OrderedDict()
    tri_idxs = []
    for tri_match in triangle_re.finditer(tin_match.group()):
        tri_point_idxs = []
        for point_match in point_re.finditer(tri_match.group()):
            point = []
            for float_match in float_re.finditer(point_match.group()):
                point.append(float(float_match.group()))
            point = tuple(point)
            if point not in point_idxs:
                point_idxs[point] = len(point_idxs)
            tri_point_idxs.append(point_idxs[point])
        # WKT rings repeat the first point last; require closure.
        if tri_point_idxs[-1] != tri_point_idxs[0]:
            raise ValueError("Triangle is not a closed linestring")
        # Drop the closing duplicate vertex.
        tri_idxs.append(tri_point_idxs[:-1])
    try:
        point_arr = np.array(list(point_idxs), np.float64)
    except ValueError as e:
        # A ragged point list means 3-D and 4-D points were mixed.
        if len({len(p) for p in point_idxs}) > 1:
            raise ReadError("Points have mixed dimensionality")
        else:
            raise e
    tri_arr = np.array(tri_idxs, np.uint64)
    return Mesh(point_arr, [CellBlock("triangle", tri_arr)])
def arr_to_str(arr):
    """Render a coordinate sequence as space-separated values."""
    return " ".join(map(str, arr))
def read(filename):
    """Read a WKT TIN file and return the parsed Mesh."""
    with open_file(filename) as f:
        return read_str(f.read())
def write(filename, mesh):
    """Write *mesh* to *filename* in WKT TIN format."""
    with open_file(filename, "w") as f:
        write_buffer(f, mesh)
def write_buffer(f, mesh):
    """Write *mesh* to the open text file *f* as a WKT TIN.

    Only triangle cells can be represented; any other cell types are
    skipped with a warning. Each triangle is emitted as a closed ring
    (first point repeated last).
    """
    # BUG FIX: collect the cell-type names so they can be joined into the
    # message; the original joined CellBlock objects.
    skip = [c.type for c in mesh.cells if c.type != "triangle"]
    if skip:
        # BUG FIX: the message was a plain string missing its f-prefix, so
        # the {...} placeholder was logged literally; also "WTK" -> "WKT".
        logging.warning(f'WKT only supports triangle cells. Skipping {", ".join(skip)}.')

    triangles = mesh.get_cells_type("triangle")

    f.write("TIN (")
    joiner = ""
    for tri_points in mesh.points[triangles]:
        f.write(
            "{0}(({1}, {2}, {3}, {1}))".format(
                joiner, *(arr_to_str(p) for p in tri_points)
            )
        )
        joiner = ", "
    f.write(")")
def write_str(mesh):
    """Serialize *mesh* to a WKT TIN string."""
    out = StringIO()
    write_buffer(out, mesh)
    return out.getvalue()
# Register the WKT reader/writer with the format registry for ".wkt" files.
register("wkt", [".wkt"], read, {"wkt": write})
| nilq/baby-python | python |
# -*- coding:utf-8 -*-
# author:Anson
from __future__ import unicode_literals
import os
import sys
import re
from datetime import date, datetime, timedelta
from docx import Document
import xlwt
from settings import MD_PATH, SITE_1, SITE_2, CELL
reload(sys)
sys.setdefaultencoding('utf-8')
def get_file_path(path, week_of, table1, table2, first_date, today, worksheet, site_1, site_2,
                  first_date_of, today_of):
    """Scan this month's markdown weekly reports under *path* and copy the
    requested week's items into two docx tables and an xlwt worksheet.

    table1 / the first worksheet section receive "this week" items;
    table2 / the second section receive "next week" items. site_1/site_2
    are the starting worksheet row indices for the two sections.
    """
    # Bordered, centered cell style shared by every worksheet write.
    style = xlwt.XFStyle()
    bl = xlwt.Borders()
    bl.left = xlwt.Borders.THIN
    bl.right = xlwt.Borders.THIN
    bl.top = xlwt.Borders.THIN
    bl.bottom = xlwt.Borders.THIN
    al = xlwt.Alignment()
    al.horz = 0x02  # horizontal center
    al.vert = 0x01  # vertical center
    style.alignment = al
    style.borders = bl
    nums = 0  # running serial number / table row index
    file_date = date.today().strftime('%Y-%m')
    for filename in os.listdir(path):
        file_path = os.path.join(path, filename)
        # NOTE(review): the month is hard-coded; filenames from any other
        # month make re.findall(...) return [] and [0] raise IndexError.
        group_name = re.findall(r'.*2019-08-(.*)..*', filename)[0][0:-2]
        fd = filename[:7]    # leading 'YYYY-MM' portion of the file name
        md = file_path[-2:]  # last two chars: extension check for 'md'
        if md == 'md':
            if fd == file_date:
                with open(file_path) as f:
                    lines = f.readlines()
                lines = [i.strip('-').strip() for i in lines]
                # Locate the requested week header; note the else branch
                # resets first_index on every non-matching line, so this
                # effectively finds the header only if it is the last line
                # examined. NOTE(review): likely a bug — confirm intent.
                if len(lines) == 0:
                    first_index = 0
                else:
                    for key, value in enumerate(lines):
                        if value == week_of:
                            first_index = key
                        else:
                            first_index = 0
                k = 0
                line_list = []
                index = 0  # counts week-header occurrences (otherwise unused)
                # Collect the lines of the requested week: from the header
                # up to (not including) the next '#' heading.
                while k < len(lines):
                    if lines[k] == week_of:
                        index += 1
                        first_index = k
                        line_list.append(lines[k])
                    else:
                        if k > first_index:
                            if lines[k][:1] == '#':
                                break
                            else:
                                line_list.append(lines[k])
                    k += 1
                line = [i.strip('#').strip() for i in line_list]
                d = 0
                trade_today = False   # inside the "this week" section
                yearst_today = False  # inside the "next week" section
                s1 = ''  # accumulated "this week" text for the worksheet cell
                s2 = ''  # accumulated "next week" text for the worksheet cell
                sor_index = 0  # number of person entries seen so far
                while d < len(line):
                    # '*'-prefixed lines start a new person's entry.
                    if line[d].strip()[:1] == '*':
                        if sor_index != 0:
                            # Flush the previous person's accumulated text.
                            worksheet.write(site_1, 2, s1, style)
                            worksheet.write(site_2, 2, s2, style)
                            s1 = ''
                            s2 = ''
                            yearst_today = False
                        nums += 1
                        site_1 += 1
                        site_2 += 1
                        name = line[d].strip('*').strip()
                        # Row header cells: serial number, date range, name.
                        worksheet.write(site_1, 1, str(nums), style)
                        worksheet.write(site_1, 3, first_date, style)
                        worksheet.write(site_1, 4, today, style)
                        worksheet.write(site_1, 5, name, style)
                        worksheet.write(site_2, 1, str(nums), style)
                        worksheet.write(site_2, 3, first_date_of, style)
                        worksheet.write(site_2, 4, today_of, style)
                        worksheet.write(site_2, 5, name, style)
                        table1.rows[nums].cells[0].add_paragraph(str(nums))
                        table1.rows[nums].cells[2].add_paragraph(first_date)
                        table1.rows[nums].cells[3].add_paragraph(today)
                        table1.rows[nums].cells[4].add_paragraph(name)
                        table1.rows[nums].cells[5].add_paragraph(group_name)
                        table2.rows[nums].cells[0].add_paragraph(str(nums))
                        table2.rows[nums].cells[2].add_paragraph(first_date_of)
                        table2.rows[nums].cells[3].add_paragraph(today_of)
                        table2.rows[nums].cells[4].add_paragraph(name)
                        table2.rows[nums].cells[5].add_paragraph(group_name)
                        d += 1
                        sor_index += 1
                    # Section marker: "this week's work".
                    if line[d] == '本周工作':
                        trade_today = True
                        d += 1
                    if (line[d].strip()[1:2] == '.' or line[d].strip()[1:2] == ')') and trade_today:
                        # Numbered item in this week's work.
                        table1.rows[nums].cells[1].add_paragraph(line[d])
                        s1 = s1 + ' ' + line[d]
                    # Section marker: "next week's work/plan".
                    if line[d] == '下周工作' or line[d] == '下周计划':
                        trade_today = False
                        yearst_today = True
                        d += 1
                    if (line[d].strip()[1:2] == '.' or line[d].strip()[1:2] == ')') and yearst_today:
                        # Numbered item in next week's plan.
                        table2.rows[nums].cells[1].add_paragraph(line[d])
                        s2 = s2 + ' ' + line[d]
                    d += 1
                # Flush the last person's accumulated text.
                worksheet.write(site_1, 2, s1, style)
                worksheet.write(site_2, 2, s2, style)
def get_week_of_month(year, month, day):
    """Return the week-of-month heading for a given date.

    Weeks start on Monday (strftime "%W" semantics): the index is the
    date's week-of-year minus the week-of-year of the 1st of the same
    month, plus one.

    Args:
        year:  calendar year, e.g. 2019.
        month: month number 1-12.
        day:   day of month.

    Returns:
        str: a markdown-style heading such as '# 第一周' ("week one");
        any index above five collapses to '# 第六周' ("week six"),
        matching the original if/elif chain's final else branch.
    """
    week_of_target = int(datetime(year, month, day).strftime("%W"))
    week_of_first = int(datetime(year, month, 1).strftime("%W"))
    week_index = week_of_target - week_of_first + 1
    # Tuple lookup replaces the six-branch if/elif chain; indices
    # outside 1..5 (only >5 can occur in practice) use the fallback.
    labels = ('# 第一周', '# 第二周', '# 第三周', '# 第四周', '# 第五周')
    if 1 <= week_index <= len(labels):
        return labels[week_index - 1]
    return '# 第六周'
def create_table_one_cell(document, content):
    """Append a one-row, one-column 'Table Grid' table holding *content*."""
    single_cell_table = document.add_table(rows=1, cols=1, style='Table Grid')
    only_cell = single_cell_table.rows[0].cells[0]
    only_cell.add_paragraph(content)
def create_table_more_cell(document, rows, cols, contents):
    """Append a rows x cols 'Table Grid' table and fill it row by row.

    Each element of *contents* is a sequence of cell texts for one row;
    table rows beyond len(contents) are left empty.
    """
    grid = document.add_table(rows=rows, cols=cols, style='Table Grid')
    for row_idx, row_values in enumerate(contents):
        for col_idx, cell_text in enumerate(row_values):
            grid.rows[row_idx].cells[col_idx].add_paragraph(cell_text)
def create_fixed_cell(document, first_date, end_date):
    """Write the fixed front-matter tables of the weekly report.

    Appends the boilerplate sections (project info, key dates, staffing,
    status checklists, change logs, etc.) to *document* as a sequence of
    'Table Grid' tables. The table text is fixed Chinese report content;
    only *first_date* / *end_date* are substituted in.
    (Original docstring: 表前半部分固定内容 — "fixed first half of the form".)
    """
    # Section: basic project information.
    create_table_one_cell(document, '项目基本情况')
    create_table_more_cell(document, 2, 2, [['项目名称', '厦开项目组'], ['客户名称', '中国建设银行厦门开发中心']])
    create_table_more_cell(document, 3, 6, [['客户负责人', '李晓敦', '电话', '', 'Email', ''],
                                            ['(必填)', '闫立志', '电话', '', 'Email', ''],
                                            ['', '', '电话', '', 'Email', '']])
    # NOTE(review): the member list below contains doubled '、、' separators
    # and trailing '、' — looks like data-entry typos in the source roster;
    # preserved verbatim since it is runtime output text.
    create_table_more_cell(document, 4, 2, [['开始日期', first_date], ['项目经理', '赖志勇'],
                                            ['项目组成员', '柳惠阳、许华语、郭健超、何卧岩、郑炜、黄惠章、朱俊龙、李稳定、'
                                                      '黄建鸣、陈浩1、叶晟君、张叶桃、陈晓衍、曾国荣、肖凯、刘安森、'
                                                      '林秋霞、姜渊、肖金平、周丽荣、钟晓杰、黄祯鸿、李志阳、刘程川、'
                                                      '张俊钦、邓松进、林丹丹、姜琪、钟高镇、方若琳、、谢源鑫、罗庭颖、'
                                                      '魏治邦、白艺伟、付敏、肖金龙、颜炳煜、庄华琼、董凯华、黄忠强、'
                                                      '徐鸿能、江养根、何龙伙、肖丽琴、罗万春、曾林华、、张一浓、郭吉、、'
                                                      '吴招辉、林泉、、苏雪梅、张祖琦、、陈浩'],
                                            ['项目描述', '']])
    # Section: planned vs. actual key milestone dates.
    create_table_one_cell(document, '计划关键时间点(必填)')
    create_table_more_cell(document, 6, 4, [['关键时间点', '预计完成时间', '关键时间点', '预计完成时间'],
                                            ['1、需求分析', '', '6、技术测试(单元测试)', ''],
                                            ['2、技术方案(项目实施方案)', '', '7、业务测试(集成测试)', ''],
                                            ['3、概要设计', '', '8、上线时间', ''],
                                            ['4、详细设计', '', '9、后期维护', ''],
                                            ['5、编码', '', '10、结项', '']])
    create_table_one_cell(document, '实际关键时间点(必填)')
    create_table_more_cell(document, 6, 4, [['关键时间点', '实际完成时间', '关键时间点', '实际完成时间'],
                                            ['1、需求分析', '', '6、技术测试(单元测试)', ''],
                                            ['2、技术方案(项目实施方案)', '', '7、业务测试(集成测试)', ''],
                                            ['3、概要设计', '', '8、上线时间', ''],
                                            ['4、详细设计', '', '9、后期维护', ''],
                                            ['5、编码', '', '10、结项', '']])
    # Section: staffing status (arrivals, departures, leave).
    create_table_one_cell(document, '人力资源状况(包括人员的入职、离职;入场、离场、休假、请假等情况).'
                                    '时间以到达、离开现场为准')
    create_table_one_cell(document, '预计新增资源(必填)')
    create_table_more_cell(document, 4, 6, [['姓名', '', '预计到场时间', '', '任务描述', ''],
                                            ['姓名', '', '预计到场时间', '', '任务描述', ''],
                                            ['姓名', '', '预计到场时间', '', '任务描述', ''],
                                            ['姓名', '', '预计到场时间', '', '任务描述', '']])
    create_table_one_cell(document, '预计撤离资源(必填)')
    create_table_more_cell(document, 3, 6, [['姓名', '', '预计离场时间', '', '撤离原因', ''],
                                            ['姓名', '', '预计离场时间', '', '撤离原因', ''],
                                            ['姓名', '', '预计离场时间', '', '撤离原因', '']])
    create_table_one_cell(document, '本周人员变动情况(必填)')
    create_table_more_cell(document, 5, 4, [['序号', '到场人员姓名', '到场时间', '备注'],
                                            ['1', '', '', ''], ['2', '', '', ''],
                                            ['3', '', '', ''], ['4', '', '', '']])
    create_table_more_cell(document, 5, 4, [['序号', '离场人员姓名', '离场时间', '备注'],
                                            ['1', '', '', ''], ['2', '', '', ''],
                                            ['3', '', '', ''], ['4', '', '', '']])
    # Section: current project status and self-assessment checklists.
    create_table_one_cell(document, '本周项目情况')
    create_table_one_cell(document, '项目所处阶段(必填)')
    create_table_more_cell(document, 2, 5, [['1、需求分析', '2、概要设计', '3、详细设计', '4、编码', '5、技术测试'],
                                            ['6、业务测试', '7、试运行 ', '8、部分上线', '9、整体完工', '10、后期维护']])
    create_table_one_cell(document, '项目经理自评(必填)')
    create_table_more_cell(document, 5, 2, [['是否完成以下事项', '未完成的理由及说明'],
                                            ['是否组织周例会会议纪要? 【□是 □否】', ''],
                                            ['本周工作是否按计划完成?【□是 □否】', ''],
                                            ['是否跟客户项目负责人汇报本周工作?【□是 □否】', ''],
                                            ['下周计划安排是否与项目成员落实?【□是 □否】 ', '']])
    # Section: change logs (requirements, design, plan) and open issues.
    create_table_one_cell(document, '需求变更情况(必填)')
    create_table_more_cell(document, 3, 2, [['需求变更描述', '对后续的影响'], ['无', ''], ['', '']])
    create_table_one_cell(document, '方案变更情况(必填)')
    create_table_more_cell(document, 3, 2, [['方案变更描述', '对后续的影响'], ['', ''], ['', '']])
    create_table_one_cell(document, '项目计划变更情况(必填)')
    create_table_more_cell(document, 3, 2, [['项目计划变更描述', '对后续的影响'], ['', ''], ['', '']])
    create_table_one_cell(document, '本周未完成的任务情况(必填)')
    create_table_more_cell(document, 4, 3, [['未完成的任务描述', '任务未完成的原因', '对后续的影响'],
                                            ['', '', ''], ['', '', ''], ['', '', '']])
    create_table_one_cell(document, '存在的问题及解决方案(必填)')
    create_table_more_cell(document, 5, 4, [['问题描述及原因分析', '解决方案', '预计完成日期', '负责人'],
                                            ['', '', '', ''], ['', '', '', ''], ['', '', '', ''],
                                            ['', '', '', '']])
    create_table_one_cell(document, '说明:如需求、技术方案有变化,请将信的需求文档、技术方案文档与周报一起,提交给公司归档')
    # Section: progress header for the current reporting period.
    create_table_one_cell(document, '项目进展和计划')
    create_table_one_cell(document, '一、本周工作完成情况( {0}日至 {1}) (以下必填)'.format(first_date, end_date))
    create_table_more_cell(document, 12, 4, [['编号', '本周重要里程碑事件', '完成日期', '完成标志'],
                                             ['1', '', '', ''], ['2', '', '', ''], ['', '', '', ''],
                                             ['编号', '上周计划的工作内容,但本周已完成', '完成日期', '负责人'],
                                             ['1', '', '', ''], ['2', '', '', ''], ['3', '', '', ''],
                                             ['4', '', '', ''], ['5', '', '', ''], ['6', '', '', ''],
                                             ['7', '', '', '']])
def create_fixed_cell_tow(document):
    """Write the fixed back-matter tables of the weekly report.

    Appends the trailing boilerplate sections (advances, delivered
    documents, supervisor feedback) to *document*.
    (Original docstring: 表后半部分固定内容 — "fixed second half of the form".)

    NOTE(review): "tow" in the name is presumably a typo for "two";
    kept as-is because callers reference this name.
    """
    create_table_one_cell(document, '项目组下周预计借支情况')
    create_table_more_cell(document, 5, 3, [['借支内容摘要', '金额', '备注'], ['', '', ''], ['', '', ''],
                                            ['合计', '', '']])
    create_table_one_cell(document, '已提交给客户的阶段性文档和代码(必填)')
    create_table_more_cell(document, 4, 4, [['资料名称', '提交时间', '接收人', '备注']])
    create_table_one_cell(document, '已提交给公司的阶段性文档和代码(必填)')
    create_table_more_cell(document, 4, 4, [['资料名称', '提交时间', '接收人', '备注']])
    create_table_one_cell(document, '负责人对此项目本周工作的反馈意见')
    create_table_more_cell(document, 3, 2, [['对项目进展评价', ''],
                                            ['对“项目情况”中,变更情况及存在问题的评述', ''],
                                            ['后续项目实施建议', '']])
def to_excel(worksheet, first_date, end_date):
    """Write the fixed layout (title, merged section headers, column
    headings) of the weekly-report spreadsheet into *worksheet*.

    Args:
        worksheet:  an xlwt worksheet to write into.
        first_date: period start date string, shown in the title.
        end_date:   period end date string, shown in the title.

    NOTE(review): SITE_1 and SITE_2 are module-level row anchors defined
    earlier in the file (main() copies them into locals) — confirm their
    values against the full file.
    """
    style = xlwt.XFStyle()
    title_str = '新一代核心系统建设项目周报\n' \
                '\n' \
                '(周期:{0}至{1})'.format(first_date, end_date)
    # Thin border on all four sides of every written cell.
    bl = xlwt.Borders()
    bl.left = xlwt.Borders.THIN
    bl.right = xlwt.Borders.THIN
    bl.top = xlwt.Borders.THIN
    bl.bottom = xlwt.Borders.THIN
    al = xlwt.Alignment()
    al.horz = 0x02  # horizontally centred
    al.vert = 0x01  # vertically centred
    al.wrap = 1  # wrap text automatically
    style.alignment = al
    style.borders = bl
    # Title banner merged across rows 0-3, columns 0-9.
    worksheet.write_merge(0, 3, 0, 9, title_str, style)
    # Section 1: this week's progress — header row at SITE_1.
    worksheet.write_merge(SITE_1, SITE_2-1, 0, 0, '一.本周计划进展情况', style)
    worksheet.write(SITE_1, 1, '序号', style)
    worksheet.write(SITE_1, 2, '工作事项名称', style)
    worksheet.write(SITE_1, 3, '开始时间', style)
    worksheet.write(SITE_1, 4, '完成时间', style)
    worksheet.write(SITE_1, 5, '责任人', style)
    worksheet.write(SITE_1, 6, '计划%', style)
    worksheet.write(SITE_1, 7, '实际%', style)
    worksheet.write(SITE_1, 8, '偏差%', style)
    worksheet.write(SITE_1, 9, '进展说明', style)
    # Section 2: next week's plan — 32 rows starting at SITE_2.
    worksheet.write_merge(SITE_2, SITE_2+31, 0, 0, '二.下周工作计划', style)
    worksheet.write(SITE_2, 1, '序号', style)
    worksheet.write(SITE_2, 2, '工作事项名称', style)
    worksheet.write(SITE_2, 3, '开始时间', style)
    worksheet.write(SITE_2, 4, '完成时间', style)
    worksheet.write(SITE_2, 5, '责任人', style)
    worksheet.write_merge(SITE_2, SITE_2, 6, 8, '计划输出结果', style)
    worksheet.write(SITE_2, 9, '说明', style)
    # Section 3: open problems and items needing coordination.
    worksheet.write_merge(SITE_2+32, SITE_2+41, 0, 0, '三.目前存在的问题以及需要协调解决的事项', style)
    worksheet.write(SITE_2+32, 1, '序号', style)
    worksheet.write(SITE_2+32, 2, '问题名称', style)
    worksheet.write_merge(SITE_2+32, SITE_2+32, 3, 4, '问题描述', style)
    worksheet.write(SITE_2+32, 5, '提出日期', style)
    worksheet.write(SITE_2+32, 6, '提出人团体', style)
    worksheet.write(SITE_2+32, 7, '解决责任团队', style)
    worksheet.write(SITE_2+32, 8, '预期解决时间', style)
    worksheet.write(SITE_2+32, 9, '解决建议方案和计划', style)
    # Sections 4 and 5: quality-management and configuration-management summaries.
    worksheet.write_merge(SITE_2+42, SITE_2+47, 0, 0, '四.本周质量管理方面的工作总结', style)
    worksheet.write(SITE_2+42, 1, '序号', style)
    worksheet.write_merge(SITE_2+42, SITE_2+42, 2, 9, '进展说明', style)
    worksheet.write_merge(SITE_2+48, SITE_2+53, 0, 0, '五.本周配置管理方面的工作总结', style)
    worksheet.write(SITE_2+48, 1, '序号', style)
    worksheet.write_merge(SITE_2+48, SITE_2+48, 2, 9, '进展说明', style)
def main():
    """Build both outputs of the weekly report and save them to disk:

    * a .docx with two tables (this week's work / next week's plan),
    * an .xls with the fixed layout plus the same content.

    Content is read from markdown files under MD_PATH by get_file_path()
    (defined earlier in the file). SITE_1/SITE_2/CELL are module-level
    layout constants — confirm against the full file.
    """
    site_1 = SITE_1
    site_2 = SITE_2
    time_now = date.today()
    # Debug override for a fixed report date — kept for convenience:
    # time_now = date(2019, 7, 26)
    today = time_now.strftime("%Y-%m-%d")
    # Reporting period: assumes the script runs on a Friday, so the
    # period is Monday (-4 days) through Sunday (+2 days) — TODO confirm.
    first_date = (time_now + timedelta(days=-4)).strftime("%Y-%m-%d")
    end_date = (time_now + timedelta(days=2)).strftime("%Y-%m-%d")
    first_date_of = (time_now + timedelta(days=3)).strftime("%Y-%m-%d")
    end_date_of = (time_now + timedelta(days=7)).strftime("%Y-%m-%d")
    # Build the Excel workbook with the fixed layout.
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('周报', cell_overwrite_ok=True)
    to_excel(worksheet, first_date, end_date)
    # Week-of-month heading ('# 第N周'), used both as the doc title and
    # as the section marker searched for in the markdown source.
    week = get_week_of_month(time_now.year, time_now.month, time_now.day)
    # week = get_week_of_month(2019, 8, 2)
    document = Document()
    document.add_heading('项目周报({0})'.format(week.strip('#').strip()), level=1)
    document.add_paragraph('填表人:廖虹媛 报告周期:{date1}到{date2} 填表日期:{date3}'.format(
        date1=first_date, date2=end_date, date3=today))
    # Optional fixed front matter (disabled):
    # create_fixed_cell(document, first_date, end_date)
    # Table 1: this week's work items.
    table1 = document.add_table(rows=CELL, cols=6, style='Table Grid')
    table1.rows[0].cells[0].add_paragraph('编号')
    table1.rows[0].cells[1].add_paragraph('本周工作内容')
    table1.rows[0].cells[2].add_paragraph('计划完成时间')
    table1.rows[0].cells[3].add_paragraph('实际完成时间')
    table1.rows[0].cells[4].add_paragraph('负责人')
    table1.rows[0].cells[5].add_paragraph('项目组')
    # Table 2: next week's planned items.
    create_table_one_cell(document, '项目进展和计划')
    create_table_one_cell(document, '一、下周工作完成情况( {0}至 {1}) (以下必填)'.format(first_date, end_date))
    table2 = document.add_table(rows=CELL, cols=6, style='Table Grid')
    table2.rows[0].cells[0].add_paragraph('编号')
    table2.rows[0].cells[1].add_paragraph('下周工作内容')
    table2.rows[0].cells[2].add_paragraph('计划完成时间')
    table2.rows[0].cells[3].add_paragraph('实际完成时间')
    table2.rows[0].cells[4].add_paragraph('负责人')
    table2.rows[0].cells[5].add_paragraph('项目组')
    # Fill both tables and the worksheet from the markdown source files.
    get_file_path(MD_PATH, week, table1, table2, first_date, today, worksheet,
                  site_1, site_2, first_date_of, end_date_of)
    # Optional fixed back matter (disabled):
    # create_fixed_cell_tow(document)
    save_name = '厦开项目组周报{0}至{1}.docx'.format(first_date, end_date)
    document.save(save_name)
    excel_name = '新一代核心系统建设项目周报{0}_天用厦开安全项目组.xls'.format(end_date)
    workbook.save(excel_name)
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
| nilq/baby-python | python |
##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from pyxcli.response import XCLIResponse
from pyxcli.helpers.xml_util import ElementNotFoundException
from pyxcli.helpers import xml_util as etree
class XCLIError(Exception):
    """Base class of all XCLI-related errors"""
    pass


class BaseScsiException(Exception):
    # Base for SCSI-related errors; deliberately NOT under XCLIError —
    # presumably raised by a different layer of the client (confirm at
    # raise sites elsewhere in the package).
    pass
class CommandExecutionError(XCLIError):
    """
    Base class of all XCLI command execution errors: invalid command,
    parameters, operation failed, etc. This is the "stable API" for
    catching XCLI exceptions - there are subclasses for specific errors,
    but these should be considered unstable and may change over time
    """
    # Registries mapping server-reported error codes / severity levels to
    # concrete exception subclasses; populated by the register() and
    # register_level() class decorators below.
    KNOWN_CODES = {}
    KNOWN_LEVELS = {}

    def __init__(self, code, status, xml, return_value=None):
        """Store the error code, status string, and raw XML; wrap the XML
        in an XCLIResponse unless a parsed return_value is supplied."""
        XCLIError.__init__(self, code, status, xml)
        self.code = code      # error code string ("code/@value")
        self.status = status  # human-readable status ("status_str/@value")
        self.xml = xml        # raw command-response element
        if return_value is not None:
            self.return_value = return_value
        else:
            self.return_value = XCLIResponse(xml)

    def __str__(self):
        return self.status

    @classmethod
    def instantiate(cls, rootelem, cmdroot, encoding):
        """Build the most specific registered error for *cmdroot*.

        Resolution order: exact code match (KNOWN_CODES), then severity
        level match (KNOWN_LEVELS), then CommandFailedUnknownReason.

        NOTE(review): *rootelem* is unused here — confirm whether callers
        still rely on the three-argument signature.
        """
        try:
            # "code/@value"
            code = etree.xml_find(cmdroot, "code", "value")
            # "status/@value"
            level = etree.xml_find(cmdroot, "status", "value")
            # "status_str/@value"
            status = etree.xml_find(cmdroot, "status_str", "value")
        except ElementNotFoundException:
            # Malformed/unexpected response: fall through to the generic
            # unknown-reason error below.
            code = None
            level = None
            status = "Unknown reason"
        xcli_response = XCLIResponse.instantiate(cmdroot, encoding)
        if code in cls.KNOWN_CODES:
            concrete = cls.KNOWN_CODES[code]
        elif level in cls.KNOWN_LEVELS:
            concrete = cls.KNOWN_LEVELS[level]
        else:
            concrete = CommandFailedUnknownReason
        return concrete(code, status, cmdroot, xcli_response)

    @classmethod
    def register(cls, *codes):
        """Class decorator: map each error code in *codes* to the decorated class."""
        def deco(concrete):
            for code in codes:
                cls.KNOWN_CODES[code] = concrete
            return concrete
        return deco

    @classmethod
    def register_level(cls, *codes):
        """Class decorator: map each severity level in *codes* to the decorated class."""
        def deco(concrete):
            for code in codes:
                cls.KNOWN_LEVELS[code] = concrete
            return concrete
        return deco
class CommandFailedUnknownReason(CommandExecutionError):
    # Fallback raised when neither the error code nor the error level is
    # present in the registries (see CommandExecutionError.instantiate).
    pass


##############################################################################
# Concrete Error Levels
##############################################################################
# One subclass per XCLI error "level" value ("status/@value" in the
# response), attached via the register_level() class decorator.

@CommandExecutionError.register_level("1")
class CommandFailedConnectionError(CommandExecutionError):
    pass


@CommandExecutionError.register_level("2")
class CommandFailedSyntaxError(CommandExecutionError):
    pass


@CommandExecutionError.register_level("3")
class CommandFailedRuntimeError(CommandExecutionError):
    pass


@CommandExecutionError.register_level("4")
class CommandFailedPassiveManager(CommandExecutionError):
    pass


@CommandExecutionError.register_level("5")
class CommandFailedInternalError(CommandExecutionError):
    pass
##############################################################################
# Concrete Error Codes
##############################################################################
# One subclass per known XCLI error code ("code/@value" in the response),
# attached to CommandExecutionError.KNOWN_CODES via the register()
# class decorator. Bodies are intentionally empty: the class identity
# alone lets callers catch specific failures.

@CommandExecutionError.register("MCL_TIMEOUT")
class MCLTimeoutError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("PARTIAL_SUCCESS")
class PartialSuccessError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("TRNS_ERROR_WITH_EXTENDED_INFO")
class OperationFailedWithExtendedInfoError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_BAD_NAME")
class VolumeBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SOURCE_VOLUME_BAD_NAME")
class SourceVolumeBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("TARGET_VOLUME_BAD_NAME")
class TargetVolumeBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("BASE_VOLUME_BAD_NAME")
class BaseVolumeBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("BASE_VOLUME_INVALID")
class BaseVolumeInvalidError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_EXISTS")
class VolumeExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_IS_MAPPED")
class VolumeIsMappedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_SIZE_ABOVE_LIMIT")
class VolumeSizeAboveLimitError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_NO_MIRROR")
class VolumeHasNoMirrorError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_HAS_DATA_MIGRATION")
class VolumeHasDataMigrationError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_BELONGS_TO_MIRRORED_CONS_GROUP")
class VolumeIsPartOfMirroredCgError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("ALU_BAD_NAME")
class ALUBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_BAD_NAME")
class CgBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_NO_MIRROR")
class CgHasNoMirrorError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MIRROR_IS_NOT_SYNCHRONIZED")
class MirrorNotSynchronizedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MIRROR_IS_ASYNC")
class MirrorIsAsyncError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MIRROR_IS_INITIAL")
class MirrorInitializingError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MIRROR_IS_ACTIVE")
class MirrorActiveError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SYNC_ALREADY_INACTIVE")
class SyncAlreadyInactiveError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SYNC_ALREADY_ACTIVE")
class SyncAlreadyActiveError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MIRROR_IS_NON_OPERATIONAL")
class MirrorNonOperationalError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("REMOTE_TARGET_NOT_CONNECTED")
class RemoteTargetNotConnectedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("LOCAL_PEER_IS_NOT_MASTER")
class LocalIsNotMasterError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("NOT_ENOUGH_SPACE")
class PoolOutOfSpaceError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("NOT_ENOUGH_HARD_SPACE")
class PoolOutOfHardSpaceError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("NOT_ENOUGH_SNAPSHOT_SPACE")
class PoolOutOfSnapshotSpaceError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("NO_SPACE")
class SystemOutOfSpaceError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("NOT_ENOUGH_SPACE_ON_REMOTE_MACHINE")
class RemotePoolOutOfSpaceError(CommandFailedRuntimeError):
    pass
# (continued) registered error-code subclasses — snapshot, host, pool,
# LDAP and metadata-service failures.

@CommandExecutionError.register("VOLUME_IS_SNAPSHOT")
class OperationNotPermittedOnSnapshotError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("BAD_PARAMS")
class BadParameterError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("HOST_NAME_EXISTS")
class HostNameAlreadyExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("HOST_PORT_EXISTS")
class HostWithPortIdAlreadyDefined(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("POOL_DOES_NOT_EXIST")
class PoolDoesNotExistError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("POOL_SNAPSHOT_LIMIT_REACHED")
class PoolSnapshotLimitReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("REMOTE_VOLUME_IS_MASTER")
class RemoteVolumeIsMasterError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONF_PATH_DOES_NOT_EXIST")
class PathDoesNotExistInConfigurationError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("ILLEGAL_VALUE")
class IllegalValueForArgumentError(CommandFailedSyntaxError):
    pass


@CommandExecutionError.register("ILLEGAL_NAME")
class IllegalNameForObjectError(CommandFailedSyntaxError):
    pass


@CommandExecutionError.register("COMPONENT_TYPE_MUST_HAVE_COMPONENT_ID")
class ComponentTypeMustHaveComponentIDError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("HOST_PROFILE_UPDATE_TOO_FREQUENT")
class HostProfileUpdateTooFrequentError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("HOST_BAD_NAME")
class HostBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CLUSTER_BAD_NAME")
class ClusterBadNameError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MAX_HOST_PROFILES_REACHED")
class MaxHostProfilesReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SSD_CACHING_NOT_ENABLED")
class SSDCachingNotEnabledError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("UNRECOGNIZED_EVENT_CODE")
class UnrecognizedEventCodeError(CommandFailedSyntaxError):
    pass


@CommandExecutionError.register("UNRECOGNIZED_COMMAND")
class UnrecognizedCommandError(CommandFailedSyntaxError):
    pass


@CommandExecutionError.register("CAN_NOT_SHRINK_VOLUME")
class VolumeSizeCannotBeDecreased(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("OBJECT_BAD_NAME")
class ReferencedObjectDoesNotExistError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("OPERATION_FORBIDDEN_FOR_USER_CATEGORY")
class OperationForbiddenForUserCategoryError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("ACCESS_DENIED")
class AccessDeniedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMMAND_NOT_SUPPORTED_FOR_OLVM_VOLUMES")
class CommandNotSupportedForOLVMVolumes(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_LOCKED")
class VolumeLocked(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_HAS_OLVM")
class VolumeHasOlvm(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_HAS_MIRROR")
class VolumeHasMirrorError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_BELONGS_TO_CG")
class VolumeBelongsToCGError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_MAX_DB_REACHED")
class MetadataServiceMaxDBReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_DB_DOES_NOT_EXIST")
class MetadataServiceDBDoesNotExistError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_DB_ALREADY_EXISTS")
class MetadataServiceDBAlreadyExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_KEY_DOES_NOT_EXIST")
class MetadataServiceKeyDoesNotExistError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_KEY_ALREADY_EXISTS")
class MetadataServiceKeyAlreadyExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_MAX_ENTRIES_REACHED")
class MetadataServiceMaxEntriesReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("STATUS_METADATA_SERVICE_INVALID_TOKEN")
class MetadataServiceInvalidTokenError(CommandFailedRuntimeError):
    pass
# (continued) registered error-code subclasses — LDAP, compression,
# consistency-group, snapshot-group and performance-class failures.

@CommandExecutionError.register("LDAP_AUTHENTICATION_IS_NOT_ACTIVE")
class LDAPAuthenticationIsNotActive(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("LDAP_IS_NOT_FULLY_CONFIGURED")
class LDAPIsNotFullyConfigured(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_INCOMPATIBLE_SIZE")
class VolumeIncompatibleSizeError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSION_DISABLED")
class CompressionDisabledError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSION_REQUIRES_THIN_PROVISIONED_POOL")
class CompressionRequiresThinPoolError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSED_VOLUMES_LIMIT_REACHED")
class CompressedVolumesLimitReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSED_CAPACITY_LIMIT_REACHED")
class CompressedCapacityLimitReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSED_VOLUME_TOO_BIG")
class CompressedVolumeTooBigError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSED_VOLUME_TOO_SMALL")
class CompressedVolumeTooSmallError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SOURCE_VOLUME_COMPRESSED_TARGET_UNCOMPRESSED")
class SourceVolumeCompressedTargetUncompressedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SOURCE_VOLUME_UNCOMPRESSED_TARGET_COMPRESSED")
class SourceVolumeUncompressedTargetCompressedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CANNOT_SHRINK_COMPRESSED_VOLUME")
class CannotShrinkCompressedVolumeError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_HAS_TRANSFORM")
class VolumeHasTransformError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_IS_COMPRESSED")
class VolumeIsCompressedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("COMPRESSED_VOLUME_IS_MAPPED")
class CompressedVolumeIsMappedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CAN_NOT_MAP_SLAVE_COMPRESSED_VOLUME")
class CannotMapSlaveError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_NAME_EXISTS")
class CgNameExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_DOES_NOT_EXIST")
class CgDoesNotExistError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MAX_CONS_GROUPS_REACHED")
class CgLimitReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_HAS_MIRROR")
class CgHasMirrorError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_NOT_EMPTY")
class CgNotEmptyError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_EMPTY")
class CgEmptyError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_MISMATCH")
class CgMismatchError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_MIRROR_PARAMS_MISMATCH")
class CgMirrorParamsMismatchError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("CONS_GROUP_MIRRORING_NOT_SUPPORTED_IN_TARGET")
class CgMirroringNotSupportedOnTargetError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SNAPSHOT_GROUP_BAD_NAME")
class SnapshotGroupDoesNotExistError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SNAPSHOT_IS_MAPPED")
class SnapshotIsMappedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SNAPSHOT_HAS_ACTIVE_SYNC_JOB")
class SnapshotIsSynchronisingError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("MAX_VOLUMES_REACHED")
class MaxVolumesReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("DOMAIN_MAX_VOLUMES_REACHED")
class DomainMaxVolumesReachedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SNAPSHOT_GROUP_BAD_PREFIX")
class SnapshotGroupIsReservedError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("SNAPSHOT_GROUP_NAME_EXISTS")
class SnapshotGroupAlreadyExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register(
    "OVERWRITE_SNAPSHOT_GROUP_DOES_NOT_BELONG_TO_GIVEN_GROUP")
class SnapshotGroupMismatchError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_NOT_CONNECTED_TO_ANY_PERF_CLASS")
class VolumeNotConnectedToPerfClassError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("PERF_CLASS_BAD_NAME")
class PerfClassNotExistsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("VOLUME_ALREADY_IN_PERF_CLASS")
class VolumeAlreadyInPerfClassError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("PERF_CLASS_ASSOCIATED_WITH_HOSTS")
class PerfClassAssociatedWithHostError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("PERF_CLASS_ASSOCIATED_WITH_POOLS_OR_DOMAINS")
class PerfClassAssociatedWithPoolsOrDomainsError(CommandFailedRuntimeError):
    pass


@CommandExecutionError.register("PERF_CLASS_ASSOCIATED_WITH_VOLUMES")
class PerfClassAssociatedWithVolumesError(CommandFailedRuntimeError):
    pass
##############################################################################
# CredentialsError
# we explicitly want to differentiate CredentialsError from
# CommandExecutionError, so although it is raised by _build_response,
# it derives from XCLIError directly
##############################################################################
@CommandExecutionError.register("LOGIN_FAILURE_USER_FAILED_TO_LOGIN",
                                "USER_NAME_DOES_NOT_EXIST",
                                "DEFAULT_USER_IS_NOT_DEFINED",
                                "INCORRECT_PASSWORD",
                                "LOGIN_FAILURE_USER_NOT_FOUND_IN_LDAP_SERVERS",
                                # Bug fix: the original used a backslash
                                # continuation *inside* the string literal,
                                # embedding a run of spaces in the code so it
                                # could never match the real error code.
                                # Implicit string concatenation keeps the
                                # line length while producing the exact code.
                                "LOGIN_FAILURE_USER_NOT_AUTHENTICATED_BY_"
                                "LDAP_SERVER")
class CredentialsError(XCLIError):
    """Raised when an XCLI command fails due to invalid credentials.

    Inherits directly from XCLIError, not CommandExecutionError,
    although it is raised during the execution of a command,
    to explicitly differentiate the two.
    """

    def __init__(self, code, status, xml, return_value=None):
        XCLIError.__init__(self, code, status, xml)
        self.code = code      # error code string ("code/@value")
        self.status = status  # human-readable status string
        self.xml = xml        # raw response: either a str or an XML element
        if return_value is not None:
            self.return_value = return_value
        else:
            self.return_value = XCLIResponse(xml)

    def __str__(self):
        if isinstance(self.xml, str):
            body = self.xml
        else:
            # Bug fix: the original formatted a two-placeholder string
            # with a single-element tuple -- "%s\n\n%s" %
            # (etree.tostring(self.xml)) -- raising TypeError ("not enough
            # arguments for format string") whenever the error was printed
            # with a non-string xml payload.
            body = etree.tostring(self.xml)
        return "%s\n\n%s" % (self.status, body)
##############################################################################
# AServer ("delivery") errors
##############################################################################
class CommandFailedAServerError(CommandExecutionError):
    """AServer ("delivery") related errors."""

    # AServer statuses that indicate a problem with the remote-target link.
    REMOTE_TARGET_ERRORS = frozenset(["TARGET_IS_NOT_CONNECTED",
                                      "TARGET_DOES_NOT_EXIST",
                                      "SEND_TO_TARGET_FAILED",
                                      "GETTING_RESPONSE_FROM_TARGET_FAILED"])

    @classmethod
    def instantiate(cls, aserver, rootelem):
        """Build the error subclass matching the given aserver status string."""
        error_cls = (CommandFailedRemoteTargetError
                     if aserver in cls.REMOTE_TARGET_ERRORS
                     else CommandFailedAServerError)
        return error_cls(aserver, aserver, rootelem)
class CommandFailedRemoteTargetError(CommandFailedAServerError):
    # AServer failure attributed to the remote-target link (see
    # CommandFailedAServerError.REMOTE_TARGET_ERRORS).
    pass


##############################################################################
# Misc
##############################################################################

class UnsupportedNextraVersion(XCLIError):
    # Presumably raised when the connected system's version is not
    # supported by this client -- confirm at raise sites.
    pass


class CorruptResponse(XCLIError):
    # Presumably raised when a response cannot be parsed -- confirm at
    # raise sites.
    pass


##############################################################################
# Transport
##############################################################################

class TransportError(XCLIError):
    """Base class of all transport-related errors"""
    pass


class ConnectionError(TransportError):
    """Represents errors that occur during connection"""
    # NOTE(review): shadows the builtin ConnectionError (Python 3.3+);
    # callers importing this name must qualify it to avoid catching the
    # wrong exception.
    pass
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict as odict
from copy import deepcopy
from functools import partial
import sys
import bindings as bi
from custom import get_customizations_for, reformat_block
# Python-2/3 compatibility shims: str_type matches any text type.
PY3 = sys.version_info[0] == 3
str_type = str if PY3 else (str, unicode)  # noqa: F821 -- `unicode` exists only on Python 2
# Pre-bind the language argument so every lookup below targets the R bindings.
get_customizations_for = partial(get_customizations_for, 'R')


def get_customizations_or_defaults_for(algo, prop, default=None):
    """Look up *prop* for *algo*, falling back to the 'defaults' section, then to *default*."""
    return get_customizations_for(algo, prop, get_customizations_for('defaults', prop, default))
# ----------------------------------------------------------------------------------------------------------------------
# Generate per-model classes
# ----------------------------------------------------------------------------------------------------------------------
def gen_module(schema, algo, module):
    """Yield the lines of the generated R source file for one algorithm.

    :param schema: model-builder schema dict; its 'parameters' list drives the
        generated signature and parameter handling.
    :param algo: builder name used to look up customizations (e.g. "gbm").
    :param module: suffix of the generated R function name ("h2o.<module>").
    """
    # print(str(schema))
    rest_api_version = get_customizations_for(algo, 'rest_api_version', 3)
    doc_preamble = get_customizations_for(algo, 'doc.preamble')
    doc_returns = get_customizations_for(algo, 'doc.returns')
    doc_seealso = get_customizations_for(algo, 'doc.seealso')
    doc_references = get_customizations_for(algo, 'doc.references')
    doc_examples = get_customizations_for(algo, 'doc.examples')
    required_params = get_customizations_or_defaults_for(algo, 'extensions.required_params', [])
    extra_params = get_customizations_or_defaults_for(algo, 'extensions.extra_params', [])
    model_name = algo_to_modelname(algo)
    update_param_defaults = get_customizations_for('defaults', 'update_param')
    update_param = get_customizations_for(algo, 'update_param')

    yield "# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py"
    yield "# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details) \n#'"
    yield "# -------------------------- %s -------------------------- #" % model_name
    # start documentation
    if doc_preamble:
        yield "#'"
        yield reformat_block(doc_preamble, prefix="#' ")
        yield "#'"
    # start doc for signature
    # normalize the param specs to odicts: entries may be plain names or
    # (name, default) tuples
    required_params = odict([(p[0] if isinstance(p, tuple) else p, p[1] if isinstance(p, tuple) else None)
                             for p in required_params])
    schema_params = odict([(p['name'], p)
                           for p in schema['parameters']])
    extra_params = odict([(p[0] if isinstance(p, tuple) else p, p[1] if isinstance(p, tuple) else None)
                          for p in extra_params])
    all_params = list(required_params.keys()) + list(schema_params.keys()) + list(extra_params.keys())

    def get_schema_params(pname):
        # Apply the algo-specific, then default, `update_param` hook; the first
        # hook that returns a non-None value replaces the schema param.
        param = deepcopy(schema_params[pname])
        updates = None
        for update_fn in [update_param, update_param_defaults]:
            if callable(update_fn):
                updates = update_fn(pname, param)
                if updates is not None:
                    param = updates
                    break
        return param if isinstance(param, (list, tuple)) else [param]  # always return array to support deprecated aliases

    tag = "@param"
    pdocs = odict()
    for pname in all_params:
        if pname in pdocs:  # avoid duplicates (esp. if already included in required_params)
            continue
        if pname in schema_params:
            for param in get_schema_params(pname):  # retrieve potential aliases
                pname = param.get('name')
                if pname:
                    pdocs[pname] = get_customizations_or_defaults_for(algo, 'doc.params.'+pname, get_help(param, indent=len(tag)+4))
        else:
            pdocs[pname] = get_customizations_or_defaults_for(algo, 'doc.params.'+pname)

    for pname, pdoc in pdocs.items():
        if pdoc:
            yield reformat_block("%s %s %s" % (tag, pname, pdoc.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")

    if doc_returns:
        tag = "@return"
        yield reformat_block("%s %s" % (tag, doc_returns.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
    if doc_seealso:
        tag = "@seealso"
        yield reformat_block("%s %s" % (tag, doc_seealso.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
    if doc_references:
        tag = "@references"
        yield reformat_block("%s %s" % (tag, doc_references.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
    if doc_examples:
        yield "#' @examples"
        yield "#' \dontrun{"
        yield reformat_block(doc_examples, prefix="#' ")
        yield "#' }"
    yield "#' @export"

    # start function signature
    sig_pnames = []
    sig_params = []
    for k, v in required_params.items():
        sig_pnames.append(k)
        sig_params.append(k if v is None else '%s = %s' % (k, v))
    for pname in schema_params:
        params = get_schema_params(pname)
        for param in params:
            pname = param.get('name')  # override local var as param can be an alias of pname
            if pname in required_params or not pname:  # skip schema params already added by required_params, and those explicitly removed
                continue
            sig_pnames.append(pname)
            sig_params.append("%s = %s" % (pname, get_sig_default_value(param)))
    for k, v in extra_params.items():
        sig_pnames.append(k)
        sig_params.append("%s = %s" % (k, v))

    param_indent = len("h2o.%s <- function(" % module)
    yield reformat_block("h2o.%s <- function(%s)" % (module, ',\n'.join(sig_params)), indent=param_indent, indent_first=False)

    # start function body
    yield "{"
    validate_frames = get_customizations_or_defaults_for(algo, 'extensions.validate_frames')
    if validate_frames:
        # custom frame validation snippet overrides the default per-frame checks
        yield " # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object"
        yield reformat_block(validate_frames, indent=2)
    else:
        frames = get_customizations_or_defaults_for(algo, 'extensions.frame_params', [])
        if frames:
            yield " # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object"
        for frame in frames:
            if frame in sig_pnames:
                required_val = str(frame in required_params).upper()
                yield " {frame} <- .validate.H2OFrame({frame}, required={required})".format(frame=frame, required=required_val)

    validate_required_params = get_customizations_or_defaults_for(algo, 'extensions.validate_required_params')
    if validate_required_params:
        yield ""
        yield " # Validate other required args"
        yield reformat_block(validate_required_params, indent=2)

    validate_params = get_customizations_or_defaults_for(algo, 'extensions.validate_params')
    if validate_params:
        yield ""
        yield " # Validate other args"
        yield reformat_block(validate_params, indent=2)

    yield ""
    yield " # Build parameter list to send to model builder"
    yield " parms <- list()"
    set_required_params = get_customizations_or_defaults_for(algo, 'extensions.set_required_params')
    if set_required_params:
        yield reformat_block(set_required_params, indent=2)

    skip_default_set_params = get_customizations_or_defaults_for(algo, 'extensions.skip_default_set_params_for', [])
    yield ""
    for pname in schema_params:
        if pname in skip_default_set_params:
            continue
        # leave the special handling of 'loss' param here for now as it is used by several algos
        if pname == "loss":
            yield " if(!missing(loss)) {"
            yield " if(loss == \"MeanSquare\") {"
            yield " warning(\"Loss name 'MeanSquare' is deprecated; please use 'Quadratic' instead.\")"
            yield " parms$loss <- \"Quadratic\""
            yield " } else "
            yield " parms$loss <- loss"
            yield " }"
        else:
            yield " if (!missing(%s))" % pname
            yield " parms$%s <- %s" % (pname, pname)

    set_params = get_customizations_or_defaults_for(algo, 'extensions.set_params')
    if set_params:
        yield ""
        yield reformat_block(set_params, indent=2)

    yield ""
    yield " # Error check and build model"
    verbose = 'verbose' if 'verbose' in extra_params else 'FALSE'
    yield " model <- .h2o.modelJob('%s', parms, h2oRestApiVersion=%d, verbose=%s)" % (algo, rest_api_version, verbose)

    with_model = get_customizations_for(algo, 'extensions.with_model')
    if with_model:
        yield ""
        yield reformat_block(with_model, indent=2)
    yield " return(model)"
    yield "}"

    # start additional functions
    module_extensions = get_customizations_for(algo, 'extensions.module')
    if module_extensions:
        yield ""
        yield module_extensions
def algo_to_modelname(algo):
    """Map a builder name to the human-readable model title used in the docs.

    Unknown builder names are passed through unchanged.
    """
    titles = {
        "aggregator": "H2O Aggregator Model",
        "deeplearning": "Deep Learning - Neural Network",
        "xgboost": "XGBoost",
        "drf": "Random Forest Model in H2O",
        "gbm": "Gradient Boosting Machine",
        "glm": "H2O Generalized Linear Models",
        "glrm": "Generalized Low Rank Model",
        "kmeans": "KMeans Model in H2O",
        "naivebayes": "Naive Bayes Model in H2O",
        "pca": "Principal Components Analysis",
        "svd": "Singular Value Decomposition",
        "stackedensemble": "H2O Stacked Ensemble",
        "psvm": "Support Vector Machine",
        "targetencoder": "Target Encoder",
    }
    return titles.get(algo, algo)
def get_help(param, indent=0):
    """Assemble the R doc help string for one schema parameter.

    Returns None when the schema has no help text; otherwise the help text,
    extended with the allowed values and the default, wrapped to fit within
    120 columns minus *indent*.
    """
    phelp = param.get('help')
    if not phelp:
        return
    if param.get('type') == 'boolean':
        phelp = "\code{Logical}. " + phelp
    allowed = param.get('values')
    if allowed:
        phelp += " Must be one of: %s." % ", ".join('"%s"' % v for v in allowed)
    if param.get('default_value') is not None:
        phelp += " Defaults to %s." % get_doc_default_value(param)
    return bi.wrap(phelp, width=120-indent)
def get_doc_default_value(param):
    """Render a parameter's default value as it should appear in the R docs."""
    # For documentation purposes an enum default is shown as a plain string.
    ptype = param['type']
    if ptype.startswith('enum'):
        ptype = 'str'
    return as_R_repr(ptype, param.get('default_value'))
def get_sig_default_value(param):
    """Render the default used in the generated R signature.

    For enum types the whole list of allowed values is used, providing a
    parameter hint to the R user; otherwise the schema default is used.
    """
    ptype = param['type']
    if ptype.startswith('enum'):
        return as_R_repr(ptype, param.get('values'))
    return as_R_repr(ptype, param.get('default_value'))
def as_R_repr(ptype, value):
    """Convert a schema value of type *ptype* into its R source representation."""
    if value is None:
        # R-side "no value" depends on the type: numerics get 0, lists an
        # empty list, everything else NULL.
        if ptype in ('short', 'int', 'long', 'double'):
            return 0
        if ptype == 'list':
            return "list()"
        return 'NULL'
    if ptype == 'boolean':
        return str(value).upper()
    if ptype == 'double':
        return '%.10g' % value
    if ptype == 'list':
        return "list(%s)" % ', '.join('"%s"' % v for v in value)
    if ptype.startswith('enum'):
        return "c(%s)" % ', '.join('"%s"' % v for v in value)
    if ptype.endswith('[]'):
        return "c(%s)" % ', '.join('%s' % v for v in value)
    return value
# ----------------------------------------------------------------------------------------------------------------------
# MAIN:
# ----------------------------------------------------------------------------------------------------------------------
def main():
    """Generate one R source file per model builder."""
    bi.init("R", "../../../h2o-r/h2o-package/R", clear_dir=False)
    # Builders whose generated R function name differs from the builder name.
    module_overrides = {
        "drf": "randomForest",
        "isolationforest": "isolationForest",
        "naivebayes": "naiveBayes",
        "stackedensemble": "stackedEnsemble",
        "pca": "prcomp",
    }
    for name, mb in bi.model_builders().items():
        module = module_overrides.get(name, name)
        # drf is the only builder whose file name also differs
        file_name = "randomforest" if name == "drf" else name
        bi.vprint("Generating model: " + name)
        bi.write_to_file("%s.R" % file_name, gen_module(mb, name, module))
if __name__ == "__main__":
    # Script entry point: regenerate all R binding files.
    main()
| nilq/baby-python | python |
"""
1. Clarification
2. Possible solutions
- Dynamic programming
- Divide and Conquer
3. Coding
4. Tests
"""
# T=O(n), S=O(1)
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Maximum subarray sum via Kadane's algorithm: O(n) time, O(1) space."""
        if not nums:
            return 0
        best = -math.inf
        running = 0
        for value in nums:
            running += value
            if running > best:
                best = running
            # a negative running sum can never help a later subarray
            if running < 0:
                running = 0
        return best
# T=O(n), S=O(lgn)
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Maximum subarray sum via divide and conquer: O(n) time, O(lg n) stack."""
        def solve(lo, hi):
            # Returns (best prefix sum, best subarray sum, best suffix sum,
            # total sum) for nums[lo..hi], all inclusive.
            if lo == hi:
                v = nums[lo]
                return (v, v, v, v)
            mid = (lo + hi) // 2
            lpre, lbest, lsuf, ltot = solve(lo, mid)
            rpre, rbest, rsuf, rtot = solve(mid + 1, hi)
            pre = max(lpre, ltot + rpre)
            suf = max(rsuf, rtot + lsuf)
            # best is entirely left, entirely right, or straddles the middle
            best = max(lbest, rbest, lsuf + rpre)
            return (pre, best, suf, ltot + rtot)

        if not nums:
            return 0
        return solve(0, len(nums) - 1)[1]
| nilq/baby-python | python |
import logging
from easyjoblite import state, constants
from easyjoblite.utils import kill_process
logger = logging.getLogger(__name__)
class WorkerManager(object):
    """Static helpers for stopping easyjoblite worker processes."""

    @staticmethod
    def stop_all_workers(worker_type):
        """
        stops all the workers of the given type

        :param worker_type: one of WORK_QUEUE, RETRY_QUEUE, DEAD_LETTER_QUEUE,
                            or STOP_TYPE_ALL to stop every queue's workers
        :raises KeyError: if worker_type is none of the above
        :return:
        """
        service_state = state.ServiceState()
        worker_type_list = [constants.WORK_QUEUE, constants.RETRY_QUEUE, constants.DEAD_LETTER_QUEUE]
        if worker_type in worker_type_list:
            WorkerManager.kill_workers(service_state, worker_type)
            logger.info("Done stopping all the workers of worker_type {}".format(worker_type))
        elif worker_type == constants.STOP_TYPE_ALL:
            for local_type in worker_type_list:
                WorkerManager.kill_workers(service_state, local_type)
            logger.info("Done stopping all the workers ")
        else:
            # include the offending value in the exception for easier debugging
            raise KeyError(worker_type)
        # drop the pids of the killed processes from the persisted state
        service_state.refresh_all_workers_pid()

    @staticmethod
    def kill_workers(service_state, type):
        """
        function to kill all the workers of the given type

        :param service_state: current state of the service
        :param type: the type of the worker to kill
        :return:
        """
        logger.info("Started killing : " + type + " with list " + str(service_state.get_pid_list(type)))
        # copy the pid list: the underlying state may change while killing
        pid_list = list(service_state.get_pid_list(type))
        for pid in pid_list:
            kill_process(pid)
            # use the module logger (the original called the root logger here)
            logger.info("Done killing : " + str(pid))
| nilq/baby-python | python |
#!/usr/bin/env python2
# Copyright (C) 2001 Jeff Epler <jepler@unpythonic.dhs.org>
# Copyright (C) 2006 Csaba Henk <csaba.henk@creo.hu>
# Copyright (C) 2011 Marek Kubica <marek@xivilization.net>
#
# This program can be distributed under the terms of the GNU LGPLv3.
import os, sys
from errno import *
from stat import *
import fcntl
import fuse
from fuse import Fuse
import os.path
import errno
from logbook import FileHandler, debug, DEBUG
# Send all logbook output at DEBUG level and above to a log file.
log_handler = FileHandler('/tmp/libraryfuse.log', level=DEBUG)
log_handler.push_application()
debug('Starting')

# Tell the fuse bindings which python API generation this code targets,
# and assert the binding features this filesystem relies on.
fuse.fuse_python_api = (0, 2)
fuse.feature_assert('stateful_files', 'has_init')

# Directories whose contents are merged into a single read-only view.
directories_to_merge = ['/var', '/usr']
class LibraryFuse(Fuse):
    """FUSE filesystem presenting a read-only merged view of several directories.

    Lookups try each directory in ``directories_to_merge`` in order and use the
    first entry that exists; all mutating operations report -ENOSYS.  The
    original mutating methods carried unreachable ``os.*`` calls after their
    ``return`` statements (left over from the pass-through example this was
    derived from); that dead code has been removed — behaviour is unchanged.
    """

    def __init__(self, *args, **kw):
        Fuse.__init__(self, *args, **kw)
        self.directories_to_merge = directories_to_merge

    def getattr(self, path):
        """Return the lstat of the first underlying path that exists."""
        debug('getattr with %s' % path)
        for library_part in self.directories_to_merge:
            real_path = library_part + path
            debug('trying %s' % real_path)
            if os.path.exists(real_path):
                return os.lstat(real_path)
        # NOTE(review): implicitly returns None when the path exists in no
        # underlying directory; fuse-python normally expects -errno.ENOENT
        # here — confirm before changing behaviour.

    def readlink(self, path):
        """Resolve a symlink against the first underlying path that exists."""
        debug('readlink called with {}'.format(path))
        for library_part in self.directories_to_merge:
            real_path = library_part + path
            if os.path.exists(real_path):
                return os.readlink(real_path)

    def readdir(self, path, offset):
        """Yield the union of the directory entries of all underlying paths."""
        debug('readdir called with {0} and offset {1}'.format(path, offset))
        elements = set()
        # gather entries from every merged directory; the set removes duplicates
        for library_part in self.directories_to_merge:
            real_path = library_part + path
            if not os.path.exists(real_path):
                continue
            for e in os.listdir(real_path):
                elements.add(e)
        # return elements
        for element in elements:
            yield fuse.Direntry(element)

    # --- Mutating operations: the merged view is read-only, each returns
    # -ENOSYS ("function not implemented").

    def unlink(self, path):
        debug('unlink called')
        return -ENOSYS

    def rmdir(self, path):
        debug('rmdir')
        return -ENOSYS

    def symlink(self, path, path1):
        debug('symlink')
        return -ENOSYS

    def rename(self, path, path1):
        debug('rename')
        return -ENOSYS

    def link(self, path, path1):
        debug('link')
        return -ENOSYS

    def chmod(self, path, mode):
        debug('chmod')
        return -ENOSYS

    def chown(self, path, user, group):
        debug('chown')
        return -ENOSYS

    def truncate(self, path, len):
        debug('truncate')
        return -ENOSYS

    def mknod(self, path, mode, dev):
        debug('mknod')
        return -ENOSYS

    def mkdir(self, path, mode):
        debug('mkdir')
        return -ENOSYS

    def utime(self, path, times):
        debug('utime')
        return -ENOSYS

    # The following utimens method would do the same as the above utime method.
    # We can't make it better though as the Python stdlib doesn't know of
    # subsecond preciseness in acces/modify times.
    #
    # def utimens(self, path, ts_acc, ts_mod):
    #     os.utime("." + path, (ts_acc.tv_sec, ts_mod.tv_sec))

    def access(self, path, mode):
        """Deny access if any existing underlying path fails the access check."""
        debug('access {0} in mode {1}'.format(path, mode))
        for library_part in self.directories_to_merge:
            real_path = library_part + path
            if os.path.exists(real_path):
                if not os.access(real_path, mode):
                    return -errno.EACCES

    def statfs(self):
        """
        Should return an object with statvfs attributes (f_bsize, f_frsize...).
        Eg., the return value of os.statvfs() is such a thing (since py 2.2).
        If you are not reusing an existing statvfs object, start with
        fuse.StatVFS(), and define the attributes.

        To provide usable information (ie., you want sensible df(1)
        output, you are suggested to specify the following attributes:

            - f_bsize - preferred size of file blocks, in bytes
            - f_frsize - fundamental size of file blcoks, in bytes
                [if you have no idea, use the same as blocksize]
            - f_blocks - total number of blocks in the filesystem
            - f_bfree - number of free blocks
            - f_files - total number of file inodes
            - f_ffree - nunber of free file inodes
        """
        debug('statvfs')
        return os.statvfs(".")

    def main(self, *a, **kw):
        # delegate to the fuse event loop
        return Fuse.main(self, *a, **kw)
def main():
    """Build the filesystem object, parse CLI options and enter the FUSE loop."""
    fs = LibraryFuse()
    fs.parse(values=fs, errex=1)
    fs.main()
if __name__ == '__main__':
    # Script entry point: mount the merged filesystem.
    main()
| nilq/baby-python | python |
import igraph
import numpy as np
import pandas as pd
from tqdm import tqdm
from feature_engineering.tools import lit_eval_nan_proof
# this script adds the feature shortest_path to the files training_features and testing_features
# this script takes approximately 1000 minutes to execute
# progress bar for pandas
tqdm.pandas(tqdm())

# path to the data directory
path_to_data = "data/"

# loading data
converter_dict = {'authors': lit_eval_nan_proof, 'journal': lit_eval_nan_proof,
                  'title': lit_eval_nan_proof, 'abstract': lit_eval_nan_proof}
nodes = pd.read_csv(path_to_data + "nodes_preprocessed.csv", converters=converter_dict)
nodes.set_index("id", inplace=True)
training = pd.read_csv(path_to_data + "training_features.txt")
training.set_index("my_index", inplace=True)
testing = pd.read_csv(path_to_data + "testing_features.txt")
testing.set_index("my_index", inplace=True)

# placeholder for the graph feature
shortest_path = []

# IDs and target for the training set
id1 = training['id1'].values
id2 = training['id2'].values
target = training["target"].values

# creating graph of citations
# create empty directed graph
g = igraph.Graph(directed=True)
# some nodes may not be connected to any other node
# hence the need to create the nodes of the graph from node_info.csv,
# not just from the edge list
nodes = nodes.index.values
str_vec = np.vectorize(str)
nodes = str_vec(nodes)
# add vertices
g.add_vertices(nodes)
# create and add one edge per positive training pair
edges = [(str(id1[i]), str(id2[i])) for i in range(len(id1)) if target[i] == 1]
g.add_edges(edges)

# training features: temporarily remove the known edge so that positive pairs
# do not trivially get a path length of 1, then restore it
for i in tqdm(range(len(id1))):
    if target[i] == 1:
        g.delete_edges([(str(id1[i]), str(id2[i]))])
    shortest_path.append(g.shortest_paths_dijkstra(source=str(id1[i]), target=str(id2[i]), mode="OUT")[0][0])
    if target[i] == 1:
        g.add_edge(str(id1[i]), str(id2[i]))

# adding feature to dataframe
training["shortest_path"] = shortest_path

# repeat process for the test set; targets are unknown here, so the graph must
# NOT be mutated.  (The original loop re-added edges based on the *training*
# target array indexed with test-set indices — a copy-paste bug that corrupted
# the graph and could raise IndexError when the test set is longer.)
shortest_path_test = []
id1 = testing['id1'].values
id2 = testing['id2'].values
for i in tqdm(range(len(id1))):
    shortest_path_test.append(g.shortest_paths_dijkstra(source=str(id1[i]), target=str(id2[i]), mode="OUT")[0][0])
testing["shortest_path"] = shortest_path_test

# save data sets
training.to_csv(path_to_data + "training_features.txt")
testing.to_csv(path_to_data + "testing_features.txt")
| nilq/baby-python | python |
# encoding: utf-8
"""
lxml custom element classes for shape tree-related XML elements.
"""
from __future__ import absolute_import
from .autoshape import CT_Shape
from .connector import CT_Connector
from ...enum.shapes import MSO_CONNECTOR_TYPE
from .graphfrm import CT_GraphicalObjectFrame
from ..ns import qn
from .picture import CT_Picture
from .shared import BaseShapeElement
from ..xmlchemy import BaseOxmlElement, OneAndOnlyOne, ZeroOrOne
class CT_GroupShape(BaseShapeElement):
    """
    Used for the shape tree (``<p:spTree>``) element as well as the group
    shape (``<p:grpSp>``) element.
    """
    nvGrpSpPr = OneAndOnlyOne('p:nvGrpSpPr')
    grpSpPr = OneAndOnlyOne('p:grpSpPr')
    # child element tags treated as shapes by the iterators below
    _shape_tags = (
        qn('p:sp'), qn('p:grpSp'), qn('p:graphicFrame'), qn('p:cxnSp'),
        qn('p:pic'), qn('p:contentPart')
    )

    def add_autoshape(self, id_, name, prst, x, y, cx, cy):
        """
        Append a new ``<p:sp>`` shape to the group/shapetree having the
        properties specified in call. Returns the new ``<p:sp>`` element.
        """
        sp = CT_Shape.new_autoshape_sp(id_, name, prst, x, y, cx, cy)
        # inserting before p:extLst keeps the optional extension list last,
        # as the schema requires
        self.insert_element_before(sp, 'p:extLst')
        return sp

    def add_cxnSp(self, id_, name, type_member, x, y, cx, cy, flipH, flipV):
        """
        Append a new ``<p:cxnSp>`` shape to the group/shapetree having the
        properties specified in call. Returns the new ``<p:cxnSp>`` element.
        """
        # translate the MSO_CONNECTOR_TYPE member to its XML attribute value
        prst = MSO_CONNECTOR_TYPE.to_xml(type_member)
        cxnSp = CT_Connector.new_cxnSp(
            id_, name, prst, x, y, cx, cy, flipH, flipV
        )
        self.insert_element_before(cxnSp, 'p:extLst')
        return cxnSp

    def add_pic(self, id_, name, desc, rId, x, y, cx, cy):
        """
        Append a ``<p:pic>`` shape to the group/shapetree having properties
        as specified in call. Returns the new ``<p:pic>`` element.
        """
        pic = CT_Picture.new_pic(id_, name, desc, rId, x, y, cx, cy)
        self.insert_element_before(pic, 'p:extLst')
        return pic

    def add_placeholder(self, id_, name, ph_type, orient, sz, idx):
        """
        Append a newly-created placeholder ``<p:sp>`` shape having the
        specified placeholder properties. Returns the new ``<p:sp>`` element.
        """
        sp = CT_Shape.new_placeholder_sp(
            id_, name, ph_type, orient, sz, idx
        )
        self.insert_element_before(sp, 'p:extLst')
        return sp

    def add_table(self, id_, name, rows, cols, x, y, cx, cy):
        """
        Append a ``<p:graphicFrame>`` shape containing a table as specified
        in call. Returns the new ``<p:graphicFrame>`` element.
        """
        graphicFrame = CT_GraphicalObjectFrame.new_table_graphicFrame(
            id_, name, rows, cols, x, y, cx, cy
        )
        self.insert_element_before(graphicFrame, 'p:extLst')
        return graphicFrame

    def add_textbox(self, id_, name, x, y, cx, cy):
        """
        Append a newly-created textbox ``<p:sp>`` shape having the specified
        position and size. Returns the new ``<p:sp>`` element.
        """
        sp = CT_Shape.new_textbox_sp(id_, name, x, y, cx, cy)
        self.insert_element_before(sp, 'p:extLst')
        return sp

    def get_or_add_xfrm(self):
        """
        Return the ``<a:xfrm>`` grandchild element, newly-added if not
        present.
        """
        return self.grpSpPr.get_or_add_xfrm()

    def iter_ph_elms(self):
        """
        Generate each placeholder shape child element in document order.
        """
        for e in self.iter_shape_elms():
            if e.has_ph_elm:
                yield e

    def iter_shape_elms(self):
        """
        Generate each child of this ``<p:spTree>`` element that corresponds
        to a shape, in the sequence they appear in the XML.
        """
        for elm in self.iterchildren():
            if elm.tag in self._shape_tags:
                yield elm

    @property
    def xfrm(self):
        """
        The ``<a:xfrm>`` grandchild element or |None| if not found
        """
        return self.grpSpPr.xfrm
class CT_GroupShapeNonVisual(BaseShapeElement):
    """Custom element class for the ``<p:nvGrpSpPr>`` non-visual properties element."""
    # the non-visual drawing properties child is mandatory
    cNvPr = OneAndOnlyOne('p:cNvPr')
class CT_GroupShapeProperties(BaseOxmlElement):
    """Custom element class for the ``<p:grpSpPr>`` group shape properties element."""
    # elements that must follow a:xfrm in schema order
    _xfrm_successors = (
        'a:noFill', 'a:solidFill', 'a:gradFill', 'a:blipFill', 'a:pattFill',
        'a:grpFill', 'a:effectLst', 'a:effectDag', 'a:scene3d', 'a:extLst'
    )
    xfrm = ZeroOrOne('a:xfrm', successors=_xfrm_successors)
| nilq/baby-python | python |
import sys
import sh
def app(name, *args, _out=sys.stdout, _err=sys.stderr, _tee=True, **kwargs):
    """Resolve *name* into a baked ``sh`` command.

    When the executable cannot be found, return a stub command that prints a
    red "not found" message and exits with status 1 when invoked.
    """
    try:
        command = sh.Command(name)
    except sh.CommandNotFound:
        fallback_code = (
            f"import sys; import click; click.secho('Command `{name}` "
            f"not found', fg='red'); sys.exit(1)"
        )
        return sh.Command(sys.executable).bake("-c", fallback_code)
    return command.bake(*args, _out=_out, _err=_err, _tee=_tee, **kwargs)
# Shell commands
ls = app("ls")
rm = app("rm", "-rf")
cp = app("cp", "-rf")
find = app("find", _out=None)  # _out=None: do not stream output to stdout
mount = app("mount")
umount = app("umount", "-f")

# Python commands
python = app(sys.executable)
pip = app("pip")
# pytest exit codes 0-5 all mean "ran to completion"; don't raise on them
pytest = app("py.test", "-s", _tee=False, _ok_code=[0, 1, 2, 3, 4, 5])
black = app("black")
flake8 = app("flake8", _ok_code=[0, 1])  # exit 1 just means lint findings
pydocstyle = app("pydocstyle", _ok_code=[0, 1])

# Docker
docker = app("docker")
| nilq/baby-python | python |
"""
This options file demonstrates how to run a stripping line
from a specific stripping version on a local MC DST file
It is based on the minimal DaVinci DecayTreeTuple example
"""
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStreams
from StrippingArchive import strippingArchive
from Configurables import (
EventNodeKiller,
ProcStatusCheck,
DaVinci,
DecayTreeTuple
)
from GaudiConf import IOHelper
from DecayTreeTuple.Configuration import *
# Node killer: remove the previous Stripping so it can be re-run on this DST
event_node_killer = EventNodeKiller('StripKiller')
event_node_killer.Nodes = ['/Event/AllStreams', '/Event/Strip']

# Build a new stream called 'CustomStream' that only
# contains the desired line
strip = 'stripping28r1'
streams = buildStreams(stripping=strippingConfiguration(strip),
                       archive=strippingArchive(strip))

line = 'D2hhPromptDst2D2KKLine'
custom_stream = StrippingStream('CustomStream')
custom_line = 'Stripping'+line

# Copy only the requested line out of the full set of streams
for stream in streams:
    for sline in stream.lines:
        if sline.name() == custom_line:
            custom_stream.appendLines([sline])

# Create the actual Stripping configurable
filterBadEvents = ProcStatusCheck()
sc = StrippingConf(Streams=[custom_stream],
                   MaxCandidates=2000,
                   AcceptBadEvents=False,
                   BadEventSelection=filterBadEvents)

# Create an ntuple to capture D*+ decays from the StrippingLine line
dtt = DecayTreeTuple('TupleDstToD0pi_D0ToKK')
# The output is placed directly into Phys, so we only need to
# define the stripping line here
dtt.Inputs = ['/Event/Phys/{0}/Particles'.format(line)]
dtt.Decay = '[D*(2010)+ -> (D0 -> K- K+) pi+]CC'

# Configure DaVinci
# Important: The selection sequence needs to be inserted into
# the Gaudi sequence for the stripping to run
DaVinci().appendToMainSequence([event_node_killer, sc.sequence()])
DaVinci().UserAlgorithms += [dtt]
DaVinci().InputType = 'DST'
DaVinci().TupleFile = 'DVntuple.root'
DaVinci().PrintFreq = 1000
DaVinci().DataType = '2016'
DaVinci().Simulation = True
# Only ask for luminosity information when not using simulated data
DaVinci().Lumi = not DaVinci().Simulation
DaVinci().EvtMax = 5000
# Database tags must match the conditions this sample was produced with
DaVinci().CondDBtag = 'sim-20161124-2-vc-md100'
DaVinci().DDDBtag = 'dddb-20150724'

# Use the local input data
IOHelper().inputFiles([
    './00062514_00000001_7.AllStreams.dst'
], clear=True)
| nilq/baby-python | python |
from .base import BaseField
class IntegerField(BaseField):
    """Marker subclass for integer fields; no behaviour beyond BaseField (yet)."""
    pass
| nilq/baby-python | python |
def ingredients(count):
    """Print the ingredient quantities needed to make `count` arepas.

    Quantities scale linearly: 0.1 cups of flour and of cheese, and 0.025
    cups of water, per arepa.
    """
    # '{:.2f}' prints two decimal places; the original '{:.2}' meant two
    # *significant* digits, which produced scientific notation (e.g. '1e+01')
    # for any quantity of 10 or more.
    print('{:.2f} cups arepa flour'.format(0.1*count))
    print('{:.2f} cups cheese'.format(0.1*count))
    print('{:.2f} cups water'.format(0.025*count))
| nilq/baby-python | python |
from __future__ import absolute_import, unicode_literals
import os
import celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_project.settings')

app = celery.Celery('test_project')  # noqa: pylint=invalid-name

# Using a string here means the workers don't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self: celery.Task):
    """Bound debug task: report the name of the task being executed."""
    return 'Request: %r' % (self.request.task,)
| nilq/baby-python | python |
'''
Created on Apr 15, 2016
@author: Drew
'''
class CogTV:
    """Placeholder for the Cog TV prop; every method is currently a stub."""

    def __init__(self):
        # no state yet — placeholder implementation
        pass

    def setScreen(self, scene):
        # stub: presumably meant to display `scene` on the TV — not implemented
        pass
| nilq/baby-python | python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.