{
"id": "mgm4442949.3",
"metadata": {
"mgm4442949.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2119637,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3192,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/100.preprocess.removed.fna.stats"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1281682,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 532114,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/150.dereplication.removed.fna.gz"
},
"150.dereplication.removed.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/150.dereplication.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 481,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/205.screen.h_sapiens_asm.info"
},
"205.screen.h_sapiens_asm.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 175,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/205.screen.h_sapiens_asm.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 4301,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1281441,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 2130,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 605695,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 855669,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 656388,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 485288,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 317,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 216264,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 49,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5236,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 1338,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 1265,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 11196,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/450.rna.sims.info"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 514935,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 121,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 129736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 49,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/550.cluster.aa90.mapping.stats"
},
"640.loadAWE.info": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/640.loadAWE.info"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 292340,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 121692,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 268284,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 121347,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 538828,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 194252,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 6743,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 81157,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 109788,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 94652,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 1270427,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 641,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 151,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 59,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 592,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 1773,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 47,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 4545,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 4378,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 3147,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 638,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22713,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 84,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 9168,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442949.3/file/999.done.species.stats"
}
},
"id": "mgm4442949.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4442949.3"
}
},
"raw": {
"mgm4442949.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4442949.3"
}
}
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Gavin
from odoo import models, fields, api, exceptions
class Checkout(models.Model):
_name = 'library.checkout'
_description = 'Checkout Request'
_inherit = ['mail.thread','mail.activity.mixin']
# track_visibility='onchange' means changes to the field are logged to the chatter messages when it is modified; however, track_visibility='onchange' does not seem to take effect here.
member_id = fields.Many2one(comodel_name='library.member', required=True, track_visibility='onchange')
user_id = fields.Many2one(comodel_name='res.users', string='Librarian', default=lambda s: s.env.uid,
track_visibility='onchange')
request_date = fields.Date(default=lambda s: fields.Date.today(), track_visibility='always')
line_ids = fields.One2many(comodel_name='library.checkout.line',inverse_name='checkout_id', string='Borrowed Books')
num_other_checkouts = fields.Integer(compute='_compute_num_other_checkouts')
def _compute_num_other_checkouts(self):
for checkout in self:
domain = [('member_id','=',checkout.member_id.id),('state','in',['open']),('id','!=',checkout.id)]
checkout.num_other_checkouts = self.search_count(domain)
@api.model
def _default_stage(self):
return self.env['library.checkout.stage'].search([], limit=1)
@api.model
def _group_expand_stage_id(self, stages, domain, order):
return stages.search([], order=order)
'''
The group_expand parameter overrides how the field is grouped. The default grouping
behaviour only shows stages that are actually in use, so stages without any checkout
documents are hidden. In our case we want something different: we want to see every
stage, even if it has no documents. The _group_expand_stage_id() helper returns the
list of group records the grouping operation should use; here it returns all existing
stages, whether or not they contain book checkout records.
Change in Odoo 10: the group_expand attribute was introduced in Odoo 10, but it is not
covered in the official documentation.
'''
stage_id = fields.Many2one(comodel_name='library.checkout.stage', default=_default_stage,
group_expand = '_group_expand_stage_id')
state = fields.Selection(related='stage_id.state')
checkout_date = fields.Date(readonly=True)
closed_date = fields.Date(readonly=True)
member_image = fields.Binary(related='member_id.partner_id.image')
num_books = fields.Integer(compute='_compute_num_books', store=True)
# Lets users organize their work items and flag what should be handled first
priority = fields.Selection(
[('0', 'Low'),
('1', 'Normal'),
('2', 'High')],
'Priority',
default='1')
# Flags whether the record should move to the next stage or stay put for some reason
kanban_state = fields.Selection(
[('normal', 'In Progress'),
('blocked', 'Blocked'),
('done', 'Ready for next stage')],
'Kanban State',
default='normal')
# Stores the color shown on the kanban card; it can be set from the color picker menu in the kanban view
color = fields.Integer(string='Color Index')
@api.depends('line_ids')
def _compute_num_books(self):
for checkout in self:
checkout.num_books = len(checkout.line_ids)
@api.onchange('member_id')
def onchange_member_id(self):
today = fields.Date.today()
if self.request_date and self.request_date != today:
self.request_date = today
return {
'warning':{
'title': 'Changed Request Date',
'message': 'Request date changed to today.'
}
}
'''
An onchange method can also return a domain that restricts the values allowed for a field:
{'domain': {'user_id': [('email', '!=', False)]}}
'''
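# A minimal sketch (illustrative only, not wired into this model) of an onchange
# returning such a domain restriction:
#
#     @api.onchange('member_id')
#     def onchange_member_id_set_domain(self):
#         return {'domain': {'user_id': [('email', '!=', False)]}}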
# Since Odoo 12, create() can also create records in batch, by passing a list of dictionaries instead of a single dictionary.
# This is supported by the create() method being decorated with @api.model_create_multi.
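# A minimal sketch (hypothetical, not used by this model) of a batch-aware override
# using that decorator:
#
#     @api.model_create_multi
#     def create(self, vals_list):
#         for vals in vals_list:
#             vals.setdefault('request_date', fields.Date.today())
#         return super().create(vals_list)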
@api.model
def create(self, vals):
# Code before create: should use the `vals` dict
if 'stage_id' in vals:
Stage = self.env['library.checkout.stage']
new_state = Stage.browse(vals['stage_id']).state
if new_state == 'open':
vals['checkout_date'] = fields.Date.today()
new_record = super().create(vals)
# Code after create: can use the `new_record` created
if new_record.state == 'done':
raise exceptions.UserError('Not allowed to create a checkout in the done state.')
return new_record
@api.multi
def write(self, vals):
for checkout in self:
# Code before write: can use `self`, with the old values
if 'stage_id' in vals:
new_state = self.env['library.checkout.stage'].browse(vals['stage_id']).state
if new_state == 'open' and checkout.state != 'open':
vals['checkout_date'] = fields.Date.today()
if new_state == 'done' and checkout.state != 'done':
vals['closed_date'] = fields.Date.today()
result = super().write(vals)
# Code after write: can use `self`, with the updated values
return result
@api.multi
def button_done(self):
done_stage = self.env['library.checkout.stage'].search([('state','=','done')], limit=1)
for checkout in self:
checkout.stage_id = done_stage
return True
class CheckoutLine(models.Model):
_name = 'library.checkout.line'
_description = 'Borrow Request Line'
checkout_id = fields.Many2one(comodel_name='library.checkout')
book_id = fields.Many2one(comodel_name='library.book') |
import sys
from Crypto.Cipher import AES
import struct
import argparse
def decrypt_file(key, in_file, out_file=None):
"""
AES file decryption script. Adaptation of script written by Eli Bendersky.
params:
- key: key used to decrypt the file
- in_file: encrypted file to decrypt
- out_file: optional; if not provided, in_file with a '.zip' suffix is used.
"""
if not out_file:
out_file = in_file + '.zip'
with open(in_file, 'rb') as infile:
orig_file_size = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
iv = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, iv)
with open(out_file, 'wb') as outfile:
outfile.write(decryptor.decrypt(infile.read()))
outfile.truncate(orig_file_size)
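# Companion sketch (not part of the original script) showing the file layout the
# decryptor above expects: an 8-byte little-endian original size, a 16-byte IV,
# then CBC ciphertext padded to a 16-byte boundary. The function name and padding
# choice are illustrative assumptions.
def encrypt_file(key, in_file, out_file):
    import os
    with open(in_file, 'rb') as infile:
        data = infile.read()
    iv = os.urandom(16)
    # Pad the plaintext to the AES block size; the stored original size lets
    # decrypt_file() truncate the padding away afterwards.
    pad_len = (16 - len(data) % 16) % 16
    encryptor = AES.new(key, AES.MODE_CBC, iv)
    with open(out_file, 'wb') as outfile:
        outfile.write(struct.pack('<Q', len(data)))
        outfile.write(iv)
        outfile.write(encryptor.encrypt(data + b' ' * pad_len))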
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='AES File Decryptor')
parser.add_argument('-k', '--key', type=str, help='Key to decrypt file', required=True)
parser.add_argument('-i', '--in_file', type=str, help='Encrypted file', required=True)
parser.add_argument('-o', '--out_file', type=str, help='Output file')
args = parser.parse_args()
decrypt_file(args.key.encode('utf-8'), args.in_file, args.out_file)  # AES requires a bytes key of length 16, 24, or 32
print('File decrypted successfully.')
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
counter = {}
offset = 0
for idx, n in enumerate(nums):
if counter.get(n,0)==2:
offset += 1
continue
counter[n] = counter.get(n,0) + 1
nums[idx-offset] = n
return len(nums)-offset
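# Example (illustrative): for nums = [1, 1, 1, 2, 2, 3] this returns 5 and rewrites
# the first five slots in place to [1, 1, 2, 2, 3], keeping at most two copies of
# each value.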
|
import sys
import os
import urllib.request, urllib.error
import csv
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUi
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
loadUi("mainwindow.ui", self)
self.main_window = self
self.current_version = 0 # Changes after the update check function is run
self.text_input = self.findChild(QLineEdit, "inputText")
self.convert_button = self.findChild(QPushButton, "convertButton")
self.output_box = self.findChild(QPlainTextEdit, "outputBox")
self.copy_button = self.findChild(QPushButton, "copyButton")
self.copy_label = self.findChild(QLabel, "copyLabel")
self.clear_button = self.findChild(QPushButton, "clearButton")
self.updateAvailableLabel = self.findChild(QLabel, "updateAvailableLabel")
self.v_nr_label = self.findChild(QLabel, "versionNrLabel")
self.update_button = self.findChild(QPushButton, "updateNowButton")
update_available = self.check_for_updates() # Returns True or False
self.v_nr_label.setText("V. " + str(self.current_version))
if update_available:
self.updateAvailableLabel.setText("UPDATE AVAILABLE")
self.updateAvailableLabel.setStyleSheet("color: green;")
self.update_button.pressed.connect(self.update_button_pressed)
else:
self.updateAvailableLabel.setText("")
self.update_button.hide()
self.convert_button.pressed.connect(self.convert_text)
self.copy_button.pressed.connect(self.copy_button_pressed)
self.clear_button.pressed.connect(self.clear_button_pressed)
self.copy_label.setText("") # sets the label as blank
self.number_dict = {0: ":zero:", 1: ":one:", 2: ":two:", 3: ":three:", 4: ":four:", 5: ":five:", 6: ":six:",
7: ":seven:", 8: ":eight:", 9: ":nine:", 10: ":ten:"}
self.alpha_list = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z"]
def convert_text(self):
print("first")
self.output_box.setPlainText("")
data_string = ""
print("dr1")
for char in self.text_input.text():
print("dr2")
try:
int(char) + 0
for key, value in self.number_dict.items():
if int(char) == key:
data_string += value + " "
except ValueError:
if char.lower() in self.alpha_list:
data_string += ":regional_indicator_" + char.lower() + ": "
elif char == " ":
data_string += " "
else:
msgbox = QMessageBox()
msgbox.setWindowTitle("Error")
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText("\nConverting the letter '" + char + "' did not work properly.. please either remove"
" or change that letter.\n")
msgbox.setDefaultButton(QMessageBox.Ok)
msgbox.exec_()
self.output_box.setPlainText(data_string)
self.copy_label.setText("")
def copy_button_pressed(self):
if len(self.output_box.document().toPlainText()) < 1:
msgbox = QMessageBox()
msgbox.setWindowTitle("Warning")
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText("\nNo text in output box\n")
msgbox.setDefaultButton(QMessageBox.Ok)
msgbox.exec_()
else:
try:
command = 'echo ' + self.output_box.document().toPlainText() + ' | clip'
os.system(command)
self.copy_label.setText("Copied to clipboard")
self.copy_label.setStyleSheet("color: green;")
except:
self.copy_label.setText("Copy failed")
self.copy_label.setStyleSheet("color: red;")
def clear_button_pressed(self):
self.output_box.setPlainText("")
self.copy_label.setText("")
self.text_input.setText("")
def update_button_pressed(self):
print("b4")
os.system("start updater.exe")
print("after")
sys.exit()
def check_for_updates(self):
try:
new_version = 0
# Temp folder created with the newest version number
local_filename, headers = urllib.request.urlretrieve('http://helgisteinarr.com/discord_letters/version'
'/newest_version')
with open(local_filename, "r") as version_file:
data = csv.reader(version_file, delimiter=",")
for i in data:
new_version = i[0]
version_file.close()
with open("version", "r") as version_file:
self.current_version = version_file.read()
version_file.close()
print("current v:" + self.current_version)
print("new v:" + new_version)
if new_version > self.current_version:
return True
else:
return False
except urllib.error.URLError:
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
|
# Lists
a = [66.25, 333, 333, 1, 1234.5]
print a.count(333), a.count(66.25), a.count('x')
a.insert(2, -1)
a.append(333)
print a
a.remove(333)
print a
a.reverse()
print a
a.sort()
print a
a.pop()
# Functional Programming Tools
def f(x): return x % 3 == 0 or x % 5 == 0
print filter(f, range(2, 25))
def cube(x): return x*x*x
print map(cube,range(1,11))
print filter(cube,range(1,11)) |
import pygame
from pygame.locals import *
from stack import *
import time
pygame.init()
#COLORS
BLACK=[0,0,0]
WHITE=[255,255,255]
GREY=[128,128,128]
GREEN=[0,255,0]
RED=[255,0,0]
BLUE=[0,0,255]
SILVER=[192,192,192]
def reset():
#stack
global s,emptystr,highscore,show_score,launched,Start,win,speedvar,change_speed,changespeedinterval,bar,lostlife
global total_score,Level_Score,block_list,all_sprites_list,player_sprite,sprited_block_rect,iron_block_list,hs,level,run
s=Stack()
emptystr=str()
highscore=int()
show_score=bool()
launched=bool()
Start=bool()
win=bool()
speedvar=int() # speed multiplier applied each time the change interval is reached
change_speed=bool() # whether the speed should change at this instant
changespeedinterval=int() # number of Level_Score points after which the speed increases
bar=0
lostlife=0
total_score=0 # total Level_Score
Level_Score=0 # Level_Score of each level
#Group
block_list=pygame.sprite.Group()
all_sprites_list=pygame.sprite.Group()
player_sprite=pygame.sprite.Group()
#List
sprited_block_rect=list()
iron_block_list=list()
hs=0
try:
with open("HighScore.txt",'r') as f:
hs=f.read()
except FileNotFoundError:
hs = 0
clock=pygame.time.Clock()
level=1
run=True
# self.life=3
class Block(pygame.sprite.Sprite):
def __init__(self,color,w,h):
super().__init__()
self.color=color
self.image=pygame.Surface([w,h])
self.image.fill(self.color)
self.rect=self.image.get_rect()
self.health= 200 if color == [128,128,128] else 100
def update(self):
self.rect.y+=10
class Bar(pygame.sprite.Sprite):
def __init__(self,color,w,h):
super().__init__()
self.length=w
self.height=h
self.color=color
self.image=pygame.Surface([self.length,self.height])
self.image.fill(self.color)
self.rect=self.image.get_rect()
self.rect.x= screen_width//2
self.rect.y= screen_height - (self.height-1) # (h is the bar's height)
class Ball(pygame.sprite.Sprite):
def __init__(self,color,radius,speed,speedvar):
global lostlife
super().__init__()
self.color=color
self.radius=radius
self.x_speed=self.y_speed=speed
self.speedvar=speedvar
self.life=3-lostlife
self.image=pygame.Surface([self.radius,self.radius],pygame.SRCALPHA) # SRCALPHA for transparency
self.image.fill(BLUE)
self.image.set_colorkey(WHITE)
self.rect=self.image.get_rect()
pygame.draw.ellipse(self.image,self.color,self.rect)
def update(self):
global Start
global changespeedinterval
global changespeed
global win
global launched
global bar
global lostlife
global fall_list
index=0
if self.rect.x+self.radius > screen_width or self.rect.x < 0 :
self.x_speed*=-1
#index=self.rect.collidelistall(all_block_rect)
hit = len(sprited_block_rect) > 0
# hit becomes True if the ball collides with any blocks
if hit :
self.rect.y+=10
self.y_speed*=-1
if self.rect.y< 0 or self.rect.colliderect(bar.rect) :
self.y_speed*=-1
if bar.rect.x<= self.rect.x <= bar.rect.x + bar.length//3 :
if self.x_speed > 0 :
self.x_speed*=-1
elif bar.rect.x+bar.length//2 <= self.rect.x <= bar.rect.x + bar.length :
if self.x_speed < 0 :
self.x_speed*=-1
if self.rect.y> screen_height and self.life >0.5 and launched :
self.life-=1
lostlife+=1
time.sleep(0.5)
self.rect.x=bar.rect.x+30
self.rect.y=bar.rect.y-30
launched=False
if self.rect.y+self.radius > screen_height and self.life <=1:
Start=False
win=False
for block in fall_list:
if block.rect.y<screen_height:
block.update()
if block.rect.y>screen_height:
fall_list.remove(block)
if block.rect.colliderect(bar.rect):
for block in fall_list:
block.rect.y+=screen_height
fall_list.clear()
if self.life > 0.5:
self.life-=0.5
lostlife+=0.5
self.rect.x=bar.rect.x+30
self.rect.y=bar.rect.y-30
launched=False
else:
Start=False
win=False
break
if len(block_list) <= 0 : #when all the blocks are demolished, finish the level
Start=False
win=True
if Level_Score == changespeedinterval : #for every changespeedinterval value increment the speed of the ball
changespeed=True
if Level_Score%10== 0 and Level_Score>=10 and changespeed: # Level_Score>=10 keeps the speed from increasing at the very start, since 0 % 10 is also 0
self.x_speed*=self.speedvar
self.y_speed*=self.speedvar
changespeed=False
changespeedinterval+=10
# finally increment x and y speed
self.rect.x+=self.x_speed
self.rect.y-=self.y_speed
try:
with open("HighScore.txt",'r') as f:
hs=f.read()
except FileNotFoundError:
hs=0
def introscreen():
global hs
global Start
global emptystr
global show_score
global level
titlefont=pygame.font.SysFont("Comic sans MS",48)
font=pygame.font.SysFont(None,48)
title=titlefont.render("Araknoid",True,BLACK,GREEN)
title_rect=title.get_rect()
play_text=font.render("Play",True,BLACK,GREEN)
play_rect=play_text.get_rect()
quit_text=font.render("Quit",True,BLACK,GREEN)
quit_rect=quit_text.get_rect()
high_score=font.render("High Score",True,BLACK,GREEN)
high_score_rect=high_score.get_rect()
show_hscore=font.render(emptystr,True,BLACK)
show_hscore_rect=show_hscore.get_rect()
title_rect.center=screen.get_rect().center
title_rect.centery=screen.get_rect().centery - 130
play_rect.center=screen.get_rect().center
quit_rect.centerx=screen.get_rect().centerx
quit_rect.centery=screen.get_rect().centery +60
high_score_rect.centerx=screen.get_rect().centerx
high_score_rect.centery=screen.get_rect().centery +120
show_hscore_rect.centerx=screen.get_rect().centerx
show_hscore_rect.centery=screen.get_rect().centery +200
screen.blit(title,title_rect)
screen.blit(play_text,play_rect)
screen.blit(quit_text,quit_rect)
screen.blit(high_score,high_score_rect)
screen.blit(show_hscore,show_hscore_rect)
for event in pygame.event.get():
mpos=pygame.mouse.get_pos()
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type==pygame.MOUSEBUTTONDOWN and event.button == 1:
if play_rect.collidepoint(mpos) :
Start=True
break
elif quit_rect.collidepoint(mpos):
pygame.quit()
quit()
# setting highscore button as ON AND OFF switch
elif high_score_rect.collidepoint(mpos) and not show_score :
emptystr=str(hs)
show_score=True
elif high_score_rect.collidepoint(mpos) and show_score :
emptystr=str()
show_score=False
def Start_Game(iron):
# iron arguments says number of iron block rows
pygame.mouse.set_visible(False)
posy=0
for i in range(5):
posx=0
color= GREY if i<=iron-1 else RED
for j in range(10):
block=Block(color,47,25)
block.rect.x=posx
block.rect.y=posy
#screen.blit(block.image,block.rect)
block_list.add(block)
all_sprites_list.add(block)
if i <=iron-1:
iron_block_list.append(block)
#all_block_rect.append(block.rect)
posx+=50
posy+=28
all_sprites_list.add(bar)
def GameOver(message,Escape):
global highscore
global total_score
if Escape:
score=total_score
else:
score=highscore
basicfont=pygame.font.SysFont('Comic Sans MS',46)
scorefont=pygame.font.SysFont(None,50)
text=basicfont.render(message,True,WHITE,BLUE) # render(text,anti-aliasing,color,background)
textrect=text.get_rect()
textrect.centerx=screen.get_rect().centerx
textrect.centery=screen.get_rect().centery-50
show_score=scorefont.render(str(score),True,RED,GREEN)
score_rect=show_score.get_rect()
score_rect.centerx=screen.get_rect().centerx
score_rect.centery=screen.get_rect().centery
intro=scorefont.render("Press Enter to Play Again",True,BLACK,BLUE)
introrect=intro.get_rect()
introrect.centerx=screen.get_rect().centerx
introrect.centery=screen.get_rect().centery+60
screen.blit(text,textrect)
screen.blit(show_score,score_rect)
screen.blit(intro,introrect)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN :
return True
def showscore(ball):
font=pygame.font.SysFont(None,40)
levelfont=font.render("LEVEL "+str(level),True,BLACK,BLUE)
lifefont=font.render("LIFE:"+str(ball.life),True,BLACK,BLUE)
scorefont=font.render("SCORE:"+str(total_score),True,BLACK,BLUE)
level_rect=levelfont.get_rect()
life_rect=lifefont.get_rect()
score_rect=scorefont.get_rect()
level_rect.centerx=screen.get_rect().centerx
level_rect.centery=screen.get_rect().centery
score_rect.centerx=screen.get_rect().centerx+70
score_rect.centery=screen.get_rect().centery+40
life_rect.centerx=screen.get_rect().centerx-60
life_rect.centery=screen.get_rect().centery+40
screen.blit(levelfont,level_rect)
screen.blit(scorefont,score_rect)
screen.blit(lifefont,life_rect)
pygame.init()
screen_width=500
screen_height=500
screen=pygame.display.set_mode([screen_width,screen_height])
pygame.display.set_caption("ARAKNOID")
clock=pygame.time.Clock()
level=1
run=True
fall_list=[]
#is_falling=bool()
#fall_img=pygame.Surface([30,30])
#fall_img.fill(RED)
#fall_rect=fall_img.get_rect()
def NextStage(speedVar,bar_len,ball_rad,ball_speed):
global emptystr
global hs # high score to be stored in file
global highscore
global show_score
global Start
global win
global change_speed
global changespeedinterval
global speedvar
global bar
global Level_Score
global total_score
global level
global run
global launched
global fall_list
# global fall_rect
speedvar=speedVar
#bar
bar=Bar(SILVER,bar_len,20)
emptystr=str()
show_score=False
Start=False if level==1 else True
win=False
#execute at the beginning level only
while not Start and level==1:
for event in pygame.event.get():
if event.type == pygame.QUIT :
pygame.quit()
exit()
break
screen.fill(BLUE)
introscreen()
pygame.display.flip()
block_hit_list=[]
ball=Ball(GREEN,ball_rad,ball_speed,speedvar)
ball.rect.x=screen_width//2 + 20
ball.rect.y=screen_height-45
all_sprites_list.add(ball)
player_sprite.add(ball)
#creating blocks
Start_Game(level)
launched=False
while Start:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type==pygame.KEYDOWN and event.key==pygame.K_SPACE and not launched :
player_sprite.update()
launched=True
#for quitting in middle of the running game
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run=False
changeHighscore(total_score,hs)
while True:
screen.fill(BLUE)
pygame.mouse.set_visible(True)
if GameOver("You Escaped , Coward !!",True):
return
pygame.display.update()
keys=pygame.key.get_pressed()
if keys[pygame.K_LEFT] and bar.rect.x>0 :
if not launched :
ball.rect.x-=6 # to move the ball with respect to bar when it is not launched yet
bar.rect.x-=7
if keys[pygame.K_RIGHT] and bar.rect.x<screen_width-bar.length:
if not launched :
ball.rect.x+=6
bar.rect.x+=7
screen.fill(BLUE)
remove=bool()
iron=[block.rect for block in iron_block_list]
if ball.rect.collidelist(iron)+1 : # collidelist(rect_list) returns the index of the first rectangle in rect_list that collides, or -1 if none, so the "+1" makes the miss case falsy
for block in iron_block_list:
if ball.rect.colliderect(block.rect):
remove=False
block.image.fill(RED)
iron_block_list.remove(block)
break
else:
remove=True
block_hit_list=pygame.sprite.spritecollide(ball,block_list,remove)
for block in block_hit_list:
if block.color == RED and len(fall_list)<3:
block.rect.width-=35
block.rect.height-=5
fall_list.append(block)
sprited_block_rect.append(block.rect)
total_score+=1
Level_Score+=1
if launched:
player_sprite.update()
sprited_block_rect.clear()
all_sprites_list.draw(screen)
player_sprite.draw(screen)
for block in fall_list:
pygame.draw.rect(screen,block.color,block.rect)
showscore(ball)
clock.tick(60)
pygame.display.flip()
while not Start :
ball.image.fill(BLUE)
bar.image.fill(BLUE)
player_sprite.remove(ball) # del ball
pygame.mouse.set_visible(True)
screen.fill(BLUE)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
highscore=total_score
if win:
if gotonextlevel():
break
else:
run=False
changeHighscore(highscore,hs)
if GameOver('GAME OVER',False):
return
pygame.display.flip()
Level_Score=0
#NextStage(ch_s,ch_in,bar_len,ball_rad,ball_speed)
def gotonextlevel():
global level
if level != 5:
font=pygame.font.SysFont(None,28)
nextlev=font.render("Press ENTER to continue",True,BLACK,GREEN)
nextlev_rect=nextlev.get_rect()
nextlev_rect.center=screen.get_rect().center
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN :
level+=1
return True
screen.blit(nextlev,nextlev_rect)
pygame.display.update()
else:
font=pygame.font.SysFont(None,28)
nextlev=font.render("You Won\n Score :"+str(total_score),True,BLACK,GREEN)
nextlev_rect=nextlev.get_rect()
nextlev_rect.center=screen.get_rect().center
screen.blit(nextlev,nextlev_rect)
return
def changeHighscore(highscore,hs):
if highscore>int(hs):
with open("HighScore.txt",'w') as f:
f.write(str(highscore))
reset()
def Next():
global run,level,lostlife,s
while run and not s.isEmpty() :
arg=s.pop()
if level>1:
lostlife-=1
NextStage(arg[0],arg[1],arg[2],arg[3])
while True:
if run :
Next()
else:
reset()
|
# From the list "numeros", which contains the numbers from 1 to 10, use filter to obtain
# a list called "pares" with the even numbers from the list "numeros"
numeros = list(range(10+1))
# def pares():
# pares = list()
# for numero in numeros:
# if numero % 2 == 0:
# pares.append(numero)
# print(pares)
# pares()
def par(numero):
if numero % 2 == 0:
return True
else:
return False
resultado = filter(par, numeros)
pares = list(resultado)
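# Equivalent one-liner with a lambda (illustrative): pares = list(filter(lambda n: n % 2 == 0, numeros))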
print(pares) |
import gzip
import os
import urllib.request
import numpy as np
import pickle
class Mnist:
def __init__(self, dataset_dir: str) -> None:
self._url_base: str = 'http://yann.lecun.com/exdb/mnist/'
self._mnist_files: dict = {
'train_img':'train-images-idx3-ubyte.gz',
'train_label':'train-labels-idx1-ubyte.gz',
'test_img':'t10k-images-idx3-ubyte.gz',
'test_label':'t10k-labels-idx1-ubyte.gz'
}
self._img_dim = (1, 28, 28)
self._img_size = self._img_dim[1] * self._img_dim[2]
self.dataset_dir: str = dataset_dir
os.makedirs(self.dataset_dir, exist_ok=True)
self._mnist_pkl = f"{dataset_dir}/mnist.pkl"
def _file_path(self, file_name: str) -> str:
return f"{self.dataset_dir}/{file_name}"
def download(self) -> None:
for file_name in self._mnist_files.values():
self._download_file(file_name)
def _download_file(self, file_name: str) -> None:
file_path = self._file_path(file_name)
if os.path.exists(file_path):
print(f"{file_name} already exists.")
return
print(f"Downloading {file_name}")
urllib.request.urlretrieve(f'{self._url_base}/{file_name}', file_path)
print("Done!")
def _load_label(self, file_name: str) -> np.ndarray:
file_path = self._file_path(file_name)
with gzip.open(file_path, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return labels
def _load_img(self, file_name: str) -> np.ndarray:
file_path = self._file_path(file_name)
with gzip.open(file_path, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
data = data.reshape(-1, self._img_size)
return data
def _convert_ndarray(self) -> dict:
dataset = {}
dataset["train_img"] = self._load_img(self._mnist_files['train_img'])
dataset["train_label"] = self._load_label(self._mnist_files['train_label'])
dataset["test_img"] = self._load_img(self._mnist_files['test_img'])
dataset["test_label"] = self._load_label(self._mnist_files['test_label'])
return dataset
def init_dataset(self):
self.download()
dataset: dict = self._convert_ndarray()
with open(self._mnist_pkl, "wb") as f:
pickle.dump(dataset, f, -1)
def load(self):
if not os.path.exists(self._mnist_pkl):
self.init_dataset()
with open(self._mnist_pkl, 'rb') as f:
dataset = pickle.load(f)
return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])
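# Example usage (the directory path is illustrative):
#     (x_train, y_train), (x_test, y_test) = Mnist('./data/mnist').load()
#     print(x_train.shape)  # (60000, 784)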
|
import PySimpleGUI as sg
layout = [[sg.Text('What is your name?')],
[sg.InputText()],
[sg.Button('Ok')]]
window = sg.Window('Title of Window', layout)
event, values = window.read()
window.close()
sg.popup('Hello {}'.format(values[0]))
|
import os
import docx #python-docx needs to be installed via pip (give details?)
"""
Handling people with more than two names is still required.
I'm sure we can tidy this up too.
"""
#establish working directory
directory = './rename/files_to_rename/'
files = os.listdir(directory)
for f in files:
#split filename from extension, save as separate variables
#needs to take the string from reading in directory
file_name, f_ext = os.path.splitext(f)
#split filename at spaces
fname = file_name.split()
#the first and last names are by default split. now to isolate the id number
#unfortunately, this will not work, as there are triple-barrel names
#eg. 'Brienne of Tarth'. I need to count indexes until I find the ID, then
#use the last of those as the last name, and concatenate however many preceding
#as the first name. eg. 'Tarth, Brienne of'
#**EDIT** actually need to count indexes until it finds the first '(', since
#there are rare cases where the id is oddly preceded by the student's name:
#eg. 'Sansa Stark(Sansa Stark - id(878756785)'
#so need to loop over the list; the element directly preceding '(' is the
#last name, while the others in order are the first names, then keep looping
#until an element with 'id' is found to assign to the id variable.
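## A sketch (untested, names illustrative) of the parsing described above: walk the
## tokens until the first one containing '(', treat the token before it as the last
## name and everything earlier as the first name(s), then pull the id out of the
## token that contains 'id(':
##     paren_idx = next(i for i, tok in enumerate(fname) if '(' in tok)
##     last_name = fname[paren_idx - 1]
##     first_names = ' '.join(fname[:paren_idx - 1])
##     id_tok = next(tok for tok in fname if 'id(' in tok)
##     id = id_tok[id_tok.find("id"):id_tok.find(")")]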
id = fname[2]
id = id[id.find("id"):id.find(")")]
## print fname[2]
## print id
## print f_ext
#if docx then process
if f_ext == '.docx':
def getText(filename):
doc = docx.Document(filename)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
return '\n'.join(fullText)
textComplete = getText(directory + f)
textSplit = textComplete.split()
## if fname[0] == 'Jeor':
## print textSplit
#check for keywords in first 200 words (originally 400, but that was too many)
#append the first instance of keywords in range to list(topic), or None if none found
#remove(filter) None types from list
#if the remaining list contains exactly one element, return that element(topic[0])
#otherwise return "UNSURE" to prompt manual evaluation of topic
def topicCheck(textSplit):
topic = []
topic.append(next(("Spell" for i in xrange(200) if (textSplit[i] == "curse" or textSplit[i] == "spell" or textSplit[i] == "invoke")), None))
topic.append(next(("Language" for i in xrange(200) if (textSplit[i] == "language")), None))
topic = filter(None, topic)
#print topic, len(topic)
if len(topic) == 1:
return topic[0]
else:
return "UNSURE"
topic = topicCheck(textSplit)
#print last, first, id, topic, extension
#not sure why it ended up a tuple, but .join sorts it back to string
#look into this
newname = "".join((fname[1]+', ', fname[0] + ' (', id + ') - ' + topic + f_ext))
os.rename(directory + f, directory + newname)
#if unsupported filetype
else:
newname = "".join((fname[1]+', ', fname[0] + ' (', id + ') - ' + 'UNSUPPORTED FILETYPE' + f_ext))
os.rename(directory + f, directory + newname)
## print type(newname)
print newname
|
class Solution:
def longestDiverseString(self, a: int, b: int, c: int) -> str:
ret = ""
array = [[a,"a"], [b,"b"], [c,"c"]]
array.sort(reverse= True)
while 1:
top = array[0][0]
mid = array[1][0]
bot = array[2][0]
if top > mid+bot and top >1:
ret += array[0][1] * 2
array[0][0] -=2
elif top:
ret += array[0][1]
array[0][0] -= 1
top = array[0][0]
if mid + bot == 0:
break
if mid:
if mid + bot >top and mid >1:
ret += array[1][1] * 2
array[1][0] -=2
elif mid >0:
ret += array[1][1] * 1
array[1][0] -= 1
elif bot:
if bot > top and bot>1:
ret += array[2][1] * 2
array[2][0] -= 2
elif bot >0:
ret += array[2][1] * 1
array[2][0] -= 1
return ret
sol = Solution()
a = 0
b = 1
c = 7
print(sol.longestDiverseString(a,b,c)) |
# ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# formats: ipynb,py:percent
# notebook_metadata_filter: language_info
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.5.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.6
# ---
# %% {"tags": ["parameters"]}
# Our default parameters
# This cell has a "parameters" tag, which means it defines the parameters used in the notebook
run_date = "2018-04-28"
source_id = 'sensor1'
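# Illustrative papermill-style override of these parameters (the notebook file names are assumed):
#     papermill step1_notebook.ipynb step1_output.ipynb -p run_date 2018-05-01 -p source_id sensor2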
# %%
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import scrapbook as sb
from datetime import datetime, timedelta
import time
import os
plt.ioff()
# %%
run_datetime = datetime.strptime(run_date, '%Y-%m-%d')
ts = pd.date_range("00:00", "23:59", freq="5min")
td = ts - timedelta((datetime.now() - run_datetime).days)
data = pd.DataFrame(np.random.randn(len(td)), columns=['mydata'])
data = data.rolling(70, min_periods=1, center=True).mean() # Smooth it so it looks purdy
data['date'] = td
data['hour'] = data['date'].apply(lambda x: datetime.strftime(x, "%H"))
# %%
print(data['date'].describe())
data.describe()
# %%
data = data.sort_values('date').set_index('date', drop=True)
data.head(5)
# %%
fig, ax = plt.subplots()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.gcf().autofmt_xdate()
ax.plot(data.index, data['mydata'], c='k', alpha=.5)
ax.set(title="Activity for the day of {}".format(run_date))
sb.glue('activity_day_fig', fig, display=True)
# %%
month_partition = run_datetime.strftime("%Y-%m")
output_file = "../data/output/step1/" + month_partition + "/" + run_date + '-' + source_id + '.csv'
print(output_file)
# %%
os.makedirs(os.path.dirname(output_file), exist_ok=True)
data.to_csv(output_file)
|
from instagram_private_api import Client, ClientCompatPatch
import json
import sys
user_name = 'socialweb554'
password = 'socialweb554.'
# 1518284433 - Robert Downey Jr
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: python3 get_feed.py [query] [file_result]')
sys.exit(0)
query = sys.argv[1]
api = Client(user_name, password)
user_feed = api.user_feed(query)
feed_data = []
for photo in user_feed["items"]:
media_id = photo["pk"]
media = api.media_n_comments(media_id, n=5)
media_data = {
"media_id": media_id,
"username": None,
"full_name": None,
"caption": None,
"comment_count": None,
"like_count": None,
"comments": []
}
for comment in media:
# print(comment['text'])
media_data["comments"].append(comment['text'])
media_data["caption"] = photo["caption"]["text"]
media_data["comment_count"] = photo["comment_count"]
media_data["like_count"] = photo["like_count"]
media_data["username"] = photo["user"]["username"]
media_data["full_name"] = photo["user"]["full_name"]
feed_data.append(media_data)
print(json.dumps(feed_data))
sys.stdout.flush() |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from datetime import datetime, timedelta
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams.http.auth import NoAuth
from dateutil.parser import isoparse
from pytest import raises
from source_paypal_transaction.source import Balances, PaypalTransactionStream, Transactions
def test_minimum_allowed_start_date():
start_date = now() - timedelta(days=10 * 365)
stream = Transactions(authenticator=NoAuth(), start_date=start_date)
assert stream.start_date != start_date
def test_transactions_transform_function():
start_date = now() - timedelta(days=10 * 365)
stream = Transactions(authenticator=NoAuth(), start_date=start_date)
transformer = stream.transformer
input_data = {"transaction_amount": "123.45", "transaction_id": "111", "transaction_status": "done"}
schema = stream.get_json_schema()
schema["properties"] = {
"transaction_amount": {"type": "number"},
"transaction_id": {"type": "integer"},
"transaction_status": {"type": "string"},
}
transformer.transform(input_data, schema)
expected_data = {"transaction_amount": 123.45, "transaction_id": 111, "transaction_status": "done"}
assert input_data == expected_data
def test_get_field():
record = {"a": {"b": {"c": "d"}}}
# Test expected result - field_path is a list
assert "d" == PaypalTransactionStream.get_field(record, field_path=["a", "b", "c"])
# Test expected result - field_path is a string
assert {"b": {"c": "d"}} == PaypalTransactionStream.get_field(record, field_path="a")
# Test failures - not existing field_path
assert None is PaypalTransactionStream.get_field(record, field_path=["a", "b", "x"])
assert None is PaypalTransactionStream.get_field(record, field_path=["a", "x", "x"])
assert None is PaypalTransactionStream.get_field(record, field_path=["x", "x", "x"])
# Test failures - incorrect record structure
record = {"a": [{"b": {"c": "d"}}]}
assert None is PaypalTransactionStream.get_field(record, field_path=["a", "b", "c"])
record = {"a": {"b": "c"}}
assert None is PaypalTransactionStream.get_field(record, field_path=["a", "b", "c"])
record = {}
assert None is PaypalTransactionStream.get_field(record, field_path=["a", "b", "c"])
def test_update_field():
# Test success 1
record = {"a": {"b": {"c": "d"}}}
PaypalTransactionStream.update_field(record, field_path=["a", "b", "c"], update=lambda x: x.upper())
assert record == {"a": {"b": {"c": "D"}}}
# Test success 2
record = {"a": {"b": {"c": "d"}}}
PaypalTransactionStream.update_field(record, field_path="a", update=lambda x: "updated")
assert record == {"a": "updated"}
# Test failure - incorrect field_path
record = {"a": {"b": {"c": "d"}}}
PaypalTransactionStream.update_field(record, field_path=["a", "b", "x"], update=lambda x: x.upper())
assert record == {"a": {"b": {"c": "d"}}}
# Test failure - incorrect field_path
record = {"a": {"b": {"c": "d"}}}
PaypalTransactionStream.update_field(record, field_path=["a", "x", "x"], update=lambda x: x.upper())
assert record == {"a": {"b": {"c": "d"}}}
def now():
return datetime.now().replace(microsecond=0).astimezone()
def test_transactions_stream_slices():
start_date_max = {"hours": 0}
# if start_date > now - **start_date_max then no slices
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max) - timedelta(minutes=2),
)
transactions.get_last_refreshed_datetime = lambda x: None
stream_slices = transactions.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
# start_date <= now - **start_date_max
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max),
)
transactions.get_last_refreshed_datetime = lambda x: None
stream_slices = transactions.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max) + timedelta(minutes=2),
)
transactions.get_last_refreshed_datetime = lambda x: None
stream_slices = transactions.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max) - timedelta(hours=2),
)
transactions.get_last_refreshed_datetime = lambda x: None
stream_slices = transactions.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max) - timedelta(days=1),
)
transactions.get_last_refreshed_datetime = lambda x: None
transactions.stream_slice_period = {"days": 1}
stream_slices = transactions.stream_slices(sync_mode="any")
assert 2 == len(stream_slices)
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max) - timedelta(days=1, hours=2),
)
transactions.get_last_refreshed_datetime = lambda x: None
transactions.stream_slice_period = {"days": 1}
stream_slices = transactions.stream_slices(sync_mode="any")
assert 2 == len(stream_slices)
transactions = Transactions(
authenticator=NoAuth(),
start_date=now() - timedelta(**start_date_max) - timedelta(days=30, minutes=1),
)
transactions.get_last_refreshed_datetime = lambda x: None
transactions.stream_slice_period = {"days": 1}
stream_slices = transactions.stream_slices(sync_mode="any")
assert 31 == len(stream_slices)
# tests with specified end_date
transactions = Transactions(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-04T12:00:00+00:00"),
)
transactions.get_last_refreshed_datetime = lambda x: None
transactions.stream_slice_period = {"days": 1}
stream_slices = transactions.stream_slices(sync_mode="any")
assert [
{"start_date": "2021-06-01T10:00:00+00:00", "end_date": "2021-06-02T10:00:00+00:00"},
{"start_date": "2021-06-02T10:00:00+00:00", "end_date": "2021-06-03T10:00:00+00:00"},
{"start_date": "2021-06-03T10:00:00+00:00", "end_date": "2021-06-04T10:00:00+00:00"},
{"start_date": "2021-06-04T10:00:00+00:00", "end_date": "2021-06-04T12:00:00+00:00"},
] == stream_slices
# tests with specified end_date and stream_state
transactions = Transactions(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-04T12:00:00+00:00"),
)
transactions.get_last_refreshed_datetime = lambda x: None
transactions.stream_slice_period = {"days": 1}
stream_slices = transactions.stream_slices(sync_mode="any", stream_state={"date": "2021-06-02T10:00:00+00:00"})
assert [
{"start_date": "2021-06-02T10:00:00+00:00", "end_date": "2021-06-03T10:00:00+00:00"},
{"start_date": "2021-06-03T10:00:00+00:00", "end_date": "2021-06-04T10:00:00+00:00"},
{"start_date": "2021-06-04T10:00:00+00:00", "end_date": "2021-06-04T12:00:00+00:00"},
] == stream_slices
transactions = Transactions(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-04T12:00:00+00:00"),
)
transactions.get_last_refreshed_datetime = lambda x: None
stream_slices = transactions.stream_slices(sync_mode="any", stream_state={"date": "2021-06-04T10:00:00+00:00"})
assert [{"start_date": "2021-06-04T10:00:00+00:00", "end_date": "2021-06-04T12:00:00+00:00"}] == stream_slices
def test_balances_stream_slices():
"""Test slices for Balance stream.
Note that <end_date> is not used by this stream.
"""
now = datetime.now().replace(microsecond=0).astimezone()
# Test without end_date (it equal <now> by default)
balance = Balances(authenticator=NoAuth(), start_date=now)
balance.get_last_refreshed_datetime = lambda x: None
stream_slices = balance.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
balance = Balances(authenticator=NoAuth(), start_date=now - timedelta(minutes=1))
balance.get_last_refreshed_datetime = lambda x: None
stream_slices = balance.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
balance = Balances(
authenticator=NoAuth(),
start_date=now - timedelta(hours=23),
)
balance.get_last_refreshed_datetime = lambda x: None
stream_slices = balance.stream_slices(sync_mode="any")
assert 1 == len(stream_slices)
balance = Balances(
authenticator=NoAuth(),
start_date=now - timedelta(days=1),
)
balance.get_last_refreshed_datetime = lambda x: None
balance.stream_slice_period = {"days": 1}
stream_slices = balance.stream_slices(sync_mode="any")
assert 2 == len(stream_slices)
balance = Balances(
authenticator=NoAuth(),
start_date=now - timedelta(days=1, minutes=1),
)
balance.get_last_refreshed_datetime = lambda x: None
balance.stream_slice_period = {"days": 1}
stream_slices = balance.stream_slices(sync_mode="any")
assert 2 == len(stream_slices)
# test with custom end_date
balance = Balances(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-03T12:00:00+00:00"),
)
balance.get_last_refreshed_datetime = lambda x: None
balance.stream_slice_period = {"days": 1}
stream_slices = balance.stream_slices(sync_mode="any")
assert [
{"start_date": "2021-06-01T10:00:00+00:00", "end_date": "2021-06-02T10:00:00+00:00"},
{"start_date": "2021-06-02T10:00:00+00:00", "end_date": "2021-06-03T10:00:00+00:00"},
{"start_date": "2021-06-03T10:00:00+00:00", "end_date": "2021-06-03T12:00:00+00:00"},
] == stream_slices
# Test with stream state
balance = Balances(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-03T12:00:00+00:00"),
)
balance.get_last_refreshed_datetime = lambda x: None
balance.stream_slice_period = {"days": 1}
stream_slices = balance.stream_slices(sync_mode="any", stream_state={"date": "2021-06-02T10:00:00+00:00"})
assert [
{"start_date": "2021-06-02T10:00:00+00:00", "end_date": "2021-06-03T10:00:00+00:00"},
{"start_date": "2021-06-03T10:00:00+00:00", "end_date": "2021-06-03T12:00:00+00:00"},
] == stream_slices
balance = Balances(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-03T12:00:00+00:00"),
)
balance.get_last_refreshed_datetime = lambda x: None
balance.stream_slice_period = {"days": 1}
stream_slices = balance.stream_slices(sync_mode="any", stream_state={"date": "2021-06-03T11:00:00+00:00"})
assert [{"start_date": "2021-06-03T11:00:00+00:00", "end_date": "2021-06-03T12:00:00+00:00"}] == stream_slices
balance = Balances(
authenticator=NoAuth(),
start_date=isoparse("2021-06-01T10:00:00+00:00"),
end_date=isoparse("2021-06-03T12:00:00+00:00"),
)
balance.get_last_refreshed_datetime = lambda x: None
balance.stream_slice_period = {"days": 1}
stream_slices = balance.stream_slices(sync_mode="any", stream_state={"date": "2021-06-03T12:00:00+00:00"})
assert [{"start_date": "2021-06-03T12:00:00+00:00", "end_date": "2021-06-03T12:00:00+00:00"}] == stream_slices
def test_max_records_in_response_reached(transactions, requests_mock):
balance = Transactions(
authenticator=NoAuth(),
start_date=isoparse("2021-07-01T10:00:00+00:00"),
end_date=isoparse("2021-07-29T12:00:00+00:00"),
)
error_message = {
"name": "RESULTSET_TOO_LARGE",
"message": "Result set size is greater than the maximum limit. Change the filter " "criteria and try again.",
}
url = "https://api-m.paypal.com/v1/reporting/transactions"
requests_mock.register_uri(
"GET",
url + "?start_date=2021-07-01T12%3A00%3A00%2B00%3A00&end_date=2021-07-29T12%3A00%3A00%2B00%3A00",
json=error_message,
status_code=400,
)
requests_mock.register_uri(
"GET", url + "?start_date=2021-07-01T12%3A00%3A00%2B00%3A00&end_date=2021-07-15T12%3A00%3A00%2B00%3A00", json=transactions
)
requests_mock.register_uri(
"GET", url + "?start_date=2021-07-15T12%3A00%3A00%2B00%3A00&end_date=2021-07-29T12%3A00%3A00%2B00%3A00", json=transactions
)
month_date_slice = {"start_date": "2021-07-01T12:00:00+00:00", "end_date": "2021-07-29T12:00:00+00:00"}
assert len(list(balance.read_records(sync_mode="any", stream_slice=month_date_slice))) == 2
requests_mock.register_uri(
"GET",
url + "?start_date=2021-07-01T12%3A00%3A00%2B00%3A00&end_date=2021-07-01T12%3A00%3A00%2B00%3A00",
json=error_message,
status_code=400,
)
one_day_slice = {"start_date": "2021-07-01T12:00:00+00:00", "end_date": "2021-07-01T12:00:00+00:00"}
with raises(Exception):
assert next(balance.read_records(sync_mode="any", stream_slice=one_day_slice))
def test_unnest_field():
record = {"transaction_info": {"transaction_id": "123", "transaction_initiation_date": "2014-07-11T04:03:52+0000"}}
# check the cursor is not on the root level
assert Transactions.cursor_field not in record.keys()
PaypalTransactionStream.unnest_field(record, Transactions.nested_object, Transactions.cursor_field)
# check the cursor now on the root level
assert Transactions.cursor_field in record.keys()
def test_get_last_refreshed_datetime(requests_mock, prod_config, api_endpoint):
stream = Balances(authenticator=NoAuth(), **prod_config)
requests_mock.post(f"{api_endpoint}/v1/oauth2/token", json={"access_token": "test_access_token", "expires_in": 12345})
url = f"{api_endpoint}/v1/reporting/balances" + "?as_of_time=2021-07-01T00%3A00%3A00%2B00%3A00"
requests_mock.get(url, json={})
assert not stream.get_last_refreshed_datetime(SyncMode.full_refresh)
def test_get_updated_state(transactions):
start_date = "2021-06-01T10:00:00+00:00"
stream = Transactions(
authenticator=NoAuth(),
start_date=isoparse(start_date),
end_date=isoparse("2021-06-04T12:00:00+00:00"),
)
state = stream.get_updated_state(current_stream_state={}, latest_record={})
assert state == {"date": start_date}
record = transactions[stream.data_field][0][stream.nested_object]
expected_state = {"date": now().isoformat()}
state = stream.get_updated_state(current_stream_state=expected_state, latest_record=record)
assert state == expected_state
|
print "this is fun \n"
#print "I love coding on the shell"
print "wft, are you talking about"
x = 10
print "the value of x is ", x
|
import argparse
import enum
import os
import sys
import time
import Bio
import Bio.PDB
import Bio.PDB.Vector
import numpy as np
import simtk
import simtk.openmm
import simtk.openmm.app
import simtk.unit
basepath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, basepath)
import grid
def extract_atomic_features(pdb_filename):
"""Extract atomic features from pdb"""
# Parse structure with biopython
pdb_parser = Bio.PDB.PDBParser()
pdb_id = os.path.basename(pdb_filename).split(".")[0]
structure = pdb_parser.get_structure(pdb_id, pdb_filename)
first_model = structure.get_list()[0]
ppb = Bio.PDB.PPBuilder()
sequence = [] # Only used for an assertion / sanity check
sequence_onehot = []
resids_pdb = []
chain_ids = []
for i, chain in enumerate(first_model):
# Append chainid
chain_ids.append(chain.id)
# Sequence of residue names for this chain
sequence_chain = []
for res in chain.get_residues():
resids_pdb.append(res.id[1])
sequence_chain.append(res.resname.strip())
# Add to global container for this protein
sequence.append(sequence_chain)
# Convert residue names to amino acid indices
aa_indices = []
for aa in sequence_chain:
try:
aa_index = Bio.PDB.Polypeptide.three_to_index(aa)
            except Exception:
                # Unknown / non-standard residue maps to index 20 (the "unknown" slot)
                aa_index = 20
aa_indices.append(aa_index)
# Convert to one-hot encoding
aa_onehot_chain = np.zeros((len(aa_indices), 21))
aa_onehot_chain[np.arange(len(aa_indices)), aa_indices] = 1
sequence_onehot.append(aa_onehot_chain)
# Keep track of boundaries of individual chains
chain_boundary_indices = np.cumsum([0] + [len(entry) for entry in sequence_onehot])
# Collapse all chain segments into one. The individual chains
# will be recoverable through chain_boundary_indices
sequence_onehot = np.concatenate(sequence_onehot)
# Convert resids of pdb to arr
resids_pdb = np.array(resids_pdb)
# Extract positions using OpenMM.
pdb_simtk = simtk.openmm.app.PDBFile(pdb_filename)
positions = pdb_simtk.getPositions()
# Save features in a dictionary
features = {}
features["atom_names"] = [] # Atom names
    features["res_indices"] = []  # Global res indices (no reset across chains; starts from 0)
    features["resids_pdb"] = []  # Resids in the actual PDBs
features["x"] = []
features["y"] = []
features["z"] = []
# Iterate over chain,residue,atoms and extract features
for i, chain in enumerate(pdb_simtk.getTopology().chains()):
chain_start_index = chain_boundary_indices[i]
for j, residue in enumerate(chain.residues()):
for atom in residue.atoms():
# Extract atom features
index = atom.index
position = list(positions[index].value_in_unit(simtk.unit.angstrom))
features["atom_names"].append(atom.name)
features["res_indices"].append(residue.index)
features["x"].append(position[0])
features["y"].append(position[1])
features["z"].append(position[2])
# Sanity check
residue_index_local = residue.index - chain_start_index
assert residue.name == sequence[i][residue_index_local]
# Convert valid lists to numpy arrays
    # (even convert atom_names, since it is simpler to mask with despite being str)
features["atom_names"] = np.array(features["atom_names"], dtype="a5")
features["res_indices"] = np.array(features["res_indices"], dtype=np.int32)
features["x"] = np.array(features["x"], dtype=np.float32)
features["y"] = np.array(features["y"], dtype=np.float32)
features["z"] = np.array(features["z"], dtype=np.float32)
return features, sequence_onehot, chain_ids, chain_boundary_indices, resids_pdb
def extract_coordinates(features, max_radius, include_center):
"""Extract environment coordinates within specifies crieteria"""
# Extract coordinates as normal numpy array
position_array = np.vstack([features["x"], features["y"], features["z"]]).T
# Retrieve residue indices as numpy int array
# This array has many repeats, since it follows the sequence of atoms,
# not residues. It counts globally across chains, i.e. no resets and
# starts from zero.
res_indices_glob = features["res_indices"]
res_indices_uniq = np.unique(
res_indices_glob
) # Has length == number of total residues
# Begin
selector_list = []
indices_list = []
xyz_ref_origo_list = []
for residue_index in res_indices_uniq:
# Extract origin mask
if (
np.logical_and(
res_indices_glob == residue_index, features["atom_names"] == b"N"
).any()
and np.logical_and(
res_indices_glob == residue_index, features["atom_names"] == b"CA"
).any()
and np.logical_and(
res_indices_glob == residue_index, features["atom_names"] == b"C"
).any()
):
N_mask = np.logical_and(
res_indices_glob == residue_index, features["atom_names"] == b"N"
)
CA_mask = np.logical_and(
res_indices_glob == residue_index, features["atom_names"] == b"CA"
)
C_mask = np.logical_and(
res_indices_glob == residue_index, features["atom_names"] == b"C"
)
else:
# Store None to maintain indices
indices_list.append(None)
selector_list.append(None)
continue
# Extract origin
pos_N = np.array(
[features["x"][N_mask], features["y"][N_mask], features["z"][N_mask]]
).squeeze()
pos_CA = np.array(
[features["x"][CA_mask], features["y"][CA_mask], features["z"][CA_mask]]
).squeeze()
pos_C = np.array(
[features["x"][C_mask], features["y"][C_mask], features["z"][C_mask]]
).squeeze()
# Define Cartesian coordinate system
coordinate_system = grid.CoordinateSystem["cartesian"]
z_direction = grid.ZDirection[grid.ZDirection.outward.name]
# Define local coordinate system
rot_matrix = grid.define_coordinate_system(pos_N, pos_CA, pos_C, z_direction)
# Calculate coordinates relative to origin
xyz = position_array - pos_CA
# Rotate to the local reference
xyz = np.dot(rot_matrix, xyz.T).T
# Calculate radius
r = np.sqrt(xyz[:, 0] ** 2 + xyz[:, 1] ** 2 + xyz[:, 2] ** 2)
if include_center:
selector = np.where(r < max_radius)[0]
else:
# ALSO exclude features from residue itself
selector = np.where(
np.logical_and(r < max_radius, res_indices_glob != residue_index)
)[0]
xyz_ref_origo_list.append(xyz[selector])
selector_list.append(selector)
    # Find max number of atoms in an environment with radius = max_radius
max_selector = max(
[len(selector) for selector in selector_list if selector is not None]
)
selector_array = np.full((len(selector_list), max_selector), -1, dtype=np.int32)
for i, selector in enumerate(selector_list):
if selector is not None:
selector_array[i, : len(selector)] = selector.astype(np.int32)
# Output actual coordinates and atom types rather than embedding in a grid.
atom_type_list = ["C", "N", "O", "H", "S", "P"]
atom_types_numeric = np.array(
[atom_type_list.index(x.decode("utf-8")[0]) for x in features["atom_names"]]
    )  # the [0] takes the first letter (the element) of the atom name
# Save selected coordinates in array with shape defined by the max number of atoms in any of environment
xyz_ref_origo_arr = np.full(
shape=[len(selector_list), max_selector, 3],
fill_value=[-99, -99, -99],
dtype=np.float32,
)
for i, xyz_selected in enumerate(xyz_ref_origo_list):
if xyz_selected is not None:
xyz_ref_origo_arr[i, : xyz_selected.shape[0], :] = xyz_selected
return xyz_ref_origo_arr, atom_types_numeric, selector_array
def extract_environments(
pdb_filename: str,
pdb_id: str,
max_radius: float = 9.0,
out_dir: str = "./",
include_center: bool = False,
):
"""
Extract residue environments from PDB file. Outputs .npz file.
Parameters
----------
pdb_filename: str
PDB filename to extract environments from
pdb_id: str
PDBID. Used as a prefix for the output file, and does not have to follow
the standard 4 character nomenclature
    max_radius: float
        Max radius from the center CA atom, in Angstrom
    out_dir: str
        Directory where the output .npz file is written
    include_center: bool
        Whether to include the center residue. For the cavity model, this only
        makes sense when set to False, since we are classifying the missing
        center residue.
"""
# Extract atomic features and other relevant info
(
features,
sequence_onehot,
chain_ids,
chain_boundary_indices,
resids_pdb,
) = extract_atomic_features(pdb_filename)
# Extract relevant coordinates (already masked with selector and referenced),
# all atom types and the mask for each residue (selector_array)
xyz_ref_origo_arr, atom_types_numeric, selector_array = extract_coordinates(
features, max_radius, include_center
)
# Save as .npz
np.savez_compressed(
out_dir + f"/{pdb_id}_coordinate_features",
atom_types_numeric=atom_types_numeric,
positions=xyz_ref_origo_arr,
selector=selector_array,
aa_onehot=sequence_onehot,
chain_boundary_indices=chain_boundary_indices,
chain_ids=chain_ids,
residue_numbers=resids_pdb,
)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
if __name__ == "__main__":
t0 = time.time()
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--pdb_in", type=str)
parser.add_argument("--max_radius", type=float, default=9.0)
parser.add_argument("--include_center", type=str2bool, default=False)
parser.add_argument("--out_dir", type=str, default="./")
args_dict = vars(parser.parse_args())
# Settings
pdb_filename = args_dict["pdb_in"]
pdb_id = os.path.basename(pdb_filename).split(".")[0]
max_radius = args_dict["max_radius"]
out_dir = args_dict["out_dir"]
include_center = args_dict["include_center"]
# Extract
extract_environments(pdb_filename, pdb_id, max_radius, out_dir, include_center)
t1 = time.time()
sys.stdout.flush()
print(f"Time for parsing environments from {pdb_filename}: {t1-t0}")
|
'''
Sorting
Given a list of toy prices and an amount to spend,
determine the maximum number of gifts you can buy.
- Each toy can only be purchased once.
'''
prices1, k1 = [1,2,3,4], 7 # 3 items
prices2, k2 = [1, 12, 5, 111, 200, 1000, 10], 50 # 4 items
def maximumToys(prices, k):
# loop through prices
# sum of toys < k
# maximize by sorting?
count = 0
total = 0
prices = sorted(prices)
print(prices)
for i in prices:
        if total + i <= k:  # the budget k may be spent exactly
total += i
count += 1
return count
print(maximumToys(prices1, k1)) |
# This test file provides unit tests for functions in prediction.py
from datetime import datetime
from prediction import diff_month, check_date, timeseries_predict, adjust_predict, predict_price
from collections import OrderedDict
import pandas as pd
# test diff_month()
def test_diff_month():
d1 = datetime.strptime('2017-12', '%Y-%m')
d2 = datetime.strptime('2016-06', '%Y-%m')
expected = 18
assert diff_month(d1,d2) == expected
# test check_date()
def test_check_date():
test_date = '2018-04'
expected = True
assert check_date(test_date) == expected
# test timeseries_predict()
def test_timeseries_predict():
df = {'zipcode': ['10025', '60201'], 'residual_median': [100, 200],
'residual_sd':[40000,10000], 'intercept':[1500000,500000],
'slope':[1300,1000],'c1':[12,6],'c2':[11,5],'c3':[10,4],
'c4':[9,3],'c5':[8,2],'c6':[7,1],'c7':[7,1],'c8':[8,2],
'c9':[9,3],'c10':[10,4],'c11':[11,5],'c12':[12,6]}
all_parameters = pd.DataFrame(OrderedDict(df))
zipcode = '10025'
test_date = '2018-12'
expected = ['10025',1546909,40000]
result = timeseries_predict(all_parameters,zipcode,test_date)
assert [x.values[0] for x in result] == expected
# test adjust_predict()
def test_adjust_predict():
df = {'zipcode': ['10025', '60201'], 'residual_median': [100, 200],
'residual_sd':[40000,10000], 'intercept':[1500000,500000],
'slope':[1300,1000],'c1':[12,6],'c2':[11,5],'c3':[10,4],
'c4':[9,3],'c5':[8,2],'c6':[7,1],'c7':[7,1],'c8':[8,2],
'c9':[9,3],'c10':[10,4],'c11':[11,5],'c12':[12,6]}
all_parameters = pd.DataFrame(OrderedDict(df))
base_result = timeseries_predict(all_parameters,'10025','2018-12')
zipcode = 10025
hometype = 'bed2'
df_means = {'zipcode': ['10025'], 'bed1':[-0.2],'bed2':[-0.1],'bed3':[0],
'bed4':[0.5],'bed5':[1],'single':[1.5],'condo':[0]}
all_means = pd.DataFrame(OrderedDict(df_means))
    all_means = pd.read_csv("static/data/prediction/adjust_means.csv")  # the real means file overrides the inline frame above
result = adjust_predict(all_means, base_result, hometype, zipcode)
expected = [1916860.544750242, 1838460.544750242, 1995260.544750242]
assert expected == result
# test predict_price()
def test_predict_price():
zipcode = '10025'
test_date = '2018-12'
hometype = 'bed2'
df = {'zipcode': ['10025', '60201'], 'residual_median': [100, 200],
'residual_sd':[40000,10000], 'intercept':[1500000,500000],
'slope':[1300,1000],'c1':[12,6],'c2':[11,5],'c3':[10,4],
'c4':[9,3],'c5':[8,2],'c6':[7,1],'c7':[7,1],'c8':[8,2],
'c9':[9,3],'c10':[10,4],'c11':[11,5],'c12':[12,6]}
all_parameters = pd.DataFrame(OrderedDict(df))
df_means = {'zipcode': ['10025'], 'bed1':[-0.2],'bed2':[-0.1],'bed3':[0],
'bed4':[0.5],'bed5':[1],'single':[1.5],'condo':[0]}
all_means = pd.DataFrame(OrderedDict(df_means))
result = predict_price(zipcode, test_date, hometype, all_parameters, all_means)
expected = [1392218.1000000001, 1313818.1000000001, 1470618.1000000001]
assert expected == result
|
import os
import datetime
import numpy as np
import xarray as xr
import requests
import logging
log=logging.getLogger('noaa_coops')
from ... import utils
from .common import periods
all_products=dict(
water_level="water_level",
air_temperature="air_temperature",
water_temperature="water_temperature",
wind="wind",
air_pressure="air_pressure",
air_gap="air_gap",
conductivity="conductivity",
visibility="visibility",
humidity="humidity",
salinity="salinity",
hourly_height="hourly_height",
high_low="high_low",
daily_mean="daily_mean",
monthly_mean="monthly_mean",
one_minute_water_level="one_minute_water_level",
predictions="predictions",
datums="datums",
currents="currents")
all_datums=dict(
CRD="Columbia River Datum",
MHHW="Mean Higher High Water",
MHW="Mean High Water",
MTL="Mean Tide Level",
MSL="Mean Sea Level",
MLW="Mean Low Water",
MLLW="Mean Lower Low Water",
NAVD="North American Vertical Datum",
STND="Station Datum")
def coops_json_to_ds(json,params):
""" Mold the JSON response from COOPS into a dataset
"""
ds=xr.Dataset()
if 'metadata' in json:
meta=json['metadata']
ds['station']=( ('station',), [meta['id']])
for k in ['name','lat','lon']:
val=meta[k]
if k in ['lat','lon']:
val=float(val)
ds[k]= ( ('station',), [val])
else:
# predictions do not come back with metadata
ds['station']= ('station',),[params['station']]
def float_or_nan(s):
try:
return float(s)
except ValueError:
return np.nan
times=[]
values=[]
qualities=[]
if 'data' in json:
data=json['data']
elif 'predictions' in json:
# Why do they present predictions data in such a different format?
data=json['predictions']
for row in data:
times.append( np.datetime64(row['t']) )
if params['product']=='wind':
# For wind data:
# {'t': '1996-02-20 01:18', 's': '', 'd': '', 'dr': '', 'g': '', 'f': '1,1'}
# s: speed?
# d: compass direction (e.g. 267)
# dr: compass point (e.g. 'W')
# g: gust?
values.append( [float_or_nan(row['s']),
float_or_nan(row['d']),
float_or_nan(row['g'])] )
else:
# {'f': '0,0,0,0', 'q': 'v', 's': '0.012', 't': '2010-12-01 00:00', 'v': '0.283'}
values.append(float_or_nan(row['v']))
# for now, ignore flags, verified status.
values=np.array(values)
ds['time']=( ('time',),times)
if params['product']=='wind':
# values ~ [Ntime, {speed, direction, gust}]
ds['wind_speed'] = ('station','time'), [values[:,0]]
ds['wind_direction'] = ('station','time'), [values[:,1]]
ds['wind_gust'] = ('station','time'), [values[:,2]]
ds['wind_speed'].attrs['units']='m s-1'
ds['wind_direction'].attrs['units']='deg_compass'
ds['wind_gust'].attrs['units']='m s-1'
else:
ds[params['product']]=( ('station','time'), [values] )
bad_count=np.sum( np.isnan(values) )
if bad_count:
log.warning("%d of %d data values were missing for %s"%(bad_count,values.size,params['product']))
if params['product'] in ['water_level','predictions']:
ds[params['product']].attrs['datum'] = params['datum']
return ds
def coops_dataset(station,start_date,end_date,products,
days_per_request="M",cache_dir=None,refetch_incomplete=True):
"""
basic retrieval script for NOAA Tides and Currents data.
days_per_request: break up the request into chunks no larger than this many
days. for hourly data, this should be less than 365. for six minute, I think
the limit is 32 days.
"""
ds_per_product=[]
for product in products:
ds=coops_dataset_product(station=station,
product=product,
start_date=start_date,
end_date=end_date,
days_per_request=days_per_request,
refetch_incomplete=refetch_incomplete,
cache_dir=cache_dir)
if ds is not None:
ds_per_product.append(ds)
ds_merged=xr.merge(ds_per_product,join='outer')
return ds_merged
def coops_dataset_product(station,product,
start_date,end_date,days_per_request='M',
cache_dir=None,refetch_incomplete=True,
interval=None,datum=None,
clip=True,cache_only=False):
"""
Retrieve a single data product from a single station.
station: string or numeric identifier for COOPS station
product: string identifying the variable to retrieve, such as "water_level".
See all_products at the top of this file.
start_date,end_date: period to retrieve, as python datetime, matplotlib datenum,
or numpy datetime64.
days_per_request: batch the requests to fetch smaller chunks at a time.
if this is an integer, then chunks will start with start_date, then start_date+days_per_request,
etc.
if this is a string, it is interpreted as the frequency argument to pandas.PeriodIndex.
so 'M' will request month-aligned chunks. this has the advantage that requests for different
start dates will still be aligned to integer periods, and can reuse cached data.
cache_dir: if specified, save each chunk as a netcdf file in this directory,
with filenames that include the gage, period and products. The directory must already
exist.
returns an xarray dataset, or None if no data could be fetched
refetch_incomplete: if True, if a dataset is pulled from cache but appears incomplete
    with respect to the start_date and end_date, attempt to fetch it again. Note that incomplete
here is meant for realtime data which has not yet been recorded, so the test is only
between end_date and the last time stamp of retrieved data.
clip: if true, return only data within the requested window, even if more data was fetched.
"""
start_date=utils.to_dt64(start_date)
end_date=utils.to_dt64(end_date)
fmt_date=lambda d: utils.to_datetime(d).strftime("%Y%m%d %H:%M")
base_url="https://tidesandcurrents.noaa.gov/api/datagetter"
if datum is not None:
datums=[datum]
else:
# not supported by this script: bin
# Some predictions are only in MLLW
datums=['NAVD','MSL','MLLW']
datasets=[]
if cache_only:
if cache_dir is None:
raise Exception("cache_dir=None is not compatible with cache_only=True")
refetch_incomplete=False
for interval_start,interval_end in periods(start_date,end_date,days_per_request):
if cache_dir is not None:
begin_str=utils.to_datetime(interval_start).strftime('%Y-%m-%d')
end_str =utils.to_datetime(interval_end).strftime('%Y-%m-%d')
cache_fn=os.path.join(cache_dir,
"%s_%s_%s_%s.nc"%(station,
product,
begin_str,
end_str))
else:
cache_fn=None
ds=None
if (cache_fn is not None) and os.path.exists(cache_fn):
log.info("Cached %s -- %s"%(interval_start,interval_end))
ds=xr.open_dataset(cache_fn)
if refetch_incomplete:
# This will fetch a bit more than absolutely necessary
# In the case that this file is up to date, but the sensor was down,
# we might be able to discern that if this was originally fetched
# after another request which found valid data from a later time.
if ds.time.values[-1]<min(utils.to_dt64(interval_end),
end_date):
log.warning(" but that was incomplete -- will re-fetch")
ds=None
if (ds is None) and cache_only:
continue
if ds is None:
log.info("Fetching %s -- %s"%(interval_start,interval_end))
params=dict(begin_date=fmt_date(interval_start),
end_date=fmt_date(interval_end),
station=str(station),
time_zone='gmt', # always!
application='stompy',
units='metric',
format='json',
product=product)
if interval is not None:
# Some predictions require interval='hilo'
params['interval']=interval
if product in ['water_level','hourly_height',"one_minute_water_level","predictions"]:
while 1:
# not all stations have NAVD, so fall back to MSL
params['datum']=datums[0]
try:
req=requests.get(base_url,params=params)
except requests.ConnectionError:
log.warning("Unable to connect to tidesandcurrents.noaa.gov -- possibly on HPC node")
data=dict(error=dict(message="Internet access error"))
break
try:
data=req.json()
except ValueError: # thrown by json parsing
log.warning("Likely server error retrieving JSON data from tidesandcurrents.noaa.gov")
data=dict(error=dict(message="Likely server error"))
break
if (('error' in data)
and (("datum" in data['error']['message'].lower())
or (product=='predictions'))):
# Actual message like 'The supported Datum values are: MHHW, MHW, MTL, MSL, MLW, MLLW, LWI, HWI'
# Predictions sometimes silently fail, as if there is no data, but really just need
# to try MSL.
log.warning(data['error']['message'])
datums.pop(0) # move on to next datum
continue # assume it's because the datum is missing
break
else:
req=requests.get(base_url,params=params)
data=req.json()
if 'error' in data:
msg=data['error']['message']
if "No data was found" in msg:
# station does not have this data for this time.
log.warning("No data found for this period")
else:
# Regardless, if there was an error we got no data.
log.warning("Unknown error - got no data back.")
log.warning("URL was %s"%(base_url))
log.warning("params were %s"%params)
log.warning(data)
log.debug("URL was %s"%(base_url))
continue
ds=coops_json_to_ds(data,params)
if cache_fn is not None:
if os.path.exists(cache_fn):
# simply overwriting often does not work, so try removing first
os.unlink(cache_fn)
ds.to_netcdf(cache_fn)
if len(datasets)>0:
# avoid duplicates in case they overlap
ds=ds.isel(time=ds.time.values>datasets[-1].time.values[-1])
datasets.append(ds)
if len(datasets)==0:
# could try to construct zero-length dataset, but that sounds like a pain
# at the moment.
return None
if len(datasets)>1:
# data_vars='minimal' is needed to keep lat/lon from being expanded
# along the time axis.
# For longer periods it's possible that lat/long change. Not sure
# if this is from an actual relocation of the sensor or from a
# change in output rounding. Explicitly setting the concatenation
# coordinates and compat='override' gets around this, at the expense
# of not catching potential errors or real changes in station location.
dataset=xr.concat( datasets, dim='time',coords=['time'],data_vars='minimal',
compat='override')
else:
dataset=datasets[0].copy(deep=True)
# better not to leave these lying around open
for d in datasets:
d.close()
if clip:
time_sel=(dataset.time.values>=start_date) & (dataset.time.values<end_date)
dataset=dataset.isel(time=time_sel)
dataset['time'].attrs['timezone']='UTC'
return dataset
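
# --- Hedged usage sketch (not part of the original module) ---
# Shows how coops_dataset_product might be called; the station id, date range
# and cache settings below are illustrative assumptions, and running this
# requires network access to tidesandcurrents.noaa.gov.
if __name__ == '__main__':
    example_ds = coops_dataset_product(station=9414290,            # assumed example NOAA station id
                                       product='water_level',
                                       start_date=np.datetime64('2018-01-01'),
                                       end_date=np.datetime64('2018-02-01'),
                                       days_per_request='M',        # month-aligned chunks, cache-friendly
                                       cache_dir=None)              # point at an existing directory to cache
    if example_ds is not None:
        print(example_ds['water_level'].isel(station=0).values[:5])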
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-02 12:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20161124_2342'),
]
operations = [
migrations.AddField(
model_name='performance',
name='eventid',
field=models.IntegerField(blank=True, null=True, unique=True),
),
migrations.AddField(
model_name='performance',
name='url',
field=models.URLField(blank=True, null=True),
),
]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# docker_services.py
# An SSH management interface has not been added yet; to add one, connect directly to the host's switch.
# Error handling for failures at any step still needs to be addressed. (Switch to one command per Popen and capture each return value?)
import re
import sys
import logging
from midbox.southbound.remote_ssh import remote_ssh
from midbox._config import DOCKER_REGISTRY_IP, DOCKER_REGISTRY_PORT, CTRL_PLANE_SW_NAME, DATA_PLANE_SW_NAME
logger = logging.getLogger(__name__)
def addContainer(ip, password, cpu, mem, image_name, containerid, cip='192.168.1.1/24'):
"""
部署Docker容器
:param ip: 主机ip
:param password: 主机管理员密码
:param cpu: cpu百分比
:param mem: 内存大小
:param image_name: docker镜像名
:param containerid: 上层分配给容器的id
:param cip: 上层分配给功能实例的ip地址,附带网络掩码
:return:
"""
logger.debug('Start.')
cpu = str(int(int(cpu) * 1000000 / 100))
mem = str(mem)
image_name = DOCKER_REGISTRY_IP + ':' + DOCKER_REGISTRY_PORT + '/' + image_name
args = ('docker run -d -m ' + mem +
'M -v /home/dockertest/:/data --cap-add=NET_ADMIN --cpu-period=1000000 --cpu-quota=' + cpu +
' --net=none --name c' + containerid + ' ' + image_name + ' ')
exitstatus, rdata = remote_ssh(ip, password, args)
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'pid=$(docker inspect -f \'{{.State.Pid}}\' c' + containerid + ') && ' +
'mkdir -p /var/run/netns && ' +
'ln -s /proc/$pid/ns/net /var/run/netns/$pid && ' +
'echo $pid')
logger.info(rdata)
pid = rdata
    pid = re.findall(r'\d+', pid)[0]
print('PID:' + pid)
exitstatus, rdata = remote_ssh(ip, password,
'ovs-vsctl add-br ' + DATA_PLANE_SW_NAME + ' && ' +
'ovs-vsctl add-br ' + CTRL_PLANE_SW_NAME)
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'ip link add in type veth peer name br-c' + containerid + '-in && ' +
'ip link add out type veth peer name br-c' + containerid + '-out')
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'ip link set in netns ' + pid + ' && ' +
'ip link set out netns ' + pid + ' && ' +
'ip link set br-c' + containerid + '-in up && ' +
'ip link set br-c' + containerid + '-out up')
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'ip netns exec ' + pid + ' ip link set in up && ' +
'ip netns exec ' + pid + ' ip link set out up')
logger.info(rdata)
    # Network configuration inside the container
exitstatus, rdata = remote_ssh(ip, password,
'docker exec c' + containerid + ' ovs-vsctl add-br sw && ' +
'docker exec c' + containerid + ' ovs-vsctl add-port sw in && ' +
'docker exec c' + containerid + ' ovs-vsctl add-port sw out')
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password, 'docker start c' + containerid)
logger.info(rdata)
    # Note: the container image must have OVS 2.9 or later installed!!
    # Break the loopback here; the interface is brought up again when the container is enabled
exitstatus, rdata = remote_ssh(ip, password,
'ovs-vsctl add-port ' + DATA_PLANE_SW_NAME + ' br-c' + containerid + '-in && ' +
'ovs-vsctl add-port ' + DATA_PLANE_SW_NAME + ' br-c' + containerid + '-out')
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'ip link add ceth0 type veth peer name br-c' + containerid + ' && ' +
'ip link set ceth0 netns ' + pid + ' && ' +
'ip link set br-c' + containerid + ' up')
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'ovs-vsctl add-port ' + CTRL_PLANE_SW_NAME + ' br-c' + containerid + ' && ' +
'ip netns exec ' + pid + ' ip link set ceth0 up && ' +
'docker exec c' + containerid + ' ifconfig ceth0 ' + cip + ' up')
logger.info(rdata)
return 0
def delContainer(ip, password, containerid):
"""
删除Docker容器实例
# 暂未添加ssh管理接口,若要添加,直接连接至主机交换机即可
# containerid应该每台宿主机可重复,与物理机id建立映射关系保证唯一性以及可计算得到(how to do?)
# 关于任何一步出错反馈的处理,最终需考虑到。(改成每个Popen单独执行一条命令然后捕捉返回值?)
# 考虑容器初始时没有启动两块网卡,只有当有效的流表下发后才启动,下发流表是单独的行为。
:param ip: 主机ip
:param password: 主机管理员密码
:param containerid: 容器的id
:return:
"""
logger.debug('Start.')
exitstatus, rdata = remote_ssh(ip, password,
'pid=$(docker inspect -f \'{{.State.Pid}}\' c' + containerid + ') && ' +
'echo $pid')
logger.info(rdata)
pid = str(rdata)
    pid = re.findall(r'\d+', pid)[0]
print('PID:' + pid)
exitstatus, rdata = remote_ssh(ip, password,
'docker stop c' + containerid + ' && ' +
'docker rm c' + containerid + ' && ' +
                                   'rm -rf /var/run/netns/' + pid)
logger.info(rdata)
exitstatus, rdata = remote_ssh(ip, password,
'ovs-vsctl del-port ' + DATA_PLANE_SW_NAME + ' br-c' + containerid + '-in && ' +
'ovs-vsctl del-port ' + DATA_PLANE_SW_NAME + ' br-c' + containerid + '-out && ' +
'ovs-vsctl del-port ' + CTRL_PLANE_SW_NAME + ' br-c' + containerid)
logger.info(rdata)
return 0
if __name__ == '__main__':
    # Add a container
# input('Input cpu limitation(%)' +
# '(single core, if container uses mutiple cores, this value can larger than 100):')
cpu = 20
mem = 128 # input('Input memory limitation(MB):')
image_name = 'bimage'
containerid = sys.argv[1]
ip = '127.0.0.1'
password = '123456'
    # addContainer(ip, password, cpu, mem, image_name, containerid)
    # Delete a container
# image_name=input('Input image name:')
# containerid=input('Input container id:')
containerid = sys.argv[1]
ip = '127.0.0.1'
password = '123456'
    # delContainer(ip, password, containerid)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-11 08:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='deploy',
name='branch',
field=models.CharField(max_length=32, null=True, verbose_name='分支'),
),
]
|
'''Problem statement:
Write an algorithm that reads the width and the height of a wall in meters,
computes its area and the amount of paint needed to paint it,
knowing that each liter of paint covers an area of 2 m2. '''
print('Calculation of the amount of paint needed to paint a wall:')
print('note: provide the values in meters')
H = float(input('Enter the height of the wall: '))
L = float(input('Enter the width of the wall: '))
s = H * L
print('Your wall measures {}x{} and its area is {} m2'.format(H, L, s))
Qntinta = s / 2
print('You will need {:0.1f} liters to paint {} m2'.format(Qntinta, s))
|
import torch
import torch.nn as nn
import torch.optim as optim
# A bare-bones linear model: define the data, define the model, define the training loop
model = nn.Linear(20, 1)
optimizer = optim.SGD(model.parameters(), lr = 1e-2)
def train(epochs, model, loss, optimizer, train_param, train_value, valid_param, valid_value):
for epoch in range(1, epochs + 1):
train_predict = model(train_param)
train_loss = loss(train_predict, train_value)
        optimizer.zero_grad()
        train_loss.backward()  # compute gradients before stepping the optimizer
        optimizer.step()
        if epoch % 10 == 0:
valid_predict = model(valid_param)
valid_loss = loss(valid_predict, valid_value)
print(f"Epoch {epoch}, Training loss {train_loss.item():.4f},"
f" Validation loss {valid_loss.item():.4f}")
|
import os
import pandas as pd
import numpy as np
import logging as log
from sutils import *
log.basicConfig(level=log.DEBUG, format='[%(process)d]: %(levelname)s %(message)s')
file = "tsl.csv"
if not os.path.isfile(file):
    print("File '" + file + "' not found")
    exit(0)
df = pd.read_csv(file, skiprows=range(1,7))
survey = { 'wtp': { 'que' : {'city': 'Q25', 'wild': 'Q26', 'tsla': 'Q28'},
'opt' : {1: 'price'}
},
'nps': { 'que' : {'city': 'Q12', 'wild': 'Q24', 'tsla': 'Q27'}
},
'act': { 'que' : {'city': 'Q11', 'wild': 'Q13', 'tsla': 'Q15'},
'opt' : {1: 'Commute', 2: 'At Work', 3: 'Outdoors', 4: 'Travel', 5: 'At Home', 6: 'Other'},
'code': {'Never': 1, 'Rarely': 2, 'Moderately': 3, 'Often': 4, 'Always': 5}
},
'brand': { 'que' : {'all': 'Q16'},
'nmsk': {'city': 'Q12', 'wild': 'Q24', 'tsla': 'Q27'},
'opt' : {1: 'honest', 2: 'original', 3: 'bold', 4: 'cool', 5: 'reliable', 6: 'technical', 7: 'aesthetic', 8: 'compact', 9: 'rugged', 10: 'sturdy'},
'code': {'1-low': 1, '2-moderate': 2, '3-average': 3, '4-good': 4, '5-high': 5}
},
'buy': { 'que' : {'all': 'Q17'},
'nmsk': {'city': 'Q12', 'wild': 'Q24', 'tsla': 'Q27'},
'code': {'This month': 1, 'In 1-6 months': 2, 'Within 12 months': 3, 'After 12 months': 4, 'Never': 5}
}
}
survey_data_extract(survey, df, '')
men_mask = df['Q4'].values != 'Female'
men_df = df[men_mask]
survey_data_extract(survey, men_df, 'men_')
women_mask = df['Q4'].values != 'Male'
women_df = df[women_mask]
survey_data_extract(survey, women_df, 'wom_')
|
# Write your function median_FITS here:
import numpy as np
import time
import sys
#import matplotlib.pyplot as plt
from astropy.io import fits
def median_datasets (datasetArr, m, n, l) :
istack = np.dstack(datasetArr)
median = np.median(istack, axis = 2)
return median
def load_fits (fname) :
hdulist = fits.open(fname)
#print(hdulist.info())
data = hdulist[0].data
#print(data)
#print(data.shape)
#print("max:",np.max(data))
return data
def median_fits (fnameArr) :
dataArr = []
tsize = 0
start = time.perf_counter()
for fname in fnameArr :
data = load_fits(fname)
tsize += sys.getsizeof(data)
dataArr.append(data)
m,n = dataArr[0].shape
#print(m,n)
istack = np.dstack(dataArr)
median = np.median(istack, axis = 2)
end = time.perf_counter() - start
tsize = (tsize + sys.getsizeof(istack))/1024
return median, end, tsize
# You can use this to test your function.
# Any code inside this `if` statement will be ignored by the automarker.
if __name__ == '__main__':
# Run your function with first example in the question.
result = median_fits(['image0.fits', 'image1.fits'])
print(result[0][100, 100], result[1], result[2])
# Run your function with second example in the question.
result = median_fits(['image{}.fits'.format(str(i)) for i in range(11)])
print(result[0][100, 100], result[1], result[2]) |
from mainHandler import *
from validTemp import validTemp
from solveConduct2d import *
import cStringIO
from plots import recentPlots, Plot
from geolocation import get_coords
import matplotlib.pyplot as plt
from time import sleep
from geolocation import gmaps_img
from google.appengine.api import memcache
def plotcontour(T, plotType):
Tdist = solveConduct2d(**T)
if plotType == "filled":
CS = plt.contourf(Tdist, 15)
else:
CS = plt.contour(Tdist, 15)
plt.clabel(CS, inline=1, fontsize=10)
plt.title("Temperature distribution")
plt.colorbar(CS, shrink = 0.8)
rv = cStringIO.StringIO()
plt.savefig(rv, format = "png")
plot = "data:image/png;base64," + rv.getvalue().encode("base64").strip()
plt.clf()
rv.close()
return plot
class conduct2d(mainHandler):
def post(self):
Tup = self.request.get('Tup')
Tleft = self.request.get('Tleft')
Tdown = self.request.get('Tdown')
Tright = self.request.get('Tright')
plotType = self.request.get('plotType')
Tuser = {'Tup':Tup, 'Tleft':Tleft, 'Tright':Tright, 'Tdown':Tdown}
T = validTemp(**Tuser)
#To avoid zero array passed to contourplot
if type(T) == dict:
flag = 0
for value in T.values():
if value != 0:
flag = 1
break
if flag == 0:
T = ["error" + key for key in Tuser.keys()]
if type(T) == list:
for t in T:
Tuser[t] = "Input invalid or beyond limits"
plots = recentPlots()
points=filter(None,(plot.geoPt for plot in plots))
geolocationUrl=None
if points:
geolocationUrl=gmaps_img(points)
self.renderPage("parameters.html", plots = plots,geolocationUrl = geolocationUrl, **Tuser)
else:
plot = plotcontour(T, plotType)
user = users.get_current_user()
if user:
p = Plot(user = user, image = plot, **T)
coords=get_coords(self.request.remote_addr)
if coords:
p.geoPt = coords
p.put()
sleep(0.1)
recentPlots(True)
self.renderPage("result.html", plot = plot, **T)
class cacheFlush(mainHandler):
def get(self):
memcache.flush_all()
self.redirect("/")
|
import multiprocessing
import os
import sys
from functools import partial
import numpy
import pandas as pd
from resources.functions import print_with_time, escape_invalid_xml_characters, escape_html_special_entities, \
text_to_lower, remove_only_special_characters_tokens, whitespace_tokenize_text
from multiclassification.parameters.dataset_parameters import parameters
new_representation_path = parameters['mimic_data_path'] + parameters['multiclassification_directory'] \
+ parameters['noteevents_anonymized_tokens_normalized_preprocessed']
if not os.path.exists(new_representation_path):
os.mkdir(new_representation_path)
def transform_representations(icustays, representation_path=None, new_representation_path=None, manager_queue=None):
for icustay in icustays:
if manager_queue is not None:
manager_queue.put(icustay)
if not os.path.exists(representation_path + "{}.csv".format(icustay)) \
or os.path.exists(new_representation_path + "{}.csv".format(icustay)):
continue
textual_data = pd.read_csv(representation_path + "{}.csv".format(icustay))
preprocessed_texts = []
for text_index, text_row in textual_data.iterrows():
text = text_row['Note']
for func in preprocessing_pipeline:
text = func(text)
preprocessed_texts.append(text)
textual_data['preprocessed_note'] = [' '.join(text) for text in preprocessed_texts]
textual_data['charttime'] = textual_data['Unnamed: 0']
textual_data = textual_data.drop(columns=['Unnamed: 0'])
textual_data.to_csv(new_representation_path + "{}.csv".format(icustay), index=False)
representation_path = parameters['mimic_data_path'] + parameters['multiclassification_directory'] \
+ parameters['noteevents_anonymized_tokens_normalized']
preprocessing_pipeline = [escape_invalid_xml_characters, escape_html_special_entities, text_to_lower,
whitespace_tokenize_text, remove_only_special_characters_tokens]
print_with_time("Loading data")
data_csv = pd.read_csv(parameters['mimic_data_path'] + parameters['multiclassification_directory']
+ parameters['all_stays_csv_w_events'])
icustays = data_csv['ICUSTAY_ID'].tolist()
with multiprocessing.Pool(processes=1) as pool:
manager = multiprocessing.Manager()
manager_queue = manager.Queue()
partial_transform_representation = partial(transform_representations,
representation_path=representation_path,
new_representation_path=new_representation_path,
manager_queue=manager_queue)
data = numpy.array_split(icustays, 6)
# self.transform_representations(data[0], new_representation_path=new_representation_path, manager_queue=manager_queue)
# exit()
total_files = len(icustays)
map_obj = pool.map_async(partial_transform_representation, data)
consumed = 0
while not map_obj.ready() or manager_queue.qsize() != 0:
for _ in range(manager_queue.qsize()):
manager_queue.get()
consumed += 1
sys.stderr.write('\rdone {0:%}'.format(consumed / total_files))
result = map_obj.get() |
import os
import numpy as np
import moby2
from moby2.scripting import products
from moby2.analysis import hwp
from todloop import Routine
from .utils import *
class CutSources(Routine):
def __init__(self, **params):
"""A routine that cuts the point sources"""
Routine.__init__(self)
# retrieve the inputs and outputs keys from data store
self.inputs = params.get('inputs', None)
self.outputs = params.get('outputs', None)
# retrieve other parameters
self._tag_source = params.get('tag_source', None)
self._source_list = params.get('source_list', None)
self._no_noise = params.get('no_noise', True)
self._pointing_par = params.get('pointing_par', None)
self._mask_params = params.get('mask_params', {})
self._shift_params = params.get('mask_shift_generator', None)
self._depot_path = params.get('depot', None)
self._write_depot = params.get('write_depot', False)
def initialize(self):
# get the depot
self._depot = moby2.util.Depot(self._depot_path)
user_config = moby2.util.get_user_config()
moby2.pointing.set_bulletin_A(params=user_config.get('bulletin_A_settings'))
def execute(self, store):
# retrieve tod
tod = store.get(self.inputs.get('tod'))
# check if source cut results exist
sourceResult = os.path.exists(
self._depot.get_full_path(
moby2.TODCuts, tag=self._tag_source, tod=tod))
# if cuts exist, load it now
if sourceResult:
self.logger.info("Loading time stream cuts (%s)" % self._tag_source)
# load source cut
source_cuts = self._depot.read_object(
moby2.TODCuts, tag=self._tag_source, tod=tod)
# fill the cuts in the TOD
moby2.tod.fill_cuts(tod, source_cuts, no_noise=self._no_noise)
# if source cut cannot be retrieved by tag_source, load it
# through _source_list
elif self._source_list is not None:
self.logger.info("Finding new source cuts")
# retrieve source list from the file given
with open(self._source_list, 'r') as f:
source_list = f.readlines()
source_list = [(s.strip('\n'), 'source') for s in source_list]
# supply focal plane information to tod
tod.fplane = products.get_focal_plane(self._pointing_par, tod.info)
# find sources that fall in the given TOD
matched_sources = moby2.ephem.get_sources_in_patch(
tod=tod, source_list=source_list)
# check if shift is needed
if self._shift_params is not None:
# calculate pointing offset
offset = products.get_pointing_offset(
self._shift_params, tod=tod, source_offset=True)
# check if offset is calculated successfully, if not give
# a zero offset
if offset is None:
offset = (0., 0.)
# calculate a map size
if max(offset) > 20. / 60:
self._mask_params['map_size'] = max(offset) + 10. / 60
self._mask_params['offset'] = offset
self.logger.info("matched sources: %s" % matched_sources)
# create a placeholder cut object to store our source cuts
pos_cuts_sources = moby2.TODCuts.for_tod(tod, assign=False)
# process source cut for each source
for source in matched_sources:
# compute the source cut associated with the source
source_cut = moby2.tod.get_source_cuts(
tod, source[1], source[2], **self._mask_params)
# merge the source cut to the total cuts
pos_cuts_sources.merge_tod_cuts(source_cut)
# fill the source cuts to the tod
moby2.tod.fill_cuts(tod, pos_cuts_sources, no_noise=self._no_noise)
# write to depot, copied from moby2, not needed here
if self._write_depot:
                self._depot.write_object(pos_cuts_sources,
                                         tag=self._tag_source,
                                         force=True, tod=tod, make_dirs=True)
# pass the processed tod back to data store
store.set(self.outputs.get('tod'), tod)
class CutPlanets(Routine):
def __init__(self, **params):
"""A routine that perform the planet cuts"""
Routine.__init__(self)
self.inputs = params.get('inputs', None)
self.outputs = params.get('outputs', None)
self._no_noise = params.get('no_noise', True)
self._tag_planet = params.get('tag_planet', None)
self._pointing_par = params.get('pointing_par', None)
self._mask_params = params.get('mask_params', {})
self._shift_params = params.get('mask_shift_generator', None)
self._depot_path = params.get('depot', None)
self._write_depot = params.get('write_depot', False)
def initialize(self):
self._depot = moby2.util.Depot(self._depot_path)
user_config = moby2.util.get_user_config()
moby2.pointing.set_bulletin_A(params=user_config.get('bulletin_A_settings'))
def execute(self, store):
tod = store.get(self.inputs.get('tod'))
# check if planetCuts exist
planetResult = os.path.exists(
self._depot.get_full_path(
moby2.TODCuts, tag=self._tag_planet, tod=tod))
# if planetCuts exist load it into variable pos_cuts_planets
if planetResult:
self.logger.info("Loading time stream cuts (%s)" % self._tag_planet)
pos_cuts_planets = self._depot.read_object(
moby2.TODCuts, tag=self._tag_planet, tod=tod)
# if planetCuts do not exist generate it on the run
else:
self.logger.info("Finding new planet cuts")
if not hasattr(tod, 'fplane'):
tod.fplane = products.get_focal_plane(self._pointing_par,
tod.info)
# load planet sources
matched_sources = moby2.ephem.get_sources_in_patch(
tod=tod, source_list=None)
# check if shift is needed
if self._shift_params is not None:
# calculate pointing offset
offset = products.get_pointing_offset(
self._shift_params, tod=tod, source_offset=True)
# check if offset is calculated successfully, if not give
# a zero offset
if offset is None:
offset = (0., 0.)
# calculate a map size
if max(offset) > 20. / 60:
self._mask_params['map_size'] = max(offset) + 10. / 60
self._mask_params['offset'] = offset
self.logger.info("matched sources: %s" % matched_sources)
# a place holder cut object to store all planet cut
pos_cuts_planets = moby2.TODCuts.for_tod(tod, assign=False)
# process each planet source
for source in matched_sources:
# calculate planet cut
planet_cut = moby2.tod.get_source_cuts(
tod, source[1], source[2], **self._mask_params)
# merge it into the total cut
pos_cuts_planets.merge_tod_cuts(planet_cut)
if self._write_depot:
# write planet cut to depot, copied from moby2, not needed
# here
self._depot.write_object(pos_cuts_planets,
tag=self._tag_planet, force=True, tod=tod,
make_dirs=True)
# fill planet cuts into tod
moby2.tod.fill_cuts(tod, pos_cuts_planets, no_noise=self._no_noise)
# pass the processed tod back to data store
store.set(self.outputs.get('tod'), tod)
class RemoveSyncPickup(Routine):
def __init__(self, **params):
"""This routine fit / removes synchronous pickup"""
Routine.__init__(self)
self.inputs = params.get('inputs', None)
self.outputs = params.get('outputs', None)
self._remove_sync = params.get('remove_sync', False)
self._force_sync = params.get('force_sync', False)
self._tag_sync = params.get('tag_sync', None)
self._depot_path = params.get('depot', None)
self._write_depot = params.get('write_depot', False)
def initialize(self):
self._depot = moby2.util.Depot(self._depot_path)
def execute(self, store):
# retrieve tod
tod = store.get(self.inputs.get('tod'))
# Check for existing results, to set what operations must be
# done/redone.
sync_result = os.path.exists(
self._depot.get_full_path(
moby2.tod.Sync, tag=self._tag_sync, tod=tod))
# determine if sync is needed
skip_sync = not self._remove_sync or (not self._force_sync
and sync_result)
# obtain scan frequency
scan_freq = moby2.tod.get_scan_info(tod).scan_freq
if (self._remove_sync) and (scan_freq != 0):
self.logger.info("Removing Sync")
# check if sync can be skipped
if skip_sync:
self.logger.info("Using old sync")
ss = self._depot.read_object(
moby2.tod.Sync, tag=self._tag_sync, tod=tod)
# if not generate it on the go
else:
self.logger.info("Computing new sync")
ss = moby2.tod.Sync(tod)
ss.findOutliers()
ss = ss.extend()
# write sync object to disk
if self._write_depot:
self._depot.write_object(ss, tag=self._tag_sync, tod=tod, make_dirs=True,
force=True)
ss.removeAll()
del ss
# pass the processed tod back to data store
store.set(self.outputs.get('tod'), tod)
class CutPartial(Routine):
def __init__(self, **params):
"""A routine that performs the partial cuts"""
Routine.__init__(self)
self.inputs = params.get('inputs', None)
self.outputs = params.get('outputs', None)
self._tag_partial = params.get('tag_partial', None)
self._force_partial = params.get('force_partial', False)
self._glitchp = params.get('glitchp', {})
self._include_mce = params.get('include_mce', True)
self._depot_path = params.get('depot', None)
self._no_noise = params.get('no_noise', True)
self._write_depot = params.get('write_depot', False)
def initialize(self):
self._depot = moby2.util.Depot(self._depot_path)
def execute(self, store):
# retrieve tod
tod = store.get(self.inputs.get('tod'))
# check if partial results already exist
partial_result = os.path.exists(
self._depot.get_full_path(moby2.TODCuts,
tag=self._tag_partial, tod=tod))
# check if we need to skip creating partial cuts
skip_partial = not self._force_partial and partial_result
# if we want to skip creating partial cuts, load from depot
if skip_partial:
# Read existing result
self.logger.info("Loading time stream cuts (%s)" % self._tag_partial)
cuts_partial = self._depot.read_object(
moby2.TODCuts, tag=self._tag_partial, tod=tod)
# otherwise generate partial cuts now
else:
self.logger.info('Generating partial cuts')
# Generate and save new glitch cuts
# note calbol may not be implemented...
cuts_partial = moby2.tod.get_glitch_cuts(
tod=tod, params=self._glitchp)
# check if we want to include mce_cuts
if self._include_mce:
# find mce cuts
mce_cuts = moby2.tod.get_mce_cuts(tod)
# merge it with the partial cuts
cuts_partial.merge_tod_cuts(mce_cuts)
# write to depot, not needed here
if self._write_depot:
self._depot.write_object(cuts_partial,
tag=self._tag_partial,
tod=tod, make_dirs=True, force=True)
# fill the partial cuts in our tod
moby2.tod.fill_cuts(
tod, cuts_partial, extrapolate=False, no_noise=self._no_noise)
# save the partial cuts in tod object for further processing
tod.cuts = cuts_partial
# pass the tod back to the store
store.set(self.outputs.get('tod'), tod)
class SubstractHWP(Routine):
    def __init__(self, **params):
        """This routine subtracts the A(chi) signal from the HWP"""
        Routine.__init__(self)
        self.inputs = params.get('inputs', None)
        self.outputs = params.get('outputs', None)
self._hwp_par = params.get('hwp_par')
self._depot_path = params.get('depot', None)
def initialize(self):
self._depot = moby2.util.Depot(self._depot_path)
def execute(self, store):
# retrieve tod
tod = store.get(self.inputs.get('tod'))
self.logger.info("Substract HWP signal")
# retrieve hwp_modes object from depot
hwp_modes = self._depot.read_object(
hwp.HWPModes,
tag=self._hwp_par['a_chi']['tag'],
tod=tod,
structure=self._hwp_par['a_chi']['structure'])
# get hwp angles
hwp_angles = moby2.scripting.products.get_hwp_angles(
self._hwp_par['angles'], tod)
        # subtracting the hwp signal
r = hwp_modes.get_reconstructor(hwp_angles * np.pi / 180)
hwp_signal = r.get_achi()
tod.data[hwp_modes.det_uid, :] -= hwp_signal
# pass the tod to the data store
store.set(self.outputs.get('tod'), tod)
class FindJumps(Routine):
def __init__(self, **params):
Routine.__init__(self)
self.inputs = params.get('inputs', None)
self.outputs = params.get('outputs', None)
self._dsStep = params.get('dsStep', None)
self._window = params.get('window', None)
def execute(self, store):
tod = store.get(self.inputs.get('tod'))
# find jumps
jumps = moby2.libactpol.find_jumps(tod.data,
self._dsStep,
self._window)
# store the jumps values
crit = {
'jumpLive': jumps,
'jumpDark': jumps,
}
# save to data store
store.set(self.outputs.get('jumps'), crit)
|
count = 0
sum = 0
while count < 10:
    count = count + 1
    shu = input("Please enter a number: ")
    shu = int(shu)
    sum = sum + shu
print("The sum is", sum)
weeks_in_year = 52
work_days_in_week = 5
week_days_in_year = weeks_in_year * work_days_in_week
holiday_in_year = 30
working_days_in_year = week_days_in_year - holiday_in_year
salary = 33660.00
day_rate = round(salary / working_days_in_year, 2)
work_hours_per_day = 7.5
hourly_rate = round(day_rate / work_hours_per_day, 2)
print("\nThis calculates salary assumptions")
#Work days in a year
print(f"\nThere are {weeks_in_year} weeks in a year, and {work_days_in_week} working days in a week. Therefore the number of week days in a year is about {week_days_in_year}.")
#Minus Holiday allowance
print(f"\nFrom this we subtract holiday allowance, for a total of {working_days_in_year} working days.")
#Per annum salary
print(f"\nIf the yearly salary is £{salary} we can divide that by {working_days_in_year} working days to get a day rate of £{day_rate}")
#Hourly rate
print(f"\nAnd thefore an hourly rate, assuming a {work_hours_per_day} hour working day, of £{hourly_rate}")
print("\n")
|
from django.db import models
# Create your models here.
class Word(models.Model):
word = models.TextField(blank=True, null=True)
meaning = models.TextField(blank=True, null=True) |
import time
import logging
logger = logging.getLogger(__name__)
# Log how long a function takes to run
def instrument(func):
def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        diff = end_time - start_time
logger.info(
f'Took {time.strftime("%H:%M:%S", time.gmtime(diff))} to get songs'
)
return result
return wrapper
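
# --- Hedged usage example (not in the original module) ---
# Shows how the decorator might be applied; fetch_songs and its sleep are
# placeholders standing in for the real song-fetching work.
@instrument
def fetch_songs():
    time.sleep(0.5)  # stand-in for the real work
    return ['song-a', 'song-b']

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    fetch_songs()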
|
#!/usr/bin/env python3
"""Measure the start-up time of the modules with differing number of contracts."""
import os
import statistics
import subprocess
from typing import List
def main() -> None:
""""Execute the main routine."""
modules = [
"functions_100_with_no_contract",
"functions_100_with_1_contract",
"functions_100_with_5_contracts",
"functions_100_with_10_contracts",
"functions_100_with_1_disabled_contract",
"functions_100_with_5_disabled_contracts",
"functions_100_with_10_disabled_contracts",
"classes_100_with_no_invariant",
"classes_100_with_1_invariant",
"classes_100_with_5_invariants",
"classes_100_with_10_invariants",
"classes_100_with_1_disabled_invariant",
"classes_100_with_5_disabled_invariants",
"classes_100_with_10_disabled_invariants",
]
for a_module in modules:
durations = [] # type: List[float]
for i in range(0, 10):
duration = float(
subprocess.check_output(["./measure.py", "--module", a_module], cwd=os.path.dirname(__file__)).strip())
durations.append(duration)
print("Duration to import the module {} (in milliseconds): {:.2f} ± {:.2f}".format(
a_module,
            statistics.mean(durations) * 1e3,  # seconds -> milliseconds
            statistics.stdev(durations) * 1e3))
if __name__ == "__main__":
main()
|
"""Add GroupRequest table.
Revision ID: 39a5823a808
Revises: a364e6e9c14
Create Date: 2013-09-21 15:40:31.274287
"""
# revision identifiers, used by Alembic.
revision = '39a5823a808'
down_revision = 'a364e6e9c14'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('grouprequest',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('from_user_id', sa.Integer(), nullable=True),
sa.Column('project_id', sa.Integer(), nullable=True),
sa.Column('to_user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['from_user_id'], [u'user.id'], ),
sa.ForeignKeyConstraint(['project_id'], [u'project.id'], ),
sa.ForeignKeyConstraint(['to_user_id'], [u'user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('from_user_id','project_id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('grouprequest')
### end Alembic commands ###
|
from datetime import datetime
from unittest import TestCase
from ddt import data, ddt, unpack # type:ignore[import]
from healthcheck.security import safe_dict
def make_test_dict(test_key, test_value, deep=1):
if deep > 1:
return dict(dummy=make_test_dict(test_key, test_value, deep - 1))
return {test_key: test_value}
class MakeTestDictTest(TestCase):
def test_should_return_input_value_if_deep_is_lte_0(self):
self.assertEqual({'a': 'asd'}, make_test_dict('a', 'asd', 0))
def test_should_make_test_dict_first_level(self):
self.assertEqual({'a': 'asd'}, make_test_dict('a', 'asd'))
self.assertEqual({'a': '***'}, make_test_dict('a', '***'))
def test_should_make_test_dict_second_level(self):
self.assertEqual({'dummy': {'a': 'asd'}}, make_test_dict('a', 'asd', 2))
self.assertEqual({'dummy': {'a': '***'}}, make_test_dict('a', '***', 2))
def test_should_make_test_dict_third_level(self):
self.assertEqual({'dummy': {'dummy': {'a': 'asd'}}}, make_test_dict('a', 'asd', 3))
self.assertEqual({'dummy': {'dummy': {'a': '***'}}}, make_test_dict('a', '***', 3))
@ddt
class SafeDictTest(TestCase):
def test_should_return_input_value_if_max_deep_is_lte_0(self):
self.assertEqual({'a': 'asd'}, safe_dict({'a': 'asd'}, max_deep=0))
@unpack
@data(
(1, 'a', 'asdf'),
(1, 'a', 42),
(1, 'a', 3.14),
(1, 'a', datetime.now()),
)
def test_should_dump_dictionary_without_blacklisted_keys(self, deep, key_to_test, value):
input_dict = make_test_dict(key_to_test, value, deep)
to_dict = safe_dict(input_dict)
self.assertEqual(input_dict, to_dict)
@unpack
@data(
(1, 'key', 'asdf'),
(1, 'Key', 'asdf'),
(1, 'somekey', 'asdf'),
(1, 'someKey', 'asdf'),
(1, 'key_value', 'asdf'),
(1, 'Key_value', 'asdf'),
(1, 'key', 42),
(1, 'Key', 42),
(1, 'somekey', 42),
(1, 'someKey', 42),
(1, 'key_value', 42),
(1, 'Key_value', 42),
(1, 'key', 3.14),
(1, 'Key', 3.14),
(1, 'somekey', 3.14),
(1, 'someKey', 3.14),
(1, 'key_value', 3.14),
(1, 'Key_value', 3.14),
(1, 'key', datetime.now()),
(1, 'Key', datetime.now()),
(1, 'somekey', datetime.now()),
(1, 'someKey', datetime.now()),
(1, 'key_value', datetime.now()),
(1, 'Key_value', datetime.now()),
(2, 'key', 'asdf'),
(2, 'Key', 'asdf'),
(2, 'somekey', 'asdf'),
(2, 'someKey', 'asdf'),
(2, 'key_value', 'asdf'),
(2, 'Key_value', 'asdf'),
(3, 'key', 'asdf'),
(3, 'Key', 'asdf'),
(3, 'somekey', 'asdf'),
(3, 'someKey', 'asdf'),
(3, 'key_value', 'asdf'),
(3, 'Key_value', 'asdf'),
(4, 'key', 'asdf'),
(4, 'Key', 'asdf'),
(4, 'somekey', 'asdf'),
(4, 'someKey', 'asdf'),
(4, 'key_value', 'asdf'),
(4, 'Key_value', 'asdf'),
(5, 'key', 'asdf'),
(5, 'Key', 'asdf'),
(5, 'somekey', 'asdf'),
(5, 'someKey', 'asdf'),
(5, 'key_value', 'asdf'),
(5, 'Key_value', 'asdf'),
)
def test_should_dump_dictionary_with_blacklisted_key_deep(self, deep, key_to_test, value):
input_dict = make_test_dict(key_to_test, value, deep)
to_dict = safe_dict(input_dict)
self.assertEqual(make_test_dict(key_to_test, '********', deep), to_dict)
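# The tests above pin down safe_dict's behaviour: values stored under keys whose
# lowercase form contains a blacklisted token such as "key" are replaced with
# '********', recursing through nested dicts up to max_deep levels, while
# max_deep=0 returns the input untouched. The sketch below is NOT the
# healthcheck.security implementation, only a minimal version consistent with
# these tests; the token tuple and the default max_deep are assumptions.
BLACKLIST_TOKENS = ("key",)
def safe_dict_sketch(d, max_deep=10):
    # Non-dicts and exhausted depth are returned as-is.
    if max_deep <= 0 or not isinstance(d, dict):
        return d
    masked = {}
    for k, v in d.items():
        if any(token in str(k).lower() for token in BLACKLIST_TOKENS):
            masked[k] = '********'
        elif isinstance(v, dict):
            masked[k] = safe_dict_sketch(v, max_deep - 1)
        else:
            masked[k] = v
    return masked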
|
import connexion
#app = connexion.FlaskApp(__name__, specification_dir='.', server='tornado')
app = connexion.FlaskApp(__name__, specification_dir='.')
app.add_api('swagger.yaml')
print(app.app.__dict__)
app.run(port=8080)
|
from utility import dataset_function as reader
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin
from sacred import Experiment
def create_client_profile_features(X: pd.DataFrame, copy: bool = True) -> pd.DataFrame:
"""
Создание признаков на основе профиля клиентов.
Parameters
----------
X: pandas.core.frame.DataFrame
Матрица признаков с исходным профилем клиента.
copy: bool, optional, default = True
Флаг использования копии датафрейма X.
Опциональный параметр, по умолчанию, равен True.
Returns
-------
X_transformed: pandas.core.frame.DataFrame
Расширенная матрица признаков с профилем клиентов.
"""
if copy:
X = X.copy()
X["days_on_last_job"] = X["days_on_last_job"].replace(365243, np.nan)
bki_flags = [flag for flag in X.columns if "amt_req_credit_bureau" in flag]
X["bki_requests_count"] = X[bki_flags].sum(axis=1)
X["bki_kurtosis"] = X[bki_flags].kurtosis(axis=1)
X["external_scoring_prod"] = X["external_scoring_rating_1"] * X["external_scoring_rating_2"] * X[
"external_scoring_rating_3"]
X["external_scoring_weighted"] = X.external_scoring_rating_1 * 2 + X.external_scoring_rating_2 * 1 \
+ X.external_scoring_rating_3 * 3
for function_name in ["min", "max", "mean", "nanmedian", "var"]:
feature_name = "external_scoring_rating_{}".format(function_name)
X[feature_name] = eval("np.{}".format(function_name))(
X[["external_scoring_rating_1", "external_scoring_rating_2", "external_scoring_rating_3"]], axis=1
)
# Ratios between the main financial indicators
X['ratio_credit_to_annuity'] = X['amount_credit'] / X['amount_annuity']
X["ratio_annuity_to_salary"] = X['amount_annuity'] / X['total_salary']
X['ratio_credit_to_salary'] = X['amount_credit'] / X['total_salary']
X["total_salary_net"] = X["total_salary"] - X["amount_annuity"]
# Ratios of financial indicators to age and time-based features
X["ratio_annuity_to_age"] = X["amount_annuity"] / X["age"]
X["ratio_credit_to_age"] = X["amount_credit"] / X["age"]
X["ratio_salary_to_age"] = X["total_salary"] / X["age"]
X["ratio_salary_to_experience"] = X["total_salary"] / X["days_on_last_job"]
X["ratio_credit_to_experience"] = X["amount_credit"] / X["days_on_last_job"]
X["ratio_annuity_to_experience"] = X["amount_annuity"] / X["days_on_last_job"]
# Ratios of time-based features
X["ratio_age_to_experience"] = X["age"] / X["days_on_last_job"]
X["ratio_salary_to_region_population"] = X["total_salary"] * X["region_population"]
X["ratio_car_to_experience"] = X["own_car_age"] / X["days_on_last_job"]
X["ratio_car_to_age"] = X["own_car_age"] / X["age"]
# Product of the loan's financial indicators and the default probability
# This quantity is known as the expected default, or the expected loss
X["expected_total_loss_1"] = X["external_scoring_rating_1"] * X["amount_credit"]
X["expected_total_loss_2"] = X["external_scoring_rating_2"] * X["amount_credit"]
X["expected_total_loss_3"] = X["external_scoring_rating_3"] * X["amount_credit"]
X["expected_monthly_loss_1"] = X["external_scoring_rating_1"] * X["amount_annuity"]
X["expected_monthly_loss_2"] = X["external_scoring_rating_2"] * X["amount_annuity"]
X["expected_monthly_loss_3"] = X["external_scoring_rating_3"] * X["amount_annuity"]
return X
class ClientProfile(TransformerMixin):
client_profile: pd.DataFrame
def __init__(self, filename: str, ex: Experiment):
self.client_profile = reader.get_input(filename, ex)
self.client_profile = create_client_profile_features(X=self.client_profile, copy=True)
self.client_profile.columns = [f"cp_{x}" for x in self.client_profile.columns]
self.client_profile = self.client_profile.rename(columns={f'cp_application_number': 'application_number'})
def fit(self, X, y=None):
return self
def transform(self, X):
assert isinstance(X, pd.DataFrame)
Xt = X.merge(self.client_profile, how="left", on="application_number")
return Xt
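# A minimal usage sketch (the file name and experiment name below are
# hypothetical, not part of this module): ClientProfile loads the client
# profile table once, derives the features above, prefixes the columns with
# "cp_", and left-joins them onto any frame that carries application_number.
#
#   ex = Experiment("example_experiment")
#   client_profile = ClientProfile("client_profile.csv", ex)
#   X_train_enriched = client_profile.fit(X_train).transform(X_train)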
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
import os
from xosconfig import Config
from xosconfig import Config as Config2
basic_conf = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/confs/basic_conf.yaml")
yaml_not_valid = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/confs/yaml_not_valid.yaml")
invalid_format = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/confs/invalid_format.yaml")
sample_conf = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/confs/sample_conf.yaml")
small_schema = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/schemas/small_schema.yaml")
services_list = {
"xos-ws": [],
"xos-db": [],
}
db_service = [
{
"ModifyIndex": 6,
"CreateIndex": 6,
"Node": "0152982c3159",
"Address": "172.19.0.2",
"ServiceID": "0d53ce210785:frontend_xos_db_1:5432",
"ServiceName": "xos-db",
"ServiceTags": [],
"ServiceAddress": "172.18.0.4",
"ServicePort": 5432,
"ServiceEnableTagOverride": "false"
}
]
class XOSConfigTest(unittest.TestCase):
"""
Testing the XOS Config Module
"""
def tearDown(self):
# NOTE clear the config after each test
Config.clear()
def test_initialize_only_once(self):
"""
[XOS-Config] Raise if initialized twice
"""
with self.assertRaises(Exception) as e:
Config.init(sample_conf)
Config2.init(sample_conf)
self.assertEqual(e.exception.message, "[XOS-Config] Module already initialized")
def test_config_not_initialized(self):
"""
[XOS-Config] Raise if accessing properties without initialization
"""
with self.assertRaises(Exception) as e:
Config.get("database")
self.assertEqual(e.exception.message, "[XOS-Config] Module has not been initialized")
def test_missing_file_exception(self):
"""
[XOS-Config] Raise if file not found
"""
with self.assertRaises(Exception) as e:
Config.init("missing_conf")
self.assertEqual(e.exception.message, "[XOS-Config] Config file not found at: missing_conf")
def test_yaml_not_valid(self):
"""
[XOS-Config] Raise if yaml is not valid
"""
with self.assertRaises(Exception) as e:
Config.init(yaml_not_valid)
self.assertEqual(e.exception.message, "[XOS-Config] The config format is wrong: Unable to load any data from source yaml file")
def test_invalid_format(self):
"""
[XOS-Config] Raise if format is not valid (we expect a dictionary)
"""
with self.assertRaises(Exception) as e:
Config.init(invalid_format)
self.assertEqual(e.exception.message, "[XOS-Config] The config format is wrong: Schema validation failed:\n - Value '['I am', 'a yaml', 'but the', 'format is not', 'correct']' is not a dict. Value path: ''.")
def test_env_override(self):
"""
[XOS-Config] the XOS_CONFIG_FILE environment variable should override the config_file
"""
os.environ["XOS_CONFIG_FILE"] = "env.yaml"
with self.assertRaises(Exception) as e:
Config.init("missing_conf")
self.assertEqual(e.exception.message, "[XOS-Config] Config file not found at: env.yaml")
del os.environ["XOS_CONFIG_FILE"]
def test_schema_override(self):
"""
[XOS-Config] the XOS_CONFIG_SCHEMA environment variable should override the config_schema
"""
os.environ["XOS_CONFIG_SCHEMA"] = "env-schema.yaml"
with self.assertRaises(Exception) as e:
Config.init(basic_conf)
self.assertRegexpMatches(e.exception.message, r'\[XOS\-Config\] Config schema not found at: (.+)env-schema\.yaml')
# self.assertEqual(e.exception.message, "[XOS-Config] Config schema not found at: env-schema.yaml")
del os.environ["XOS_CONFIG_SCHEMA"]
def test_schema_override_usage(self):
"""
[XOS-Config] the XOS_CONFIG_SCHEMA should be used to validate a config
"""
os.environ["XOS_CONFIG_SCHEMA"] = small_schema
with self.assertRaises(Exception) as e:
Config.init(basic_conf)
self.assertEqual(e.exception.message, "[XOS-Config] The config format is wrong: Schema validation failed:\n - Key 'database' was not defined. Path: ''.")
del os.environ["XOS_CONFIG_SCHEMA"]
def test_get_cli_param(self):
"""
[XOS-Config] Should read CLI -C param
"""
args = ["-A", "Foo", "-c", "Bar", "-C", "config.yaml"]
res = Config.get_cli_param(args)
self.assertEqual(res, "config.yaml")
def test_get_default_val_for_missing_param(self):
"""
[XOS-Config] Should get the default value if nothing is specified
"""
Config.init(basic_conf)
log = Config.get("logging")
self.assertEqual(log, {
"level": "info",
"channels": ["file", "console"],
"logstash_hostport": "cordloghost:5617",
"file": "/var/log/xos.log",
})
def test_get_config_file(self):
"""
[XOS-Config] Should return the config file in use
"""
Config.init(sample_conf)
res = Config.get_config_file()
self.assertEqual(res, sample_conf)
def test_get_missing_param(self):
"""
[XOS-Config] Should raise reading a missing param
"""
Config.init(sample_conf)
res = Config.get("foo")
self.assertEqual(res, None)
def test_get_first_level(self):
"""
[XOS-Config] Should return a first level param
"""
Config.init(sample_conf)
# NOTE we are using Config2 here to be sure that the configuration is readable from any import,
# not only from the one that has been used to initialize it
res = Config2.get("database")
self.assertEqual(res, {
"name": "xos",
"username": "test",
"password": "safe"
})
def _test_get_child_level(self):
"""
[XOS-Config] Should return a child level param
"""
Config.init(sample_conf)
res = Config.get("nested.parameter.for")
self.assertEqual(res, "testing")
def test_get_service_list(self):
"""
[XOS-Config] Should query registrator and return a list of services
"""
with patch("xosconfig.config.requests.get") as mock_get:
mock_get.return_value.json.return_value = services_list
res = Config.get_service_list()
self.assertEqual(res, [
"xos-ws",
"xos-db",
])
def test_get_service_info(self):
"""
[XOS-Config] Should query registrator and return service info
"""
with patch("xosconfig.config.requests.get") as mock_get:
mock_get.return_value.json.return_value = db_service
info = Config.get_service_info("xos-db")
self.assertEqual(info, {
"name": "xos-db",
"url": "172.18.0.4",
"port": 5432
})
def test_fail_get_service_info(self):
"""
[XOS-Config] Should query registrator and return an exception if it's down
"""
with patch("xosconfig.config.requests.get") as mock_get:
mock_get.return_value.ok = False
with self.assertRaises(Exception) as e:
Config.get_service_info("missing-service")
self.assertEqual(e.exception.message, "[XOS-Config] Registrator is down")
def test_missing_get_service_info(self):
"""
[XOS-Config] Should query registrator and return an exception if service is not there
"""
with patch("xosconfig.config.requests.get") as mock_get:
mock_get.return_value.json.return_value = []
with self.assertRaises(Exception) as e:
Config.get_service_info("missing-service")
self.assertEqual(e.exception.message, "[XOS-Config] The service missing-service looking for does not exist")
def test_get_service_endpoint(self):
"""
[XOS-Config] Should query registrator and return service endpoint
"""
with patch("xosconfig.config.requests.get") as mock_get:
mock_get.return_value.json.return_value = db_service
endpoint = Config.get_service_endpoint("xos-db")
self.assertEqual(endpoint, "http://172.18.0.4:5432")
if __name__ == '__main__':
unittest.main() |
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import gettext as __
from myapp import app, appbuilder
from myapp.models.log import Log
from myapp.views.base import MyappModelView
from . import LogMixin
class LogModelView(LogMixin, MyappModelView):
datamodel = SQLAInterface(Log)
list_columns = ['user','method','path','duration_ms','dttm']
if (
not app.config.get("FAB_ADD_SECURITY_VIEWS") is False
or app.config.get("MYAPP_LOG_VIEW") is False
):
appbuilder.add_view(
LogModelView,
"Action Log",
label=__("Action Log"),
category="Security",
category_label=__("Security"),
icon="fa-list-ol",
)
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
from Pubnub import Pubnub
import sys
import datetime
import tornado
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or None
ssl_on = len(sys.argv) > 4 and bool(sys.argv[4]) or False
## -----------------------------------------------------------------------
## Initiate Class
## -----------------------------------------------------------------------
pubnub = Pubnub( publish_key, subscribe_key, secret_key, ssl_on )
crazy = ' ~`!@#$%^&*( 顶顅 Ȓ)+=[]\\{}|;\':",./<>?abcd'
## -----------------------------------------------------------------------
## BENCHMARK
## -----------------------------------------------------------------------
def connected() :
pubnub.publish({
'channel' : crazy,
'message' : { 'Info' : 'Connected!' }
})
trips = { 'max' : 0 }
def received(message):
current_trip = str(datetime.datetime.now())[0:19]
if not trips.has_key(current_trip) :
trips[current_trip] = 0
trips[current_trip] = trips[current_trip] + 1
if trips[current_trip] > trips['max'] :
trips['max'] = trips[current_trip]
print(message)
pubnub.publish({
'channel' : crazy,
'message' : current_trip +
" Trip: " +
str(trips[current_trip]) +
" Max Trips: " +
str(trips['max']) +
"/sec"
})
pubnub.subscribe({
'channel' : crazy,
'connect' : connected,
'callback' : received
})
## -----------------------------------------------------------------------
## IO Event Loop
## -----------------------------------------------------------------------
tornado.ioloop.IOLoop.instance().start()
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.fields.simple import TextAreaField
from wtforms.validators import InputRequired, Email, Length
class UserForm(FlaskForm):
"""creates a form for createing / editing"""
email = StringField("Email",validators=[InputRequired(message="Email is required"), Email(message="Must be a valid email") ])
first_name = StringField("First Name", validators = [InputRequired(), Length(max = 30) ] )
last_name = StringField("Last Name", validators = [InputRequired(), Length(max = 30) ] )
username = StringField("User Name",validators=[InputRequired()])
password= PasswordField("Password",validators=[InputRequired()])
class UserLogin(FlaskForm):
username = StringField("User Name",validators=[InputRequired()])
password= PasswordField("Password",validators=[InputRequired()])
class FeedBackForm(FlaskForm):
title = StringField("Title", validators=[InputRequired()])
content = TextAreaField("Content", validators=[InputRequired()]) |
#!/usr/bin/env python2
## -*- coding: utf-8 -*-
import json
import os,sys
import requests
import time
import multiprocessing.dummy
debug = False
def jsondump(item):
return json.dumps(item, sort_keys=True,indent=4).decode('unicode_escape').encode('utf-8')
class AnsibleAPI:
def __init__(self,host,user,passwd,ssl=True,verify=False):
self.host = host
self.user = user
self.__passwd = passwd
self.ssl = ssl
self.verify = verify
class Error(Exception):
def __init__(self, status,content,url):
self.status = status
self.content = content
self.url = url
def __str__(self):
return repr("%d : '%s' url='%s'"%(self.status,self.content,self.url))
def __returnHandle(self,status,content,url,warning=()):
if status == 200:
return json.loads(content)
elif status in warning or warning == 'all':
print >> sys.stderr, self.Error(status,content,url)
else:
raise self.Error(status,content,url)
def request(self,method, url ,data = None,warning=()):
fullurl = "http"
if self.ssl:
fullurl += 's'
fullurl += "://%s%s"%(self.host,url)
if "?" in fullurl:
fullurl += "&format=json"
else:
fullurl += "?format=json"
r = requests.request(method,fullurl,auth=(self.user,self.__passwd),verify = self.verify, data = data)
return self.__returnHandle(r.status_code,r.content,fullurl,warning)
def gethosts(self):
result = self.request("GET","/api/v1/hosts/")
count = result['count']
hosts = result['results']
while result['next']:
result = self.request("GET",result['next'])
hosts.extend(result['results'])
if len(hosts) != count:
print >> sys.stderr, 'count = %d but actually count is %d'%(count,len(hosts))
self.hosts = hosts
return hosts
def getfactsbyhostid(self,hostid):
def getfactsin(hostid,events):
warning = 'all'
result = self.request("GET","/api/v1/hosts/%d/%s/?page=last"%(hostid,events),warning=warning)
if not result:
return None
jobs = result.get('results')
for job in jobs:
if job['event_data']['res'].has_key('ansible_facts'):
return job['event_data']['res']['ansible_facts']
while result['previous']:
result = self.request("GET",result['previous'],warning=warning)
jobs = result['results']
for job in jobs:
if job['event_data']['res'].has_key('ansible_facts'):
return job['event_data']['res']['ansible_facts']
return None
job_fact = getfactsin(hostid,'job_events')
command_fact = getfactsin(hostid,'ad_hoc_command_events')
return job_fact or command_fact
def getfacts(self):
pool = multiprocessing.dummy.Pool(50)
results = pool.map(self.getfactsbyhostid,[host['id'] for host in self.hosts])
facts = dict(zip([host['name'] for host in self.hosts],results))
for host in facts.keys():
if not facts[host]:
facts.pop(host)
self.facts = facts
return facts
def main():
api = AnsibleAPI('10.214.129.160','cmdb','cmdb1234')
api.gethosts()
print time.ctime()
api.getfacts()
print time.ctime()
print len(api.hosts)
print len(api.facts)
print api.facts.keys()
return api
if __name__ == "__main__":
# url = 'https://10.214.129.160/api/v1/hosts/?format=json'
main()
pass
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from django_extensions.db.fields import CreationDateTimeField, AutoSlugField
class Proposal(models.Model):
CODIGOS_E_LINGUAGENS = 'codigos-e-linguagens'
CIENCIAS_DA_NATUREZA_E_SUAS_TECNOLOGIAS = \
'ciencias-da-natureza-e-suas-tecnologias'
ELETRONICA = 'eletronica'
GESTAO_E_NEGOCIOS = 'gestao-e-negocios'
INFORMATICA = 'informatica'
MARKETING = 'marketing'
EDUCACAO = 'educacao'
AREA_CHOICES = (
(CODIGOS_E_LINGUAGENS, _('Códigos e Linguagens')),
(CIENCIAS_DA_NATUREZA_E_SUAS_TECNOLOGIAS,
_('Ciências da Natureza e Suas Tecnologias')),
(ELETRONICA, _('Eletrônica')),
(GESTAO_E_NEGOCIOS, _('Gestão e Negócios')),
(INFORMATICA, _('Informática')),
(MARKETING, _('Marketing')),
(EDUCACAO, _('Educação')),
)
title = models.CharField(_('Title'), max_length=300)
slug = AutoSlugField(
populate_from='title', overwrite=True,
max_length=340, unique=True, db_index=True)
brief = models.TextField(
_('Brief'), max_length=5000, blank=True,
help_text=_('Max of 300 words.'))
created_at = CreationDateTimeField(_(u'Created At'))
area = models.CharField(
_('Area'), choices=AREA_CHOICES, max_length=100)
document = models.FileField(_('Document'), null=True, blank=True)
# relations
activity = models.ForeignKey(
to='activity.Activity',
on_delete=models.CASCADE,
related_name='proposals')
created_by = models.ForeignKey(
to='core.Profile',
on_delete=models.CASCADE,
related_name='proposals')
# SIPEX - Minicursos (short courses)
(DUAS_HORAS, QUATRO_HORAS, SEIS_HORAS) = range(3)
CARGA_HORARIA_CHOICES = (
(DUAS_HORAS, _('Duas Horas')),
(QUATRO_HORAS, _('Quatro Horas')),
(SEIS_HORAS, _('Seis Horas')),
)
(DUAS_HORAS, QUATRO_HORAS, SEIS_HORAS) = range(3)
PUBLICO_CHOICES = (
(DUAS_HORAS, _('Duas Horas')),
(QUATRO_HORAS, _('Quatro Horas')),
(SEIS_HORAS, _('Seis Horas')),
)
carga_horaria = models.IntegerField(
_('Carga horária'), choices=CARGA_HORARIA_CHOICES, default=DUAS_HORAS,
null=True, blank=True)
quantidade_de_vagas = models.IntegerField(
_('Quantidade de Vagas'), null=True, blank=False)
institution = models.CharField(
_('Institution'), max_length=200, null=True, blank=False)
ementa = models.TextField(
_('Ementa'), max_length=1000, blank=False)
objetivos = models.TextField(
_('Objetivos'), max_length=1000, blank=False)
publico = models.CharField(
_('Tipo de Público'), max_length=300, null=True, blank=False)
materiais = models.TextField(
_('Materiais necessários'), max_length=1000, blank=False)
justificativa = models.TextField(
_('Justificativa'), max_length=1000, blank=False)
pre_requisitos = models.TextField(
_('Pré-requisitos'), max_length=2000,
null=True, blank=False)
# Concurso Fotografico (photography contest)
camera = models.CharField(
_('Câmera utilizada na captação da imagem'), max_length=200,
null=True, blank=False)
ferramenta = models.CharField(
_('Programa utilizado no tratamento da imagem'), max_length=200,
null=True, blank=False)
class Meta:
verbose_name = _('Proposal')
verbose_name_plural = _('Proposals')
ordering = ('-created_at',)
def __unicode__(self):
return u'{0.title}'.format(self)
def get_absolute_url(self):
return reverse('proposal:detail', kwargs={
'activity_slug': self.activity.slug,
'slug': self.slug
})
def get_update_url(self):
return reverse('proposal:update', kwargs={
'activity_slug': self.activity.slug,
'slug': self.slug
})
def get_delete_url(self):
return reverse('proposal:delete', kwargs={
'activity_slug': self.activity.slug,
'slug': self.slug
})
def get_authors_email(self):
return self.authors.values_list('email', flat=True)
def send_submited_proposal_email(self):
context = {
'proposal': self,
'activity': self.activity,
}
message = render_to_string(
'mailing/submited_proposal.txt', context)
html_message = render_to_string(
'mailing/submited_proposal.html', context)
subject = _(u'Submited Proposal to the "{}"!').format(
self.activity.title)
recipients = [
settings.EMAIL_168HORAS,
self.created_by.user.email,
self.activity.created_by.user.email
]
send_mail(
subject=subject, message=message, html_message=html_message,
from_email=settings.NO_REPLY_EMAIL, recipient_list=recipients
)
class Author(models.Model):
name = models.CharField(_('Name'), max_length=300)
email = models.EmailField(_('Email'), max_length=300)
phone = models.CharField(
_('Phone'), max_length=30, null=True, blank=True)
# relations
proposal = models.ForeignKey(
to='proposal.Proposal',
on_delete=models.CASCADE,
related_name='authors'
)
class Meta:
verbose_name = _('Author')
verbose_name_plural = _('Authors')
def send_submited_proposal_email(self):
context = {
'proposal': self.proposal,
'activity': self.proposal.activity,
}
message = render_to_string(
'mailing/submited_proposal.txt', context)
html_message = render_to_string(
'mailing/submited_proposal.html', context)
subject = _(u'Submited Proposal to the "{}"!').format(
self.proposal.activity.title)
recipients = [
settings.EMAIL_168HORAS,
self.email,
]
send_mail(
subject=subject, message=message, html_message=html_message,
from_email=settings.NO_REPLY_EMAIL, recipient_list=recipients
)
class Images(models.Model):
file = models.FileField(_('File'), null=True, blank=True)
# relations
proposal = models.ForeignKey(
to='proposal.Proposal', related_name='images',
on_delete=models.CASCADE)
class Meta:
verbose_name = "Images"
verbose_name_plural = "Imagess"
def send_proposal_submited_email(sender, instance, created, **kwargs):
if not created:
return
instance.send_submited_proposal_email()
post_save.connect(send_proposal_submited_email, sender=Proposal)
post_save.connect(send_proposal_submited_email, sender=Author)
|
def nds(xs):
p = 0
q = len(xs) - 1
for i, _ in enumerate(xs):
if i == q:
return (-1, -1)
if xs[i] > xs[i+1]:
p = i
break
for i, _ in enumerate(xs[p+1:], p+1):
if xs[i-1] < xs[i]:
q = i - 1
break
return p, q
def is_sorted(xs):
for i in range(1, len(xs)):
if xs[i-1] > xs[i]:
return False
return True
def flip(xs, k):
for i in range((k + 1) // 2):
xs[i], xs[k-i] = xs[k-i], xs[i]
# xs = [1, 2, 5, 4, 3, 6, ...]
# => [1, 2, (5, 4, 3), 6, ...] identify nds
# => [(3, 4, 5), 2, 1, 6, ...] flip at nds end
# => [(5, 4, 3), 2, 1, 6, ...] flip at nds end-start
# => [1, 2, (3, 4, 5), 6, ...]
def sort(xs):
while not is_sorted(xs):
start, end = nds(xs)
flip(xs, end)
flip(xs, end - start)
flip(xs, end)
assert is_sorted(xs[start:end+1])
return xs
# rsort(xs, pre)
# try to sort longer and longer prefixes of the
# array:
# - xs = A[1..n]
# - pre= A[1..m] where m <= n
#
def rsort(xs, pre=[]):
if len(xs) == len(pre):
return xs
L = len(pre)
m = min(xs[L:])
if xs[:L + 1] != pre + [m]:
i = xs.index(m)
flip(xs, i)
if pre:
flip(xs, i - L)
flip(xs, i)
return rsort(xs, pre + [m])
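# Worked example (a trace of rsort above, not part of the original module):
#   rsort([3, 1, 2])
#     pre=[]        m=1  flip(1)                    -> [1, 3, 2]
#     pre=[1]       m=2  flip(2); flip(1); flip(2)  -> [2, 3, 1] -> [3, 2, 1] -> [1, 2, 3]
#     pre=[1, 2]    m=3  prefix already [1, 2, 3], no flips needed
#     pre=[1, 2, 3]      same length as xs, done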
if __name__ == '__main__':
from itertools import permutations
for k in range(1, 6):
for xs in permutations(range(k)):
xs = list(xs)
xd = xs[:]
assert is_sorted(rsort(xs))
assert is_sorted(sort(xd))
|
from typing import Dict, List, Optional, Union
import numpy as np
import gdsfactory as gf
from gdsfactory.components.via_corner import via_corner
from gdsfactory.cross_section import strip
from gdsfactory.port import Port
from gdsfactory.routing.manhattan import round_corners
from gdsfactory.types import (
ComponentSpec,
CrossSectionSpec,
MultiCrossSectionAngleSpec,
Route,
)
def get_route_from_steps(
port1: Port,
port2: Port,
steps: Optional[List[Dict[str, float]]] = None,
bend: ComponentSpec = "bend_euler",
taper: Optional[ComponentSpec] = "taper",
cross_section: Union[CrossSectionSpec, MultiCrossSectionAngleSpec] = strip,
**kwargs
) -> Route:
"""Returns a route formed by the given waypoints steps.
Uses smooth euler bends instead of corners and optionally tapers in straight sections.
Tapering to wider straights reduces the optical loss when auto_widen=True.
`get_route_from_steps` is a manual version of `get_route`
and a more concise and convenient version of `get_route_from_waypoints`
Args:
port1: start port.
port2: end port.
steps: changes that define the route [{'dx': 5}, {'dy': 10}].
bend: function that returns bends.
straight: straight spec.
taper: taper spec.
cross_section: cross_section spec.
kwargs: cross_section settings.
.. plot::
:include-source:
import gdsfactory as gf
c = gf.Component("get_route_from_steps_sample")
w = gf.components.straight()
left = c << w
right = c << w
right.move((100, 80))
obstacle = gf.components.rectangle(size=(100, 10), port_type=None)
obstacle1 = c << obstacle
obstacle2 = c << obstacle
obstacle1.ymin = 40
obstacle2.xmin = 25
p1 = left.ports['o2']
p2 = right.ports['o2']
route = gf.routing.get_route_from_steps(
port1=p1,
port2=p2,
steps=[
{"x": 20},
{"y": 20},
{"x": 120},
{"y": 80},
],
)
c.add(route.references)
c.plot()
c.show(show_ports=True)
"""
x, y = port1.center
x2, y2 = port2.center
waypoints = [(x, y)]
steps = steps or []
for d in steps:
x = d["x"] if "x" in d else x
x += d.get("dx", 0)
y = d["y"] if "y" in d else y
y += d.get("dy", 0)
waypoints += [(x, y)]
waypoints += [(x2, y2)]
waypoints = np.array(waypoints)
if not isinstance(cross_section, list):
x = gf.get_cross_section(cross_section, **kwargs)
auto_widen = x.auto_widen
if auto_widen:
taper = gf.get_component(
taper,
length=x.taper_length,
width1=x.width,
width2=x.width_wide,
cross_section=cross_section,
**kwargs,
)
else:
taper = None
else:
taper = None
return round_corners(
points=waypoints,
bend=bend,
taper=taper,
cross_section=cross_section,
**kwargs,
)
get_route_from_steps_electrical = gf.partial(
get_route_from_steps, bend="wire_corner", taper=None, cross_section="metal3"
)
get_route_from_steps_electrical_multilayer = gf.partial(
get_route_from_steps,
bend=via_corner,
taper=None,
cross_section=[
(gf.cross_section.metal2, (90, 270)),
(gf.cross_section.metal3, (0, 180)),
],
)
@gf.cell
def test_route_from_steps() -> gf.Component:
c = gf.Component("get_route_from_steps_sample")
w = gf.components.straight()
left = c << w
right = c << w
right.move((100, 80))
obstacle = gf.components.rectangle(size=(100, 10))
obstacle1 = c << obstacle
obstacle2 = c << obstacle
obstacle1.ymin = 40
obstacle2.xmin = 25
p1 = left.ports["o2"]
p2 = right.ports["o2"]
route = get_route_from_steps(
port1=p1,
port2=p2,
steps=[
{"x": 20, "y": 0},
{"x": 20, "y": 20},
{"x": 120, "y": 20},
{"x": 120, "y": 80},
],
)
length = 186.548
assert route.length == length, route.length
route = gf.routing.get_route_from_steps(
port1=p1,
port2=p2,
steps=[
{"x": 20},
{"y": 20},
{"x": 120},
{"y": 80},
],
layer=(2, 0),
)
c.add(route.references)
assert route.length == length, route.length
return c
if __name__ == "__main__":
# c = test_route_from_steps()
# c = gf.Component("get_route_from_steps_sample")
# w = gf.components.straight()
# left = c << w
# right = c << w
# right.move((100, 80))
# p1 = left.ports["o2"]
# p2 = right.ports["o2"]
# route = get_route_from_steps(
# port1=p2,
# port2=p1,
# steps=[
# {"x": 20, "y": 0},
# {"x": 20, "y": 20},
# {"x": 120, "y": 20},
# {"x": 120, "y": 80},
# ],
# )
# c.add(route.references)
# c.show(show_ports=True)
c = gf.Component("pads_route_from_steps")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((100, 200))
route = gf.routing.get_route_from_steps_electrical(
pb.ports["e11"],
pt.ports["e11"],
steps=[
{"y": 200},
],
# cross_section=gf.cross_section.metal3,
# bend=gf.components.wire_corner,
)
c.add(route.references)
c.show(show_ports=True)
|
'''
Created on 22.10.2018
@author: Jarkko
'''
from selenium import webdriver
from Framehandling import cromedriverpath
cromedriverpath = "C:\Python3.7\chromedriver.exe"
driver=webdriver.Chrome(cromedriverpath)
driver.get("https://google.fi")
co=driver.get_cookies()
print(len(co))
driver.delete_all_cookies()
col=driver.get_cookies()
print(len(col))
|
#!/usr/bin/python3
def remove_char_at(str, n):
if n < 0:
return str
return (str[0:n] + str[n + 1:])
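# Example usage (illustrative only):
#   remove_char_at("hello", 1)  -> "hllo"
#   remove_char_at("hello", -2) -> "hello"   (negative index leaves the string unchanged)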
|
# Generated by Django 2.2.16 on 2020-09-21 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("aids", "0108_auto_20200615_1055"),
]
operations = [
migrations.AddField(
model_name="aid",
name="in_france_relance",
field=models.BooleanField(
default=False,
help_text="Is this aid a part of the France Relance program?",
verbose_name="France Relance?",
),
),
]
|
from .pluginmanager import PluginManager
from .plugin import Plugin
from .exceptions import PluginException
_FORGOT_PARENS="""RegisterPlugin decorator called incorrectly.
Looks like you forgot to put parens after the decorator, make sure the line looks like this: @RegisterPlugin()"""
_POS_ARGS="""RegisterPlugin decorator called incorrectly.
Perhaps you gave it positional arguments which is illegal?"""
class RegisterPlugin:
"""Class that acts as a decorator which registers plugins"""
def __init__(self, *args, label: str = None, **kwargs) -> None:
if len(args) > 0:
if type(args[0]) == type:
raise PluginException(_FORGOT_PARENS)
else:
raise PluginException(_POS_ARGS)
self.label = label
self.kwargs = kwargs
def __call__(self, plugin_class: Plugin):
pm = PluginManager()
pm.register_plugin(plugin_class, label=self.label, **self.kwargs)
return plugin_class
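# A minimal usage sketch (MyPlugin and BrokenPlugin are hypothetical Plugin
# subclasses, not part of this module). Registration happens when the decorator
# instance is called with the class, which is why the parens matter:
#
#   @RegisterPlugin(label="my-plugin")
#   class MyPlugin(Plugin):
#       ...
#
#   @RegisterPlugin        # missing parens -> PluginException(_FORGOT_PARENS)
#   class BrokenPlugin(Plugin):
#       ...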
|
import numpy as np
import pandas as pd
import argparse
import tensorflow as tf
import re
import jieba
from utils.config import root,vocab_path
import os
from sklearn.model_selection import train_test_split
from utils.multi_proc_utils import parallelize
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer,LabelBinarizer
def load_stop_words(stopwords_path):
stopwords = []
with open(stopwords_path, 'r',encoding='utf-8') as f:
for word in f.readlines():
word = word.strip()
stopwords.append(word)
return stopwords
stopwords_path = 'data/stopwords/哈工大停用词表.txt'
stop_words = load_stop_words(stopwords_path)
def clean_sentence(line):
'''Clean the sentence.'''
line = re.sub(
"[a-zA-Z0-9]|[\s+\-\|\!\/\[\]\{\}_,.$%^*(+\"\')]+|[::+——()?【】《》“”!,。?、~@#¥%……&*()]+|题目", '', line)
words = jieba.cut(line, cut_all=False)
return words
def sentence_proc(sentence):
cleaned_sentence = clean_sentence(sentence)
remove_stopwords = [word for word in cleaned_sentence if word not in stop_words]
return " ".join(remove_stopwords)
def proc(df):
df['content'] = df['content'].apply(lambda x: sentence_proc(x))
return df
def build_data(params):
if os.path.exists(os.path.join(root, 'data', 'X_train.npy')):
X_train = np.load(os.path.join(root, 'data', 'X_train.npy'))
X_test = np.load(os.path.join(root, 'data', 'X_test.npy'))
y_train = np.load(os.path.join(root, 'data', 'y_train.npy'))
y_test = np.load(os.path.join(root, 'data', 'y_test.npy'))
return X_train, X_test, y_train, y_test, None, None  # cached arrays only; binarizer and vocab are not rebuilt
data = pd.read_csv(params['data_path'],header = None).rename(columns={0: 'label', 1: 'content'})
processed_data = parallelize(data, proc)
#word2index
text_preprocesser = Tokenizer(num_words=params['vocab_size'], oov_token="<UNK>")
text_preprocesser.fit_on_texts(processed_data['content'])
#save vocab
word_dict = text_preprocesser.word_index
with open(params['vocab_path'], 'w', encoding='utf-8') as f:
for k, v in word_dict.items():
f.write(f'{k}\t{str(v)}\n')
x = text_preprocesser.texts_to_sequences(processed_data['content'])
# padding
x = pad_sequences(x, maxlen=params['padding_size'], padding='post', truncating='post')
# split the labels
if params['train_mode'] == "multi_label":
processed_data['label'] = processed_data['label'].apply(lambda x: x.split())
# multi-label encoding
mlb = MultiLabelBinarizer()
y = mlb.fit_transform(processed_data['label'])
# dataset split
elif params['train_mode'] == "multi_class":
processed_data['subject'] = processed_data['label'].apply(lambda x: x.split()[1])
print("class category: ", set(processed_data['subject']))
lb = LabelBinarizer()
y = lb.fit_transform(processed_data['subject'])
mlb = lb  # reuse the same return slot for the fitted binarizer
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# save the data
np.save(os.path.join(root,'data','X_train.npy'),X_train)
np.save(os.path.join(root, 'data', 'X_test.npy'), X_test)
np.save(os.path.join(root, 'data', 'y_train.npy'), y_train)
np.save(os.path.join(root, 'data', 'y_test.npy'), y_test)
return X_train, X_test, y_train, y_test, mlb, word_dict # vocab
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This is the data processing ')
parser.add_argument('-d', '--data_path', default='data/baidu_95.csv', type=str,
help='data path')
parser.add_argument('-v', '--vocab_save_dir', default='data/', type=str,
help='data path')
parser.add_argument('-vocab_size', default=50000, type=int, help='Limit vocab size.(default=50000)')
parser.add_argument('-p', '--padding_size', default=300, type=int, help='Padding size of sentences.(default=128)')
parser.add_argument('-train_mode', default='multi_label', type=str, help='multi-class or multi-label')
#parser.add_argument('-BUFFER_SIZE', default='3000', type=str, help='multi-class or multi-label')
params = vars(parser.parse_args())  # build_data indexes params like a dict, so convert the Namespace
print('Parameters:', params)
# process the raw data and build the datasets
x_train,x_test, y_train,y_test,_,vocab = build_data(params)
print(f"vocab size : {len(vocab)} ")
|
import unittest
import datetime
from sno import Sno, epoch
TestSnos = [
# inspired by https://github.com/rs/xid/blob/master/id_test.go
{
'sno': Sno([0x2b, 0x44, 0x5f, 0x68, 0x34, 0x86, 0xe4, 0x28, 0x2d, 0xc9]),
'ts': datetime.datetime(2021, 10, 11, 15, 16, 34, 24000),
'tick': False,
'metabyte': 0x86,
'partition': 0xe428,
'counter': 0x2dc9
},
{
'sno': Sno([0x2b, 0x44, 0x5f, 0x68, 0x35, 0x86, 0xe4, 0x28, 0x2d, 0xc9]),
'ts': datetime.datetime(2021, 10, 11, 15, 16, 34, 24000),
'tick': True,
'metabyte': 0x86,
'partition': 0xe428,
'counter': 0x2dc9
},
{
'sno': Sno([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]),
'ts': epoch,
'tick': False,
'metabyte': 0x00,
'partition': 0x0000,
'counter': 0
},
{
'sno': Sno([0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xdd, 0xee, 0x00, 0x01]),
'ts': epoch,
'tick': False,
'metabyte': 0xcc,
'partition': 0xddee,
'counter': 1
},
{
'sno': Sno([0x00, 0x00, 0x00, 0x00, 0x01, 0xcc, 0xdd, 0xee, 0x00, 0x01]),
'ts': epoch,
'tick': True,
'metabyte': 0xcc,
'partition': 0xddee,
'counter': 1
}
]
class TestSno(unittest.TestCase):
def test_no_duplicates(self):
collect = []
for i in range(0, 1000):
collect.append(Sno.new())
ids = [i.to_string() for i in collect]
self.assertEqual(len(set(ids)), 1000)
def test_from_string(self):
x = Sno.new()
y = Sno.from_string(x.to_string())
self.assertEqual(x.value, y.value)
self.assertEqual(x.to_bytes(), y.to_bytes())
self.assertEqual(x.to_string(), y.to_string())
def test_sno_always_reversible(self):
for i in range(1000):
s = Sno.new().to_string()
self.assertEqual(Sno.from_string(s).to_string(), s)
def test_datetime(self):
for x in TestSnos:
self.assertEqual(x['sno'].datetime, x['ts'])
def test_tick(self):
for x in TestSnos:
self.assertEqual(x['sno'].tick, x['tick'])
def test_metabyte(self):
for x in TestSnos:
self.assertEqual(x['sno'].metabyte, x['metabyte'])
def test_partition(self):
for x in TestSnos:
self.assertEqual(x['sno'].partition, x['partition'])
def test_counter(self):
for x in TestSnos:
self.assertEqual(x['sno'].counter, x['counter'])
def test_copy_array_from_golang(self):
x = Sno([78, 111, 33, 96, 160, 255, 154, 10, 16, 51])
self.assertEqual('brpk4q72xwf2m63l', x.to_string())
def test_copy_string_from_golang(self):
x = Sno.from_string('brpk4q72xwf2m63l')
self.assertEqual(x.value, [78, 111, 33, 96, 160, 255, 154, 10, 16, 51])
def test_thread_safety(self):
# TODO: this does not test that it's thread safe?
import threading
threads = []
def worker():
for i in range(10):
threading.current_thread().ident, Sno.new().to_string()
for i in range(10):
t = threading.Thread(target=worker)
threads.append(t)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 2.2 on 2020-05-27 01:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Fabricas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='VEHICULO',
fields=[
('id_Vehiculo', models.AutoField(primary_key=True, serialize=False)),
('Marca', models.CharField(max_length=200)),
('Linea', models.CharField(max_length=200)),
('Anio', models.CharField(max_length=200)),
('Codigo_Universal', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='REPUESTOS',
fields=[
('Codigo_Repuesto', models.AutoField(primary_key=True, serialize=False)),
('Modelo_De_Parte', models.IntegerField(default=0)),
('Nombre', models.CharField(max_length=200)),
('Descripcion', models.CharField(default=0, max_length=200)),
('Stock', models.IntegerField(default=0)),
('Precio_Fabricante', models.FloatField(default=0.0)),
('Precio_Venta', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),
('Fabrica', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='Fabricas.FABRICA')),
('Vehiculo_Compatible', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='Repuestos.VEHICULO')),
],
),
]
|
import os
from botocore.vendored import requests
def handler(event, context):
try:
requests.get('https://' + os.environ['PrimaryUrl'] + "/ting")
except Exception as e:
print("Received an error, not retrying")
print(e)
|
from abc import ABC, abstractmethod
from .section import SectionAttacher
class BaseElement(ABC):
def __init__(self):
pass
class Element(BaseElement):
"""
All html components will be a child of Element
Different element will implement different Attribute classes
Element is used to control the distribution of different attributes
NEED A WAY TO CHECK FOR INVALID ATTRIBUTES
"""
def __init__(self, *args, id: str = None, **kwargs):
# *args will fall through from all child args; **kwargs will fall through from all child kwargs
# zig should keep a log of all id, and check id during registration
# NEED TO SANITIZE ALL USER INPUTS
self.id = id
class DomElement(Element, SectionAttacher):
"""
All DOM components will be a child of DomElement
Different element will implement different Attribute classes
"""
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
SectionAttacher.__init__(self, **kwargs)
class HtmlElement(Element, SectionAttacher):
"""
All html components will be a child of HtmlElement
Different element will implement different Attribute classes
"""
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
SectionAttacher.__init__(self, **kwargs)
|
from django.db import models
from django.utils import timezone
class Post(models.Model) :
author = models.ForeignKey('auth.User', on_delete = models.CASCADE)
title = models.CharField(max_length = 200)
text = models.TextField()
created_date = models.DateTimeField(default = timezone.now)
published_date = models.DateTimeField(blank = True, null = True)
def publish(self) :
self.published_date = timezone.now()
self.save()
def __str__(self) :
return self.title
class Category(models.Model) :
'''
Category : table for the category of this board
* board_type : kind of board
* board_admin : administrator of this board
* board_url : url of this board
'''
board_type = models.CharField(null = False, blank = False, max_length = 20)
board_admin = models.CharField(null = False, blank = False, max_length = 20)
board_url = models.CharField(null = False, blank = False, max_length = 200)
class Board(models.Model) :
'''
Board - table for caching board posts
* board_number : number of board
* title : title of post
* url : link to post
* category : category of this board
* admin_post : administrator of this post
* description : description of post
* updated_at : updated date
'''
board_number = models.IntegerField(null = False, blank = False)
title = models.CharField(blank = False, null = False, max_length = 100)
url = models.CharField(blank = False, null = False, max_length = 200)
category = models.CharField(blank = False, null = False, max_length = 20)
admin_post = models.CharField(null = False, blank = False, max_length = 20)
description = models.TextField(null = False, blank = False)
updated_at = models.DateTimeField(auto_now_add = True) |
from copy import deepcopy
from .metaclasses.struct import StructMeta
__all__ = ['Struct']
class Struct(metaclass=StructMeta):
"""Serialisable object with individual fields"""
def __init__(self):
self._attribute_container.register_storage_interfaces()
def __deepcopy__(self, memo):
"""Serialiser description of tuple
:returns: new struct instance
"""
new_struct = self.__class__()
source_container = self._attribute_container
target_container = new_struct._attribute_container
# Local lookups
old_attribute_container_data = source_container.data
new_attribute_container_data = target_container.data
get_new_member = target_container.get_member_by_name
for name, member in source_container._ordered_mapping.items():
old_value = old_attribute_container_data[member]
new_member = get_new_member(name)
new_attribute_container_data[new_member] = deepcopy(old_value)
return new_struct
def __description__(self):
"""Serialiser description of tuple"""
return hash(self._attribute_container.get_ordered_descriptions())
def __repr__(self):
class_name = self.__class__.__name__
attributes = self._attribute_container.data
associated_values = "".join(["\n {} = {}".format(k, v) for k, v in attributes.items()])
return "<Struct {}>{}".format(class_name, associated_values)
@classmethod
def from_bytes(cls, bytes_string, offset=0):
"""Create a struct from bytes
:param bytes_string: Packed byte representation of struct contents
:returns: Struct instance
"""
struct = cls()
struct.read_bytes(bytes_string, offset)
return struct
@classmethod
def from_list(cls, list_):
"""Create a struct from a list
:param list_: List representation of struct contents
:returns: Struct instance
"""
struct = cls()
struct.read_list(list_)
return struct
def read_bytes(self, bytes_string, offset=0):
"""Update struct contents with bytes
:param bytes_string: Packed byte representation of struct contents
:param offset: offset to start reading from
"""
replicable_data = self._attribute_container.data
get_attribute = self._attribute_container.get_member_by_name
# Process and store new values
for attribute_name, value in self._serialiser.unpack(bytes_string, previous_values=replicable_data,
offset=offset):
attribute = get_attribute(attribute_name)
# Store new value
replicable_data[attribute] = value
def read_list(self, list_):
"""Update struct contents with a list
:param list_: List representation of struct contents
"""
data = self._attribute_container.data
members = self._attribute_container._ordered_mapping.values()
for member, value in zip(members, list_):
data[member] = value
def to_bytes(self):
"""Write struct contents to bytes
:returns: packed contents
"""
return self._serialiser.pack({a.name: v for a, v in self._attribute_container.data.items()})
def to_list(self):
"""Write struct contents to a list
:returns: contents tuple
"""
attribute_data = self._attribute_container.data
attributes = self._attribute_container._ordered_mapping.values()
return [attribute_data[attribute] for attribute in attributes]
def __iter__(self):
return iter(self.to_list())
__bytes__ = to_bytes |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Class called Tiles, should set and contain the values of the letter tile
class Tiles:
# initialise the class
def __init__(self):
# using a list for each of the letter groups
self.value_one = ["A", "E", "I", "O", "L", "N", "R", "S", "T"]
self.value_two = ["D", "G"]
self.value_three = ["B", "C", "M", "P"]
self.value_four = ["F", "H", "V", "W", "Y"]
self.value_five = ["K"]
self.value_eight = ["J", "X"]
self.value_ten = ["Q", "Z"]
# function to find the score of a word
def score(self):
# choose_word() function to assign user input to the word variable
word = self.choose_word()
# capitalise the word so can compare easily to list and output nicer
word = word.upper()
# initialise the score variable so can add to
score = 0
# for loop to loop through the word
for letter in word:
# if-elif-else statements to add to the score
if letter in self.value_one:
score += 1 # add to the running score
print(f"{letter}: 1") # display the value of the letter
elif letter in self.value_two:
score += 2
print(f"{letter}: 2")
elif letter in self.value_three:
score += 3
print(f"{letter}: 3")
elif letter in self.value_four:
score += 4
print(f"{letter}: 4")
elif letter in self.value_five:
score += 5
print(f"{letter}: 5")
elif letter in self.value_eight:
score += 8
print(f"{letter}: 8")
elif letter in self.value_ten:
score += 10
print(f"{letter}: 10")
else:
print("Not a letter")
print(f"{word} : {score}")
# added a small function to help the user input a word of choice
def choose_word(self):
word = str(input("Please enter a word: "))
return word
word = Tiles() # initialising the object
# word.score("bcmp")
word.score() # small test
|
import simpy
import os
import random
import pandas as pd
import numpy as np
import networkx as nx
from collections import OrderedDict, namedtuple
save_path = './result'
if not os.path.exists(save_path):
os.makedirs(save_path)
class Resource(object):
def __init__(self, env, model, monitor, tp_info=None, wf_info=None, delay_time=None, network=None):
self.env = env
self.model = model
self.monitor = monitor
self.delay_time = delay_time
self.network = network
# allocate resources
self.tp_store = simpy.Store(env)
self.wf_store = simpy.FilterStore(env)
# track resource locations
self.tp_location = {}
self.wf_location = {}
transporter = namedtuple("Transporter", "name, capa, v_loaded, v_unloaded")
workforce = namedtuple("Workforce", "name, skill")
if tp_info is not None:
for name in tp_info.keys():
self.tp_location[name] = []
self.tp_store.put(transporter(name, tp_info[name]["capa"], tp_info[name]["v_loaded"], tp_info[name]["v_unloaded"]))
# No resource is in the resource store -> the machine has to wait
self.tp_waiting = OrderedDict()
if wf_info is not None:
for name in wf_info.keys():
self.wf_location[name] = []
self.wf_store.put(workforce(name, wf_info[name]["skill"]))
# No resource is in the resource store -> the machine has to wait
self.wf_waiting = OrderedDict()
def request_tp(self, current_process):
tp, waiting = False, False
if len(self.tp_store.items) > 0:  # if a transporter is still available in tp_store
tp = yield self.tp_store.get()
self.monitor.record(self.env.now, None, None, part_id=None, event="tp_going_to_requesting_process",
resource=tp.name)
return tp, waiting
else:  # all transporters are away at other processes
tp_location_list = []
for name in self.tp_location.keys():
if not self.tp_location[name]:
continue
tp_current_location = self.tp_location[name][-1]
if len(self.model[tp_current_location].tp_store.items) > 0:  # if an idle tp is parked at that process
tp_location_list.append(self.tp_location[name][-1])
if len(tp_location_list) == 0:  # no idle tp available anywhere
waiting = True
return tp, waiting
else:  # an idle tp exists, so the part can be loaded
# call the idle tp closest to the requesting process
distance = []
for location in tp_location_list:
called_distance = self.network[location][current_process]
distance.append(called_distance)
# distance = list(map(lambda i: self.network.get_shortest_path_distance(tp_location_list[i], current_process), tp_location_list))
location_idx = distance.index(min(distance))
location = tp_location_list[location_idx]
tp = yield self.model[location].tp_store.get()
self.monitor.record(self.env.now, None, None, part_id=None, event="tp_going_to_requesting_process", resource=tp.name)
yield self.env.timeout(distance[location_idx]/tp.v_unloaded)  # time for the tp to reach the requesting process
return tp, waiting
# def delaying(self):
# yield self.env.timeout(self.delay_time)
# return
class Part(object):
def __init__(self, name, data):
# name of this Part
self.id = name
# processing time data
self.data = data
# number of process steps completed
self.step = 0
class Source(object):
def __init__(self, env, parts, model, monitor):
self.env = env
self.name = 'Source'
self.parts = parts  ## Parts modelled with the Part class, stored as a list
self.model = model
self.monitor = monitor
self.action = env.process(self.run())
def run(self):
while True:
part = self.parts.pop(0)
IAT = part.data[(0, 'start_time')] - self.env.now
if IAT > 0:
yield self.env.timeout(part.data[(0, 'start_time')] - self.env.now)
# record: part_created
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="part_created")
# print(part.id, 'is created at ', self.env.now)
# next process
next_process = part.data[(part.step, 'process')]
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="part_transferred_to_first_process")
self.model[next_process].buffer_to_machine.put(part)
# print(part.id, 'is transferred to ', next_process, 'at ', self.env.now)
if len(self.parts) == 0:
print("all parts are sent at : ", self.env.now)
break
class Process(object):
def __init__(self, env, name, machine_num, model, monitor, resource=None, network=None,process_time=None, capacity=float('inf'),
routing_logic='cyclic', priority=None, capa_to_machine=float('inf'), capa_to_process=float('inf'),
MTTR=None, MTTF=None, initial_broken_delay=None, delay_time=None, workforce=None, transporter=False):
# input data
self.env = env
self.name = name
self.model = model
self.monitor = monitor
self.resource = resource # Resource class
self.network = network # Network class
self.capa = capacity
self.machine_num = machine_num
self.routing_logic = routing_logic
self.process_time = process_time[self.name] if process_time is not None else [None for _ in range(machine_num)]
self.priority = priority[self.name] if priority is not None else [1 for _ in range(machine_num)]
self.MTTR = MTTR[self.name] if MTTR is not None else [None for _ in range(machine_num)]
self.MTTF = MTTF[self.name] if MTTF is not None else [None for _ in range(machine_num)]
self.initial_broken_delay = initial_broken_delay[self.name] if initial_broken_delay is not None else [None for _ in range(machine_num)]
self.delay_time = delay_time[name] if delay_time is not None else None
self.workforce = workforce[self.name] if workforce is not None else [False for _ in range(machine_num)] # workforce 사용 여부
self.transporter = transporter
# variable defined in class
self.parts_sent = 0
self.parts_sent_to_machine = 0
self.machine_idx = 0
self.len_of_server = []
self.waiting_machine = OrderedDict()
self.waiting_pre_process = OrderedDict()
# buffer and machine
self.buffer_to_machine = simpy.Store(env, capacity=capa_to_machine)
self.buffer_to_process = simpy.Store(env, capacity=capa_to_process)
self.machine = [Machine(env, '{0}_{1}'.format(self.name, i), self.name, self.resource,
process_time=self.process_time[i], priority=self.priority[i], out=self.buffer_to_process,
waiting=self.waiting_machine, monitor=monitor, MTTF=self.MTTF[i], MTTR=self.MTTR[i],
initial_broken_delay=self.initial_broken_delay[i],
workforce=self.workforce[i]) for i in range(self.machine_num)]
# resource
self.tp_store = simpy.Store(self.env)
self.wf_store = simpy.Store(self.env)
# get run functions in class
env.process(self.to_machine())
env.process(self.to_process())
def to_machine(self):
while True:
routing = Routing(self.machine, priority=self.priority)
if self.delay_time is not None:
delaying_time = self.delay_time if type(self.delay_time) == float else self.delay_time()
yield self.env.timeout(delaying_time)
part = yield self.buffer_to_machine.get()
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="Process_entered")
## Routing logic to be added later
if self.routing_logic == 'priority':
self.machine_idx = routing.priority()
else:
self.machine_idx = 0 if (self.parts_sent_to_machine == 0) or (self.machine_idx == self.machine_num - 1) else self.machine_idx + 1
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="routing_ended")
self.machine[self.machine_idx].machine.put(part)
self.parts_sent_to_machine += 1
# finish delaying of pre-process
if (len(self.buffer_to_machine.items) < self.buffer_to_machine.capacity) and (len(self.waiting_pre_process) > 0):
self.waiting_pre_process.popitem(last=False)[1].succeed() # delay = (part_id, event)
def to_process(self):
while True:
part = yield self.buffer_to_process.get()
# next process
step = 1
# while not part.data[(part.step + step, 'process_time')]:
# if part.data[(part.step + step, 'process')] != 'Sink':
# step += 1
# break
# else:
# break
next_process_name = part.data[(part.step + step, 'process')]
next_process = self.model[next_process_name]
if next_process.__class__.__name__ == 'Process':
# buffer's capacity of next process is full -> have to delay
if len(next_process.buffer_to_machine.items) == next_process.buffer_to_machine.capacity:
next_process.waiting_pre_process[part.id] = self.env.event()
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="delay_start_out_buffer")
yield next_process.waiting_pre_process[part.id]
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="delay_finish_out_buffer")
# part transfer
if self.transporter is True:  # if this process uses a transporter
self.monitor.record(self.env.now, self.name, None, part_id=None, event="tp_request")
tp, waiting = False, True
while waiting:
tp, waiting = yield self.env.process(self.resource.request_tp(self.name))
if not waiting:
break
# if waiting is True, all transporters are moving, so the process has to delay
else:
self.resource.tp_waiting[part.id] = self.env.event()
self.monitor.record(self.env.now, self.name, None, part_id=part.id,
event="delay_start_cus_no_tp")
yield self.resource.tp_waiting[part.id]
self.monitor.record(self.env.now, self.name, None, part_id=part.id,
event="delay_finish_cus_yes_tp")
continue
if tp is not None:
self.monitor.record(self.env.now, self.name, None, part_id=part.id,
event="tp_going_to_next_process", resource=tp.name)
distance_to_move = self.network[self.name][next_process_name]
yield self.env.timeout(distance_to_move/tp.v_loaded)
self.monitor.record(self.env.now, next_process_name, None, part_id=part.id,
event="tp_finished_transferred_to_next_process", resource=tp.name)
next_process.buffer_to_machine.put(part)
self.monitor.record(self.env.now, self.name, None, part_id=part.id,
event="part_transferred_to_next_process_with_tp")
next_process.tp_store.put(tp)
self.resource.tp_location[tp.name].append(next_process_name)
# one transporter has become available -> end the delay
if len(self.resource.tp_waiting) > 0:
self.resource.tp_waiting.popitem(last=False)[1].succeed()
else: # not using transporter
next_process.buffer_to_machine.put(part)
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="part_transferred_to_next_process")
else: # next_process == Sink
next_process.put(part)
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="part_transferred_to_Sink")
part.step += step
self.parts_sent += 1
if (len(self.buffer_to_process.items) < self.buffer_to_process.capacity) and (len(self.waiting_machine) > 0):
self.waiting_machine.popitem(last=False)[1].succeed() # delay = (part_id, event)
class Machine(object):
def __init__(self, env, name, process_name, resource, process_time, priority, out, waiting, monitor,
MTTF, MTTR, initial_broken_delay, workforce):
# input data
self.env = env
self.name = name
self.process_name = process_name
self.resource = resource
self.process_time = process_time
self.priority = priority
self.out = out
self.waiting = waiting
self.monitor = monitor
self.MTTR = MTTR
self.MTTF = MTTF
self.initial_broken_delay = initial_broken_delay
self.workforce = workforce
# variable defined in class
self.machine = simpy.Store(env)
self.working_start = 0.0
self.total_time = 0.0
self.total_working_time = 0.0
self.working = False # whether the machine is working (True) or idle (False)
self.broken = False # whether the machine is broken or not
self.unbroken_start = 0.0
self.planned_proc_time = 0.0
# broke and re-running
self.residual_time = 0.0
self.broken_start = 0.0
if self.MTTF is not None:
mttf_time = self.MTTF if type(self.MTTF) == float else self.MTTF()
self.broken_start = self.unbroken_start + mttf_time
# get run functions in class
self.action = env.process(self.work())
# if (self.MTTF is not None) and (self.MTTR is not None):
# env.process(self.break_machine())
def work(self):
while True:
self.broken = True
part = yield self.machine.get()
self.working = True
wf = None
# process_time
if self.process_time is None: # the process_time is given per part in advance
proc_time = part.data[(part.step, "process_time")]
else: # the service time is predefined --> 1) fixed time / 2) stochastic time
proc_time = self.process_time if type(self.process_time) == float else self.process_time()
self.planned_proc_time = proc_time
if self.workforce is True:
resource_item = list(map(lambda item: item.name, self.resource.wf_store.items))
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id, event="workforce_request", resource=resource_item)
while len(self.resource.wf_store.items) == 0:
self.resource.wf_waiting[part.id] = self.env.event()
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="delay_start_machine_cus_no_resource")
yield self.resource.wf_waiting[part.id] # start delaying
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="delay_finish_machine_cus_yes_resource")
wf = yield self.resource.wf_store.get()
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="workforce get in the machine", resource=wf.name)
while proc_time:
if self.MTTF is not None:
self.env.process(self.break_machine())
try:
self.broken = False
## working start
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id, event="work_start")
self.working_start = self.env.now
yield self.env.timeout(proc_time)
## working finish
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id, event="work_finish")
self.total_working_time += self.env.now - self.working_start
self.broken = True
proc_time = 0.0
except simpy.Interrupt:
self.broken = True
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="machine_broken")
print('{0} is broken at '.format(self.name), self.env.now)
proc_time -= self.env.now - self.working_start
if self.MTTR is not None:
repair_time = self.MTTR if type(self.MTTR) == float else self.MTTR()
yield self.env.timeout(repair_time)
self.unbroken_start = self.env.now
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="machine_rerunning")
print(self.name, 'is solved at ', self.env.now)
self.broken = False
mttf_time = self.MTTF if type(self.MTTF) == float else self.MTTF()
self.broken_start = self.unbroken_start + mttf_time
self.working = False
if self.workforce is True:
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id, event="workforce_used_out", resource=wf.name)
self.resource.wf_store.put(wf)
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="workforce get out the machine", resource=wf.name)
if (len(self.resource.wf_store.items) > 0) and (len(self.resource.wf_waiting) > 0):
self.resource.wf_waiting.popitem(last=False)[1].succeed() # delay = (part_id, event)
# start delaying at machine cause buffer_to_process is full
if len(self.out.items) == self.out.capacity:
self.waiting[part.id] = self.env.event()
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="delay_start_machine")
yield self.waiting[part.id] # start delaying
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="delay_finish_machine")
# transfer to 'to_process' function
self.out.put(part)
self.monitor.record(self.env.now, self.process_name, self.name, part_id=part.id,
event="part_transferred_to_out_buffer")
self.total_time += self.env.now - self.working_start
def break_machine(self):
if (self.working_start == 0.0) and (self.initial_broken_delay is not None):
initial_delay = self.initial_broken_delay if type(self.initial_broken_delay) == float else self.initial_broken_delay()
yield self.env.timeout(initial_delay)
residual_time = self.broken_start - self.working_start
if (residual_time > 0) and (residual_time < self.planned_proc_time):
yield self.env.timeout(residual_time)
self.action.interrupt()
else:
return
# if (self.monitor.event.count('completed') > 0) and (
# self.monitor.event.count('part_created') == self.monitor.event.count('completed')):
# break
# yield self.env.timeout(mttf_time)
# if not self.broken:
# self.action.interrupt()
class Sink(object):
def __init__(self, env, monitor):
self.env = env
self.name = 'Sink'
self.monitor = monitor
# self.tp_store = simpy.FilterStore(env) # store where transporters are checked in and out
self.parts_rec = 0
self.last_arrival = 0.0
def put(self, part):
self.parts_rec += 1
self.last_arrival = self.env.now
self.monitor.record(self.env.now, self.name, None, part_id=part.id, event="completed")
class Monitor(object):
def __init__(self, filepath):
self.filepath = filepath ## path where the event tracer is saved
self.time=[]
self.event=[]
self.part_id=[]
self.process=[]
self.subprocess=[]
self.resource = []
def record(self, time, process, subprocess, part_id=None, event=None, resource=None):
self.time.append(time)
self.event.append(event)
self.part_id.append(part_id)
self.process.append(process)
self.subprocess.append(subprocess)
self.resource.append(resource)
def save_event_tracer(self):
event_tracer = pd.DataFrame(columns=['Time', 'Event', 'Part', 'Process', 'SubProcess', 'Resource'])
event_tracer['Time'] = self.time
event_tracer['Event'] = self.event
event_tracer['Part'] = self.part_id
event_tracer['Process'] = self.process
event_tracer['SubProcess'] = self.subprocess
event_tracer['Resource'] = self.resource
event_tracer.to_csv(self.filepath)
return event_tracer
class Routing(object):
def __init__(self, server_list=None, priority=None):
self.server_list = server_list
self.idx_priority = np.array(priority)
def priority(self):
i = min(self.idx_priority)
idx = 0
while i <= max(self.idx_priority):
min_idx = np.argwhere(self.idx_priority == i) # extract indices starting from the smallest priority value
idx_min_list = min_idx.flatten().tolist()
# from those indices, keep only the machines that are currently idle
idx_list = list(filter(lambda j: (self.server_list[j].working == False), idx_min_list))
if len(idx_list) > 0: # if an idle machine exists at this priority level
idx = random.choice(idx_list)
break
else: # if no idle machine exists at this priority level
if i == max(self.idx_priority): # machines of every priority are busy
idx = random.choice([j for j in range(len(self.idx_priority))]) # assign randomly
# idx = None
break
else:
i += 1 # check the next priority level
return idx
def first_possible(self):
idx_possible = random.randrange(len(self.server_list)) # initialize with a random index - if every server is busy, assign the part to a random server
for i in range(len(self.server_list)):
if self.server_list[i].working is False: # if an idle server exists, assign the part to it
idx_possible = i
break
return idx_possible
class Network(object):
def __init__(self, graph, gis_graph):
self.graph = graph
self.gis_graph = gis_graph
def get_shortest_path_distance(self, location_type_from, location_type_to):
shortest_path_length_dict = dict(nx.shortest_path_length(self.gis_graph, weight='distance'))
shortest_path_length = shortest_path_length_dict[location_type_from][location_type_to]
return shortest_path_length
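# --- Usage sketch (not part of the original model) ---
# A minimal, hedged example of the Network helper above: build a small networkx graph
# with 'distance' edge weights and query the shortest-path length between two locations.
# The node names and distances are illustrative placeholders; get_shortest_path_distance
# only uses gis_graph, so graph is passed as None here.
import networkx as nx
example_gis = nx.Graph()
example_gis.add_edge('ProcessA', 'ProcessB', distance=120.0)
example_gis.add_edge('ProcessB', 'Sink', distance=80.0)
example_network = Network(graph=None, gis_graph=example_gis)
print(example_network.get_shortest_path_distance('ProcessA', 'Sink'))  # 200.0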
|
from typing import List
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
# split the text
# from words[2] to words[n-1], check words[x-1] == second and words[x-2] == first
# if yes, append to the result
result = []
words = text.split()
for i in range(2, len(words)):
if first == words[i-2] and second == words[i-1]:
result.append(words[i])
return result |
import json
import os
from pathlib import Path
import instawow.cli
import instawow.db
from instawow.config import Config, setup_logging
from instawow.models import Pkg, PkgList
from instawow.resolvers import Defn
from instawow.results import PkgUpToDate
from instawow.manager import Manager
import sqlalchemy
class InstawowManager:
def __init__(self, game_flavour: str, lib: bool = False):
"""Interface between instawow and main program.
:param game_flavour str: 'classic', 'retail' or 'vanilla_classic'
:param lib bool: Whether handling libraries (including classic-only libs).
"""
self.profile = game_flavour + ('_lib' if lib else '')
addon_dir = Path(os.getcwd()) / 'Addons/'
if lib:
addon_dir /= '!!Libs'
config = Config(addon_dir=addon_dir, game_flavour=game_flavour, profile=self.profile)
config.write()
setup_logging(config)
instawow.cli._apply_patches()
self.manager = Manager.from_config(config)
def get_addons(self):
query = self.manager.database.execute(
sqlalchemy.select(instawow.db.pkg)
.order_by(instawow.db.pkg.c.source, instawow.db.pkg.c.name)
)
if not query:
return []
return [Pkg.from_row_mapping(self.manager.database, p) for p in query.mappings().all()]
def to_defn(self, addon: str, strategy: str = None) -> Defn:
pair = self.manager.pair_uri(addon) or ('*', addon)
ret = Defn(*pair)
if strategy:
ret = ret.with_(strategy=strategy)
return ret
def to_defns(self, addons: str | list[str] | list[tuple]) -> list[Defn]:
if isinstance(addons, str):
return [self.to_defn(addons)]
return [self.to_defn(addon) if isinstance(addon, str) else self.to_defn(*addon) for addon in addons]
def update(self):
addons = [Defn.from_pkg(p) for p in self.get_addons()]
results = instawow.cli.run_with_progress(self.manager.update(addons, False))
report = instawow.cli.Report(results.items(),
lambda r: not isinstance(r, PkgUpToDate))
if str(report):
print(report)
else:
print(f'All {self.profile} addons are up-to-date!')
def install(self, addons: str | list[str], strategy=None):
defns = self.to_defns(addons)
if '_lib' in self.profile:
defns = [d.with_(strategy='any_flavour' if d.source == 'curse' else 'default') for d in defns]
elif strategy:
defns = [d.with_(strategy=strategy) for d in defns]
results = instawow.cli.run_with_progress(self.manager.install(defns, replace=False))
print(instawow.cli.Report(results.items()))
def remove(self, addons: str | list[str]):
defns = self.to_defns(addons)
results = instawow.cli.run_with_progress(self.manager.remove(defns, False))
print(instawow.cli.Report(results.items()))
def show(self):
for addon in self.get_addons():
print(f'{addon.name}: {addon.version}')
def export(self):
with open(f'{self.profile}.json', 'w', encoding='utf-8') as file:
file.write(PkgList.parse_obj(self.get_addons()).json(indent=2))
def reinstall(self, filename: str):
with open(filename, 'rb') as file:
addons = json.loads(file.read())
addons = [(f"{a['source']}:{a['slug']}", a['options']['strategy']) for a in addons]
addons = [self.to_defn(addon, strategy) for addon, strategy in addons]
results = instawow.cli.run_with_progress(self.manager.install(addons, replace=True))
print(instawow.cli.Report(results.items()))
|
#coding=utf-8
import socketserver
class MyServer(socketserver.BaseRequestHandler):
def handle(self):
conn = self.request
addr = self.client_address
while True:
# receive data from the client; close the connection when "bb" is received
receiver_data = str(conn.recv(1024), encoding="utf8")
print(receiver_data)
if receiver_data == "bb":
conn.close()
break
send_data = bytes(input(">>>>>>"), encoding="utf8")
conn.sendall(send_data)
if __name__=="__main__":
server=socketserver.ThreadingTCPServer(("localhost",8888),MyServer)
server.serve_forever() |
'''
Karen Sommer
CS 521 Spring 2021
Assignment 5
Problem 9 pages 264-267
'''
# Write a function that takes as input an English sentence (a string) and prints the total
# number of vowels and the total number of consonants in the sentence. The function
# returns nothing. Note that the sentence could have special characters such as dots and dashes.
vowels=['a','e','i','o','u','A','E','I','O','U']
def count_vowel_consonants(text):
count_vowels = 0
count_consonants = 0
for s in text:
if s.isalpha():
if s in vowels:
count_vowels += 1
else:
count_consonants += 1
print('Total number of vowels', count_vowels)
print('Total number of consonants', count_consonants)
if __name__ == "__main__":
print('Enter a string')
text = input()
count_vowel_consonants(text)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 22:19:49 2018
@author: Yuanpei Cao
"""
import pandas as pd
###############################################################################
## load geoIDs
###############################################################################
df_train1_geoid = pd.read_csv('/Users/ycao/Desktop/taxi_fare_prediction/all/'
'filtered_data/filter_train1_geoid.csv')
df_train2_geoid = pd.read_csv('/Users/ycao/Desktop/taxi_fare_prediction/all/'
'filtered_data/filter_train2_geoid.csv')
###############################################################################
## get location and geoid only
###############################################################################
df_loc_geoid1 = df_train1_geoid[[
'pickup_longitude', 'pickup_latitude', 'GEOID_p'
]]
df_loc_geoid2 = df_train1_geoid[[
'dropoff_longitude', 'dropoff_latitude', 'GEOID_d'
]]
df_loc_geoid3 = df_train2_geoid[[
'pickup_longitude', 'pickup_latitude', 'GEOID_p'
]]
df_loc_geoid4 = df_train2_geoid[[
'dropoff_longitude', 'dropoff_latitude', 'GEOID_d'
]]
###############################################################################
## rename the column, combine the dataframe, and drop duplicates
###############################################################################
# rename
df_loc_geoid1 = df_loc_geoid1.rename(
columns = {
'pickup_longitude':'longitude',
'pickup_latitude':'latitude',
'GEOID_p':'geoid'
}
)
df_loc_geoid2 = df_loc_geoid2.rename(
columns = {
'dropoff_longitude':'longitude',
'dropoff_latitude':'latitude',
'GEOID_d':'geoid'
}
)
df_loc_geoid3 = df_loc_geoid3.rename(
columns = {
'pickup_longitude':'longitude',
'pickup_latitude':'latitude',
'GEOID_p':'geoid'
}
)
df_loc_geoid4 = df_loc_geoid4.rename(
columns = {
'dropoff_longitude':'longitude',
'dropoff_latitude':'latitude',
'GEOID_d':'geoid'
}
)
# change type to strings
# convert location to string
df_loc_geoid1[['longitude', 'latitude']] = \
df_loc_geoid1[['longitude', 'latitude']].astype(str)
df_loc_geoid2[['longitude', 'latitude']] = \
df_loc_geoid2[['longitude', 'latitude']].astype(str)
df_loc_geoid3[['longitude', 'latitude']] = \
df_loc_geoid3[['longitude', 'latitude']].astype(str)
df_loc_geoid4[['longitude', 'latitude']] = \
df_loc_geoid4[['longitude', 'latitude']].astype(str)
###############################################################################
## combine the dataframe
###############################################################################
df_loc_geoid1 = df_loc_geoid1.drop_duplicates()
df_loc_geoid2 = df_loc_geoid2.drop_duplicates()
df_loc_geoid3 = df_loc_geoid3.drop_duplicates()
df_loc_geoid4 = df_loc_geoid4.drop_duplicates()
df_loc_geoid = pd.concat(
[df_loc_geoid1, df_loc_geoid2, df_loc_geoid3, df_loc_geoid4],
axis = 0
).drop_duplicates()
###############################################################################
## save the result
###############################################################################
df_loc_geoid.to_csv('/Users/ycao/Desktop/taxi_fare_prediction/all/'
'loc_geoid_train12.csv', index = False) |
# -*- coding: utf-8 -*-
"""
Created by Zhang Bin on 2018-11-26 15:58:00
@author: zhang bin
@email: zhangbin@gsafety.com
Rest API -- market quote operations
"""
import os, copy, ast, time, threading
import mySystem
from flask import jsonify, request, flash, render_template, redirect # import modules
# Reference the root-level class directories -- required; otherwise custom classes
# cannot be found when starting from a non-local directory
mySystem.Append_Us("../zxcPy.APIs", False, __file__)
mySystem.Append_Us("../zxcPy.Quotation", False, __file__)
mySystem.Append_Us("../zxcPy.Quotation/Quote_Data", False, __file__)
mySystem.Append_Us("../zxcPy.Quotation/Quote_Data/Data_Risk", False, __file__)
mySystem.Append_Us("../zxcPy.Quotation/Quote_Source", False, __file__)
mySystem.Append_Us("../zxcPy.Quotation/Quote_Listener", False, __file__)
mySystem.Append_Us("", False)
import myWeb, myDebug, myData, myData_Trans, myData_Json, myQuote_Source, myQuote_Setting, myData_StockRisk
from myGlobal import gol
# API - quote settings
class myAPI_Quote_Set(myWeb.myAPI):
def get(self):
# extract stock information
extype=request.args.get('extype', "")
code_id=request.args.get('code_id', "")
code_name=request.args.get('code_name', "")
removeSet=myData_Trans.To_Bool(request.args.get('removeSet', ''))
pMsg = copy.deepcopy(gol._Get_Setting('Return_strFormat', {}))
pStocks = gol._Get_Value('setsStock', None)
lstStock = pStocks._Find(code_id, "", exType=extype)
if(len(lstStock) != 1):
pMsg['text'] = "股票代码或名称错误!"
return pMsg
pStock = lstStock[0]
strTag = "股票设置:"+ pStock.code_name +"\n "
# parse parameters
bResult = False
if(bResult == False):
# get the quote source object
pSource = gol._Get_Value('quoteSource', None)
pSets = gol._Get_Value('setsQuote', None)
if(pSource != None and pSets != None):
if(removeSet == False):
editInfo = myData_Trans.Tran_ToDict(request.args.get('editInfo', "{}"))
if(pSets._Edit(pStock.extype, pStock.code_id, pStock.code_name, editInfo)):
pSource.params = pSource._getDefault_Param()
pMsg['text'] = strTag + " --设置已成功修改。"
bResult = True
print(pSource.params)
else:
usrID = request.args.get('usrID', '')
usrPlat = request.args.get('usrPlat', 'wx')
if(pSets._Remove(pStock.extype, pStock.code_id, pStock.code_name, usrID)):
pSource.params = pSource._getDefault_Param()
pMsg['text'] = strTag + " --设置已成功移除。"
bResult = True
print(pSource.params)
pMsg['result'] = bResult
if(bResult == False): pMsg['text'] = strTag + "操作失败!"
return pMsg
# API - quote settings query
class myAPI_Quote_SetQuery(myWeb.myAPI):
def get(self):
usrID=request.args.get('usrID', "")
pMsg = copy.deepcopy(gol._Get_Setting('Return_strFormat', {}))
# initialize the return lists
lstExtypes = []
lstCode_id = []
lstCode_Name = []
lstCode_NameEN = []
# query and assemble
pSets = gol._Get_Value('setsQuote', None)
if(pSets != None and usrID != ""):
lstStock = pSets._Find_Sets(usrID)
for x in lstStock:
lstExtypes.append(x.extype)
lstCode_id.append(x.code_id)
lstCode_Name.append(x.code_name)
lstCode_NameEN.append(x.code_name_En)
jsonStocks = {}
jsonStocks["extypes"] = lstExtypes
jsonStocks["code_ids"] = lstCode_id
jsonStocks["code_names"] = lstCode_Name
jsonStocks["code_namesEN"] = lstCode_NameEN
pMsg['result'] = len(lstExtypes) > 0
pMsg['text'] = jsonStocks
return pMsg
# API - quote setting detail query
class myAPI_Quote_SetInfoQuery(myWeb.myAPI):
def get(self):
exType=request.args.get('exType', "")
stockID=request.args.get('stockID', "")
stockName=request.args.get('stockName', "")
usrID=request.args.get('usrID', "")
pMsg = copy.deepcopy(gol._Get_Setting('Return_strFormat', {}))
# initialize the return object
jsonInfo = {}
pSets = gol._Get_Value('setsQuote', None)
pSet = pSets._Find(stockName, exType + '.' + stockID)
if(pSet != None):
pMsg['result'] = True
pParams = {}
for xx in pSet.settings:
pSetting = pSet.settings[xx]
pParams[xx] = pSetting.IsValid(usrID)
jsonInfo["设置状态"] = pParams
else:
pMsg['result'] = False
pMsg['text'] = jsonInfo
return pMsg
# API - quote settings - risk control
class myAPI_Quote_Set_Risk(myWeb.myAPI):
def get(self):
# extract stock information
pMsg = copy.deepcopy(gol._Get_Setting('Return_strFormat', {}))
pRisks = gol._Get_Value('zxcRisk_Control', None)
bResult = True
# assemble the parameters and add them
#dicParam = {"边界限制": True,"定量监测": False, "监测间隔": 0.01,"止盈线": 0.20, "止损线": -0.05, "动态止盈": True, "动态止损": True, "止盈回撤": 0.01, "止盈比例": 0.20, "止损回撤": 0.01, "止损比例": 0.20 }
usrID = request.args.get('usrID', '')
usrTag = request.args.get('usrTag', '')
code_id = request.args.get('code_id', "")
code_name = request.args.get('code_name', "")
removeSet = myData_Trans.To_Bool(request.args.get('removeSet', False))
paramInfo = myData_Trans.Tran_ToDict(request.args.get('setInfo', "{}"))
paramInfo['removeSet'] = removeSet
dtTrade = request.args.get('time', "")
dateTag = request.args.get('dateTag', "")
stockPrice = myData_Trans.To_Float(str(request.args.get('stockPrice', 0)))
stockNum = myData_Trans.To_Int(str(request.args.get('stockNum', 0)))
if(removeSet == False and (stockPrice == 0 or stockNum == 0)):
bResult = False; pMsg['text'] = "股价、数量不能为0."
if(usrID == "" and usrTag == ""):
bResult = False; pMsg['text'] = "用户信息不能为空."
if(bResult):
strR = pRisks.addRiskSet(usrID, usrTag, code_id, code_name, stockPrice, stockNum, dtTrade, dateTag, paramInfo)
# parse parameters
strTag = "风控设置:"+ code_name +"\n"
if(bResult):
if(removeSet == False):
if(stockPrice == 0 or stockNum == 0):
pMsg['text'] = strTag + " --已成功修改参数信息."
else:
trade = myData.iif(stockNum >0, "买入", "卖出")
if(stockNum % 100 == 0):
pMsg['text'] = strTag + F"新增{trade}:{str(abs(stockNum))} 股.\n{trade}均价:{stockPrice} 元/股)."
else:
pMsg['text'] = strTag + F"新增{trade}:{str(abs(stockNum))} 张.\n{trade}均价:{stockPrice} 元/张."
bResult = True
else:
pMsg['text'] = strTag + " --设置已成功移除."
bResult = True
pMsg['result'] = bResult
if(bResult == False): pMsg['text'] = strTag + "操作失败!"
return pMsg
# API - quote settings query - risk control
class myAPI_Quote_SetQuery_Risk(myWeb.myAPI):
def get(self):
pMsg = copy.deepcopy(gol._Get_Setting('Return_strFormat', {}))
usrID = request.args.get('usrID', '')
usrTag = request.args.get('usrTag', '')
code_id = request.args.get('code_id', "")
code_name = request.args.get('code_name', "")
# initialize the return lists
lstDate_Tag = []
lstInfos = {}
# query and assemble
pRisks = gol._Get_Value('zxcRisk_Control', None)
dictRisks = pRisks.getRisks(usrID, usrTag, code_id, code_name, bCheck = True)
if(dictRisks != None):
for x in dictRisks:
pRisk = dictRisks[x]
if(pRisk.setRisk.valid):
lstDate_Tag.append(x)
dictSet = pRisk.setRisk.Trans_ToDict().copy()
dictSet['操作时间'] = myData_Trans.Tran_ToDatetime_str(dictSet['操作时间'], "%Y-%m-%d %H:%M:%S")
lstInfos[x] = dictSet
jsonSetinfo = {}
jsonSetinfo["dataTags"] = lstDate_Tag
jsonSetinfo["setInfos"] = lstInfos
pMsg['result'] = len(lstDate_Tag) > 0
pMsg['text'] = jsonSetinfo
return pMsg
# initialize the quote source object
def init_Quote():
# fetch global objects
myQuote_Source.mainStart()
ms_Source = gol._Get_Value('quoteSource', None)
return ms_Source
# register all APIs in one place
def add_APIs(pWeb):
# initialize the quote source object
#init_Quote()
# create the Web APIs
pWeb.add_API(myAPI_Quote_Set, '/zxcAPI/robot/stock/QuoteSet')
pWeb.add_API(myAPI_Quote_SetQuery, '/zxcAPI/robot/stock/QuoteSet/Query')
pWeb.add_API(myAPI_Quote_SetInfoQuery, '/zxcAPI/robot/stock/QuoteSetInfo/Query')
pWeb.add_API(myAPI_Quote_Set_Risk, '/zxcAPI/robot/stock/QuoteSetRisk')
pWeb.add_API(myAPI_Quote_SetQuery_Risk, '/zxcAPI/robot/stock/QuoteSetRisk/Query')
# quote monitoring thread
def thrd_Moniter_API_Quote():
time.sleep(10) # initial delay before starting
pSource = init_Quote()
while(pSource.isClosed == False):
myDebug.Debug(myData_Trans.Tran_ToDatetime_str())
time.sleep(120) # wait between checks
myQuote_Source.mainStart() # check and (re)start the quote process
# start the monitoring thread
m_thrdAPI_Quote = threading.Thread(target = thrd_Moniter_API_Quote)
m_thrdAPI_Quote.start()
# main program entry
if __name__ == '__main__':
# initialize the quote source object
#init_Quote()
# register the platform and fetch a token
#pQuote_Set = myAPI_Quote_Set()
#msg = pQuote_Set.get()
#print("msg::", msg)
print()
|
from env.frame import Frame
class Potential(Frame):
def __init__(self,potential = 0):
self.__potential = potential
def set_(self,potential = 0):
self.__potential = potential
def get_(self):
return self.__potential |
from django.shortcuts import render, HttpResponse
import datetime
# Create your views here.
class Person(object):
def __init__(self, name, age, sex):
self.name = name
self.age = age
self.sex = sex
def say(self):
return 'my name is ' + self.name
def show(request):
if request.method == 'GET':
a = request.GET.get('id', 2)
person = Person('谭咏飞', '20', '男')
data = {
'name': '谭振华',
'age': '22',
'sex': '男',
'eng': 'abcdefg',
'now_time': datetime.datetime.today(),
'tan': person,
'id': a
}
return render(request, 'show.html', data)
|
#! /usr/bin/python
# -*-coding:utf-8-*-
"""
@Author: Tony 2513141027
@Date: 2019/10/9 21:28
@Description: the QTreeView widget with a built-in system model
QTreeWidget
Model
QDirModel
"""
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
if __name__ == '__main__':
app = QApplication(sys.argv)
model = QDirModel()
tree = QTreeView()
tree.setModel(model)
tree.setWindowTitle("QTreeView")
tree.resize(600, 400)
tree.show()
sys.exit(app.exec_())
|
all_code = ''
all_code2 = ''
for a, b in self.progrem.items():
if a[0] == '_':
self.add(open(b[0], 'r').read(), a, b[0])
for a, b in self.cods.items():
if 'def main' in b.code:
main = b
continue
all_code += '\n###%s\n'%a
all_code += b.code
all_code += '\n###%s\n'%a
all_code += '\n###%s\n'%a
all_code += main.code
all_code += '\n###%s\n'%a
for i in all_code.split('\n'):
if i:
all_code2 += i + '\n'
open(r'finish_projects\%s.py'%self.progrem['name'], 'w').write(all_code2)
|
from django.conf.urls import url, include
from django.views.generic import ListView, DetailView
from poll.models import Poll, Choice
from . import views
urlpatterns = [
url(r'^$', views.new_poll, name='post_list'),
url(r'^create_poll', views.create_poll, name='create_poll'),
] |
#70. Climbing Stairs
#https://leetcode.com/problems/climbing-stairs/solution/
class Solution:
def climbStairs(self, n: int) -> int:
# recursive solution with memoization; avoid a mutable default argument
def solve(n, memo):
if n == 0:
return 1
if n < 0:
return 0
if n in memo:
return memo[n]
memo[n] = solve(n - 1, memo) + solve(n - 2, memo)
return memo[n]
return solve(n, {})
|
import math
def parse(in_file):
lines = in_file.readlines()
timestamp = int(lines[0].strip())
raw_buslist = lines[1].split(',')
congruences = [(int(val), int(val) - i) for i,val in enumerate(raw_buslist) if val != 'x']
buslist = [x[0] for x in congruences]
return timestamp, buslist, congruences
def p1(avail_ts, buslist):
ts = {k: math.ceil(avail_ts / k) * k for k in buslist}
first_bus, first_ts = min(ts.items(), key=lambda x: x[1])
return first_bus * (first_ts - avail_ts)
def p2(congruences): # chinese remainder
curr_sum = 0
prod = math.prod(item[0] for item in congruences)
def mult_inv(a, b): # extended euclidean algorithm
if a == 0:
return b, 0, 1
gcd, x1, y1 = mult_inv(b% a, a)
x = y1 - math.floor(b/a) * x1
y = x1
return gcd, x, y
for ni, ai in congruences:
p = math.floor(prod / ni)
curr_sum += ai * mult_inv(p, ni)[1] * p
return curr_sum % prod
if __name__ == "__main__":
# with open('input_test_1.txt') as my_file:
with open("input.txt") as my_file:
avail_ts, buslist, congruences = parse(my_file)
print(f"P1 Answer: {p1(avail_ts, buslist)}")
print(f"P2 Answer: {p2(congruences)}") |
"""
file and directory management
File
named location at storage (hard disk)
Directory
A directory or folder is a collection of files and subdirectories
GUI
command line CL (terminal)
os-dependent
windows - RealMode MS-DOS
- Powershell
dir
MacOS - Terminal
ls
Linux/Unix
- Terminal
- Shell script
OS (operating system)
- File System
- Device management
- Memory management
"""
"""
1. write code to self-study truncate() /seek(),tell()
9. Write a Python program to count the number of lines in a text file.
10. Write a Python program to count the frequency of words in a file
13. Write a Python program to copy the contents of a file to another file .
""" |
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.nn as nn
import numpy as np
import torch
import math
# Main network structure; here it is just a simple linear (fully connected) structure
class Net(nn.Module):
def __init__(self, in_num, hidden_num, out_num):
super(Net, self).__init__()
# Build a two-layer network with randomly initialized weights; given input data it returns the layer outputs.
self.input_layer = torch.nn.Linear(in_num, hidden_num)
self.sigmoid = torch.nn.Sigmoid()
self.output_layer = torch.nn.Linear(hidden_num, out_num)
self.relu = torch.nn.Sigmoid()
def forward(self, input_x):
# Apply the activation to the product of the input and the weight matrix, and return the outputs
h_1 = self.sigmoid(self.input_layer(input_x))
h_2 = self.output_layer(h_1)
return h_1, h_2
# prepare the data
xdata = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
ydata = np.array(xdata, copy=True)
# wrap the data into tensors (dataset form)
x = Variable(torch.Tensor(xdata))
x = x.float()
y = Variable(torch.from_numpy(ydata))
y = y.float()
net = Net(4, 2, 4)
# define the optimizer and the loss function
optim = torch.optim.SGD(net.parameters(), lr=0.5, momentum=0.55)
Loss = nn.MSELoss()
# start training:
# train for 60000 iterations in total
x_index = []
for epoch in range(60000):
loss = None
# for batch_x, batch_y in dataloader:
out = net(x)
loss = Loss(out[1], y)
optim.zero_grad()
loss.backward()
optim.step()
# optionally log training progress periodically
'''
if (epoch + 1) % 2 == 0:
print("step: {0} , loss: {1}".format(epoch + 1, loss.item()))
'''
# use the trained model for prediction
predict = net(x)
print('hidden layer output', predict[0], 'reconstructed output', predict[1])
|
from django.contrib import admin
from django.urls import path, include
# For anything other than admin, route to invest.urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('invest.urls')),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
import os
import logging
from dotenv import find_dotenv, load_dotenv
import pickle
def api_devices(path):
total = 0
url = "https://"
def api_call(limit):
if limit == 0:
object_type = "/api/" + PAYLOAD['api_version'] + "/devices/index"
data = requests.get(url + PAYLOAD['host'] + object_type,
verify=False,
auth=(
PAYLOAD['username'], PAYLOAD['password']))
if limit != 0:
object_type = "/api/" + PAYLOAD['api_version'] + \
"/devices/index?limit=" + str(limit)
data = requests.get(url + PAYLOAD['host'] + object_type,
verify=False, auth=(PAYLOAD['username'],
PAYLOAD['password']))
return json.loads(data.text)
device_data = api_call(total)
if 1000 < device_data['total'] <= 10000:
device_data = api_call(str(device_data['total'] + 200))
pickle.dump(device_data, open(path, "wb"))
def main(project_dir):
"""Script built to make api call's for Data used later on by other scripts.
"""
# get logger
logger = logging.getLogger(__name__)
logger.info('Beginning of Script')
raw_data_path = os.path.join(project_dir, 'data', 'raw')
devices_data_path = os.path.join(raw_data_path, 'devices.pkl')
api_devices(devices_data_path)
if __name__ == '__main__':
# getting root directory
PROJECT_DIR = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# setup logger
LOG_FMT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FMT)
# find .env automatically by walking up directories until it's found
DOTENV_PATH = find_dotenv()
# load up the entries as environment variables
load_dotenv(DOTENV_PATH)
# PAYLOAD for login to IPAM
PAYLOAD = {
'host': os.environ.get("NETM_HOST"),
'username': os.environ.get("NETM_USERNAME"),
'password': os.environ.get("NETM_PASSWORD"),
'api_version': os.environ.get("NETM_API_VERSION")
}
# call the main
main(PROJECT_DIR)
|
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import urllib.request
import json
import random
import textwrap
#For generating random quotes
def getFont(i):
switcher={
1: "LongLiner.ttf",
2: "Chasy.otf",
3: "Summer.otf",
4: "Lemon.otf",
5: "Orange.ttf",
6: "paper.ttf",
7: "Shorelines.otf",
}
return switcher.get(i)
# Get an image from picsum
urllib.request.urlretrieve("https://picsum.photos/1080", "00000001.jpg")
#Open json file
f = open('quotes.json')
quotes = json.load(f)
#Select a random quote from the json file
num = random.randint(0,len(quotes)-1)
thisQuote = quotes[num].get("text")
#TODO Remove saving photo locally
img = Image.open("00000001.jpg")
lines = textwrap.wrap(thisQuote, width=25)
draw = ImageDraw.Draw(img)
# Get a random font; the switcher above only defines fonts 1-7
fontNumber = random.randint(1,7)
font = ImageFont.truetype(getFont(fontNumber), 95)
#Get the largest letter in the font
if(fontNumber == 4):
line_height = font.getsize('hg')[1] + 20
else:
line_height = font.getsize('hg')[1]
y_text = 50
x = 0
for line in lines:
draw.text((50, y_text),line,(255,255,255),font=font, align = "left")
y_text += line_height
#Draw the author
draw.text((50, y_text+line_height),"- " + quotes[num].get("author"),(255,255,255),font=font, align = "left")
img.save('sample-out.jpg')
|
import sys
sys.path.append('c:\\program files\\anaconda3\\lib\\site-packages')
import glob
import os, os.path
import csv
import operator
import numpy as np
import pandas as pd
print("***********Showing CPU and GPU results are same****************")
os.chdir("F:\\Studies\\Ph.D\\Ph.D Work\\ProteinDataSet\\")
#For Module 1------->
cpu_result_sum = np.loadtxt("similarity_cpu.txt")
print("Similarity CPU Result read Done!")
p = np.shape(cpu_result_sum)[0]
q = np.shape(cpu_result_sum)[1]
cpu_row = np.int32(p)
cpu_col = np.int32(q)
np.set_printoptions(precision=3)
print(cpu_result_sum)
cpu_sum = 0.0
gpu_sum1 = 0.0
gpu_sum2 = 0.0
for i in range(0,cpu_row):
for j in range(0,cpu_col):
cpu_sum = cpu_sum + cpu_result_sum[i][j]
print("")
print("CPU Result Sum = ",cpu_sum)
#For Module 2 ------------>
#For Row wise calculation
gpu_result_sum1 = np.loadtxt("similarity_gpu-1.txt")
print("\nSimilarity Row wise Result read Done!")
r = np.shape(gpu_result_sum1)[0]
s = np.shape(gpu_result_sum1)[1]
gpu_row = np.int32(r)
gpu_col = np.int32(s)
np.set_printoptions(precision=3)
print(gpu_result_sum1)
for i in range(0, gpu_row):
for j in range(0, gpu_col):
gpu_sum1 = gpu_sum1 + gpu_result_sum1[i][j]
print("\nGPU Result Row wise Sum = ",gpu_sum1)
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import *
from .models import Todo
# Create your views here.
class TodoApiView(APIView):
serializer_class = TodoSerializer
def get(self, request):
try:
todos = Todo.objects.all()
data = TodoSerializer(todos, many=True)
return Response({
"status":status.HTTP_200_OK,
"data": data.data
}, status.HTTP_200_OK)
except Exception:
return Response({
"status":status.HTTP_500_INTERNAL_SERVER_ERROR,
"message": "Something went wrong"
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request):
try:
data = TodoSerializer(data=request.data)
data.is_valid(raise_exception=False)
request_data = data.data
todo = Todo.objects.create(body=request_data["body"])
return Response({
"status":status.HTTP_201_CREATED,
"message": "Todo added successfully"
},status=status.HTTP_201_CREATED)
except Exception as e:
return Response({
"status":status.HTTP_400_BAD_REQUEST,
"message": str(e)
},status=status.HTTP_400_BAD_REQUEST)
class EditTodoView(APIView):
serializer_class = EditSerializer
def put(self, request, todo_id):
try:
data = EditSerializer(data=request.data)
data.is_valid(raise_exception=False)
request_data = data.data
getTodo = Todo.objects.get(id=todo_id)
if request_data["done"]==True:
getTodo.done = True
else:
getTodo.done= False
if request_data["body"]:
getTodo.body = request_data["body"]
getTodo.save()
return Response({
"status": status.HTTP_200_OK,
"message":"Todo modification successful"
}, status=status.HTTP_200_OK)
except Todo.DoesNotExist:
return Response({
"status":status.HTTP_404_NOT_FOUND,
"message":"Todo not found"
}, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
return Response({
"status":status.HTTP_500_INTERNAL_SERVER_ERROR,
"message":str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class DeleteTodoView(APIView):
def delete(self, request, todo_id):
try:
getTodo = Todo.objects.get(id=todo_id)
getTodo.delete()
return Response({
"status":status.HTTP_200_OK,
"message":"Todo removed successfully"
}, status=status.HTTP_200_OK)
except Todo.DoesNotExist:
return Response({
"status":status.HTTP_404_NOT_FOUND,
"message":"Todo not found"
}, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
return Response({
"status":status.HTTP_500_INTERNAL_SERVER_ERROR,
"message":str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
from doctest import run_docstring_examples
import string
import re
from geo import Position
class Tweet:
def __init__(self, text, time, lat, lon):
self.__text = text
self.__time = time
self.__lat = lat
self.__lon = lon
def get_words(self):
"""Return the words in a tweet, not including punctuation.
"""
# replace every non-letter character with a space, then lowercase the
# text and split it into words
return re.sub("[^a-zA-Z]", " ", self.__text).lower().split()
def get_text(self):
"""Return the text of the tweet."""
return self.__text
def get_time(self):
"""Return the datetime that represents when the tweet was posted."""
return self.__time
def get_location(self):
"""Return a position (see geo.py) that represents the tweet's location."""
return Position(self.__lat,self.__lon)
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.get_text() == other.get_text() and
self.get_location() == other.get_location() and
self.get_time() == other.get_time())
else:
return False
def __str__(self):
"""Return a string representing the tweet."""
return '"{0}" @ {1} : {2}'.format(self.get_text(),
self.get_location(),
self.get_time())
def __repr__(self):
"""Return a string representing the tweet."""
return 'Tweet({0}, {1}, {2}, {3})'.format(*map(repr,(self.get_text(),
self.get_time(),
self.get_location(
).latitude(),
self.get_location(
).longitude())))
def get_sentiment(self,word_sentiments):
""" Return a sentiment representing the degree of positive or negative
sentiment in the given tweet, averaging over all the words in the tweet
that have a sentiment value.
"""
wordList = self.get_words()
summarize = 0
index = 0
# search for matching words; if found, accumulate so the average can be
# computed at the end. If no word matches, return None.
for word in wordList:
if word in word_sentiments:
summarize += word_sentiments[word]
index += 1
if index == 0:
return
return summarize / index
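# --- Hedged usage sketch (not part of the original assignment) ---
# The sentiment values below are made-up illustrative numbers, not a real lexicon.
from datetime import datetime
example_tweet = Tweet("I love sunny days", datetime(2021, 1, 1), 37.77, -122.42)
example_sentiments = {"love": 0.75, "sunny": 0.5}
print(example_tweet.get_words())                        # ['i', 'love', 'sunny', 'days']
print(example_tweet.get_sentiment(example_sentiments))  # (0.75 + 0.5) / 2 = 0.625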
|
#Make text file with pymol commands
def main():
outFile = 'pymol_commands.txt'
crowe_mabs = ['2050', '2082', '2094', '2096', '2130', '2165', '2196', '2479', '2499', '2677', '2832']
antibodies = ['CR3022']
# which set of data to show, and how to color surfaces.
metric = 'max' # `max` looks way better than `total`
color_min = 'white'
color_max = 'red'
# dictionary of views here:
views = {'view1': """\nset_view (\
0.179795116, -0.585253060, 0.790621936,\
-0.975049794, 0.000042952, 0.221752375,\
-0.129811451, -0.810812414, -0.570666671,\
0.002255535, 0.000589155, -237.384719849,\
-32.869243622, 26.272405624, 17.597675323,\
-55707.390625000, 56183.121093750, -20.000000000 )\n""",
}
# save png files of views?
save_png = False
# make full list of antibodies
for mab in crowe_mabs:
ab = 'COV2-'+mab
antibodies.append('COV2-'+mab)
# begin writing to output file
f = open(outFile, "w")
f.write('#commands to load pdbs for antibody mapping\n\n')
# set sequence view to off
f.write('set seq_view, 0\n\n')
# load pdb file for every antibody
for ab in antibodies:
f.write(f'load ../results/pdb_outputs/{ab}_400_6m0j_{metric}_escape.pdb\n')
# rename structure for every antibody
f.write('\n')
for ab in antibodies:
f.write(f'set_name {ab}_400_6m0j_{metric}_escape, {ab}_{metric}\n')
# one-time commands:
f.write('\nhide all\n')
f.write(f'create ACE2, {antibodies[0]}_{metric} and chain A\n')
# remove chain A (ACE2) from each structure.
for ab in antibodies:
f.write(f'remove {ab}_{metric} and chain A\n')
# show ACE2 as gray cartoon
f.write('\nshow cartoon, ACE2; color gray20, ACE2; set cartoon_transparency, 0.5, ACE2\n\n')
# color surface representation by b-factor (which has been recoded to {metric}_escape)
for ab in antibodies:
f.write(f'show surface, {ab}_{metric}; show sticks, {ab}_{metric} and resn NAG; spectrum b, {color_min} {color_max}, {ab}_{metric}, minimum=0\n')
# show each as surface
f.write('\nhide all\n')
for ab in antibodies:
f.write(f'show surface, {ab}_{metric}\n')
# change coloring of RBD_bind and RBD_express to blue white (where blue is negative)
f.write(f'\nshow surface, RBD_bind; spectrum b, blue white, RBD_bind, minimum=-2, maximum=0; show sticks, RBD_bind and resn NAG')
f.write(f'\nshow surface, RBD_expr; spectrum b, blue white, RBD_expr, minimum=-2, maximum=0; show sticks, RBD_expr and resn NAG\n')
# iterate through each view we define above, get view, show as surface, take picture
for view_n in views.keys():
f.write(f'{views[view_n]}\n') # set one view you like
# now get pictures of each
for ab in antibodies:
if save_png:
f.write('\nhide all\n')
f.write(f'show surface, {ab}_{metric}\n')
if save_png:
f.write(f'png ../structural_views/{ab}_{metric}_{view_n}.png, ray=1, 600, 600\n')
if not save_png:
f.write('\nsave surface_escape.pse')
break
f.close()
main()
|
# -*- coding: utf8 -*-
import os
import gen_glyph
SN_ENSO_SIZE = 28
SN_ENSO_HEIGHT = 35
SN_ENSO_WIDTH = 560
SN_TITLE_SIZE = 29
SN_TITLE_HEIGHT = 280
SN_TITLE_WIDTH = 48
SN_SUBTITLE_SIZE = 22
SN_SUBTITLE_HEIGHT = 280
SN_SUBTITLE_WIDTH = 48
BG_COLOR = 0x00000000
TXT_COLOR = 0xFFFFFFFF
# For test
BG_COLOR = 0xEEEEEE00
TXT_COLOR = 0x000000FF
FONT = "DFKTLB.TTC"
EXE_CONVERT = "convert.exe"
EXE_COMPOSITE = "composite.exe"
ROTATE_CHARACTER = set(u"「」~()ー-~-∞—()《》{}<>『』[]…")
def gen_song_name_non_select(title, subtitle, out_folder):
vconvert2(os.path.join(out_folder, "sn_non_select.png"), txt=title,
size=(SN_TITLE_WIDTH, SN_TITLE_HEIGHT), font=FONT,
bgcolor=BG_COLOR, txt_color=TXT_COLOR, vspacing=-5, align="North",
font_size=SN_TITLE_SIZE)
def gen_song_name_select_short(title, subtitle, out_folder):
vconvert2(os.path.join(out_folder, "sn_select_short.png"), txt=title,
size=(80, SN_TITLE_HEIGHT), font=FONT,
bgcolor=BG_COLOR, txt_color=TXT_COLOR, vspacing=-5, align="North",
font_size=SN_TITLE_SIZE)
def gen_song_name_select_full(title, subtitle, out_folder):
if not subtitle:
vconvert2(os.path.join(out_folder, "sn_select_full.png"), txt=title,
size=(80, SN_TITLE_HEIGHT), font=FONT,
bgcolor=BG_COLOR, txt_color=TXT_COLOR, vspacing=-5, align="North",
font_size=SN_TITLE_SIZE)
else:
vconvert2(os.path.join(out_folder, "sn_subtitle.png"), txt=subtitle,
size=(48, SN_SUBTITLE_HEIGHT), font=FONT,
bgcolor=BG_COLOR, txt_color=TXT_COLOR, vspacing=-4, align="South",
font_size=SN_SUBTITLE_SIZE)
os.system("%s -size 80x280 xc:#%08x \"%s\"" % (EXE_CONVERT, BG_COLOR, os.path.join(out_folder, "sn_select_full.png")))
os.system("%s -gravity Southwest \"%s\" \"%s\" \"%s\"" % (
EXE_COMPOSITE,
os.path.join(out_folder, "sn_subtitle.png"),
os.path.join(out_folder, "sn_select_full.png"),
os.path.join(out_folder, "sn_select_full.png")
))
os.system("%s -gravity Northeast \"%s\" \"%s\" \"%s\"" % (
EXE_COMPOSITE,
os.path.join(out_folder, "sn_non_select.png"),
os.path.join(out_folder, "sn_select_full.png"),
os.path.join(out_folder, "sn_select_full.png")
))
os.system("del \"%s\"" % os.path.join(out_folder, "sn_subtitle.png"))
#os.system("del %s" % os.path.join(out_folder, "sn_subtitle.png"))
def gen_song_name_enso(title, subtitle, out_folder):
hconvert2(os.path.join(out_folder, "sn_game.png"), txt=title,
size=(SN_ENSO_WIDTH, SN_ENSO_HEIGHT), font=FONT,
bgcolor=BG_COLOR, txt_color=TXT_COLOR, hspacing=-3, align="East",
font_size=SN_ENSO_SIZE)
def gen_song_name_texture(title, subtitle, out_folder, font="DFKTLB.TTC"):
global FONT
FONT = font
gen_song_name_enso(title, subtitle, out_folder)
gen_song_name_non_select(title, subtitle, out_folder)
gen_song_name_select_short(title, subtitle, out_folder)
gen_song_name_select_full(title, subtitle, out_folder)
def gen_glyphs_from_text(text, size):
ret = []
for token_idx, ch in enumerate(text):
if not ch.isspace():
left, top, w, h = gen_glyph.gen1(FONT, size, ch, "token%d.png" % token_idx)
ret.append((w, h))
else:
ret.append((0, 0))
return ret
def gen_glyphs_from_text_detailed(text, size):
ret = []
for token_idx, ch in enumerate(text):
if not ch.isspace():
left, top, w, h = gen_glyph.gen1(FONT, size, ch, "token%d.png" % token_idx)
ret.append((left, top, w, h))
else:
ret.append((0, 0, 0, 0))
return ret
# vconvert version2
# using freetype generated glyph(without extra space) to build a good vertical text
def vconvert2(out_path, txt=u"Test", size=None, font=None, bgcolor=None, txt_color=None, hspacing=None, vspacing=None, align=None, font_size=None, rotate=0):
# gen character pic
glyph_sizes = gen_glyphs_from_text(txt, font_size)
# handle special character
for token_idx, ch in enumerate(txt):
if glyph_sizes[token_idx] == (0, 0): # increase a small gap
os.system("%s -size %dx%d xc:#%08x token%d.png" % (EXE_CONVERT, size[0], 6, bgcolor, token_idx))
glyph_sizes[token_idx] = (size[0], 6)
continue
if ch in ROTATE_CHARACTER: # rotate if needed
os.system("%s -rotate 90 token%d.png token%d.png" % (EXE_CONVERT, token_idx, token_idx))
glyph_sizes[token_idx] = (glyph_sizes[token_idx][1], glyph_sizes[token_idx][0])
continue
# join all tokens
gravity = align or "North"
os.system("%s -gravity %s -background #%08x -append %s \"%s\"" % (EXE_CONVERT, gravity, bgcolor, " ".join(["token%d.png" % i for i in xrange(len(txt))]), out_path))
#os.system("convert.exe -gravity %s -background #%08x -append %s %s" % (gravity, bgcolor, " ".join(["token%d.png" % i for i in xrange(len(txt))]), "test.png"))
os.system("del token*.png")
# width and height
height = 0
width = 0
for w, h in glyph_sizes:
height += h
width = max(width, w)
#print "size = (%d, %d)" % (width, height)
# scaling carefully
scale = 1.0
scale = min(scale, 1.0 * size[0] / width)
scale = min(scale, 1.0 * size[1] / height)
if scale < 1.0:
os.system("%s \"%s\" -resize %f%% \"%s\"" % (EXE_CONVERT, out_path, scale * 100, out_path))
width = int(width * scale)
height = int(height * scale)
#print "scale = %f" % scale
#print "scaled_size = (%d, %d)" % (width, height)
# Adding border
border_x = abs((size[0] - width) // 2)
border_y = abs((size[1] - height) // 2)
if border_x != 0 or border_y != 0:
os.system("%s \"%s\" -bordercolor #FFFFFF00 -border %dx0 \"%s\"" % (EXE_CONVERT, out_path, border_x, out_path))
os.system("%s \"%s\" -gravity %s -background #FFFFFF00 -extent %dx%d \"%s\"" % (EXE_CONVERT, out_path, align, size[0], size[1], out_path))
def hconvert2(out_path, txt=u"Test", size=None, font=None, bgcolor=None, txt_color=None, hspacing=None, vspacing=None, align=None, font_size=None, rotate=0):
# gen character pic
glyph_metrics = gen_glyphs_from_text_detailed(txt, font_size)
max_top = -1
for token_idx, ch in enumerate(txt):
_left, _top, _w, _h = glyph_metrics[token_idx]
max_top = max(max_top, _top)
# handle special character
for token_idx, ch in enumerate(txt):
_left, _top, _w, _h = glyph_metrics[token_idx]
if (_w, _h) == (0, 0): # increase a small gap
os.system("%s -size %dx%d xc:#%08x token%d.png" % (EXE_CONVERT, 6, size[1], bgcolor, token_idx))
glyph_metrics[token_idx] = (_left, _top, 6, size[1])
continue
else:
padding = max_top - _top
#print "padding %d" % padding
if padding > 0:
os.system("%s token%d.png -background #%08x -gravity South -extent %dx%d token%d.png" % (EXE_CONVERT, token_idx, bgcolor, _w, _h + padding, token_idx))
glyph_metrics[token_idx] = (_left, _top, _w, _h + padding)
# join all tokens
gravity = align or "East"
os.system("%s -gravity %s -background #%08x +append %s \"%s\"" % (EXE_CONVERT, gravity, bgcolor, " ".join(["token%d.png" % i for i in xrange(len(txt))]), out_path))
#os.system("convert.exe -gravity %s -background #%08x -append %s %s" % (gravity, bgcolor, " ".join(["token%d.png" % i for i in xrange(len(txt))]), "test.png"))
os.system("del token*.png")
# width and height
height = 0
width = 0
for left, top, w, h in glyph_metrics:
width += h
height = max(height, w)
#print "size = (%d, %d)" % (width, height)
# scaling carefully
scale = 1.0
scale = min(scale, 1.0 * size[0] / width)
scale = min(scale, 1.0 * size[1] / height)
if scale < 1.0:
os.system("%s \"%s\" -resize %f%% \"%s\"" % (EXE_CONVERT, out_path, scale * 100, out_path))
width = int(width * scale)
height = int(height * scale)
#print "scale = %f" % scale
#print "scaled_size = (%d, %d)" % (width, height)
# Adding border
border_x = abs((size[0] - width) // 2)
border_y = abs((size[1] - height) // 2)
if border_x != 0 or border_y != 0:
os.system("%s \"%s\" -bordercolor #FFFFFF00 -border 0x%d \"%s\"" % (EXE_CONVERT, out_path, border_y, out_path))
os.system("%s \"%s\" -gravity %s -background #FFFFFF00 -extent %dx%d \"%s\"" % (EXE_CONVERT, out_path, align, size[0], size[1], out_path))
if __name__ == '__main__':
#gen_song_name_texture(u"カロン", u"TVCM「LISMO!」より", ".")
#gen_song_name_texture(u"ガツガツ!!", u"「トリコ」より", ".")
#gen_song_name_texture(u"季曲", u"~Seasons Of Asia~", ".")
#gen_song_name_texture(u"蓄勢", u"~Gear Up~", ".")
#gen_song_name_texture(u"蓄勢(裏)", u"~Gear Up~", ".")
#gen_song_name_texture(u"蛻變", u"~Transformation~", ".")
gen_song_name_texture(u"Rotter Tarmination", u" ", ".")
#gen_song_name_texture(u"きがつけば あなた", u"KIRIN「午後の紅茶」CMソング", ".")
#gen_song_name_texture(u"練習曲OP.10-4", u" ", ".")
#vconvert2("test.png", txt=u"TVCM「LISMO!」より",
#size=(80, SN_TITLE_HEIGHT), font=FONT,
#bgcolor=0x00000000, txt_color=TXT_COLOR, vspacing=-5, align="North",
#font_size=SN_TITLE_SIZE)
#gen_glyphs_from_text(u"恋は混沌の隷也")#Seasons of Asia~~")
|
import torch.nn as nn
# import torch.nn.functional as F
from neural.types import TT
class MLP(nn.Module):
"""Multi-layered perceptron (also called feed-forwards network)"""
def __init__(self, idim: int, hdim: int, odim: int):
"""Create a feed-forward network.
Args:
idim: size of the input vector
hdim: size of the hidden vector
odim: size of the output vector
"""
super(MLP, self).__init__()
self.L1 = nn.Linear(idim, hdim)
self.L2 = nn.Linear(hdim, odim)
self.activ = nn.LeakyReLU(inplace=True)
def forward(self, X: TT) -> TT:
# Explicitly check that the input dimension matches
assert X.shape[-1] == self.L1.in_features
# Calculate the hidden layer and apply the activation (instead of relying
# on the in-place LeakyReLU side effect)
H = self.activ(self.L1(X))
# H = F.leaky_relu(self.L1.forward(X))
return self.L2(H)
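# --- Hedged usage sketch ---
# Only checks tensor shapes; the weights are randomly initialized, so the output values
# themselves are not meaningful here.
import torch
example_mlp = MLP(idim=4, hdim=8, odim=2)
example_batch = torch.randn(3, 4)        # a batch of 3 input vectors of size idim=4
print(example_mlp(example_batch).shape)  # torch.Size([3, 2])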
|
import pytest
from fastapi.testclient import TestClient
import pandas as pd
from app import app
DATA_PATH = "data/raw/example_for_online_inference.csv"
@pytest.fixture
def client():
with TestClient(app) as client:
yield client
@pytest.fixture
def example_data():
return pd.read_csv(DATA_PATH).to_dict(orient="records")
@pytest.fixture
def example_row(example_data):
return example_data[5]
def test_startup(client):
resp = client.get("/healthcheck")
assert 200 == resp.status_code
def test_root(client):
resp = client.get("/")
assert 200 == resp.status_code
def test_normal_data_return_200_status(example_row, client):
resp = client.post("/predict", json=example_row)
assert 200 == resp.status_code
def test_data_validation_return_400_status_if_bad_range(example_row, client):
example_row['age'] = 234
resp = client.post("/predict", json=example_row)
assert 400 == resp.status_code
def test_data_validation_return_400_status_if_not_binary(example_row, client):
example_row['sex'] = 2
resp = client.post("/predict", json=example_row)
assert 400 == resp.status_code
def test_data_validation_return_400_status_if_bad_category(example_row, client):
example_row['slope'] = 9
resp = client.post("/predict", json=example_row)
assert 400 == resp.status_code
def test_normal_data_return_200_multiple_rows(example_data, client):
for row in example_data:
resp = client.post("/predict", json=row)
assert 200 == resp.status_code
|
# -*- coding: utf-8 -*-
a, b, s = map(int, raw_input().split(' '))
if (abs(a) + abs(b)) <= s and (abs(a) + abs(b) - s) % 2 == 0:
print('YES')
else:
print('NO')
|
#!/usr/bin/env python3
n = int(input())
for i in range(n+1):
if int(i * 1.08) == n:
print(i)
exit()
print(":(") |
from tkinter import *
scree_background_colour = "#363124"
class Quizzgui:
def __init__(self):
self.window = Tk()
self.window.minsize(width=400,height=400)
self.window.config(padx=0,pady=50,bg=scree_background_colour)
self.window.title("Quize Time")
self.canvas = Canvas(width = 250,height=250)
self.canvas.place(x=75,y=0)
# self.photo = PhotoImage(file="finalpic.jpg")
self.button = Button(width =5)
self.button.place(x=50, y=300)
self.window.mainloop()
|
import argparse
import glob
import json
import sys
from urllib.parse import unquote
from http.server import SimpleHTTPRequestHandler, HTTPServer
from http import HTTPStatus
"""
This server is very specific and very basic. Ultimately it tries to solve
the problem of finding out what files are available and their content
by exposing this via HTTP. This way, one container can talk to another
container about what mail/*.eml files were created and their content.
To hack on this locally, run::
cd mail
python mailfileserver.py
# or...
python mailfileserver.py --bind 0.0.0.0 9898
Now, ask it which *.eml file exist (using httpie)::
http http://localhost:8000/
Or grep for the contents of one of those files:
http --check-status http://localhost:8000/grep/Does%20this%20exist
"""
class MyRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
files = glob.glob("*.eml")
if self.path == "/":
output = json.dumps({"files": files}).encode("utf-8")
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "application/json;charset=utf-8")
self.send_header("Content-Length", int(len(output)))
self.end_headers()
self.wfile.write(output)
elif self.path.startswith("/grep/"):
grep = unquote(self.path).replace("/grep/", "", 1)
# Search all files for a line containing this string.
finds = []
for file in files:
with open(file) as f:
for i, line in enumerate(f):
if grep in line:
finds.append(f"{i + 1}:{line}")
break
output = json.dumps({
"matches": finds,
"grep": grep,
}).encode("utf-8")
if finds:
self.send_response(HTTPStatus.OK)
else:
self.send_response(HTTPStatus.NOT_FOUND)
self.send_header("Content-Type", "application/json;charset=utf-8")
self.send_header("Content-Length", int(len(output)))
self.end_headers()
self.wfile.write(output)
elif self.path.startswith("/debug"):
all_files = {}
for file in files:
with open(file) as f:
all_files[file] = f.read()
output = json.dumps({"all": all_files}).encode("utf-8")
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "application/json;charset=utf-8")
self.send_header("Content-Length", int(len(output)))
self.end_headers()
self.wfile.write(output)
else:
super().do_GET()
def run(port, bind, klass=MyRequestHandler, protocol="HTTP/1.0"):
server_address = (bind, port)
klass.protocol_version = protocol
with HTTPServer(server_address, klass) as httpd:
host, port = httpd.socket.getsockname()
print(
f"Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
)
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bind",
"-b",
default="",
metavar="ADDRESS",
help="Specify alternate bind address " "[default: all interfaces]",
)
parser.add_argument(
"port",
action="store",
default=8000,
type=int,
nargs="?",
help="Specify alternate port [default: 8000]",
)
args = parser.parse_args()
sys.exit(run(args.port, args.bind))
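# --------------------------------------------------------------------------
# A minimal client sketch (not part of the original script): it assumes the
# server above is running on localhost:8000 and queries the "/" listing and
# the "/grep/<text>" endpoint using only the standard library. The search
# string is purely illustrative.
#
# import json
# from urllib.request import urlopen
# from urllib.parse import quote
# from urllib.error import HTTPError
#
# with urlopen("http://localhost:8000/") as resp:
#     print(json.load(resp)["files"])            # the *.eml files in the server's cwd
# try:
#     with urlopen("http://localhost:8000/grep/" + quote("Does this exist")) as resp:
#         print(json.load(resp)["matches"])       # first matching line per file
# except HTTPError as err:
#     print("no matches:", err.code)              # the server answers 404 when nothing matches
# --------------------------------------------------------------------------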
|
import datetime
import pandas as pd
import logging
# Import base class
from .retrieval import Retrieval, get_data_dir, _data_dir_fallback
log = logging.getLogger(__name__)
class JHU(Retrieval):
"""
This class can be used to retrieve and filter the dataset from the online repository of the coronavirus visual dashboard operated
by the `Johns Hopkins University <https://coronavirus.jhu.edu/>`_.
Features
- download all files from the online repository of the coronavirus visual dashboard operated by the Johns Hopkins University.
- filter by deaths, confirmed cases and recovered cases
- filter by country and state
- filter by date
Example
-------
.. code-block::
jhu = cov19.data_retrieval.JHU()
jhu.download_all_available_data()
# Access the data via
jhu.data
#or
jhu.get_new("confirmed","Italy")
jhu.get_total(filter)
"""
@property
def data(self):
if self.confirmed is None or self.deaths is None or self.recovered is None:
return None
return (self.confirmed, self.deaths, self.recovered)
def __init__(self, auto_download=False):
"""
On init of this class the base Retrieval Class __init__ is called, with jhu specific
arguments.
Parameters
----------
auto_download : bool, optional
Whether or not to automatically call the download_all_available_data() method.
One should explicitly call this method for more configuration options
(default: false)
"""
# ------------------------------------------------------------------------------ #
# Init Retrieval Base Class
# ------------------------------------------------------------------------------ #
"""
A name mainly used for the Local Filename
"""
name = "Jhu"
"""
The url(s) to the main dataset as csv; if none is supplied the fallback routines get used
"""
url_csv = [
"https://raw.githubusercontent.com/Quan-kas/IbagueCovid19/main/IbagueCovid19.csv",
"https://raw.githubusercontent.com/Quan-kas/IbagueCovid19/main/IbagueCovid19.csv",
"https://raw.githubusercontent.com/Quan-kas/IbagueCovid19/main/IbagueCovid19.csv",
]
"""
Kwargs for pandas read csv
"""
kwargs = {}  # Suppress warning
"""
Fallbacks
"""
fallbacks = [self._fallback_local_backup]
"""
If the local file is older than the update_interval it gets updated once the
download all function is called. Can be a different value depending on the parent class.
"""
update_interval = datetime.timedelta(days=1)
# Init the retrieval base class
Retrieval.__init__(self, name, url_csv, fallbacks, update_interval, **kwargs)
self.confirmed = None
self.deaths = None
self.recovered = None
if auto_download:
self.download_all_available_data()
def download_all_available_data(self, force_local=False, force_download=False):
"""
Attempts to download from the main urls (self.url_csv) which was set on initialization of
this class.
If this fails it downloads from the fallbacks. It can also be specified to use the local files
or to force the download. The download methods are inherited from the base retrieval class.
Parameters
----------
force_local : bool, optional
If True forces to load the local files.
force_download : bool, optional
If True forces the download of new files
"""
if force_local and force_download:
raise ValueError("force_local and force_download cant both be True!!")
# ------------------------------------------------------------------------------ #
# 1 Download or get local file
# ------------------------------------------------------------------------------ #
retrieved_local = False
if self._timestamp_local_old(force_local) or force_download:
self._download_helper(**self.kwargs)
else:
retrieved_local = self._local_helper()
# ------------------------------------------------------------------------------ #
# 2 Save local
# ------------------------------------------------------------------------------ #
if not retrieved_local:
    self._save_to_local()
# ------------------------------------------------------------------------------ #
# 3 Convert to useable format
# ------------------------------------------------------------------------------ #
self._to_iso()
def _to_iso(self):
"""
Converts the data to a usable format, i.e. converts all date strings to
datetime objects and renames some columns.
This is most of the time the first place one has to look at if something breaks!
self.data -> self.data converted
"""
def helper(df):
try:
df = df.drop(columns=["Lat", "Long"]).rename(
columns={"Province/State": "state", "Country/Region": "country"}
)
df = df.set_index(["country", "state"])
df.columns = pd.to_datetime(df.columns)
except Exception as e:
log.warning(f"There was an error formating the data! {e}")
raise e
return df
self.confirmed = helper(self.confirmed).T
self.deaths = helper(self.deaths).T
self.recovered = helper(self.recovered).T
return True
def get_total_confirmed_deaths_recovered(
self,
country: str = None,
state: str = None,
begin_date: datetime.datetime = None,
end_date: datetime.datetime = None,
):
"""
Retrieves all confirmed, deaths and recovered cases from the Johns Hopkins University dataset as a DataFrame with datetime index.
Can be filtered by country and state, if only a country is given all available states get summed up.
Parameters
----------
country : str, optional
name of the country (the "Country/Region" column), can be None if the whole summed up data is wanted (why would you do this?)
state : str, optional
name of the state (the "Province/State" column), can be None if country is set or the whole summed up data is wanted
begin_date : datetime.datetime, optional
initial date for the returned data, if no value is given the first date in the dataset is used
end_date : datetime.datetime, optional
last date for the returned data, if no value is given the most recent date in the dataset is used
Returns
-------
: pandas.DataFrame
"""
# filter
df = pd.DataFrame(
columns=["date", "confirmed", "deaths", "recovered"]
).set_index("date")
if country is None:
df["confirmed"] = self.confirmed.sum(axis=1, skipna=True)
df["deaths"] = self.deaths.sum(axis=1, skipna=True)
df["recovered"] = self.recovered.sum(axis=1, skipna=True)
else:
if state is None:
df["confirmed"] = self.confirmed[country].sum(axis=1, skipna=True)
df["deaths"] = self.deaths[country].sum(axis=1, skipna=True)
df["recovered"] = self.recovered[country].sum(axis=1, skipna=True)
else:
df["confirmed"] = self.confirmed[(country, state)]
df["deaths"] = self.deaths[(country, state)]
df["recovered"] = self.recovered[(country, state)]
df.index.name = "date"
return self.filter_date(df, begin_date, end_date)
def get_new(
self,
value="confirmed",
country: str = None,
state: str = None,
data_begin: datetime.datetime = None,
data_end: datetime.datetime = None,
):
"""
Retrieves all new cases from the Johns Hopkins University dataset as a DataFrame with datetime index.
Can be filtered by value, country and state, if only a country is given all available states get summed up.
Parameters
----------
value: str
Which data to return, possible values are
- "confirmed",
- "recovered",
- "deaths"
(default: "confirmed")
country : str, optional
name of the country (the "Country/Region" column), can be None
state : str, optional
name of the state (the "Province/State" column), can be None
data_begin : datetime.datetime, optional
initial date for the returned data, if no value is given the first date in the dataset is used
data_end : datetime.datetime, optional
last date for the returned data, if no value is given the most recent date in the dataset is used
Returns
-------
: pandas.DataFrame
table with new cases and the date as index
"""
# ------------------------------------------------------------------------------ #
# Default Parameters
# ------------------------------------------------------------------------------ #
if value not in ["confirmed", "recovered", "deaths"]:
raise ValueError(
'Invalid value. Valid options: "confirmed", "deaths", "recovered"'
)
if self.data is None:
self.download_all_available_data()
if country == "None":
country = None
if state == "None":
state = None
# If no date is given set to first and last dates in data
if data_begin is None:
data_begin = self.__get_first_date() + datetime.timedelta(days=1)
if data_end is None:
data_end = self.__get_last_date()
if data_begin == self.data[0].index[0]:
raise ValueError("Date has to be after the first dataset entry")
# ------------------------------------------------------------------------------ #
# Retrieve data and filter it
# ------------------------------------------------------------------------------ #
df = pd.DataFrame(columns=["date", value]).set_index("date")
if country is None:
df[value] = getattr(self, value).sum(axis=1, skipna=True)
else:
if state is None:
df[value] = getattr(self, value)[country].sum(axis=1, skipna=True)
else:
df[value] = getattr(self, value)[(country, state)]
df.index.name = "date"
df = self.filter_date(df, data_begin - datetime.timedelta(days=1), data_end)
df = (
df.diff().drop(df.index[0]).astype(int)
) # Neat oneliner to also drop the first row and set the type back to int
return df[value]
def get_total(
self,
value="confirmed",
country: str = None,
state: str = None,
data_begin: datetime.datetime = None,
data_end: datetime.datetime = None,
):
"""
Retrieves all total/cumulative cases from the Johns Hopkins University dataset as a DataFrame with datetime index.
Can be filtered by value, country and state, if only a country is given all available states get summed up.
Parameters
----------
value: str
Which data to return, possible values are
- "confirmed",
- "recovered",
- "deaths"
(default: "confirmed")
country : str, optional
name of the country (the "Country/Region" column), can be None
state : str, optional
name of the state (the "Province/State" column), can be None
data_begin : datetime.datetime, optional
initial date for the returned data, if no value is given the first date in the dataset is used
data_end : datetime.datetime, optional
last date for the returned data, if no value is given the most recent date in the dataset is used
Returns
-------
: pandas.DataFrame
table with total/cumulative cases and the date as index
"""
# ------------------------------------------------------------------------------ #
# Default Parameters
# ------------------------------------------------------------------------------ #
if value not in ["confirmed", "recovered", "deaths"]:
raise ValueError(
'Invalid value. Valid options: "confirmed", "deaths", "recovered"'
)
if self.data is None:
self.download_all_available_data()
if country == "None":
country = None
if state == "None":
state = None
# Note: It should be fine to NOT check for the date since this is also done by the filter_date method
# ------------------------------------------------------------------------------ #
# Retrieve data and filter it
# ------------------------------------------------------------------------------ #
df = pd.DataFrame(columns=["date", value]).set_index("date")
orig = getattr(self, value)
if country is None:
df[value] = getattr(self, value).sum(axis=1, skipna=True)
else:
if state is None:
df[value] = getattr(self, value)[country].sum(axis=1, skipna=True)
else:
df[value] = getattr(self, value)[(country, state)]
df.index.name = "date"
df = self.filter_date(df, data_begin, data_end)
return df[value]
def filter_date(
self,
df,
begin_date: datetime.datetime = None,
end_date: datetime.datetime = None,
):
"""
Returns the given dataframe between begin and end date. The dataframe has to have a datetime index.
Parameters
----------
begin_date : datetime.datetime, optional
First day that should be filtered
end_date : datetime.datetime, optional
Last day that should be filtered
Returns
-------
: pandas.DataFrame
"""
if begin_date is None:
begin_date = self.__get_first_date()
if end_date is None:
end_date = self.__get_last_date()
if not isinstance(begin_date, datetime.datetime) or not isinstance(
end_date, datetime.datetime
):
raise ValueError(
"Invalid begin_date, end_date: has to be datetime.datetime object"
)
return df[begin_date:end_date]
def __get_first_date(self):
return self.data[0].index[0]
def __get_last_date(self):
return self.data[0].index[-1]
def get_possible_countries_states(self):
"""
Can be used to get a list with all possible states and countries.
Returns
-------
: pandas.DataFrame with the columns "country" and "state"
"""
all_entries = (
list(self.confirmed.columns)
+ list(self.deaths.columns)
+ list(self.recovered.columns)
)
df = pd.DataFrame(all_entries, columns=["country", "state"])
return df
# ------------------------------------------------------------------------------ #
# Helper methods, overload from the base class
# ------------------------------------------------------------------------------ #
def _download_helper(self, **kwargs):
"""
Overloads the method from the base Retrieval class
"""
try:
# Try to download from the original source
self._download_csvs_from_source(self.url_csv, **kwargs)
except Exception as e:
# Try all fallbacks
log.info(f"Failed to download from url {self.url_csv} : {e}")
self._fallback_handler()
finally:
# We save it to the local files
# self.data._save_to_local()
log.info(f"Successfully downloaded new files.")
def _local_helper(self):
"""
Overloads the method from the base Retrieval class
"""
try:
self._download_csvs_from_source(
[
get_data_dir() + self.name + "_confirmed" + ".csv.gz",
get_data_dir() + self.name + "_deaths" + ".csv.gz",
get_data_dir() + self.name + "_recovered" + ".csv.gz",
],
**self.kwargs,
)
log.info(f"Successfully loaded data from local")
return True
except Exception as e:
log.info(f"Failed to load local files! {e} Trying fallbacks!")
self._download_helper(**self.kwargs)
return False
def _save_to_local(self):
"""
Overloads the method from the base Retrieval class
"""
filepaths = [
get_data_dir() + self.name + "_confirmed" + ".csv.gz",
get_data_dir() + self.name + "_deaths" + ".csv.gz",
get_data_dir() + self.name + "_recovered" + ".csv.gz",
]
try:
self.confirmed.to_csv(filepaths[0], compression="infer", index=False)
self.deaths.to_csv(filepaths[1], compression="infer", index=False)
self.recovered.to_csv(filepaths[2], compression="infer", index=False)
self._create_timestamp()
log.info(f"Local backup to {filepaths} successful.")
return True
except Exception as e:
log.warning(f"Could not create local backup {e}")
raise e
def _download_csvs_from_source(self, filepaths, **kwargs):
self.confirmed = pd.read_csv(filepaths[0], **kwargs)
self.deaths = pd.read_csv(filepaths[1], **kwargs)
self.recovered = pd.read_csv(filepaths[2], **kwargs)
def _fallback_local_backup(self):
path_confirmed = (
_data_dir_fallback
+ "/"
+ self.name
+ "_confirmed"
+ "_fallback"
+ ".csv.gz"
)
path_deaths = (
_data_dir_fallback + "/" + self.name + "_deaths" + "_fallback" + ".csv.gz"
)
path_recovered = (
_data_dir_fallback
+ "/"
+ self.name
+ "_recovered"
+ "_fallback"
+ ".csv.gz"
)
self.confirmed = pd.read_csv(path_confirmed, **self.kwargs)
self.deaths = pd.read_csv(path_deaths, **self.kwargs)
self.recovered = pd.read_csv(path_recovered, **self.kwargs)
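# --------------------------------------------------------------------------
# A minimal usage sketch (illustrative only, mirroring the class docstring;
# the country name below is an assumption and depends on what the configured
# CSVs actually contain):
#
# jhu = JHU()
# jhu.download_all_available_data()
# confirmed, deaths, recovered = jhu.data                  # cumulative tables with a datetime index
# new_cases = jhu.get_new("confirmed", country="Italy")    # daily increments as a Series
# totals = jhu.get_total_confirmed_deaths_recovered(country="Italy")
# --------------------------------------------------------------------------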
|
# -*- coding: utf-8 -*-
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from urlparse import urlparse
from urllib import unquote
from lego_process import LegoProcess
from os.path import basename, splitext
import re
# apply the post-order procedure
# Leaf classes
class List2Scalar(LegoProcess):
def __init__(self): pass
def process(self, value):
if type(value) is list:
return value[0]
return None
# Intermediate classes
class MethodAccessTransform(LegoProcess):
def __init__(self, process_obj = None):
super(MethodAccessTransform, self).__init__(process_obj)
self.__mapping = {
"put": "up",
"filepatch": "up",
"post": "up",
"get": "down",
"delta": "down"
}
def process(self, value):
list_value = self.next(value)
if list_value is None: return None
method, uri = list_value
# if access_path.startswith('/api/v1/versions/'):
# access_path = '/'.join(access_path.split('/')[:-1])
access_path = uri.path
access_path = access_path.strip()
if access_path.startswith('/api/v1/versions'):
access_path = '/'.join(access_path.split('/')[:-1])
if access_path.startswith('/api/v2/preview/'):
access_path = '/'.join(access_path.split('/')[5:])
if access_path.startswith('/api/v'):
access_path = '/'.join(access_path.split('/')[4:])
elif access_path.startswith('/dav/'):
access_path = '/'.join(access_path.split('/')[2:])
trans_access = access_path.lower()
if trans_access == "" or trans_access[-1] == "/":
trans_access = None
trans_method = self.__mapping.get(method.lower(), None)
if trans_access and trans_method:
return [trans_method, trans_access]
return None
class ColumnFilter(LegoProcess):
def __init__(self, filter_range, process_obj=None):
super(ColumnFilter, self).__init__(process_obj)
self.__slice = slice(*filter_range)
def process(self, value):
list_value = self.next(value)
if list_value is None: return None
return list_value[self.__slice]
class RawRequestFilter(LegoProcess):
def __init__(self,process_obj = None):
super(RawRequestFilter, self).__init__(process_obj)
def process(self, value):
request = self.next(value)
if request is None: return None
request = request.strip('"')
method, uri, scheme = request.split()
method = method.lower()
uri = unquote(str(uri))
uri = urlparse(uri)
if uri.path.startswith('/api/v1/versions'):
pass
if method == 'get':
if uri.path.startswith('/dav/'):
pass
elif re.match(r"/api/v1/[a-zA-Z]{4,5}/", uri.path):
if "perPage" in uri.query:
uri = None
else:
pass
elif re.match(r"/api/v2/[a-zA-Z]{3,5}/", uri.path):
if uri.path.startswith('/api/v2/url/') or \
uri.path.startswith('/api/v2/multi') or \
uri.path.startswith('/api/v2/user/'):
uri = None
else:
pass
elif uri.path.startswith('/api/v2/preview/'):
pass
else:
uri = None
if uri:
return [method, uri, scheme]
return None
class CleanAccessPath(LegoProcess):
def __init__(self,process_obj = None):
super(CleanAccessPath, self).__init__(process_obj)
def process(self, value):
list_value = self.next(value)
if list_value is None: return None
url = list_value[0]
access_path = url.path
access_path = access_path.strip()
if access_path.startswith('/api/v1/versions'):
access_path = '/'.join(access_path.split('/')[:-1])
if access_path.startswith('/api/v2/preview/'):
access_path = '/'.join(access_path.split('/')[5:])
if access_path.startswith('/api/v'):
access_path = '/'.join(access_path.split('/')[4:])
elif access_path.startswith('/dav/'):
access_path = '/'.join(access_path.split('/')[2:])
if access_path == "" or access_path[-1] == "/": return None
return access_path
class Tokenizer(LegoProcess):
def __init__(self, process_obj = None, minlen = 1):
super(Tokenizer, self).__init__(process_obj)
self.__minlen = minlen
def process(self, value):
sentence = self.next(value)
if sentence is None: return None
tokenizer = RegexpTokenizer(r'[a-zA-Z0-9\x80-\xFF]{%d,}' % self.__minlen)
words = tokenizer.tokenize(sentence)
sep_uni_asc = ""
for word in words:
sep_uni_asc += word[0]
for i in range(1, len(word)):
if (ord(word[i]) - 127) * (ord(word[i-1]) - 127) < 0:
sep_uni_asc += ",%s" % word[i]
else:
sep_uni_asc += word[i]
sep_uni_asc += ","
words = sep_uni_asc.split(',')[:-1]
# to lower case
return map(lambda w: str.lower(w), words)
class RemoveStopWord(LegoProcess):
def __init__(self, process_obj = None):
super(RemoveStopWord, self).__init__(process_obj)
def process(self, value):
word_list = self.next(value)
if word_list is None: return None
stop_words = set(stopwords.words('english'))
return filter(lambda w: not w in stop_words, word_list)
#TODO stemming
class Stemming(object):
def __init__(self, process_obj):
self.__process_obj = process_obj
def process(self, value): raise NotImplementedError
#TODO lemmatization
class Lemmatization(object):
def __init__(self, process_obj):
self.__process_obj = process_obj
def process(self, value): raise NotImplementedError
|
"""
Build a CBOW word2vec model
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import nltk
from nltk.corpus import stopwords
STOPWORDS = stopwords.words('english')
from nltk.stem import WordNetLemmatizer
LEMMATIZER = WordNetLemmatizer()
import os
import sys
import json
sys.path.append(os.path.join('..', 'practice_qcf', 'analysis'))
from word_embed import clean_tokens
#######################################
## Vocabulary and word freqs
#######################################
with open(os.path.join('D:\\qcf_nlp\\practice_qcf\\analysis', 'dictionary.json'), 'r') as f:
vocabulary = json.load(f)
inverse_vocabulary = {v: k for k, v in vocabulary.items()}
with open(os.path.join('D:\\qcf_nlp\\practice_qcf\\analysis', 'word_freqs.json'), 'r') as f:
word_freqs = json.load(f)
def vocab_index(word):
""" Return index of word in vocabulary """
try:
return vocabulary[word]
except KeyError:
return vocabulary['<UNK>'] # word not in vocabulary
def index_to_word(i):
""" Return word corresponding to index in vocabulary """
return inverse_vocabulary[i]
def cw_to_ci(cw):
""" Map contextWord to contextIndices
cw format: (list(str), str)
ci format: (list(int), int)
"""
return (
[vocab_index(t) for t in cw[0]], # transform list of context words
vocab_index(cw[1]), # transform target word
)
def ci_to_cw(ci):
""" Inverse of cw_to_ci """
return (
[index_to_word(i) for i in ci[0]],
index_to_word(ci[1]),
)
#######################################
## CBOW_Model class
#######################################
class CBOW_Model(nn.Module):
""" CBOW word2vec model """
def __init__(self, V, E, CW):
"""
Arguments:
V (int): vocabulary size
E (int): embedding dimension
CW (int): context window size (e.g. 5 <=> 2 words before and after target word)
"""
super(CBOW_Model, self).__init__()
self.V = V
self.E = E
self.CW = CW
self.embed = nn.Embedding(V, E)
self.linear = nn.Linear(E, V)
def forward(self, inputs):
"""
Arguments:
inputs (np.Array (N, CW-1)): indices of context words
Returns:
(np.Array (N, V)): scores for words in vocabulary (not softmaxed)
"""
embeds = self.embed(torch.LongTensor(inputs))
avgEmbeds = torch.mean(embeds, dim=1)
out = self.linear(avgEmbeds)
return out
#######################################
## Trial run
#######################################
def make_sample_CBOW_dataset(fname='93410_0000093410-12-000003_10-Q_output.txt'):
""" Create a sample dataset from a single document """
# Open, clean, tokenize an example document
with open(
os.path.join('D:\\qcf_nlp\\practice_qcf\\data\\relevant_text', fname),
encoding='utf-8'
) as f:
exText = f.read()
exTokens = clean_tokens(exText)
# Construct CBOW training data
# Note: hard-coded context window of 5 here
contextWords = [
([exTokens[i], exTokens[i+1], exTokens[i+3], exTokens[i+4]], exTokens[i+2])
for i in range(len(exTokens) - 4)
]
# Map tokens to indices in vocabulary
contextIndices = [cw_to_ci(cw) for cw in contextWords]
# Convert to numpy arrays
contextArr = np.array([ci[0] for ci in contextIndices])
targetArr = np.array([ci[1] for ci in contextIndices])
return contextArr, targetArr
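# ---------------------------------------------------------------------
# A minimal training sketch (not in the original file): full-batch gradient
# descent on the single-document sample dataset above. Batching, negative
# sampling and any evaluation are deliberately omitted; hyperparameters are
# illustrative assumptions.
# ---------------------------------------------------------------------
def train_sample_CBOW(epochs=5, embed_dim=100, lr=0.05):
    """ Train CBOW_Model on the sample dataset built by make_sample_CBOW_dataset """
    contextArr, targetArr = make_sample_CBOW_dataset()
    model = CBOW_Model(V=len(vocabulary), E=embed_dim, CW=5)
    optimizer = optim.SGD(model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    targets = torch.LongTensor(targetArr)
    for epoch in range(epochs):
        optimizer.zero_grad()
        scores = model(contextArr)        # (N, V) unnormalized scores
        loss = loss_fn(scores, targets)
        loss.backward()
        optimizer.step()
        print(f'epoch {epoch}: loss {loss.item():.4f}')
    return model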
if __name__ == '__main__':
pass |
from django.contrib import admin
# Register your models here.
from payment.models import Invoice, PaymentType
@admin.register(Invoice)
class InvoiceAdmin(admin.ModelAdmin):
list_display = ('PayerID', 'Data', 'Rate', 'quantity', 'Created_date', 'Evidence', 'display_payment')
list_filter = ['Created_date', 'PayerID']
@admin.register(PaymentType)
class PaymentTypeAdmin(admin.ModelAdmin):
pass
|
#coding=utf-8
import random
import itertools as it
from manimlib.constants import *
from manimlib.mobject.types.vectorized_mobject import VMobject, VGroup
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.mobject.geometry import Rectangle, Square, Annulus, RegularPolygon, Arrow
from manimlib.once_useful_constructs.fractals import fractalify
from manimlib.utils.space_ops import get_norm
# self.skip_animations
# self.force_skipping()
# self.revert_to_original_skipping_status()
## Some handmade control buttons
class Button(VMobject):
CONFIG = {
"color" : YELLOW,
"inner_radius" : 2,
"outer_radius" : 2.5,
}
def generate_points(self):
self.ring = Annulus(
inner_radius = self.inner_radius,
outer_radius = self.outer_radius,
fill_color = self.color
)
self.symbol = self.generate_symbol()
self.add(VGroup(self.ring, self.symbol))
def generate_symbol(self):
# raise Exception("Not Implemented")
return VMobject()
def get_ring(self):
return self.ring
def get_symbol(self):
return self.symbol
class PauseButton(Button):
def generate_symbol(self):
symbol = VGroup(*[
Rectangle(
length = 2, width = 0.5, stroke_width = 0,
fill_color = self.color, fill_opacity = 1,
)
for i in range(2)
])
symbol.arrange_submobjects(RIGHT, buff = 0.5)
symbol.set_height(self.inner_radius)
return symbol
class PlayButton(Button):
def generate_symbol(self):
symbol = RegularPolygon(
n = 3, stroke_width = 0,
fill_color = self.color, fill_opacity = 1,
)
symbol.set_height(self.inner_radius)
return symbol
class SkipButton(Button):
def generate_symbol(self):
symbol = VGroup(*[
RegularPolygon(
n = 3, stroke_width = 0,
fill_color = self.color, fill_opacity = 1
)
for i in range(2)
])
symbol.arrange_submobjects(RIGHT, buff = 0)
symbol.set_height(self.inner_radius * 0.7)
return symbol
class RewindButton(Button):
def generate_symbol(self):
symbol = VGroup(*[
RegularPolygon(
n = 3, stroke_width = 0, start_angle = np.pi,
fill_color = self.color, fill_opacity = 1
)
for i in range(2)
])
symbol.arrange_submobjects(RIGHT, buff = 0)
symbol.set_height(self.inner_radius * 0.7)
return symbol
class StopButton(Button):
def generate_symbol(self):
symbol = Square(stroke_width = 0, fill_color = self.color, fill_opacity = 1)
symbol.set_height(self.inner_radius * 0.8)
return symbol
class TickButton(Button):
CONFIG = {
"color" : GREEN,
}
def generate_symbol(self):
symbol = TexMobject("\\checkmark")
symbol.set_color(self.color)
symbol.set_height(self.inner_radius)
return symbol
## Danger Sign
class HollowTriangle(VMobject):
CONFIG = {
"inner_height" : 3.5,
"outer_height" : 5,
"color" : RED,
"fill_opacity" : 1,
"stroke_width" : 0,
"mark_paths_closed" : False,
"propagate_style_to_family" : True,
}
def generate_points(self):
self.points = []
inner_tri = RegularPolygon(n = 3, start_angle = np.pi/2)
outer_tri = RegularPolygon(n = 3, start_angle = np.pi/2)
inner_tri.flip()
inner_tri.set_height(self.inner_height, about_point = ORIGIN)
outer_tri.set_height(self.outer_height, about_point = ORIGIN)
self.points = outer_tri.points
self.add_subpath(inner_tri.points)
class DangerSign(VMobject):
CONFIG = {
"color" : RED,
"triangle_config" : {},
}
def generate_points(self):
hollow_tri = HollowTriangle(**self.triangle_config)
bang = TexMobject("!")
bang.set_height(hollow_tri.inner_height * 0.7)
bang.move_to(hollow_tri.get_center_of_mass())
self.add(hollow_tri, bang)
self.set_color(self.color)
## QED symbols
# Used when trying to claim something is proved/"proved".
class QEDSymbol(VMobject):
CONFIG = {
"color" : WHITE,
"height" : 0.5,
}
def generate_points(self):
qed = Square(fill_color = self.color, fill_opacity = 1, stroke_width = 0)
self.add(qed)
self.set_height(self.height)
class FakeQEDSymbol(VMobject):
CONFIG = {
"jagged_percentage" : 0.02,
"order" : 1,
"qed_config" : {},
}
def generate_points(self):
fake_qed = fractalify(
QEDSymbol(**self.qed_config),
order = self.order, dimension = 1+self.jagged_percentage
)
self.add(fake_qed)
## Sudoku board
# A byproduct during the experiment. Not finalized.
class SudokuBoard(VGroup):
CONFIG = {
"n" : 3,
"height" : 6,
}
def generate_points(self):
n = self.n
self.small_squares = VGroup(*[
Square(side_length = 1, stroke_color = GREY, stroke_width = 3)
for k in range(n**4)
])
self.small_squares.arrange_submobjects_in_grid(n**2, n**2, buff = 0)
self.big_squares = VGroup(*[
Square(side_length = self.n, stroke_color = WHITE, stroke_width = 8)
for k in range(n**2)
])
self.big_squares.arrange_submobjects_in_grid(n, n, buff = 0)
self.entries = dict()
self.add(self.small_squares, self.big_squares)
self.center()
self.set_height(self.height)
def add_entry(self, x, y, entry):
# x: row index (up->down, 0->8); y: column index (left->right, 0->8)
text = TexMobject(str(entry))
square = self.get_square(x, y)
fit_mobject_in(text, square)
self.entries[(x, y)] = text
self.add(text)
def add_entries(self, entries):
for (x, y), entry in entries:
self.add_entry(x, y, entry)
def get_square(self, x, y):
square_index = (self.n**2) * x + y
return self.small_squares[square_index]
def get_square_critical_point(self, x, y, direction):
square = self.get_square(x, y)
return square.get_critical_point(direction)
def get_entry(self, x, y):
return self.entries[(x, y)]
def get_elimination_arrow(self, x1, y1, x2, y2, direction, **arrow_kwargs):
nudge = self.get_square(0, 0).get_height() * 0.05 * (-direction)
start_point = self.get_square_critical_point(x1, y1, direction) + nudge
end_point = self.get_square_critical_point(x2, y2, direction) + nudge
arrow = Arrow(start_point, end_point, buff = 0, **arrow_kwargs)
return arrow
|
import time
import sys
from Report import Report
if __name__ == '__main__':
rpt = Report()
if len(sys.argv) < 2:
print("Usage: PATH_TO_MAIN.PY PATH_TO_DIRECTORY")
else:
print("Reading the databases...", file=sys.stderr)
before = time.time()
areaTitlesDict = {}
areaTitlesObj = open(str(sys.argv[1] + "/area_titles.csv"))
areaTitlesObj.readline()
for line in areaTitlesObj:
line = line.strip()
line = line.strip("\"")
fips, title = line.split("\",\"")
if fips.isnumeric() and not fips.endswith("000"):
areaTitlesDict[fips] = title
dataObj = open(str(sys.argv[1] + "/2019.annual.singlefile.csv"))
allNumAreas = 0
allTotWages = 0
allMaxWage = 0
allMaxWageArea = ""
allTotEstab = 0
allMaxEstab = 0
allMaxEstabArea = ""
allTotEmp = 0
allMaxEmp = 0
allMaxEmpArea = ""
softNumAreas = 0
softTotWages = 0
softMaxWage = 0
softMaxWageArea = ""
softTotEstab = 0
softMaxEstab = 0
softMaxEstabArea = ""
softTotEmp = 0
softMaxEmp = 0
softMaxEmpArea = ""
for line in dataObj:
lineList = line.split(",", maxsplit=11)
del lineList[11]  # drop the trailing remainder produced by maxsplit=11
for i in range(len(lineList)):
lineList[i] = lineList[i].strip("\"")
if lineList[0] in areaTitlesDict:
# For all industries
if lineList[1] == "0" and lineList[2] == "10":
allNumAreas += 1
allTotEstab += int(lineList[8])
allTotEmp += int(lineList[9])
allTotWages += int(lineList[10])
# Update max values for all industry
if int(lineList[8]) > allMaxEstab:
allMaxEstab = int(lineList[8])
allMaxEstabArea = areaTitlesDict[lineList[0]]
if int(lineList[9]) > allMaxEmp:
allMaxEmp = int(lineList[9])
allMaxEmpArea = areaTitlesDict[lineList[0]]
if int(lineList[10]) > allMaxWage:
allMaxWage = int(lineList[10])
allMaxWageArea = areaTitlesDict[lineList[0]]
# For software industry only
if lineList[1] == "5" and lineList[2] == "5112":
softNumAreas += 1
softTotEstab += int(lineList[8])
softTotEmp += int(lineList[9])
softTotWages += int(lineList[10])
# Update max values for software industry
if int(lineList[8]) > softMaxEstab:
softMaxEstab = int(lineList[8])
softMaxEstabArea = areaTitlesDict[lineList[0]]
if int(lineList[9]) > softMaxEmp:
softMaxEmp = int(lineList[9])
softMaxEmpArea = areaTitlesDict[lineList[0]]
if int(lineList[10]) > softMaxWage:
softMaxWage = int(lineList[10])
softMaxWageArea = areaTitlesDict[lineList[0]]
after = time.time()
print(f"Done in {after - before:.3f} seconds!", file=sys.stderr)
rpt.all.num_areas = allNumAreas
rpt.all.total_annual_wages = allTotWages
rpt.all.max_annual_wage = (allMaxWageArea, allMaxWage)
rpt.all.total_estab = allTotEstab
rpt.all.max_estab = (allMaxEstabArea, allMaxEstab)
rpt.all.total_empl = allTotEmp
rpt.all.max_empl = (allMaxEmpArea, allMaxEmp)
rpt.soft.num_areas = softNumAreas
rpt.soft.total_annual_wages = softTotWages
rpt.soft.max_annual_wage = (softMaxWageArea, softMaxWage)
rpt.soft.total_estab = softTotEstab
rpt.soft.max_estab = (softMaxEstabArea, softMaxEstab)
rpt.soft.total_empl = softTotEmp
rpt.soft.max_empl = (softMaxEmpArea, softMaxEmp)
# Print the completed report
print(rpt)
|
def area1(x,y):
return 0.5*float(x)*float(y)
def area2(a,b,c):
s= (a + b + c) / 2
area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
return area
choice = input("What are the dimensions of the triangle which you have:\n"
"1.If you have height and base\n"
"2.If you have all sides\n")
print(choice)
if (choice=='1'):
base= input("Enter the base of triangle:")
print(base)
height= input("Enter the height of triangle:")
print(height)
print("The area of triangle is ",area1(base,height))
elif (choice=='2'):
s1=int(input("Enter side1"))
print(s1)
s2 = int(input("Enter side2"))
print(s2)
s3 = int(input("Enter side3"))
print(s3)
print("The area of triangle is ",area2(s1,s2,s3))
else:
print("You have entered wrong values")
|
# Data Visualization
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
#from constants import *
def plot_path_with_map(path_df, map_path, coordinates_box):
img = plt.imread(map_path)
fig, ax = plt.subplots()
latitudes = path_df.latitude
longitudes = path_df.longitude
numberOfDots = len(path_df.index)
colors = cm.rainbow(np.linspace(0, 1, numberOfDots))
ax.scatter(longitudes, latitudes, color=colors)
ax.set_xlim(coordinates_box[0], coordinates_box[1])
ax.set_ylim(coordinates_box[2], coordinates_box[3])
ax.imshow(img, zorder=0, extent=coordinates_box, aspect= 'equal')
plt.show()
plt.clf()
def plot_path(path_df):
latitudes = path_df.latitude
longitudes = path_df.longitude
numberOfDots = len(path_df.index)
colors = cm.rainbow(np.linspace(0, 1, numberOfDots))
plt.margins(0) # Fit the map in the image
plt.scatter(longitudes, latitudes, color=colors)
#plt.scatter(bus_stop_longitude, bus_stop_latitude, marker='*')
plt.show()
def save_path(path_df, name):
latitudes = path_df.latitude
longitudes = path_df.longitude
numberOfDots = len(path_df.index)
colors = cm.rainbow(np.linspace(0, 1, numberOfDots))
plt.margins(0) # Fit the map in the image
plt.scatter(longitudes, latitudes, color=colors)
#plt.scatter(bus_stop_longitude, bus_stop_latitude, marker='*')
plt.savefig(name)
plt.clf()
def plot_correctness(correctness):
plt.plot(range(len(correctness)), correctness)
plt.ylabel('Percentage')
plt.xlabel('# Readings')
plt.title('Accuracy percentage per number of readings')
plt.grid(True)
plt.show() |
import tensorflow as tf
import sys
from src.main.python_code.utils.nnUtils import activate, softmax_cross_entropy # Activation function for neurons and Loss Function
def simple_lrp_linear(R, input_tensor, weights, biases=None):
R_shape = R.get_shape().as_list()
if len(R_shape)!=2:
linear = tf.nn.bias_add(tf.matmul(input_tensor, weights), biases)
activations_shape = linear.get_shape().as_list()
# activations_shape = self.activations.get_shape().as_list()
R = tf.reshape(R, activations_shape)
Z = tf.expand_dims(weights, 0) * tf.expand_dims(input_tensor, -1)
Zs = tf.expand_dims(tf.reduce_sum(Z, 1), 1)
if biases is not None:
Zs += tf.expand_dims(tf.expand_dims(biases, 0), 0)
stabilizer = 1e-8*(tf.where(tf.greater_equal(Zs,0), tf.ones_like(Zs, dtype=tf.float32), tf.ones_like(Zs, dtype=tf.float32)*-1))
Zs += stabilizer
return tf.reduce_sum((Z / Zs) * tf.expand_dims(R, 1),2)
# def _simple_lrp(self, R): # R is the relevance score of the layer under consideration
# self.R = R
# # Check whether the relevance score tensor has the same dimensions as the activation tensor (number of neurons)
# R_shape = self.R.get_shape().as_list()
# if len(R_shape) != 2:
# activations_shape = self.activations.get_shape().as_list()
# self.R = tf.reshape(self.R, activations_shape)
#
# Z = tf.expand_dims(self.weights, 0) * tf.expand_dims(self.input_tensor,
# -1) # tf.expand_dims adds a dimension to the given tensor (e.g. 0 at the start, -1 at the end)
# Zs = tf.expand_dims(tf.reduce_sum(Z, 1), 1) + tf.expand_dims(tf.expand_dims(self.biases, 0), 0)
# stabilizer = 1e-8 * (
# tf.where(tf.greater_equal(Zs, 0), tf.ones_like(Zs, dtype=tf.float32), tf.ones_like(Zs, dtype=tf.float32) * -1))
# Zs += stabilizer
#
# return tf.reduce_sum((Z / Zs) * tf.expand_dims(self.R, 1),
# 2) # Returns the relevance scores of the previous layer (they depend on the weights W and on the current scores R)
def epsilon_lrp(R, epsilon, input_tensor, weights, biases=None):
'''
LRP according to Eq(58) in DOI: 10.1371/journal.pone.0130140
'''
Z = tf.expand_dims(weights, 0) * tf.expand_dims(input_tensor, -1)
Zs = tf.expand_dims(tf.reduce_sum(Z, 1), 1)
if biases is not None:
Zs += tf.expand_dims(tf.expand_dims(biases, 0), 0)
Zs += epsilon * tf.where(tf.greater_equal(Zs, 0), tf.ones_like(Zs), tf.ones_like(Zs) * -1)  # push the denominator away from zero with the sign of Zs
return tf.reduce_sum((Z / Zs) * tf.expand_dims(R, 1),2)
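# A toy usage sketch (not from the original module; shapes are illustrative assumptions):
# relevance propagation through one dense layer with 3 inputs and 4 outputs, batch of 2.
#
# x_toy = tf.constant([[1.0, 2.0, 3.0], [0.5, 0.0, 1.0]])   # (2, 3) layer inputs
# w_toy = tf.constant([[0.1] * 4, [0.2] * 4, [0.3] * 4])     # (3, 4) layer weights
# r_out = tf.ones([2, 4])                                    # relevance arriving at the 4 outputs
# r_in = simple_lrp_linear(r_out, x_toy, w_toy)              # (2, 3) relevance redistributed to the inputs
# r_in_eps = epsilon_lrp(r_out, 0.01, x_toy, w_toy)          # epsilon-stabilized variant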
# def _alphabeta_lrp(self,R,alpha):
# '''
# LRP according to Eq(60) in DOI: 10.1371/journal.pone.0130140
# '''
# self.R= R
# beta = 1 - alpha
# Z = tf.expand_dims(self.weights, 0) * tf.expand_dims(self.input_tensor, -1)
#
# if not alpha == 0:
# Zp = tf.where(tf.greater(Z,0),Z, tf.zeros_like(Z))
# term2 = tf.expand_dims(tf.expand_dims(tf.where(tf.greater(self.biases,0),self.biases, tf.zeros_like(self.biases)), 0 ), 0)
# term1 = tf.expand_dims( tf.reduce_sum(Zp, 1), 1)
# Zsp = term1 + term2
# Ralpha = alpha * tf.reduce_sum((Zp / Zsp) * tf.expand_dims(self.R, 1),2)
# else:
# Ralpha = 0
#
# if not beta == 0:
# Zn = tf.where(tf.less(Z,0),Z, tf.zeros_like(Z))
# term2 = tf.expand_dims(tf.expand_dims(tf.where(tf.less(self.biases,0),self.biases, tf.zeros_like(self.biases)), 0 ), 0)
# term1 = tf.expand_dims( tf.reduce_sum(Zn, 1), 1)
# Zsp = term1 + term2
# Rbeta = beta * tf.reduce_sum((Zn / Zsp) * tf.expand_dims(self.R, 1),2)
# else:
# Rbeta = 0
#
# return Ralpha + Rbeta
def simple_lrp_softmax(R, input_tensor, *args, **kwargs):
# component-wise operations within this layer
#Rx = self.input_tensor * self.activations
Rx = input_tensor * R
#Rx = Rx / tf.reduce_sum(self.input_tensor)
return Rx
class KernelBasedDeepArchitecture:
""" This class implements the Kernel-based Deep Architecture (KDA) described in:
Croce D., Filice S., Castellucci G. and Basili R. Deep Learning in Semantic Kernel Spaces. In Proceedings of ACL '17
"""
def __init__(self, nn_typeA, projector_s, projector_d, hl_conf_list, l2_lambda, num_classes,landmarks_size, rnd_seed=1927):
self.type = nn_typeA
self.ny_projector_s = projector_s # Static (c x H_Ny) projector
self.ny_projector_d = projector_d # Dynamic (c x H_Ny) projector
self.hl_list = hl_conf_list # List of HiddenLayerConf objects (class defined in hidden_layer_configuration)
self.num_classes = num_classes
self.l2_lambda = l2_lambda
self.rnd_seed = rnd_seed
self.landmarks_size = landmarks_size
self.input_x = tf.placeholder(tf.float32, shape=[None, self.landmarks_size], name="input_x") # the KDA "x" input is actually
# the index of the C vector to be used
self.input_y = tf.placeholder(tf.float32, shape=[None, self.num_classes], name="input_y") # the KDA "y" input
# (necessary in training) is a "one-hot vector" whose dimensionality corresponds to the number of classes
self.dkp = tf.placeholder(tf.float32, name="dropout_keep_prob") # A scalar Tensor of float32 type
in_size = self.ny_projector_s.projection_size # Number of columns of the H_Ny matrix, i.e. "l", the number of landmarks
self.l2_loss = 0
# LRP:
self.is_test = tf.placeholder(tf.float32, name="is_test")
self.LRP_W_s = []
self.LRP_W_d = []
self.LRP_b_s = []
self.LRP_b_d = []
self.LRP_input_tensors_s = []
self.LRP_input_tensors_d = []
# FIRST LAYER (NYSTROM LAYER): PROJECTION OF THE INPUT VECTOR "x":
with tf.name_scope("x_projection"): # "x_projection": see ny_projector.project(instance_index, dkp=1.0, fake_projection=False)
if self.type == "n":
self.projection_s = self.ny_projector_s.project(self.input_x, fake_projection=True) # Projection expressed as a tensor
self.hl_projection_s = self.projection_s
if self.type == "s" or self.type == "sd":
self.projection_s = self.ny_projector_s.project(self.input_x)
self.hl_projection_s = self.projection_s
# With fake_projection=True, ny_projector_s.project(instance_index) returns the embedding "c" unchanged
self.LRP_input_tensors_s.append(self.ny_projector_s.project(self.input_x, dkp=1.0, fake_projection=True))
# The first matrix in the list is the Nystrom projection matrix
self.LRP_W_s.append(self.ny_projector_s.projection_matrix)
# The Nystrom layer has no bias
self.LRP_b_s.append(None)
if self.type == "d" or self.type == "sd":
self.projection_d = self.ny_projector_d.project(self.input_x)
self.hl_projection_d = self.projection_d
self.LRP_input_tensors_d.append(self.ny_projector_d.project(self.input_x, dkp=1.0, fake_projection=True))
self.LRP_W_d.append(self.ny_projector_d.projection_matrix)
self.LRP_b_d.append(None)
# PROJECTION THROUGH THE SUBSEQUENT LAYERS OF THE KDA:
for i, hl_conf in enumerate(self.hl_list): # hl_conf are HiddenLayerConf objects
hl_size = hl_conf.get_size()
hl_activation = hl_conf.get_activation()
if self.type == "n" or self.type == "s" or self.type == "sd":
W_s = tf.Variable(tf.truncated_normal(shape=[in_size, hl_size], stddev=0.1), name="hls-W-" + str(i))
b_s = tf.Variable(tf.constant(0.1, shape=[hl_size]), name="hls-b-" + str(i))
# Store the parameters needed for LRP
# self.LRP_input_tensors_s.append(tf.identity(self.hl_projection_s))
self.LRP_W_s.append(W_s)
self.LRP_b_s.append(b_s)
# Update the projection vector, which becomes the input of the next layer
self.hl_projection_s = tf.nn.dropout(self.hl_projection_s, self.dkp, seed=self.rnd_seed)
########################################################################################
self.LRP_input_tensors_s.append(tf.identity(self.hl_projection_s))
########################################################################################
self.hl_projection_s = activate(
tf.nn.xw_plus_b(self.hl_projection_s, W_s, b_s, name="hl-projection-" + str(i)), hl_activation) # Update of the projected vector (neuron activation)
self.l2_loss += tf.nn.l2_loss(W_s) # Accumulate the L2 loss at every step
if self.type == "d" or self.type == "sd":
W_d = tf.Variable(tf.truncated_normal(shape=[in_size, hl_size], stddev=0.1), name="hld-W-" + str(i))
b_d = tf.Variable(tf.constant(0.1, shape=[hl_size]), name="hld-b-" + str(i))
# Store the parameters needed for LRP
self.LRP_input_tensors_d.append(tf.identity(self.hl_projection_d))
self.LRP_W_d.append(W_d)
self.LRP_b_d.append(b_d)
# Update the projection vector, which becomes the input of the next layer
self.hl_projection_d = tf.nn.dropout(self.hl_projection_d, self.dkp, seed=self.rnd_seed)
self.hl_projection_d = activate(
tf.nn.xw_plus_b(self.hl_projection_d, W_d, b_d, name="hl-projection-" + str(i)), hl_activation)
self.l2_loss += tf.nn.l2_loss(W_d)
in_size = hl_size # Update the input size for the next hidden layer
# COMPUTE THE VECTOR TO FEED INTO THE OUTPUT LAYER (add dropout):
with tf.name_scope("dropout"):
if self.type == "n" or self.type == "s" or self.type == "sd":
self.dropped_out_s = tf.nn.dropout(self.hl_projection_s, self.dkp, seed=self.rnd_seed)
if self.type == "d" or self.type == "sd":
self.dropped_out_d = tf.nn.dropout(self.hl_projection_d, self.dkp, seed=self.rnd_seed)
# COMPUTE THE OUTPUT VECTOR CONTAINING THE SCORE OF EACH CLASS:
with tf.name_scope("output"):
proj_s = self.ny_projector_s.projection_matrix
proj_d = self.ny_projector_d.projection_matrix
if self.type == "n" or self.type == "s" or self.type == "sd":
W_cl_s = tf.Variable(tf.truncated_normal([in_size, num_classes], stddev=0.1), name="Ws-classifier")
b_cl_s = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="bs-classifier")
########################################################################################################
# self.LRP_input_tensors_s.append(tf.identity(self.hl_projection_s))
self.LRP_input_tensors_s.append(tf.identity(self.dropped_out_s))
########################################################################################################
self.LRP_W_s.append(W_cl_s)
self.LRP_b_s.append(b_cl_s)
self.scores_s = tf.nn.xw_plus_b(self.dropped_out_s, W_cl_s, b_cl_s, name="scores_s")
self.l2_loss += tf.nn.l2_loss(W_cl_s)
if self.type == "d" or self.type == "sd":
W_cl_d = tf.Variable(tf.truncated_normal([in_size, num_classes], stddev=0.1), name="Wd-classifier")
b_cl_d = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="bd-classifier")
self.LRP_input_tensors_d.append(tf.identity(self.dropped_out_d))
self.LRP_W_d.append(W_cl_d)
self.LRP_b_d.append(b_cl_d)
if self.l2_lambda > 0:
self.l2_loss += tf.nn.l2_loss(tf.subtract(proj_s, proj_d))
self.l2_loss += tf.nn.l2_loss(W_cl_d)
self.scores_d = tf.nn.xw_plus_b(self.dropped_out_d, W_cl_d, b_cl_d, name="scores_d")
# DEPENDING ON THE NETWORK TYPE, SET THE FINAL SCORES:
if self.type == "n" or self.type == "s":
self.scores = self.scores_s
if self.type == "d":
self.scores = self.scores_d
if self.type == "sd":
self.scores = tf.multiply(self.scores_s, self.scores_d)
soft_max = tf.nn.softmax(self.scores)
self.pred_argmax = tf.argmax(soft_max, 1) # Softmax ==> normalized scores
self.gold_argmax = tf.argmax(self.input_y, 1) # tf.argmax returns a tensor of output_type type
self.predictions = tf.argmax(self.scores, 1, name="predictions")
self.softmax = soft_max
with tf.name_scope("LRP"):
# print(tf.contrib.training.train)
if self.is_test is not None:
if self.type == "n" or self.type == "s":
# R_s = self.scores
R_s = simple_lrp_softmax(self.scores, soft_max)
LRP_size = len(self.LRP_input_tensors_s)
for i in range(LRP_size - 1, -1, -1):
input_tensor = self.LRP_input_tensors_s[i]
weights = self.LRP_W_s[i]
biases = self.LRP_b_s[i]
R_s = simple_lrp_linear(R_s, input_tensor, weights, biases=biases)
# R = epsilon_lrp(R, 1, input_tensor, weights, biases=biases)
self.R = R_s
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = softmax_cross_entropy(self.scores, self.input_y)
self.loss = tf.reduce_mean(losses) + self.l2_lambda * self.l2_loss
with tf.name_scope("f1"):
self.corrects = {}
self.preds = {}
self.golds = {}
self.precisions = {}
self.recalls = {}
self.f1s = {}
for label in range(num_classes):
self.preds[label] = tf.reduce_sum(tf.cast(tf.equal(self.pred_argmax, label), tf.int32)) # tf.equal is element-wise, it returns a boolean tensor
self.golds[label] = tf.reduce_sum(tf.cast(tf.equal(self.gold_argmax, label), tf.int32)) # tf.cast casts a tensor to a new type (i.e. from booleans to integers)
self.corrects[label] = tf.reduce_sum(
tf.cast(tf.logical_and(tf.equal(self.pred_argmax, self.gold_argmax),
tf.equal(self.gold_argmax, label)), tf.int32))
self.precisions[label] = tf.truediv(self.corrects[label], self.preds[label])
self.recalls[label] = tf.truediv(self.corrects[label], self.golds[label])
self.f1s[label] = tf.truediv(2 * tf.multiply(self.precisions[label], self.recalls[label]),
tf.add(self.precisions[label], self.recalls[label]))
self.f1 = tf.reduce_mean(list(self.f1s.values()))
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
|
import random
import math
import names
from data import *
from people import *
from astronomy import *
class Ship(object):
def __init__(self,prefix=None,shipType=0,crewSize=1):
# Name
self.name = names.get_ship_name()
self.hasCommandElement = False
# Type
if shipType == 0:
shipType = pick('ship.type')
else:
shipType = ship['type'][shipType-1]
self.prefix = prefix
self.typeName = shipType['name']
self.typeCode = shipType['designation']
self.idNumber = random.randint(1,177)
# Characteristics
self.activeServiceDuration = random.randint(0,60)
self.commissionedDate = pickDate()
self.activeServiceDate = addYears(self.commissionedDate,self.activeServiceDuration)
self.homesystem = Starsytem()
self.homeport = random.choice(self.homesystem.planets)
# Divisions
self.divisions = []
for i in range(0, len(shipType['divisions'])):
divisionCode = shipType['divisions'][i]
self.divisions.append(Division(divisionCode))
for division in self.divisions:
mulitplier = random.random()
divisionSize = int(round(division.size*crewSize,0))
# One person minimum.
if divisionSize == 0:
divisionSize = 1
for i in range(0, divisionSize):
if random.random() < division.rank: person = Officer()
else: person = Enlisted()
if(division.hasCallsigns): person = Pilot()
if(division.marines): person = Marine()
division.crew.append(person)
division.crew = sorted(division.crew, key = lambda person: (person.rank.order))
division.crew.reverse()
if len(self.divisions) > 2:
self.hasCommandElement = True
self.co = HighRankOfficer()
self.xo = Commander()
class Division(object):
def __init__(self,code):
self.crew = []
self.code = code
self.name = ship['division'][code]['name']
self.size = ship['division'][code]['size']
self.rank = ship['division'][code]['rank']
try:
self.hasCallsigns = ship['division'][code]['callsigns']
except KeyError:
self.hasCallsigns = False
try:
self.marines = ship['division'][code]['marines']
except KeyError:
self.marines = False |
# Tryout 2: Second attempt to solve the handwritten digits
# We break the whole training set in an x:y ratio and treat y as the
# testing dataset for calculating the accuracy of the algorithm
import math
import csv
import operator
from __builtin__ import len
import plotpixel as plt
import random
'''---------------Helper Methods---------------'''
# load training data
# def loadData(fileName):
# with open(fileName, 'rb') as csvfile:
# lines = csv.reader(csvfile)
# dataset = list(lines)
#
# return dataset
def loadDataset(filename, split, trainingSet=[], testSet=[]):
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for x in range(len(dataset) - 1):
# First line in this dataset is the label not the data
if (x==0):
continue
if random.random() < split:
trainingSet.append(dataset[x])
else:
testSet.append(dataset[x])
def printDataset(dataset):
for x in range(len(dataset) - 1):
echo( dataset[x] )
def getNeighbors(trainingSet, testInstance, k):
distances = []
length = len(testInstance) - 1
for x in range(len(trainingSet)):
if(x==0):
continue
dist = euclideanDistance(testInstance, trainingSet[x], length)
# distances.append((trainingSet[x], dist,trainingSet[x][0]))
distances.append((dist,trainingSet[x]))
distances.sort(key=operator.itemgetter(0))
neighbors = []
for x in range(k):
neighbors.append(distances[x])
return neighbors
def euclideanDistance(instance1, instance2, length):
distance = 0
for x in range(length):
# print "x is %d instance1[x], instance2[x] :::", x ,instance1[x], instance2[x];
distance += pow((int(instance1[x+1]) - int(instance2[x+1])), 2)  # skip the label at index 0 in both rows
return math.sqrt(distance)
def echo(message):
print message
return
'''---------------Helper Methods---------------'''
def main():
# prepare data
trainingData = []
testData = []
split = 0.67
loadDataset('data/train.csv', split, trainingData, testData)
# loadDataset('actualdata/train.csv', split, trainingData, testData)
matches = 0
for x in range(len(testData)):
instance = testData[x]
neighbors = getNeighbors(trainingData,instance,3)
guess = int(getGuessedDigit(neighbors))
actual = int(instance[0])
echo('(%d) Guessed Value %d ' %(x , guess))
echo('(%d) Actual Value %d ' %(x , actual))
if(guess == actual):
matches +=1
else:
echo('Displaying character which is not matching')
# plt.drawPixels(instance[1:], nRows=28, nCols=28)
echo ('Total matches %d out of %d' %(matches,len(testData)))
echo ('Accuracy is %f ' %((matches)/float(len(testData))*100)+'%')
# def getGuessedDigit(items):
# # Iterate over the neighbors
# # for i in range(len(items)):
# # echo(items[i][0])
# return items[0][1][0]
# def getGuessedDigit(items):
# # Iterate over the neighbors
# arrayElements = []
# frequencies = [0,0,0,0,0,0,0,0,0,0]
# # for i in range(len(items)):
# # arrayElements[i] = items[0][1][i]
#
# # arrayElements = items[0][1]
#
# echo(arrayElements)
#
# for i in range(len(items)):
# arrayElements.append(items[i][1][0])
#
#
# for i in range(len(arrayElements)):
# digit = int(arrayElements[i])
# echo(digit)
# frequencies[digit]= frequencies[digit]+1
#
# max = 0
# idx = 0
# for i in range(len(frequencies)):
#
# if(max<frequencies[i]):
# max = frequencies[i]
# idx = i
# return frequencies[idx]
# Returns the first closet neighbor
def getGuessedDigit(items):
# Iterate over the neighbors
arrayElements = []
for i in range(len(items)):
arrayElements.append(items[i][1][0])
return arrayElements[0]
main()
|