text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
from base64 import b64decode
import os
import imghdr
from datetime import datetime
from uuid import uuid4
from PIL import Image
from resizeimage import resizeimage
import io
from flask import request, jsonify
from flask.views import MethodView
from flask_jwt_extended import jwt_required
from cerberus_validate import CustomValidator
from model.products import ModelProducts
from model.products_image import ModelImagesProduct
from model.provider import ModelProvider
from model.products_category import ModelCategoryProduct
from model.products_brand import ModelBrandProduct
from model.products_unit import ModelProductUnit
# Cerberus validation schema for the product create/update payload.
# Each entry's "description" field documents the expected value for API users.
schema = {
    "internal_code": {"type": "string", "required": True, "empty": False, "description": "Código interno do produto", "maxlength": 12},
    "name": {"type": "string", "required": True, "empty": False, "description": "Nome do produto"},
    "category": {"type": "integer", "required": True, "min": 1, "description": "int ID da categoria"},
    "brand": {"type": "integer", "required": True, "description": "int ID da marca ou 0 para nenhuma"},
    "unit": {"type": "integer", "required": True, "min": 1, "description": "int ID Unidade de medida"},
    "minimum_stock": {"type": "float", "required": True, "min": 1, "description": "Float Quantidade mínima em estoque."},
    "maximum_stock": {"type": "float", "required": True, "min": 1, "description": "Float Quantidade máxima em estoque"},
    "subtract": {"type": "boolean", "required": True, "description": "Reduzir estoque ao efetuar venda"},
    "short_description": {"type": "string", "required": True, "empty": False, "description": "Descrição curta do produto, Máximo 200 caracteres ", "maxlength": 200},
    "long_description": {"type": "string", "required": True, "empty": True, "description": "Descrição longa do produto. Aceita HTML"},
    "cover": {"type": "string", "required": True, "empty": True, "description": "Imagem de descate do produto."},
    "height": {"type": "float", "required": True, "description": "Altura da embalagem. "},
    "width": {"type": "float", "required": True, "description": "Largura da embalagem"},
    "length": {"type": "float", "required": True, "description": "Comprimento da embalagem"},
    "weight": {"type": "float", "required": True, "description": "Peso da embalagem"},
    "minimum_sale": {"type": "float", "required": True, "min": 1, "description": "Float Quantidade mínima de venda."},
    "sale_price": {"type": "float", "required": True, "description": "Float valor de venda."},
    "maximum_discount": {"type": "float", "required": True, "description": "Float porcentagem desconto máximo"},
    "available": {"type": "boolean", "required": True, "description": "Se produto esta disponível"},
    "images": {"type": "list", "schema": {"type": "string"}, "required": True, "empty": True, "description": "List com as imagens codificadas em base64"},
    "provider": {"type": "list", "schema": {"type": "integer", "required": True}, "required": True, "empty": True, "description": "List com ID de fornecedores deste produto"}
}
# Convert a base64-encoded image into a resized file on disk.
def upload_image(image, cover=False):
    """Decode *image* (base64 string), resize it to 600px height and save it
    under static/images/ with a unique name.

    :param image: base64-encoded image data.
    :param cover: kept for backward compatibility with existing callers;
        the same resize is applied regardless of its value.
    :returns: the generated filename (uuid + detected extension).
    :raises ValueError: when the data is not a recognised image format.
    """
    raw = b64decode(image.encode())
    extension = imghdr.what(None, h=raw)
    if extension is None:
        # imghdr returns None for unrecognised data; fail fast instead of
        # producing a "<uuid>.None" file (original bug: str + None -> TypeError).
        raise ValueError("unsupported or corrupt image data")
    filename = "{}.{}".format(uuid4(), extension)
    buffer = io.BytesIO(raw)
    with Image.open(buffer) as image_pil:
        # NOTE: local result renamed from `cover`, which shadowed the parameter.
        resized = resizeimage.resize_height(image_pil, 600, validate=False)
        resized.save("static/images/{}".format(filename), image_pil.format)
    return filename
class ProductApi(MethodView):
    """JWT-protected CRUD endpoints for products."""

    @jwt_required
    def get(self, product_id):
        """Return every product, or a single one when *product_id* is given."""
        if product_id is None:
            data = ModelProducts.list_product()
            return jsonify({"data": data}), 200
        product = ModelProducts.find_product(product_id)
        if not product:
            return jsonify({"message": "Product not found"}), 404
        return jsonify({"data": product.get_product()}), 200

    @jwt_required
    def post(self):
        """Create a product from the validated JSON payload."""
        data = request.json if request.json else {}
        v = CustomValidator(schema)
        if not v.validate(data):
            return jsonify({"message": v.errors}), 400
        data = v.document
        # internal_code must be unique across products
        product_code = ModelProducts.find_internal_code(
            data.get("internal_code"))
        if product_code:
            return jsonify({"message": "Product Code in use to other product"}), 400
        # Check if category exists
        if not ModelCategoryProduct.find_category(data.get("category")):
            return jsonify({"message": "Category id {} not found".format(data.get("category"))}), 400
        # Resolve all provider ids up front so the request fails atomically
        lst_provider = []
        for id_provider in data.get("provider"):
            provider = ModelProvider.find_provider(id_provider)
            if not provider:
                return jsonify({"message": "provider id {} not found".format(id_provider)}), 400
            lst_provider.append(provider)
        try:
            b64_cover = data.get("cover")
            if b64_cover:
                data["cover"] = upload_image(b64_cover)
            product = ModelProducts(**data)
            # Attach gallery images
            for image in data.get("images"):
                product.images.append(ModelImagesProduct(
                    upload_image(image), product))
            # Attach providers (plain loop: comprehension was used for side effects)
            for provider in lst_provider:
                product.providers.append(provider)
            product.save_product()
            return jsonify({"message": "product created", "data": product.get_product()}), 201
        except Exception as err:
            print(err)
            return jsonify({"message": "Internal error"}), 500

    @jwt_required
    def put(self, product_id):
        """Update an existing product identified by *product_id*."""
        data = request.json if request.json else {}
        v = CustomValidator(schema)
        if not v.validate(data):
            return jsonify({"message": v.errors}), 400
        data = v.document
        product = ModelProducts.find_product(product_id)
        if not product:
            # Fix: previously an unknown id fell through to update_product(None)
            # and surfaced as a generic 500 instead of a 404.
            return jsonify({"message": "Product not found"}), 404
        # Check if category exists
        if not ModelCategoryProduct.find_category(data.get("category")):
            return jsonify({"message": "Category id {} not found".format(data.get("category"))}), 400
        # Check if all providers exist
        lst_provider = []
        for id_provider in data.get("provider"):
            provider = ModelProvider.find_provider(id_provider)
            if not provider:
                return jsonify({"message": "provider id {} not found".format(id_provider)}), 400
            lst_provider.append(provider)
        # internal_code may only match the product being edited
        product_code = ModelProducts.find_internal_code(
            data.get("internal_code"))
        if product_code:
            if len(product_code) > 1:
                return jsonify({"message": "Product Code in use to other product"}), 400
            if product_code[0] != int(product_id):
                return jsonify({"message": "Product Code in use to other product"}), 400
        try:
            if data.get("cover"):
                data["cover"] = upload_image(data.get("cover"))
            product.update_product(**data)
            # Attach gallery images
            for image in data.get("images"):
                product.images.append(ModelImagesProduct(
                    upload_image(image), product))
            # Attach providers
            for provider in lst_provider:
                product.providers.append(provider)
            # Save product
            product.save_product()
            return jsonify({"message": "Product Updated", "data": product.get_product()}), 200
        except Exception as err:
            print(err)
            # Fix: was a plain dict return, inconsistent with the rest of the module.
            return jsonify({"message": "internal error"}), 500

    @jwt_required
    def delete(self, image_id):
        """Delete a product image by its id."""
        image = ModelImagesProduct.find_image(image_id)
        if image:
            try:
                image.delete_image()
                return jsonify({"message": "Image deleted!"}), 200
            except Exception as err:
                print(err)
                # Fix: bare except and missing status code (implicit 200).
                return jsonify({"message": "Internal Error"}), 500
        return jsonify({"message": "Image not found"}), 404
class ProductSelect(MethodView):
    """Aggregated lookup lists needed by the product registration form."""

    @jwt_required
    def get(self):
        """Return providers, categories, brands and measurement units in one call."""
        providers = [data.list_provider_product()
                     for data in ModelProvider.query.all()]
        categories = [category.list_category()
                      for category in ModelCategoryProduct.query.all()]
        units = [unit.json_units() for unit in ModelProductUnit.query.all()]
        brands = [brand.list_brand()
                  for brand in ModelBrandProduct.query.all()]
        # jsonify for consistency with every other endpoint in this module
        # (previously returned a plain dict tuple).
        return jsonify({"data": {"providers": providers, "categories": categories, "brands": brands, "units": units}}), 200
# Cerberus schema for the initial-stock endpoint: a list of
# {id, value, purchase_price} dicts, one per product.
schema_stock = {
    "products": {"type": "list", "schema": {"type": "dict", "schema": {
        "id": {"type": "integer", "required": True},
        "value": {"type": "float", "required": True},
        "purchase_price": {"type": "float", "required": True}
    }}}
}
class StockApi(MethodView):
    """Endpoints for registering a product's initial stock quantity."""

    def get(self):
        """List products that still have no initial stock registered."""
        products = ModelProducts.list_initial_stock()
        return jsonify({"data": products}), 200

    def post(self):
        """Set the initial stock quantity for each product in the payload.

        NOTE(review): ``purchase_price`` is accepted by the schema but never
        used here — confirm whether it should be persisted.
        """
        data = request.json if request.json else {}
        v = CustomValidator(schema_stock)
        if not v.validate(data):
            return jsonify({"message": v.errors}), 400
        data = v.document
        updated = []
        for product in data.get("products"):
            # skip entries with no (or zero) quantity
            if product.get("value"):
                data_product = ModelProducts.find_product_without_stock(
                    product.get("id"))
                if not data_product:
                    return jsonify({"message": "product {} not found or stock already registered".format(product.get("id"))}), 400
                data_product.stock.available_stock = product.get("value")
                data_product.stock.initial_stock = True
                updated.append(data_product)
        try:
            # Plain loop instead of a side-effect comprehension that shadowed `data`.
            for item in updated:
                item.save_product()
            return jsonify({"message": "Updated quantities", "data": [{"name": item.name, "qtde": item.stock.available_stock} for item in updated]}), 200
        except Exception as err:
            print(err)
            # Fix: bare except with missing 500 status; dead `pass` and an
            # unreachable trailing return were removed.
            return jsonify({"message": "Internal Error"}), 500
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
import openerp.addons.decimal_precision as dp
class product_product(osv.osv):
    """Extend ``product.product`` with MWI catalogue fields and a
    parent/child quantity roll-up: child product stock is folded into the
    parent via ``parent_coefficient`` in ``_product_available``.
    (Python 2 / OpenERP 7-8 style API.)"""
    _inherit = "product.product"

    def view_header_get(self, cr, user, view_id, view_type, context=None):
        # Show the stock location name in the view header when products are
        # browsed from a stock.location record.
        if context is None:
            context = {}
        res = super(product_product, self).view_header_get(cr, user, view_id, view_type, context)
        if res: return res
        if (context.get('active_id', False)) and (context.get('active_model') == 'stock.location'):
            return _('Products: ') + self.pool.get('stock.location').browse(cr, user, context['active_id'], context).name
        return res

    def _get_domain_locations(self, cr, uid, ids, context=None):
        '''
        Parses the context and returns a list of location_ids based on it.
        It will return all stock locations when no parameters are given
        Possible parameters are shop, warehouse, location, force_company, compute_child
        '''
        context = context or {}
        location_obj = self.pool.get('stock.location')
        warehouse_obj = self.pool.get('stock.warehouse')
        location_ids = []
        if context.get('location', False):
            if type(context['location']) == type(1):
                # single location id
                location_ids = [context['location']]
            elif type(context['location']) in (type(''), type(u'')):
                # location given by (partial) name
                domain = [('complete_name', 'ilike', context['location'])]
                if context.get('force_company', False):
                    domain += [('company_id', '=', context['force_company'])]
                location_ids = location_obj.search(cr, uid, domain, context=context)
            else:
                # assumed to already be a list of ids
                location_ids = context['location']
        else:
            # no explicit location: use the given warehouse, or all warehouses
            if context.get('warehouse', False):
                wids = [context['warehouse']]
            else:
                wids = warehouse_obj.search(cr, uid, [], context=context)
            for w in warehouse_obj.browse(cr, uid, wids, context=context):
                location_ids.append(w.view_location_id.id)
        # 'child_of' also covers sub-locations unless compute_child is False
        operator = context.get('compute_child', True) and 'child_of' or 'in'
        domain = context.get('force_company', False) and ['&', ('company_id', '=', context['force_company'])] or []
        # Returns (quant domain, incoming-move domain, outgoing-move domain).
        return (
            domain + [('location_id', operator, location_ids)],
            domain + ['&', ('location_dest_id', operator, location_ids), '!', ('location_id', operator, location_ids)],
            domain + ['&', ('location_id', operator, location_ids), '!', ('location_dest_id', operator, location_ids)]
        )

    def _get_domain_dates(self, cr, uid, ids, context):
        # Optional date window for stock moves, taken from the context.
        from_date = context.get('from_date', False)
        to_date = context.get('to_date', False)
        domain = []
        if from_date:
            domain.append(('date', '>=', from_date))
        if to_date:
            domain.append(('date', '<=', to_date))
        return domain

    def _product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):
        # Compute on-hand / incoming / outgoing / forecast quantities.
        # For products with child_ids, each child's quantities are added to
        # the parent scaled by child.parent_coefficient.
        context = context or {}
        field_names = field_names or []
        domain_products = [('product_id', 'in', ids)]
        domain_quant, domain_move_in, domain_move_out = self._get_domain_locations(cr, uid, ids, context=context)
        domain_move_in += self._get_domain_dates(cr, uid, ids, context=context) + [('state', 'not in', ('done', 'cancel'))] + domain_products
        domain_move_out += self._get_domain_dates(cr, uid, ids, context=context) + [('state', 'not in', ('done', 'cancel'))] + domain_products
        domain_quant += domain_products
        if context.get('lot_id') or context.get('owner_id') or context.get('package_id'):
            # lot/owner/package filters apply to quants only; moves are skipped
            if context.get('lot_id'):
                domain_quant.append(('lot_id', '=', context['lot_id']))
            if context.get('owner_id'):
                domain_quant.append(('owner_id', '=', context['owner_id']))
            if context.get('package_id'):
                domain_quant.append(('package_id', '=', context['package_id']))
            moves_in = []
            moves_out = []
        else:
            moves_in = self.pool.get('stock.move').read_group(cr, uid, domain_move_in, ['product_id', 'product_qty'], ['product_id'], context=context)
            moves_out = self.pool.get('stock.move').read_group(cr, uid, domain_move_out, ['product_id', 'product_qty'], ['product_id'], context=context)
        quants = self.pool.get('stock.quant').read_group(cr, uid, domain_quant, ['product_id', 'qty'], ['product_id'], context=context)
        # index the grouped results by product id
        quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
        moves_in = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_in))
        moves_out = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_out))
        res = {}
        for id in ids:
            # NOTE(review): debug prints left in place (Python 2 statements).
            print "====quants============", quants
            avail_quant = 0.0
            avail_move_in = 0.0
            avail_move_out = 0.0
            if self.browse(cr, uid, id).child_ids:
                # start from the parent's own quantities...
                avail_quant = quants.get(id, 0.0)
                avail_move_in = moves_in.get(id, 0.0)
                avail_move_out = moves_out.get(id, 0.0)
                # ...then add each child's quantities scaled by its coefficient.
                for child in self.browse(cr, uid, id).child_ids:
                    print 'childdddddddddd', child.parent_coefficient, quants.get(child.id, 0.0)
                    print "====quants=======child*************=====", quants
                    domain_quant, domain_move_in, domain_move_out = self._get_domain_locations(cr, uid, [child.id], context=context)
                    quants = self.pool.get('stock.quant').read_group(cr, uid, domain_quant, ['product_id', 'qty'], ['product_id'], context=context)
                    quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
                    avail_quant = avail_quant + quants.get(child.id, 0.0) * child.parent_coefficient
                    print "====moves_in=======child*************=====", moves_in
                    moves_in = self.pool.get('stock.move').read_group(cr, uid, domain_move_in, ['product_id', 'product_qty'], ['product_id'], context=context)
                    moves_in = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_in))
                    avail_move_in = avail_move_in + moves_in.get(child.id, 0.0) * child.parent_coefficient
                    print "====moves_out=======child*************=====", moves_out
                    moves_out = self.pool.get('stock.move').read_group(cr, uid, domain_move_out, ['product_id', 'product_qty'], ['product_id'], context=context)
                    moves_out = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_out))
                    avail_move_out = avail_move_out + moves_out.get(child.id, 0.0) * child.parent_coefficient
                res[id] = {
                    'qty_available': avail_quant,
                    'incoming_qty': avail_move_in,
                    'outgoing_qty': avail_move_out,
                    'virtual_available': avail_quant + avail_move_in - avail_move_out,
                }
            else:
                # no children: plain per-product quantities
                res[id] = {
                    'qty_available': quants.get(id, 0.0),
                    'incoming_qty': moves_in.get(id, 0.0),
                    'outgoing_qty': moves_out.get(id, 0.0),
                    'virtual_available': quants.get(id, 0.0) + moves_in.get(id, 0.0) - moves_out.get(id, 0.0),
                }
        return res

    def _search_product_quantity(self, cr, uid, obj, name, domain, context):
        # fnct_search for the quantity function fields: evaluate the computed
        # quantity of every product against the requested comparison.
        res = []
        for field, operator, value in domain:
            # to prevent sql injections
            assert field in ('qty_available', 'virtual_available', 'incoming_qty', 'outgoing_qty'), 'Invalid domain left operand'
            assert operator in ('<', '>', '=', '<=', '>='), 'Invalid domain operator'
            assert isinstance(value, (float, int)), 'Invalid domain right operand'
            if operator == '=':
                operator = '=='
            product_ids = self.search(cr, uid, [], context=context)
            ids = []
            if product_ids:
                # TODO: use a query instead of this browse record which is probably making the too much requests, but don't forget
                # the context that can be set with a location, an owner...
                for element in self.browse(cr, uid, product_ids, context=context):
                    if eval(str(element[field]) + operator + str(value)):
                        ids.append(element.id)
            res.append(('id', 'in', ids))
        return res

    def _product_available_text(self, cr, uid, ids, field_names=None, arg=False, context=None):
        # Human-readable on-hand quantity, e.g. "12.0 On Hand".
        res = {}
        for product in self.browse(cr, uid, ids, context=context):
            res[product.id] = str(product.qty_available) + _(" On Hand")
        return res

    _columns = {
        # --- MWI catalogue attributes imported from the supplier feed ---
        'active_flag': fields.boolean('Active Flag'),
        'additional_info': fields.char('Additional Info'),
        'mwi_db_id': fields.char('MWI DB ID'),
        'available_to_purchase_flag': fields.boolean('Available to purchase flag'),
        'backorder_flag': fields.boolean('BackOrder Flag'),
        'case_qty': fields.integer('Case Quantity'),
        'closeout_flag': fields.boolean('Close Out Flag'),
        'compendium_code': fields.char('Compendium Code'),
        'container_qty': fields.integer('Container Qty'),
        'convert_from_mwi': fields.char('Convert From MWI'),
        'cooler_item_flag': fields.boolean('Cooler Item Flag'),
        'deacontrolled_substance_code': fields.boolean('DEAControlled Substance Code'),
        'doses': fields.float('Doses', digits=(16, 4)),
        'dropship_flag': fields.boolean('Drop Ship Flag'),
        'dropship_text': fields.char('Drop Ship Text'),
        'has_image': fields.boolean('Has Image'),
        'has_purchased': fields.boolean('Has Purchased'),
        'has_relationship': fields.boolean('Has Relationship'),
        'hazardous': fields.boolean('Hazardous'),
        'hazardous_text': fields.char('HazardousText'),
        'human_label_flag': fields.boolean('Human Label Flag'),
        'labelfee_flag': fields.boolean('Label Fee Flag'),
        'manufacturer': fields.char('Manufacturer'),
        'manufacturer_id': fields.char('Manufacturer ID'),
        'manu_minorder_fee': fields.integer('Manu Min Order Fee'),
        'manu_minorder_weight': fields.integer('Manu Min Order Weight'),
        # NOTE(review): same label as manufacturer_id — confirm intended.
        'mfgproductcode': fields.char('Manufacturer ID'),
        'qtymultiplier': fields.integer(''),
        'rx_flag': fields.boolean('RX Flag'),
        'searchableflag': fields.boolean('Searchable Flag'),
        'searchterms': fields.char('SearchTerms'),
        'showpricingflag': fields.boolean('Show Pricing Flag'),
        'specialorderflag': fields.boolean('Special Order Flag'),
        # NOTE(review): declared boolean despite the "Text" name — confirm.
        'specialordertext': fields.boolean('Special Order Text'),
        'unit': fields.char('Unit'),
        'web_description': fields.char('Web Description'),
        'minqty': fields.integer('Min Qty'),
        'msdc_code': fields.char('MSDS Code'),
        'newproductflag': fields.boolean('New Product Flag'),
        'newproducttext': fields.char('New Product Text'),
        'nonreturnable': fields.boolean('Non Returnable'),
        'nonreturnable_text': fields.char('Non Returnable Text'),
        'product_code': fields.char('Product Code'),
        'productdimwgt': fields.integer('Product DimWgt'),
        'product_price': fields.char('Product Price'),
        'description': fields.char('Description'),
        'discontinued_flag': fields.boolean('Discontinued Flag'),
        'act_ingrediants': fields.char('ActIngrediants'),
        # --- parent/child links used by the _product_available roll-up ---
        'parent_coefficient': fields.float('Parent Coefficient', digits=(16, 4)),
        'parent_product_id': fields.many2one('product.product', 'Parent'),
        'child_ids': fields.one2many('product.product', 'parent_product_id', 'Childs'),
        # --- overridden quantity function fields ---
        'qty_available_text': fields.function(_product_available_text, type='char'),
        'qty_available': fields.function(_product_available, multi='qty_available',
            type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Quantity On Hand',
            fnct_search=_search_product_quantity,
            help="Current quantity of products.\n"
                 "In a context with a single Stock Location, this includes "
                 "goods stored at this Location, or any of its children.\n"
                 "In a context with a single Warehouse, this includes "
                 "goods stored in the Stock Location of this Warehouse, or any "
                 "of its children.\n"
                 "stored in the Stock Location of the Warehouse of this Shop, "
                 "or any of its children.\n"
                 "Otherwise, this includes goods stored in any Stock Location "
                 "with 'internal' type."),
        'virtual_available': fields.function(_product_available, multi='qty_available',
            type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Forecast Quantity',
            fnct_search=_search_product_quantity,
            help="Forecast quantity (computed as Quantity On Hand "
                 "- Outgoing + Incoming)\n"
                 "In a context with a single Stock Location, this includes "
                 "goods stored in this location, or any of its children.\n"
                 "In a context with a single Warehouse, this includes "
                 "goods stored in the Stock Location of this Warehouse, or any "
                 "of its children.\n"
                 "Otherwise, this includes goods stored in any Stock Location "
                 "with 'internal' type."),
        'incoming_qty': fields.function(_product_available, multi='qty_available',
            type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Incoming',
            fnct_search=_search_product_quantity,
            help="Quantity of products that are planned to arrive.\n"
                 "In a context with a single Stock Location, this includes "
                 "goods arriving to this Location, or any of its children.\n"
                 "In a context with a single Warehouse, this includes "
                 "goods arriving to the Stock Location of this Warehouse, or "
                 "any of its children.\n"
                 "Otherwise, this includes goods arriving to any Stock "
                 "Location with 'internal' type."),
        'outgoing_qty': fields.function(_product_available, multi='qty_available',
            type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Outgoing',
            fnct_search=_search_product_quantity,
            help="Quantity of products that are planned to leave.\n"
                 "In a context with a single Stock Location, this includes "
                 "goods leaving this Location, or any of its children.\n"
                 "In a context with a single Warehouse, this includes "
                 "goods leaving the Stock Location of this Warehouse, or "
                 "any of its children.\n"
                 "Otherwise, this includes goods leaving any Stock "
                 "Location with 'internal' type."),
    }
class product_supplierinfo(osv.osv):
    """Extend supplier info with the supplier's own unit of measure code."""
    _inherit = "product.supplierinfo"
    _columns = {
        'supplier_uom': fields.char('Supplier UOM'),
    }
|
from flask import Flask, render_template, request, jsonify
from lib import *
app = Flask(__name__)
# NOTE(review): debug mode enables the interactive debugger and auto-reload;
# it must not be left on in production.
app.debug = True
@app.route("/")
def hello():
    # Serve the single-page front end.
    return render_template('index.html')
@app.route('/permanize', methods=['POST', 'OPTIONS'])
def my_service():
    """Run ``replaceText`` over the posted 'text'; forward the optional
    'apikey' field when it is a non-empty string."""
    # Fix: the parsed payload was discarded and request.json re-read; a debug
    # `print` of the whole request body has also been removed.
    payload = request.get_json(force=True)
    text = payload['text']
    if 'apikey' in payload and len(payload['apikey']) > 0:
        return jsonify(text=replaceText(text, payload['apikey']))
    else:
        return jsonify(text=replaceText(text, False))
# Run the development server, listening on every interface.
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8012)
from typing import List
# Definition for singly-linked list.
class ListNode:
    def __init__(self, val=0, next=None):
        """A single node: *val* holds the digit, *next* points to the
        following node (None terminates the list)."""
        self.val = val
        self.next = next
class Solution:
    """Add two non-negative integers stored as linked lists of digits,
    least-significant digit first, returning the sum in the same form."""

    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Pad the shorter list with trailing zeros so both have equal
        length, then add them recursively.  (The previous iterative draft
        left as a large commented-out block has been removed.)"""
        length1, lastL1 = self.getLinkedListLen(l1)
        length2, lastL2 = self.getLinkedListLen(l2)
        diff = length1 - length2
        if diff > 0:
            self.padLinkedList(lastL2, diff)
        else:
            # covers diff == 0 too: abs(0) pads nothing
            self.padLinkedList(lastL1, diff)
        return self.addTwoNumbersH(l1, l2, 0)

    def addTwoNumbersH(self, l1: ListNode, l2: ListNode, carry: int) -> ListNode:
        """Recursively add two equal-length lists; *carry* comes from the
        previous (less significant) digit pair."""
        if not l1:
            # lists exhausted: emit one extra node only if a carry remains
            return ListNode(carry, None) if carry else None
        total = l1.val + l2.val + carry
        rest = self.addTwoNumbersH(l1.next, l2.next, total // 10)
        return ListNode(total % 10, rest)

    def getLinkedListLen(self, l: ListNode):
        """Return (length, last node) of a non-empty list."""
        length = 1
        while l.next is not None:
            length += 1
            l = l.next
        return length, l

    def padLinkedList(self, last: ListNode, padding: int):
        """Append abs(padding) zero-valued nodes after *last*."""
        for _ in range(abs(padding)):
            newLast = ListNode(0, None)
            last.next = newLast
            last = newLast

    def printLinkedList(self, l):
        """Print each node value on its own line, head first."""
        while l is not None:
            print(l.val)
            l = l.next
# Quick manual check: 9999999 + 9999 = 10009998, printed least-significant
# digit first (expected output: 8 9 9 9 0 0 0 1, one digit per line).
s = Solution()
l1 = ListNode(9, ListNode(9, ListNode(9, ListNode(9, ListNode(9, ListNode(9, ListNode(9, None)))))))
l2 = ListNode(9, ListNode(9, ListNode(9, ListNode(9, None))))
l3 = s.addTwoNumbers(l1, l2)
s.printLinkedList(l3)
|
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
## The MenuItemDefinition class defines the contents of a menu item for use
# with the MenuDefinition class. It does nothing towards actually implementing
# a user interface, but instead defines content for a user interface
# implementation to realise. This allows menus to be defined in a UI agnostic
# way and then used with different toolkits.
#
# The MenuItemDefinition has the following attributes :
#
# command : a callable object invoked when the user selects the menu item
#
# secondaryCommand : a callable object invoked when the user selects the menu item
# in some other way. this is toolkit dependent - for instance in maya this command
# would be used when the option box is selected.
#
# divider : True if the menu item is a divider, False otherwise.
#
# active : if False then the menu item is unselectable. may also be a callable
# object which returns a boolean value to allow dynamic activation
#
# description : a string with help for the menu item
#
# subMenu : a callable object which returns a MenuDefinition, to produce
# a dynamically generated submenu.
#
# checkBox : A callable item to return True or False for checkBox state, or None
# for no checkBox at all. When checkBox is not None, the callable specified by the
# command attribute will be called whenever the checkBox is toggled.
#
# \todo Validation of attribute values, so for instance divider and command
# can't both be set at the same time.
# \ingroup python
class MenuItemDefinition( object ) :
    """UI-toolkit-agnostic description of a single menu item, for use with
    MenuDefinition.  Attributes may be supplied via a dictionary and/or
    keyword arguments; keyword arguments are applied last and therefore win.
    Unknown attribute names are stored verbatim."""

    def __init__( self, dictionary = None, **kwArgs ) :

        # default attribute values (see the module header for their meaning)
        self.command = None
        self.secondaryCommand = None
        self.divider = False
        self.active = True
        self.description = ""
        self.subMenu = None
        self.checkBox = None
        self.blindData = {}

        # dictionary first, keyword arguments second so kwArgs override
        for source in ( dictionary or {}, kwArgs ) :
            for name, value in source.items() :
                setattr( self, name, value )

    def __repr__( self ) :

        # expose only the public attributes
        visible = {}
        for name, value in self.__dict__.items() :
            if not name.startswith( '_' ) :
                visible[name] = value
        return "MenuItemDefinition( " + repr( visible ) + " )"
|
import numpy as np
from sklearn.mixture import GaussianMixture
# Generate synthetic data from three univariate Gaussians
mu1 = 300
sig1 = 100
mu2 = 1100
sig2 = 100
mu3 = 2000
sig3 = 200
x = list(np.random.normal(mu1, sig1, 500)) + list(np.random.normal(mu2, sig2, 500)) + list(np.random.normal(mu3, sig3, 500))
# Fit a 3-component GMM to the pooled samples (column vector input)
model = GaussianMixture(n_components=3, max_iter=2000)
model.fit(np.array(x).reshape(-1, 1))
# Sort components by mean.  Fix: plain sorted(zip(...)) compares numpy
# arrays, which relies on deprecated truth-value coercion and raises on
# modern numpy; an explicit scalar key avoids that.
for mean, cov in sorted(zip(model.means_, np.sqrt(model.covariances_)), key=lambda pair: pair[0][0]):
    print("Mean: {:4.1f} STD: {:4.1f}".format(mean[0], cov[0][0]))
|
# Declaring different data structures
tails = ["T1", "T2", "T3", "T4", "T5", "T6"]                # aircraft tail numbers
gates = ["AUS1", "DAL1", "DAL2", "HOU1", "HOU2", "HOU3"]    # available gates
airports = ["AUS", "DAL", "HOU"]
# Block time in minutes for each directed city pair
flight_times = {"AUSDAL": 50, "DALAUS": 50, "AUSHOU": 45, "HOUAUS": 45,
                "DALHOU": 65, "HOUDAL": 65}
# Minimum turnaround time on the ground at each airport, in minutes
ground_time = {"AUS": 25, "DAL": 30, "HOU": 35}
schedule_flight = []   # accumulates CSV rows (header + one row per leg)
# Defining header for csv file
header = ["tail_number", "origin", "destination", "departure_time", "arrival_time"]
schedule_flight.append(header)
# Converting arrival time from minutes since midnight to military time
def arr_time_m(minutes=None):
    """Convert minutes-since-midnight into a 4-digit military time string.

    :param minutes: minutes past midnight; defaults to the module-level
        ``arrival_time`` global, preserving the original no-argument call
        style used throughout this script.
    :returns: zero-padded "HHMM" string, e.g. 405 -> "0645".
    """
    if minutes is None:
        minutes = arrival_time
    hours, mins = divmod(minutes, 60)
    # Both branches of the original (zero-padding minutes < 10 by hand, then
    # joining and zfill-ing) collapse to one zero-padded format call.
    return "{:02d}{:02d}".format(hours, mins)
# Converting departure time from minutes since midnight to military time
def dept_time_m(minutes=None):
    """Convert minutes-since-midnight into a 4-digit military time string.

    :param minutes: minutes past midnight; defaults to the module-level
        ``dept_time`` global, preserving the original no-argument call style.
    :returns: zero-padded "HHMM" string, e.g. 470 -> "0750".
    """
    if minutes is None:
        minutes = dept_time
    hours, mins = divmod(minutes, 60)
    # Single zero-padded format replaces the duplicated manual-padding branches.
    return "{:02d}{:02d}".format(hours, mins)
# Schedule for Flight T1 between Austin and Houston
# First flight departs at 6:00am (360 minutes past midnight; "0600" military)
dept_time = 6 * 60
x = 600
initial = str(x).zfill(4)
arrival_time = dept_time + flight_times["AUSHOU"]
j = arr_time_m()
list1 = []
list1.append("T1")
list1.append("AUS")
list1.append("HOU")
list1.append(initial)
list1.append(j)
schedule_flight.append(list1)
print("T1, AUS, HOU, " + initial + ", " + j)
# Keep shuttling HOU<->AUS; the aircraft must be grounded before 10:00pm
# (1320 minutes), so either leg can end the loop.
while True:
    loop1 = []
    dept_time = arrival_time + ground_time["HOU"]
    if (dept_time > 1320):
        break
    m = dept_time_m()
    arrival_time = dept_time + flight_times["HOUAUS"]
    k = arr_time_m()
    loop1.append("T1")
    loop1.append("HOU")
    loop1.append("AUS")
    loop1.append(m)
    loop1.append(k)
    schedule_flight.append(loop1)  # Appending values to the empty list
    print("T1, HOU, AUS, " + m + ", " + k)
    # return leg AUS -> HOU
    loop2 = []
    dept_time = arrival_time + ground_time["AUS"]
    m = dept_time_m()
    arrival_time = dept_time + flight_times["AUSHOU"]
    if (arrival_time > 1320):
        break
    k = arr_time_m()
    loop2.append("T1")
    loop2.append("AUS")
    loop2.append("HOU")
    loop2.append(m)
    loop2.append(k)
    print("T1, AUS, HOU, " + m + ", " + k)
    schedule_flight.append(loop2)
# Schedule for Flight T2 between Dallas and Houston
# First flight departs at 6:00am
dept_time = 6 * 60
x = 600
initial = str(x).zfill(4)
arrival_time = dept_time + flight_times["DALHOU"]
j = arr_time_m()
list2 = []
list2.append("T2")
list2.append("DAL")
list2.append("HOU")
list2.append(initial)
list2.append(j)
schedule_flight.append(list2)
print("T2, DAL, HOU, " + initial + ", " + j)
# Keep shuttling HOU<->DAL; flight must be grounded before 10:00pm (1320)
while True:
    loop3 = []
    dept_time = arrival_time + ground_time["HOU"]
    if (dept_time > 1320):
        break
    m = dept_time_m()
    arrival_time = dept_time + flight_times["HOUDAL"]
    k = arr_time_m()
    loop3.append("T2")
    loop3.append("HOU")
    loop3.append("DAL")
    loop3.append(m)
    loop3.append(k)
    schedule_flight.append(loop3)
    print("T2, HOU, DAL, " + m + ", " + k)
    # return leg DAL -> HOU
    loop4 = []
    dept_time = arrival_time + ground_time["DAL"]
    m = dept_time_m()
    arrival_time = dept_time + flight_times["DALHOU"]
    if (arrival_time > 1320):
        break
    k = arr_time_m()
    loop4.append("T2")
    loop4.append("DAL")
    loop4.append("HOU")
    loop4.append(m)
    loop4.append(k)
    schedule_flight.append(loop4)
    print("T2, DAL, HOU, " + m + ", " + k)
#Schedule for Flight T3 between Dallas and Houston
#First flight departs at 6:00am
dept_time = 6*60                 # minutes after midnight (360 = 6:00am)
x = 600
initial = str(x).zfill(4)        # "0600" — departure time shown on the schedule
arrival_time = dept_time + flight_times["DALHOU"]
j = arr_time_m()                 # formatted arrival time (presumably reads the global arrival_time — TODO confirm)
list3 = []                       # one schedule row: [flight, origin, destination, depart, arrive]
list3.append("T3")
list3.append("DAL")
list3.append("HOU")
list3.append(initial)
list3.append(j)
schedule_flight.append(list3)
print("T3, DAL, HOU, " + initial + ", " + j)
#Flight should be grounded before 10:00pm (1320 minutes after midnight)
while True:
    # Return leg HOU -> DAL
    loop5 = []
    dept_time = arrival_time + ground_time["HOU"]
    if (dept_time > 1320):
        break
    m = dept_time_m()
    arrival_time = dept_time+flight_times["HOUDAL"]
    k = arr_time_m()
    loop5.append("T3")
    loop5.append("HOU")
    loop5.append("DAL")
    loop5.append(m)
    loop5.append(k)
    schedule_flight.append(loop5)
    print("T3, HOU, DAL, " + m + ", " + k )
    # Outbound leg DAL -> HOU
    loop6 = []
    dept_time = arrival_time + ground_time["DAL"]
    m = dept_time_m()
    arrival_time = dept_time+flight_times["DALHOU"]
    if (arrival_time > 1320):
        break
    # BUG FIX: arr_time_m() was called twice in a row here; the redundant
    # duplicate call has been removed.
    k = arr_time_m()
    loop6.append("T3")
    loop6.append("DAL")
    loop6.append("HOU")
    loop6.append(m)
    loop6.append(k)
    schedule_flight.append(loop6)
    print("T3, DAL, HOU, " + m + ", " + k )
#Schedule for Flight T4 between Houston and Austin
#First flight departs at 6:00am
dept_time = 6*60                 # minutes after midnight (360 = 6:00am)
x = 600
initial = str(x).zfill(4)        # "0600" — departure time shown on the schedule
arrival_time = dept_time + flight_times["HOUAUS"]
j = arr_time_m()                 # formatted arrival time (presumably reads the global arrival_time — TODO confirm)
list4 = []                       # one schedule row: [flight, origin, destination, depart, arrive]
list4.append("T4")
list4.append("HOU")
list4.append("AUS")
list4.append(initial)
list4.append(j)
schedule_flight.append(list4)
print("T4, HOU, AUS, " + initial + ", " + j)
#Flight should be grounded before 10:00pm (1320 minutes after midnight)
while True:
    # Return leg AUS -> HOU
    loop7 = []
    dept_time = arrival_time + ground_time["AUS"]
    if (dept_time > 1320):
        break
    m = dept_time_m()
    arrival_time = dept_time+flight_times["AUSHOU"]
    k = arr_time_m()
    loop7.append("T4")
    loop7.append("AUS")
    loop7.append("HOU")
    loop7.append(m)
    loop7.append(k)
    schedule_flight.append(loop7)
    print("T4, AUS, HOU, " + m + ", " + k )
    # Outbound leg HOU -> AUS
    loop8 = []
    dept_time = arrival_time + ground_time["HOU"]
    m = dept_time_m()
    arrival_time = dept_time+flight_times["HOUAUS"]
    if (arrival_time > 1320):
        break
    # BUG FIX: the arrival time must be formatted *before* the row is built;
    # previously k was refreshed only after the row had been appended, so the
    # previous leg's arrival time was recorded for this leg.
    k = arr_time_m()
    loop8.append("T4")
    loop8.append("HOU")
    loop8.append("AUS")
    loop8.append(m)
    loop8.append(k)
    schedule_flight.append(loop8)
    print("T4, HOU, AUS, " + m + ", " + k )
#Schedule for Flight T5 between Houston and Dallas
#First flight departs at 6:00am
dept_time = 6*60                 # minutes after midnight (360 = 6:00am)
x = 600
initial = str(x).zfill(4)        # "0600" — departure time shown on the schedule
arrival_time = dept_time + flight_times["HOUDAL"]
j = arr_time_m()                 # formatted arrival time (presumably reads the global arrival_time — TODO confirm)
list5 = []                       # one schedule row: [flight, origin, destination, depart, arrive]
list5.append("T5")
list5.append("HOU")
list5.append("DAL")
list5.append(initial)
list5.append(j)
schedule_flight.append(list5)
print("T5, HOU, DAL, " + initial + ", " + j)
#Flight should be grounded before 10:00pm (1320 minutes after midnight)
while True:
    # Return leg DAL -> HOU.
    # BUG FIX: after the opening HOU->DAL leg the aircraft is parked in Dallas
    # (the code already uses ground_time["DAL"]), but this leg was labelled
    # HOU->DAL again and used flight_times["HOUDAL"]; the origin/destination
    # and the flight-time key are corrected to DAL->HOU.
    loop9 = []
    dept_time = arrival_time + ground_time["DAL"]
    if (dept_time > 1320):
        break
    m = dept_time_m()
    arrival_time = dept_time+flight_times["DALHOU"]
    k = arr_time_m()
    loop9.append("T5")
    loop9.append("DAL")
    loop9.append("HOU")
    loop9.append(m)
    loop9.append(k)
    schedule_flight.append(loop9)
    print("T5, DAL, HOU, " + m + ", " + k )
    # Outbound leg HOU -> DAL (direction fixed as above). The stray
    # `arrival_time >= 1300` cutoff that only this aircraft had is replaced by
    # the 1320 (10:00pm) limit used for every other aircraft.
    loop10 = []
    dept_time = arrival_time + ground_time["HOU"]
    m = dept_time_m()
    arrival_time = dept_time+flight_times["HOUDAL"]
    if (arrival_time > 1320):
        break
    k = arr_time_m()
    loop10.append("T5")
    loop10.append("HOU")
    loop10.append("DAL")
    loop10.append(m)
    loop10.append(k)
    schedule_flight.append(loop10)
    print("T5, HOU, DAL, " + m + ", " + k )
#Schedule for Flight T6 between Houston and Dallas
#First flight departs at 6:00am
dept_time = 6*60                 # minutes after midnight (360 = 6:00am)
x = 600
initial = str(x).zfill(4)        # "0600" — departure time shown on the schedule
arrival_time = dept_time + flight_times["HOUDAL"]
j = arr_time_m()                 # formatted arrival time (presumably reads the global arrival_time — TODO confirm)
list6 = []                       # one schedule row: [flight, origin, destination, depart, arrive]
list6.append("T6")
list6.append("HOU")
list6.append("DAL")
list6.append(initial)
list6.append(j)
schedule_flight.append(list6)
print("T6, HOU, DAL, " + initial + ", " + j)
#Flight should be grounded before 10:00pm (1320 minutes after midnight)
while True:
    # Return leg DAL -> HOU.
    # BUG FIX: same direction bug as T5 — the aircraft is parked in Dallas
    # (ground_time["DAL"]) but the leg was labelled HOU->DAL with
    # flight_times["HOUDAL"]; corrected to DAL->HOU.
    loop11 = []
    dept_time = arrival_time + ground_time["DAL"]
    if (dept_time > 1320):
        break
    m = dept_time_m()
    arrival_time = dept_time+flight_times["DALHOU"]
    k = arr_time_m()
    loop11.append("T6")
    loop11.append("DAL")
    loop11.append("HOU")
    loop11.append(m)
    loop11.append(k)
    schedule_flight.append(loop11)
    print("T6, DAL, HOU, " + m + ", " + k )
    # Outbound leg HOU -> DAL (direction fixed; the unused `z` variable and the
    # inconsistent `>= 1300` cutoff are removed in favour of the common 1320).
    loop12 = []
    dept_time = arrival_time + ground_time["HOU"]
    m = dept_time_m()
    arrival_time = dept_time+flight_times["HOUDAL"]
    if (arrival_time > 1320):
        break
    k = arr_time_m()
    loop12.append("T6")
    loop12.append("HOU")
    loop12.append("DAL")
    loop12.append(m)
    loop12.append(k)
    schedule_flight.append(loop12)
    print("T6, HOU, DAL, " + m + ", " + k )
# Importing CSV module to write into CSV format
import csv
# Write the schedule (a list of rows) to CSV, one comma-separated row per leg.
# BUG FIX: the original used csv.writer(..., delimiter="\n") with a single
# writerow(schedule_flight), which emitted every cell on its own line instead
# of one comma-separated row per flight leg. newline="" is required by the
# csv module to avoid blank lines on Windows.
with open("flight_schedule.csv", "w", newline="") as f:
    wr = csv.writer(f)
    wr.writerows(schedule_flight)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: ivan
"""
"""
Dataset Creation
"""
#Loading Libraries
# Standard library
import concurrent
import concurrent.futures  # BUG FIX: `import concurrent` alone does not load the futures submodule
import time
import warnings
from functools import reduce

# Third-party
import pandas as pd
from statsmodels.tools.sm_exceptions import ConvergenceWarning

# Project
from pyhorse.Feature_Creation import Feature_Storage
from pyhorse.Database_Management import Extraction_Database, Load_Dataset_toDatabase
# MatchDay_DF = MatchDay_Dataset(Extraction_Database("""
# Select Distinct RARID from RaceDb where RADAT = {Date}
# """.format(Date='20130123')))
# One_Race_Feature(MatchDay_DF)
def One_Race_Feature(Dataframe):
    """
    Feature Creation for one Race

    Parameter
    ---------
    Dataframe : MatchDay_Dataset rows for a single race (one row per horse)

    Return
    ------
    Feature DataFrame, one row per horse, all feature columns cast to float.
    Side effect: the resulting dataframe is inserted into FeatureDb.
    """
    """
    Get Feature Names
    """
    #Get Feature_List from FeatureDb
    Feature_List = Feature_Storage.Feature_List#list(Extraction_Database("""PRAGMA table_info('FeatureDb')""")['name'])
    # Feature_List.remove('RARID') Feature_List.remove('HNAME')
    # Skeleton: one row per (race, horse); feature columns are merged on below.
    Features_Dataframe = Dataframe.loc[:,['RARID', 'HNAME']]
    """
    Create Features in Parallel
    """
    #Prepare Matchday Dataset
    # SQL-ready tuple literal of horse names, e.g. "('A', 'B')".
    HNAME_List = '('+str(Dataframe['HNAME'].tolist())[1:-1]+')'
    Raceday = Dataframe.loc[:,'RADAT'].values[0]
    # results = []
    # for Feature in Feature_List:
    #     results.append(Create_Features([Feature, Dataframe, HNAME_List, Raceday]))
    # Some feature models emit RuntimeWarning/ConvergenceWarning noise; silence
    # them for the duration of the parallel run and restore afterwards.
    warnings.filterwarnings("ignore", category=RuntimeWarning)
    warnings.filterwarnings("ignore", category=ConvergenceWarning)
    results = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for Feature in Feature_List:
            #Run Functions
            """
            All Feature Functions accepts a Matchday Dataframe
            then return a dataframe of a race,
            containing the following columns in the order of :
            [HNAME, Feature Name]
            """
            results.append(executor.submit(Create_Features, [Feature, Dataframe, HNAME_List, Raceday]))
    results = [i.result() for i in results]
    warnings.filterwarnings("default", category=RuntimeWarning)
    warnings.filterwarnings("default", category=ConvergenceWarning)
    #Combine all features into one dataframe
    Features_DF = reduce(lambda x, y: pd.merge(x, y, on = 'HNAME'), results)
    #Combine all features into one dataframe
    # Features_Dataframe = pd.merge(Features_Dataframe, Features_DF, on = 'HNAME', how='left')
    """
    Feature Transformation
    """
    Transformation_List = Feature_Storage.Transformation_List
    # results = []
    # for Feature in Transformation_List:
    #     results.append(Transform_Features([Feature, Features_DF]))
    results = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for Feature in Transformation_List:
            #Run Functions
            """
            All Feature Functions accepts a Base Features_Dataframe
            then return a dataframe of a race,
            containing the following columns in the order of :
            [HNAME, Feature Name]
            """
            results.append(executor.submit(Transform_Features, [Feature, Features_DF]))
    results = [i.result() for i in results]
    #Combine all features into one dataframe
    Transformation_DF = reduce(lambda x, y: pd.merge(x, y, on = 'HNAME'), results)
    # Merge base + transformed features onto the (RARID, HNAME) skeleton.
    Features_Dataframe = reduce(lambda x, y: pd.merge(x, y, on = 'HNAME'), [Features_Dataframe, Features_DF, Transformation_DF])
    # Row-count mismatch means some horse lost rows during the inner merges.
    if not len(Features_Dataframe.index)==len(Dataframe.index):
        print(Features_Dataframe)
        # print(Dataframe.loc[:,'RARID'].tolist()[0])
    #Inserting Features_Dataframe to Database
    Load_Dataset_toDatabase('FeatureDb', Features_Dataframe)
    # Report the race id when any feature came back NaN.
    if sum(Features_Dataframe.isna().sum()) != 0 :
        print(Dataframe.loc[:,'RARID'].to_list()[0])
    # Cast every feature column (all but RARID/HNAME) to float.
    Features_Dataframe.loc[:,Features_Dataframe.columns[2:]] = \
    pd.DataFrame(Features_Dataframe.loc[:,Features_Dataframe.columns[2:]].values.astype(float))
    return Features_Dataframe
def Create_Features(index):
    """Compute one feature column for one race.

    index -- packed argument list [Feature, Dataframe, HNAME_List, Raceday],
             as submitted by One_Race_Feature via the process pool.
    Returns the dataframe produced by the named feature function
    ([HNAME, feature] columns).
    """
    feature_name, race_df, hname_list, raceday = index
    feature_fn = getattr(Feature_Storage, feature_name)
    return feature_fn(race_df, hname_list, raceday)
def Transform_Features(index):
    """Apply one feature transformation to a base feature dataframe.

    index -- packed argument list [Feature, Features_DF], as submitted by
             One_Race_Feature via the process pool.
    Returns the dataframe produced by the named transformation function.
    """
    transform_name, features_df = index
    transform_fn = getattr(Feature_Storage, transform_name)
    return transform_fn(features_df)
def Post_Raceday_Update(Raceday, Feature_DF, Result_DF):
    """Run all end-of-raceday maintenance after the day's features are created.

    Raceday    -- the race date just processed
    Feature_DF -- features created for that day's races
    Result_DF  -- race results for that day
    Returns None.
    """
    # Post-Raceday - ELO figures: fold the day's results into the running
    # statistics table.
    Feature_Storage.Update_Running_Stat(Result_DF)
    # Post-Raceday - auxiliary regressions.
    Feature_Storage.Fit_Residual_Model(Raceday)
    Feature_Storage.Weight_Aug_Reg(Raceday)
    # Post-Raceday - Race_PosteriroDb: speed and pace figures, preference
    # residuals.
    Feature_Storage.Update_Race_PosteriroDb([Feature_DF, Result_DF])
    return None
def Feature_Creation(Dataframe):
    """
    Creates Feature from Feature List and Insert into FeatureDb

    Parameter
    ---------
    Dataframe : MatchDay Data Format (may span many race days)

    Return
    ------
    None (features are written to FeatureDb as a side effect)
    """
    #Start Timer
    start_time = time.time()
    for RADAT, Race_Day in Dataframe.groupby('RADAT'):
        print(RADAT)
        """
        Day by Day
        """
        for RARID, Race in Race_Day.groupby('RARID'):
            """
            Race to Race
            """
            # print(RARID)
            One_Race_Feature(Race)
        """
        Post-Day
        """
        # After every race of the day is featurised, run the end-of-day updates.
        Result_DF = Extraction_Database(""" Select * from RaceDb where RADAT = ? """,[RADAT])
        # Race ids embed the date (<RADAT> + 2-digit race number), hence the
        # BETWEEN bounds selecting every race of this day.
        Features_Dataframe = Extraction_Database(""" Select * from FeatureDb where RARID BETWEEN ? and ? """,[int(str(RADAT)+'00'), int(str(RADAT)+'99')])
        Post_Raceday_Update(RADAT, Features_Dataframe, Result_DF)
    print("---- %s Races are Created to FeatureDb in %s seconds ----" \
          %(Dataframe['RARID'].nunique(), (str(round((time.time() - start_time),4)))))
    return None
def MatchDay_Dataset(Race_ID):
    """
    Extracting MatchDay Data from RaceDb

    Parameter
    ---------
    Race_ID : Dataframe of RaceID

    Return
    ------
    Dataframe with one row per (race, horse)
    """
    #Start Timer
    start_time = time.time()
    Dataset = pd.DataFrame()
    if len(Race_ID)>1:
        # Multiple races: inline the ids as a SQL tuple literal, e.g. "(1, 2)".
        Race_ID_List = [i for i in Race_ID['RARID'].tolist()]
        Dataset = Extraction_Database("""
        Select Distinct RARID, HNAME, HAGEI, HBWEI, HDRAW, HJRAT, HWEIC, JNAME, RESFO, RACLS, RADAT, RARAL,
        RADIS, RAGOG, RALOC, RARUN, RATRA, SNAME
        from RaceDb where RARID in {RARID}
        Order By RARID, HNAME
        """.format(RARID = '('+str(Race_ID_List)[1:-1]+')'))
    else:
        # Single race: use a bound parameter instead of string formatting.
        Dataset = Extraction_Database("""
        Select Distinct RARID, HNAME, HAGEI, HBWEI, HDRAW, HJRAT, HWEIC, JNAME, RESFO, RACLS, RADAT, RARAL,
        RADIS, RAGOG, RALOC, RARUN, RATRA, SNAME
        from RaceDb where RARID = ?
        Order By HNAME
        """, [int(list(Race_ID.values)[0])])
    #Print Time Taken to Load
    print("---- %s Races are Extracted from RaceDb in %s seconds / %s minutes----" \
          %(len(Race_ID), (str(round((time.time() - start_time),4))),(str(round(((time.time() - start_time))/60,4)))))
    return Dataset
def Dataset_Extraction(Race_ID_List):
    """
    Function for Extracting Datasets from FeatureDb
    Feature Set : Data used for Feature Engineering
    Modelling Set : Data used for trining base models and Hyperparameter Selection
    Ensemble Set : Data used for training Ensemble Model
    Testing Set : Testing Final Model
    X Dataset for all Sets should be the same
    They should go through the same Preprocessing Pipeline

    Parameter
    --------
    Race_ID_List : pd.Dataframe of RaceID (the parameter is re-bound below to
                   the filtered id list)

    Return
    ------
    (X_Dataset, Y_Dataset) pair of dataframes, ordered by RARID, HNAME
    """
    #Start Timer
    start_time = time.time()
    # Keep only races whose features exist and pass the CC_FRB = 0 filter
    # (the meaning of CC_FRB is defined by Feature_Storage — TODO confirm).
    Race_ID_List = Extraction_Database("""
    Select Distinct RARID from FeatureDb
    where RARID in {RARID} and CC_FRB = 0
    """.format(RARID = '('+str(Race_ID_List['RARID'].tolist())[1:-1]+')'))
    """
    Constructing X_Dataset
    """
    #Get Feature for one race
    X_Dataset = Extraction_Database("""
    Select * from FeatureDb where RARID in {RARID}
    Order By RARID, HNAME
    """.format(RARID = '('+str(Race_ID_List['RARID'].tolist())[1:-1]+')'))
    #Convert all features into floats (all columns after RARID, HNAME)
    col_list = X_Dataset.columns[2:]
    for col in col_list:
        X_Dataset[col] = X_Dataset[col].astype(float)
    #Get RADIS, RALOC, RATRA (race conditions joined back onto the features)
    X_Condition = Extraction_Database("""
    Select RARID, HNAME, RADIS, RALOC, RATRA from RaceDb where RARID in {RARID}
    Order by RARID, HNAME
    """.format(RARID = '('+str(Race_ID_List['RARID'].tolist())[1:-1]+')'))
    #Merging Dataset
    X_Dataset = X_Condition.merge(X_Dataset, on = ['HNAME', 'RARID'])
    """
    Constructing Y_Dataset
    """
    #Ensemble Model targets: final odds, win flag, finishing position, place odds
    Y_Dataset = Extraction_Database("""
    Select RARID, HNAME, RESFO, RESWL, RESFP, ODPLA
    from RaceDb where RARID in {RARID}
    Order By RARID, HNAME
    """.format(RARID = '('+str(Race_ID_List['RARID'].tolist())[1:-1]+')'))
    #Convert all features into floats
    col_list = Y_Dataset.columns[2:]
    for col in col_list:
        Y_Dataset[col] = Y_Dataset[col].astype(float)
    #Print Time Taken to Load
    print("---- %s Races are Extracted from FeatureDb in %s seconds ----"%(len(Race_ID_List), (str(round((time.time() - start_time),4)))))
    return X_Dataset, Y_Dataset
def Get_RaceID(Season_List):
    """
    Extract the distinct race IDs for the given seasons from RaceDb.

    Parameter
    --------
    Season_List : seasons to extract, e.g. ['2017', '2018']

    Return
    ------
    Dataframe of RaceID
    """
    # Render the seasons as a SQL tuple literal, e.g. "(2017, 2018)".
    seasons_sql = '(' + str([int(season) for season in Season_List])[1:-1] + ')'
    return Extraction_Database("""
    Select Distinct RARID from RaceDb where RASEA in {Season_List}
    """.format(Season_List=seasons_sql))
|
# -*- coding: utf-8 -*-
import picamera
import picamera.array
import cv2
import pygame
import sys
# Initialise pygame and open a 320x240 window used as the camera preview.
pygame.init()
size = (320, 240)
screen = pygame.display.set_mode(size)
def pygame_imshow(array):
    """Show a BGR OpenCV image in the pygame window.

    OpenCV stores channels as BGR while pygame expects RGB, and
    make_surface treats the first axis as x, so the image is converted,
    rotated and mirrored before blitting.
    """
    blue, green, red = cv2.split(array)
    rgb = cv2.merge([red, green, blue])
    surface = pygame.surfarray.make_surface(rgb)
    surface = pygame.transform.rotate(surface, -90)
    surface = pygame.transform.flip(surface, True, False)
    screen.blit(surface, (0, 0))
    pygame.display.flip()
# HoughCircles' constant lives in a different place in OpenCV 2 vs 3+,
# so detect the major version once up front.
version = cv2.__version__.split(".")
CVversion = int(version[0])
with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution = (320, 240)
        camera.framerate = 15
        while True:
            # Capture a frame into stream.array in BGR channel order.
            camera.capture(stream, 'bgr', use_video_port=True)
            # Convert the frame to a grayscale image.
            gray = cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
            # Apply a Gaussian blur to improve detection accuracy.
            blur = cv2.GaussianBlur(gray, (9,9), 0)
            # Run the Hough transform to find circles in the frame.
            if CVversion == 2:
                circles = cv2.HoughCircles(blur, cv2.cv.CV_HOUGH_GRADIENT,
                                           dp=1, minDist=50, param1=120, param2=40,
                                           minRadius=5, maxRadius=100)
            else:
                circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT,
                                           dp=1, minDist=50, param1=120, param2=40,
                                           minRadius=5, maxRadius=100)
            if circles is not None:
                for c in circles[0]:
                    # Draw a red circle over each detection on the original
                    # frame. c[0]: x coordinate, c[1]: y coordinate, c[2]: radius.
                    # NOTE(review): c holds floats; recent OpenCV releases
                    # require integer centre/radius for cv2.circle — confirm.
                    cv2.circle(stream.array, (c[0],c[1]), c[2], (0,0,255), 2)
            # Display the annotated frame via pygame.
            pygame_imshow(stream.array)
            # Quit the application when "q" is pressed.
            for e in pygame.event.get():
                if e.type == pygame.KEYDOWN:
                    if e.key == pygame.K_q:
                        pygame.quit()
                        sys.exit()
            # Reset the stream for the next capture.
            stream.seek(0)
            stream.truncate()
|
"""
This module contains classes used to describe cluster configuration
"""
import os.path
import os
import inspect
import json
import contextlib
import tarfile
import hashlib
import tempfile
import base64
import io
import yaml
import pkg_resources
import docker
import docker.errors
from .. import utils
class BaseShip:
    """
    Base mixin class for Ships.

    Subclasses are expected to expose ``name`` and ``fqdn`` attributes.
    """

    def __lt__(self, other):
        # Ships order by their fully-qualified domain name.
        return self.fqdn < other.fqdn

    def __repr__(self):
        return '{}(name={})'.format(type(self).__name__, self.name)

    def containers(self, containers):
        """Return the subset of *containers* placed on this ship."""
        placed = []
        for candidate in containers:
            if candidate.ship == self:
                placed.append(candidate)
        return placed

    @property
    def logger(self):
        return utils.getlogger(ship=self, bindto=3)
class Ship(BaseShip):
    """
    Ship objects represents host running Docker listening on 4243 external port.
    """

    def __init__(self, name, fqdn, **kwargs):
        self.name = name
        self.fqdn = fqdn
        # Any extra keyword arguments become plain attributes (e.g. novacluster).
        for key, value in kwargs.items():
            setattr(self, key, value)

    @property
    @utils.cached
    def memory(self):
        """Total memory of the host, queried via Nova when available."""
        if hasattr(self, 'novacluster'):
            return utils.ship_memory_from_nova(self)
        return utils.ship_memory_from_bot(self.fqdn)

    @property
    @utils.cached
    def islocal(self):
        # True when this ship is the machine we are currently running on.
        return os.uname()[1] == self.name

    @property
    @utils.cached
    def docker(self):
        """Lazily-created Docker client for this host (port 4243)."""
        self.logger.debug('connecting to ship', fqdn=self.fqdn)
        return docker.Client('http://{}:4243/'.format(self.fqdn))
class LocalShip(BaseShip):
    """A Ship describing the machine the tool itself is running on."""

    @property
    def datacenter(self):
        return 'localdc'

    @property
    def name(self):
        return 'localship'

    @property
    def fqdn(self):
        # Overridable via settings; defaults to localhost.
        return utils.settings.get('localship-fqdn', 'localhost')

    @property
    def islocal(self):
        # By definition the local ship is always local.
        return True

    @property
    @utils.cached
    def memory(self):
        """Total physical memory of the local machine."""
        import psutil
        return psutil.virtual_memory().total

    @property
    @utils.cached
    def docker(self):
        """Docker client connected to the locally configured docker URL."""
        return docker.Client(utils.settings.get('dockerurl'))
class Image:
    """A Docker image addressed by registry/repository:tag.

    The image id is resolved lazily via `getid` when not supplied.
    """

    def __init__(self, repository: str, tag: str='latest', id: str=''):
        self.tag = tag
        self.id = id
        self.registry, self.repository = utils.getrepo(repository)

    def __repr__(self):
        return 'Image(repository={repository}, tag={tag}, id={id:.7}, registry={registry})'.format(**vars(self))

    def __getstate__(self):
        # BUG FIX: `self.id is ''` compared identity against a string literal
        # (implementation-dependent and a SyntaxWarning on modern CPython);
        # test emptiness instead.
        if not self.id:
            self.getid()
        return vars(self)

    def getfullrepository(self):
        """Return 'registry/repository', or the bare repository when no registry is set."""
        return self.repository if self.registry is None else '{}/{}'.format(self.registry, self.repository)

    @property
    def logger(self):
        return utils.getlogger(image=self, bindto=3)

    def getid(self):
        """Resolve and cache the image id, pulling the tag when missing locally."""
        self.logger.debug('retrieving id')
        if not self.id:  # BUG FIX: was `self.id is ''` (identity comparison)
            if self.tag not in self.gettags():
                self.pull()
            self.id = self.gettags()[self.tag]
        return self.id

    def _streamoperation(self, func, **kwargs):
        """Run a streaming docker operation (push/pull/build), logging progress.

        Raises docker.errors.DockerException when the stream reports an error.
        """
        logger = utils.getlogger('dominator.docker.{}'.format(func.__name__), image=self, docker=func.__self__)
        for line in func(stream=True, **kwargs):
            if line != '':
                resp = json.loads(line)
                if 'error' in resp:
                    raise docker.errors.DockerException('could not complete {} operation on {} ({})'.format(
                        func.__name__, self, resp['error']))
                else:
                    message = resp.get('stream', resp.get('status', ''))
                    for line in message.split('\n'):
                        if line:
                            logger.debug(line, response=resp)
        # Tags may have changed after a push/pull/build: drop the cached mapping.
        Image.gettags.cache_clear()

    def push(self, dock=None):
        self.logger.info("pushing repo")
        dock = dock or utils.getdocker()
        return self._streamoperation(dock.push, repository=self.getfullrepository())

    def pull(self, dock=None):
        self.logger.info("pulling repo")
        dock = dock or utils.getdocker()
        return self._streamoperation(dock.pull, repository=self.getfullrepository(), tag=self.tag)

    def build(self, dock=None, **kwargs):
        self.logger.info("building image")
        dock = dock or utils.getdocker()
        return self._streamoperation(dock.build, tag='{}:{}'.format(self.getfullrepository(), self.tag), **kwargs)

    @utils.cached
    @utils.asdict
    def gettags(self):
        """Yield (tag, id) pairs for this repository known to the local docker."""
        self.logger.debug("retrieving tags")
        images = utils.getdocker().images(self.getfullrepository(), all=True)
        for image in images:
            for tag in image['RepoTags']:
                yield tag.split(':')[-1], image['Id']

    def inspect(self):
        """Return the image's config dict from `docker inspect`."""
        result = utils.getdocker().inspect_image(self.getid())
        # Workaround: Docker sometimes returns "config" key in different casing
        if 'config' in result:
            return result['config']
        elif 'Config' in result:
            return result['Config']
        else:
            raise RuntimeError("unexpected response from Docker: {}".format(result))

    @utils.cached
    def getports(self):
        """Ports exposed by the image, as plain integers."""
        return [int(port.split('/')[0]) for port in self.inspect()['ExposedPorts'].keys()]

    def getcommand(self):
        return ' '.join(self.inspect()['Cmd'])

    def getenv(self):
        return dict(var.split('=', 1) for var in self.inspect()['Env'])
class SourceImage(Image):
    """An Image built from a generated Dockerfile (scripts, env, files, ...).

    The tag is a content hash of the build inputs, so identical definitions
    reuse the same image.
    """

    # BUG FIX: env/volumes/ports/files used mutable default arguments ({} / []),
    # which are shared between every instance constructed without them; use
    # None sentinels and create fresh containers per instance instead.
    def __init__(self, name: str, parent: Image, scripts: list, command: str=None,
                 env: dict=None, volumes: list=None, ports: list=None, files: dict=None):
        self.parent = parent
        self.scripts = scripts
        self.command = command
        self.volumes = [] if volumes is None else volumes
        self.ports = [] if ports is None else ports
        self.files = {} if files is None else files
        self.env = {} if env is None else env
        Image.__init__(self, name)
        self.tag = self.gettag()

    def __getstate__(self):
        def filter_state():
            # 'files' holds open file objects and cannot be serialized.
            return {k: v for k, v in Image.__getstate__(self).items() if k not in ['files']}
        try:
            return filter_state()
        except docker.errors.DockerException:
            # DockerException means that needed image not found in repository and needs rebuilding
            pass
        self.build(fileobj=self.gettarfile(), custom_context=True)
        self.push()
        return filter_state()

    def gettag(self):
        """Derive a deterministic tag from a hash of all build inputs."""
        dump = json.dumps({
            'repository': self.repository,
            'parent': self.parent.__getstate__(),
            'scripts': self.scripts,
            'command': self.command,
            'env': self.env,
            'volumes': self.volumes,
            'ports': self.ports,
            'files': {path: hashlib.sha256(file.read()).hexdigest() for path, file in self.files.items()},
        }, sort_keys=True)
        digest = hashlib.sha256(dump.encode()).digest()
        # '+-' alt chars keep the tag free of '/' which docker tags forbid.
        tag = base64.b64encode(digest, altchars=b'+-').decode()
        return tag

    def gettarfile(self):
        """Build an in-memory docker build context (Dockerfile + ADDed files)."""
        f = tempfile.NamedTemporaryFile()
        with tarfile.open(mode='w', fileobj=f) as tfile:
            dockerfile = io.BytesIO()
            dockerfile.write('FROM {}:latest\n'.format(self.parent.getfullrepository()).encode())
            for name, value in self.env.items():
                dockerfile.write('ENV {} {}\n'.format(name, value).encode())
            for script in self.scripts:
                dockerfile.write('RUN {}\n'.format(script).encode())
            for volume in self.volumes:
                dockerfile.write('VOLUME {}\n'.format(volume).encode())
            for port in self.ports:
                dockerfile.write('EXPOSE {}\n'.format(port).encode())
            if self.command:
                dockerfile.write('CMD {}\n'.format(self.command).encode())
            for path, fileobj in self.files.items():
                dockerfile.write('ADD {} {}\n'.format(path, path).encode())
                tinfo = tfile.gettarinfo(fileobj=fileobj, arcname=path)
                fileobj.seek(0)
                tfile.addfile(tinfo, fileobj)
            dockerfile.seek(0)
            dfinfo = tarfile.TarInfo('Dockerfile')
            dfinfo.size = len(dockerfile.getvalue())
            tfile.addfile(dfinfo, dockerfile)
        f.seek(0)
        return f

    def getports(self):
        # Source images know their ports directly; no docker inspect needed.
        return self.ports
class Container:
    """A Docker container bound to a specific Ship and Image.

    Tracks the remote container's id/status and provides the full lifecycle:
    create/start/stop/remove/run plus a context-manager `execute`.
    """

    # NOTE(review): the mutable default arguments (ports/volumes/env/extports/
    # portproto) are shared between all calls that omit them — safe only while
    # callers never mutate them in place; confirm before relying on that.
    def __init__(self, name: str, ship: Ship, image: Image, command: str=None, hostname: str=None,
                 ports: dict={}, memory: int=0, volumes: list=[],
                 env: dict={}, extports: dict={}, portproto: dict={}):
        self.name = name
        self.ship = ship
        self.image = image
        self.command = command
        self.volumes = volumes
        self.ports = ports
        self.memory = memory
        self.env = env
        self.extports = extports      # optional external port overrides, keyed by port name
        self.portproto = portproto    # optional protocol per port name (defaults to tcp)
        self.id = ''                  # remote container id ('' = unknown/not created)
        self.status = 'not found'     # last observed docker status string
        self.hostname = hostname or '{}-{}'.format(self.name, self.ship.name)

    def __repr__(self):
        return 'Container(name={name}, ship={ship}, Image={image}, env={env}, id={id})'.format(**vars(self))

    def __getstate__(self):
        # id/status are runtime state and are not part of the serialized config.
        return {k: v for k, v in vars(self).items() if k not in ['id', 'status']}

    @property
    def logger(self):
        return utils.getlogger(container=self, bindto=3)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Deserialized containers start with unknown runtime state.
        self.id = ''
        self.status = 'not found'

    def getvolume(self, volumename):
        """Return the attached volume with the given name, or raise RuntimeError."""
        for volume in self.volumes:
            if volume.name == volumename:
                return volume
        else:
            raise RuntimeError('no such volume in container: %s', volumename)

    @property
    def running(self):
        # Docker reports running containers with a status like "Up 2 minutes".
        return 'Up' in self.status

    def check(self, cinfo=None):
        """Refresh self.id/self.status, either from *cinfo* or by querying docker."""
        if cinfo is None:
            self.logger.debug('checking container status')
            # Docker prefixes names with '/', hence the [1:] slice.
            matched = [cont for cont in self.ship.docker.containers(all=True)
                       if cont['Names'] and cont['Names'][0][1:] == self.name]
            if len(matched) > 0:
                cinfo = matched[0]
        if cinfo:
            self.id = cinfo.get('Id', self.id)
            self.status = cinfo.get('Status', self.status)
        else:
            self.id = ''
            self.status = 'not found'

    @contextlib.contextmanager
    def execute(self):
        """Create and start the container, yielding an iterator over its output.

        On name conflict (HTTP 409) the stale container is force-removed and
        recreated. The container is stopped on exit (best effort).
        """
        self.logger.debug('executing')
        try:
            try:
                self.create()
            except docker.errors.APIError as e:
                if e.response.status_code != 409:
                    raise
                self.check()
                self.remove(force=True)
                self.create()
            self.logger.debug('attaching to stdout/stderr')
            logs = utils.docker_lines(self.ship.docker.attach(
                self.id, stdout=True, stderr=True, logs=True, stream=True))
            self.start()
            yield logs
        finally:
            try:
                self.stop()
            except:
                self.logger.debug('could not stop container, ignoring')

    def logs(self, follow):
        """Print the container's logs; stream them when *follow* is true."""
        self.logger.bind(follow=follow).debug('getting logs from container')
        try:
            if follow:
                lines = utils.docker_lines(self.ship.docker.logs(self.id, stream=True))
            else:
                lines = self.ship.docker.logs(self.id).decode().split('\n')
            for line in lines:
                print(line)
        except KeyboardInterrupt:
            self.logger.debug('received keyboard interrupt')

    def stop(self):
        self.logger.debug('stopping container')
        self.ship.docker.stop(self.id, timeout=2)
        self.check({'Status': 'stopped'})

    def remove(self, force=False):
        self.logger.debug('removing container')
        self.ship.docker.remove_container(self.id, force=force)
        self.check({'Id': '', 'Status': 'not found'})

    def create(self):
        """Render volumes and create the remote container, pulling the image if needed."""
        self.logger.debug('preparing to create container')
        for volume in self.volumes:
            volume.render(self)
        try:
            cinfo = self._create()
        except docker.errors.APIError as e:
            if e.response.status_code != 404:
                raise
            # image not found - pull repo and try again
            # Check if ship has needed image
            self.logger.info('could not find requested image, pulling repo')
            self.image.pull(self.ship.docker)
            cinfo = self._create()
        self.check(cinfo)
        self.logger.debug('container created')

    def _create(self):
        """Issue the raw docker create_container call for this configuration."""
        self.logger.debug('creating container', image=self.image)
        return self.ship.docker.create_container(
            image='{}:{}'.format(self.image.getfullrepository(), self.image.getid()),
            hostname=self.hostname,
            command=self.command,
            mem_limit=self.memory,
            environment=self.env,
            name=self.name,
            ports=list(self.ports.values()),
            stdin_open=True,
            detach=False,
        )

    def run(self):
        """Ensure this container is running with the requested configuration.

        On a name conflict the existing container is inspected: an identical
        running container is kept; otherwise it is stopped/removed and recreated.
        """
        try:
            self.create()
        except docker.errors.APIError as e:
            if e.response.status_code != 409:
                raise
            self.check()
            if self.id:
                if self.running:
                    self.logger.info('found running container with the same name, comparing config with requested')
                    diff = utils.compare_container(self, self.inspect())
                    if diff:
                        self.logger.info('running container config differs from requested, stopping', diff=diff)
                        self.stop()
                    else:
                        self.logger.info('running container config identical to requested, keeping')
                        return
                self.logger.info('found stopped container with the same name, removing')
                self.remove()
                self.create()
        self.start()

    def start(self):
        """Start the created container with port bindings and volume binds."""
        self.logger.debug('starting container')
        self.ship.docker.start(
            self.id,
            port_bindings={
                # '::' binds on all interfaces; external port defaults to the internal one.
                '{}/{}'.format(port, self.portproto.get(name, 'tcp')): ('::', self.extports.get(name, port))
                for name, port in self.ports.items()
            },
            binds={v.getpath(self): {'bind': v.dest, 'ro': v.ro} for v in self.volumes},
        )
        self.check({'Status': 'Up'})
        self.logger.debug('container started')

    def inspect(self):
        return self.ship.docker.inspect_container(self.id)

    def wait(self):
        """Block until the container exits; returns its exit code."""
        return self.ship.docker.wait(self.id)

    def getport(self, name):
        # The externally visible port for *name*, falling back to the internal port.
        return self.extports.get(name, self.ports[name])
class Volume:
    """Base class for volumes mounted into a Container.

    Subclasses provide ``name``/``dest`` attributes plus getpath/render/ro.
    """

    def __repr__(self):
        return '{}(name={name}, dest={dest})'.format(type(self).__name__, **vars(self))

    @property
    def logger(self):
        return utils.getlogger(volume=self, bindto=3)


class DataVolume(Volume):
    """A host directory bind-mounted into the container for persistent data.

    dest -- mount point inside the container
    path -- explicit host path; when None the path is derived from settings
    name -- volume name (defaults to 'data')
    ro   -- mount read-only when True
    """

    def __init__(self, dest: str, path: str=None, name: str='data', ro=False):
        self.name = name
        self.dest = dest
        self.path = path
        self.ro = ro

    def render(self, _):
        # Data volumes have no files to generate.
        pass

    def getpath(self, container):
        # An explicit path wins; otherwise build one under the configured data dir.
        return self.path or os.path.expanduser(os.path.join(utils.settings['datavolumedir'],
                                                            container.name, self.name))


class ConfigVolume(Volume):
    """A read-only volume populated with generated configuration files."""

    def __init__(self, dest: str, files: dict=None, name: str='config'):
        self.name = name
        self.dest = dest
        # BUG FIX: the mutable default argument ({}) was shared between every
        # ConfigVolume created without explicit files; create a fresh dict.
        self.files = {} if files is None else files

    def getpath(self, container):
        return os.path.expanduser(os.path.join(utils.settings['configvolumedir'],
                                               container.name, self.name))

    @property
    def ro(self):
        # Config volumes are always mounted read-only.
        return True

    def render(self, cont):
        """Regenerate the volume directory: wipe it, then dump every file."""
        self.logger.debug('rendering')
        path = self.getpath(cont)
        os.makedirs(path, exist_ok=True)
        for filename in os.listdir(path):
            os.remove(os.path.join(path, filename))
        # NOTE(review): iterating `self.files` yields dict *keys*, which are
        # expected to be file objects exposing .dump(cont, volume) — confirm.
        for file in self.files:
            file.dump(cont, self)
class BaseFile:
    """Base class for files rendered into a ConfigVolume."""

    def __init__(self, name):
        self.name = name

    @property
    def logger(self):
        return utils.getlogger(file=self, bindto=3)

    def getpath(self, container: Container, volume: Volume):
        """Absolute path of this file inside *volume* for *container*."""
        return os.path.join(volume.getpath(container), self.name)

    def dump(self, container: Container, volume: Volume, data: str=None):
        """Write *data* (or the rendered contents) to the file on disk."""
        if data is None:
            data = self.data(container)
        target = self.getpath(container, volume)
        self.logger.debug("writing file", path=target)
        with open(target, 'w+', encoding='utf8') as out:
            out.write(data)

    def load(self, container: Container, volume: Volume):
        """Read the current file contents back from disk."""
        source = self.getpath(container, volume)
        self.logger.debug("loading text file contents", path=source)
        with open(source) as handle:
            return handle.read()
class TextFile(BaseFile):
    """A file whose content is a literal string or a packaged resource."""

    def __init__(self, filename: str, text: str=None):
        BaseFile.__init__(self, filename)
        if text is not None:
            self.content = text
        else:
            # No explicit text: load the file from the caller's package data.
            caller_frame = inspect.stack()[1]
            caller_module = inspect.getmodule(caller_frame[0])
            self.content = pkg_resources.resource_string(caller_module.__name__, filename).decode()

    def __str__(self):
        return 'TextFile(name={})'.format(self.name)

    def data(self, _container):
        return self.content
class TemplateFile:
    """Wraps a BaseFile whose contents are a mako template.

    The wrapped file's text is rendered with the container as ``this`` plus
    any extra keyword context before being dumped.
    """

    def __init__(self, file: 'BaseFile', **kwargs):
        self.file = file
        self.context = kwargs

    def __str__(self):
        # BUG FIX: vars(self) must be unpacked with ** so the named fields
        # {file}/{context} resolve; the original passed the dict positionally
        # and raised at runtime.
        return 'TemplateFile(file={file}, context={context})'.format(**vars(self))

    @property
    def logger(self):
        return utils.getlogger(file=self, bindto=3)

    def dump(self, container, volume):
        """Render the template and write it through the wrapped file."""
        self.logger.debug("rendering file")
        self.file.dump(container, volume, self.data(container))

    def data(self, container):
        """Render the wrapped file's text as a mako template."""
        import mako.template
        template = mako.template.Template(self.file.data(container))
        context = {'this': container}
        context.update(self.context)
        self.logger.debug('rendering template file', context=context)
        return template.render(**context)

    def load(self, container, volume):
        return self.file.load(container, volume)

    @property
    def name(self):
        # Delegate the file name to the wrapped file.
        return self.file.name
class YamlFile(BaseFile):
    """A config file whose contents are a dict serialized as YAML."""

    def __init__(self, name: str, data: dict):
        BaseFile.__init__(self, name)
        self.content = data

    def __str__(self):
        # BUG FIX: vars(self) must be unpacked with ** so the {name} field
        # resolves; the original passed the dict positionally and raised.
        return 'YamlFile(name={name})'.format(**vars(self))

    def data(self, _container):
        return yaml.dump(self.content)
class JsonFile(BaseFile):
    """A config file whose contents are a dict serialized as pretty JSON."""

    def __init__(self, name: str, data: dict):
        BaseFile.__init__(self, name)
        self.content = data

    def __str__(self):
        # BUG FIX: vars(self) must be unpacked with ** so the {name} field
        # resolves; the original passed the dict positionally and raised.
        return 'JsonFile(name={name})'.format(**vars(self))

    def data(self, _container):
        return json.dumps(self.content, sort_keys=True, indent='  ')
|
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, value=None):
        self.value = value
        self.left = None
        self.right = None

    def get_value(self):
        """Return the stored value."""
        return self.value

    def set_value(self, value):
        self.value = value

    def get_left_child(self):
        # Returns the left child Node (or None); callers work with the Node
        # object directly rather than its value.
        return self.left

    def set_left_child(self, node):
        self.left = node

    def get_right_child(self):
        return self.right

    def set_right_child(self, node):
        self.right = node

    def has_left_child(self):
        return self.left is not None

    def has_right_child(self):
        return self.right is not None

    def __repr__(self):
        return f"Node({self.get_value()})"

    __str__ = __repr__
class Tree(object):
    """Minimal binary tree: a wrapper around its root Node."""

    def __init__(self, value=None):
        # The root always exists; it wraps the given value (possibly None).
        self.root = Node(value)

    def get_root(self):
        """Return the root Node of the tree."""
        return self.root
class Stack:
    """LIFO stack backed by a Python list (top of stack == end of list)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (raises IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (raises IndexError when empty)."""
        return self.items[-1]

    def size(self):
        return len(self.items)

    def __repr__(self):
        # BUG FIX: the original repr claimed to be a Node(...) (copy-paste from
        # the Node class) and crashed with IndexError on an empty stack because
        # it called peek(); show the whole stack contents instead.
        return f"Stack({self.items})"

    __str__ = __repr__
def pre_order_with_stack_buggy(tree):
    """Attempt a pre-order traversal of *tree* using an explicit stack.

    Deliberately buggy teaching example: nodes are never marked as visited, so
    after popping back up, the traversal re-descends into the same left child
    again and again. A `loop_limit` counter keeps the demo finite. Progress is
    printed on every iteration; nothing is returned.
    """
    visit_order = list()
    stack = Stack()
    #start at the root node, visit it and the add it to the stack
    node = tree.get_root()
    stack.push(node)
    node = stack.peek()
    # Visit node
    visit_order.append(node.get_value())
    count = 0
    loop_limit = 7   # safety valve: stops the otherwise-infinite loop
    while(node and count < loop_limit):
        print(f"""
Visit Order: {visit_order}
Stack:
{stack}
""")
        count += 1
        if node.has_left_child():
            # BUG: no visited-state tracking, so an already-visited left child
            # is re-entered here every time we backtrack past its parent.
            node = node.get_left_child()
            stack.push(node)
            #Repetative but still
            node = stack.peek()
            visit_order.append(node.get_value())
        elif node.has_right_child():
            node = node.get_right_child()
            stack.push(node)
            #print("Pushed to stack: ", node)
            node = stack.peek()
            visit_order.append(node.get_value())
        else:
            # Leaf node: backtrack by popping the stack.
            stack.pop()
            if not stack.isEmpty():
                node = stack.peek()
            else:
                node = None
# Driver: build a small demo tree and run the intentionally buggy traversal.
tree = Tree("Google")
root = tree.get_root()
print(' ', root)
root.set_left_child(Node("Amazon"))
print(' Left Child: ', root.get_left_child())
root.set_right_child(Node("Microsoft"))
print(' Right Child: ', root.get_right_child())
root.get_left_child().set_left_child(Node("SpaceX"))
print('Left Left Child: ', root.get_left_child().get_left_child())
pre_order_with_stack_buggy(tree)
# The issue: SpaceX is revisited because the traversal keeps re-descending into
# the same left child forever; tracking visited state per node would fix it.
#!/usr/bin/python
# Python 2 script: prints a right-angled star triangle, widest row first
# (e.g. rows=3 -> "* * *", "* *", "*").
# NOTE: Python 2 input() evaluates the typed text as an expression.
rows = input("Enter no. of rows:")
for i in range(rows,0,-1):
    # NOTE(review): the inner loop shadows the outer `i`; it happens to work
    # because the outer value is re-read on the next outer iteration.
    for i in range(i):
        print "*",
    print ""
|
"""
Takes an unsolved Sudoku puzzle and returns it solved.
"""
import re
from collections import Counter
NUM_SEARCHES = 0  # global search counter; not updated by any code visible in this module
# Define some helpful global variables
def cross(A, B):
    """Return the concatenation cross product of A and B, e.g. 'A'+'1'."""
    return [left + right for left in A for right in B]
# Board coordinates: rows A-I (top to bottom), columns 1-9 (left to right).
rows = 'ABCDEFGHI'
cols = '123456789'
# All 81 box names, 'A1' .. 'I9'.
boxes = cross(rows, cols)
# The 27 units: 9 rows, 9 columns, 9 3x3 squares.
row_units = [cross(row, cols) for row in rows]
column_units = [cross(rows, col) for col in cols]
square_units = [cross(row, col)
                for row in ['ABC', 'DEF', 'GHI']
                for col in ['123', '456', '789']]
peer_group_list = row_units + column_units + square_units
# For each box: the units containing it, and the set of its 20 peers.
peer_group = dict((s, [u for u in peer_group_list if s in u]) for s in boxes)
peers = dict((s, set(sum(peer_group[s], [])) - {s}) for s in boxes)
def assign_value(grid_dict, box, value) -> dict:
    """Assign ``value`` to ``box`` in place and return the mutated dict."""
    grid_dict[box] = value
    return grid_dict
def naked_twins(grid_dict) -> dict:
    """Eliminate values using the naked twins strategy.

    Args:
        grid_dict(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    for unit in peer_group_list:
        # Count the two-candidate strings in this unit; any appearing twice
        # (or more) forms a naked twin pair.
        pair_counts = Counter(grid_dict[box] for box in unit if len(grid_dict[box]) == 2)
        twins = [value for value, occurrences in pair_counts.items() if occurrences > 1]
        for twin in twins:
            twin_digits = set(twin)
            for box in unit:
                # Boxes that ARE the twin keep their candidates (set compare
                # guards against digit-order differences).
                if set(grid_dict[box]) == twin_digits:
                    continue
                # Strip each twin digit from every other box in the unit.
                for digit in twin:
                    if digit in grid_dict[box]:
                        assign_value(grid_dict, box, grid_dict[box].replace(digit, ''))
    return grid_dict
def sudoku_input_string_to_grid_dict(grid_string) -> dict:
    """Convert input format into a dict of {square: char} with '.' for empties.

    :param grid_string: 81-character string of digits 1-9 and '.' placeholders.
    :raises ValueError: on wrong length or an unexpected character. The
        original silently skipped invalid characters, which misaligned every
        following box against ``boxes`` in the zip; it also used ``assert``,
        which is stripped under ``python -O``.
    """
    digits = '123456789'
    if len(grid_string) != 81:
        raise ValueError(f"Expected 81 characters, got {len(grid_string)}")
    board = []
    for val in grid_string:
        if val in digits:
            board.append(val)
        elif val == '.':
            # An empty square starts with all nine candidates.
            board.append(digits)
        else:
            raise ValueError(f"Unexpected character {val!r} in grid string")
    return dict(zip(boxes, board))
def display(grid_dict: dict, debugging_display: bool = False) -> None:
    """
    Display these values as a 2-D grid.
    :param grid_dict: A dictionary of the squares and potential values for each square.
    :param debugging_display: Whether we are displaying for debugging purposes or output purposes.
    """
    if not grid_dict:
        return
    # Column width: widest candidate string when debugging, else one char.
    width = max(len(grid_dict[k]) for k in boxes) + 1 if debugging_display else 1
    for i, row in enumerate(rows):
        if i % 3 == 0:
            # Bug fix: `i is 0` compared identity, not value (relies on
            # CPython small-int interning; SyntaxWarning on 3.8+).
            if i == 0:
                print('')
            else:
                print((('-' * width * 3 + '-+-') * 3)[:-2])
        display_row = []
        for j, col in enumerate(cols):
            bar = ''
            # Same identity-comparison fix as above (`j is not 8`).
            if j % 3 == 2 and j != 8:
                bar = ' | '
            if not debugging_display and grid_dict[row + col] == "123456789":
                # A fully unconstrained box renders as '_' in output mode.
                display_row.append("_".center(width, ' ') + bar)
            else:
                display_row.append(grid_dict[row + col].center(width, ' ') + bar)
        print(''.join(display_row))
def eliminate(grid_dict) -> dict:
    """Drop candidates already taken by a solved peer.

    For each box, remove every digit that some peer definitely holds
    (peer value of length 1), then store the reduced candidate string.
    """
    for box in grid_dict:
        remaining = grid_dict[box]
        for peer in peers[box]:
            if len(grid_dict[peer]) == 1:
                remaining = remaining.replace(grid_dict[peer][0], '')
        assign_value(grid_dict, box, remaining)
    return grid_dict
def only_choice(grid_dict) -> dict:
    """Assign a box to a value when it is the only box in a unit that
    could contain that value.
    """
    for unit in peer_group_list:
        # Within a box each digit appears at most once, so a per-unit digit
        # count of exactly 1 means only one box can take that digit.
        digit_counts = Counter()
        for box in unit:
            digit_counts.update(grid_dict[box])
        unique_digits = {digit for digit, count in digit_counts.items() if count == 1}
        for box in unit:
            for candidate in grid_dict[box]:
                if candidate in unique_digits:
                    assign_value(grid_dict, box, candidate)
    return grid_dict
def grid_to_output_format(grid_dict) -> str:
    """Serialize a solved grid back to an 81-character string in A1..I9 order.

    :raises Exception: when the grid is not (yet) solved.
    """
    if not is_solved(grid_dict):
        raise Exception(f"This grid is not solved: {grid_dict}")
    # str.join is linear; the original built the string with += in a loop.
    return ''.join(grid_dict[box] for box in boxes)
def fill_in_with_constraint_satisfaction(grid_dict) -> dict:
    """Apply the propagation strategies repeatedly until progress stalls.

    One pass = eliminate, then only-choice, then naked twins; we stop when a
    pass does not increase the number of solved (single-candidate) boxes.
    """
    while True:
        solved_before = sum(1 for value in grid_dict.values() if len(value) == 1)
        grid_dict = eliminate(grid_dict)
        grid_dict = only_choice(grid_dict)
        grid_dict = naked_twins(grid_dict)
        solved_after = sum(1 for value in grid_dict.values() if len(value) == 1)
        if solved_before == solved_after:
            return grid_dict
def is_valid(grid_dict) -> bool:
    """Return False when any unit holds the same solved digit twice."""
    for unit in peer_group_list:
        seen = set()
        for box in unit:
            value = grid_dict[box]
            # Only solved boxes (exactly one candidate) can conflict.
            if len(value) != 1:
                continue
            if value in seen:
                return False
            seen.add(value)
    return True
def is_solved(grid_dict) -> bool:
    """True when the grid is non-empty, consistent, and fully determined.

    Bug fix: the original returned the falsy ``grid_dict`` itself (None or {})
    despite the ``-> bool`` annotation; bool() makes the annotation hold.
    Truthiness-based callers are unaffected.
    """
    return bool(grid_dict and is_valid(grid_dict) and all(len(val) == 1 for val in grid_dict.values()))
def get_min_box(grid_dict) -> str:
    """Return the unsolved box with the fewest remaining candidates.

    Ties break lexicographically via tuple comparison on (count, box name).
    """
    unsolved = ((len(grid_dict[box]), box) for box in boxes if len(grid_dict[box]) > 1)
    _, best_box = min(unsolved)
    return best_box
def search(grid_dict, use_constraint_satisfaction_heuristics: bool = True) -> dict:
    """Depth-first search over candidate assignments.

    :param grid_dict: candidate dict mapping each box to a digit string.
    :param use_constraint_satisfaction_heuristics: propagate constraints
        before branching on the most-constrained box.
    :return: the solved grid dict, or None when this branch is a dead end.
    """
    if use_constraint_satisfaction_heuristics:
        grid_dict = fill_in_with_constraint_satisfaction(grid_dict)
    if is_solved(grid_dict):
        return grid_dict
    # Branch on the box with the fewest candidates (minimum-remaining-values).
    branch_box = get_min_box(grid_dict)
    for candidate in grid_dict[branch_box]:
        trial = grid_dict.copy()
        trial[branch_box] = candidate
        if not is_valid(trial):
            continue
        solved = search(trial, use_constraint_satisfaction_heuristics)
        if is_solved(solved):
            return solved
    return None
def solve(sudoku_input: str, use_constraint_satisfaction_heuristics: bool = True, print_results: bool = True):
    """
    Takes an input string and returns a grid with the corresponding values filled in.
    param: sudoku_input: A string in a format like this
    '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    param print_results: Whether we should print the results of the finished Sudoku puzzle to the terminal
    :raises Exception: when the input is not exactly 81 chars of [1-9.].
    """
    # Bug fix: re.match only anchors the start, so 81 valid characters
    # followed by trailing garbage passed; fullmatch anchors both ends.
    if not re.fullmatch(r"[1-9.]{81}", sudoku_input):
        raise Exception("Improperly formatted input.")
    input_grid = sudoku_input_string_to_grid_dict(sudoku_input)
    if print_results:
        print("..........................Solving.........................")
        display(input_grid)
    solution_grid = search(input_grid, use_constraint_satisfaction_heuristics)
    if print_results:
        print("..........................Solved..........................")
        display(solution_grid)
    return grid_to_output_format(solution_grid)
if __name__ == "__main__":
    # Smoke test: solve the checked-in sample puzzle.
    with open("tests/sudoku_puzzle3.txt") as f:
        sample_input = f.read().strip()
    output = solve(sample_input)
|
import sys, logging
logging.basicConfig(stream = sys.stderr)
from flask import render_template, request, jsonify, make_response
from app import app
from datetime import datetime, timedelta
import pymysql as mdb
import json
import re, os
import pandas as pd
import numpy as np
from time import mktime
import pickle
# Hour offset subtracted from UTC to approximate US Eastern time.
# NOTE(review): hard-coded to 4 (EDT); this is off by one half the year — confirm.
dst = 4
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes as integer Unix timestamps.

    mktime interprets the naive timetuple as local time.
    """

    def default(self, obj):
        if not isinstance(obj, datetime):
            # Defer to the base class, which raises TypeError for unknowns.
            return json.JSONEncoder.default(self, obj)
        return int(mktime(obj.timetuple()))
# with open('../db.pkl', 'rb') as handle:
# Load DB credentials from a pickled dict and open a single module-level
# MySQL connection shared by all request handlers.
with open('/home/ubuntu/instaNYC/db.pkl', 'rb') as handle:
    db_info = pickle.load(handle)
db = mdb.connect(user=db_info["user"], password=db_info["password"], host=db_info["host"], db=db_info["database"], charset='utf8')
@app.route('/')
@app.route('/index')
def index():
    """Home page: build map markers for events seen in the last ~15 minutes."""
    hours_to_show = 2
    # All windows shift UTC by `dst` hours to approximate US Eastern time.
    date_start = (datetime.utcnow() - timedelta(hours=dst, days=6)).date()
    time_start = datetime.utcnow() - timedelta(hours=dst, minutes=15)
    time_end = datetime.utcnow() - timedelta(hours=dst)
    # get current markers
    marker = {}
    cur = db.cursor()
    # NOTE(review): queries are assembled with % string interpolation instead
    # of parameterized execute(sql, params); values here are internal
    # (datetimes/ids), but the pattern is SQL-injection-prone.
    cur.execute('SELECT loc_id, type FROM events WHERE date_time >= "%s" AND date_time < "%s"' % (time_start, time_end))
    results = cur.fetchall()
    for i, (ID, event_type) in enumerate(results):
        # A fresh cursor per event; the three queries below reuse it.
        cur = db.cursor()
        # Marker location for this event.
        cur.execute('SELECT loc_lat, loc_lon, loc_name, loc_id FROM top_places_nyc WHERE id = "%s"' % ID)
        results_2 = cur.fetchall()
        # Recent thumbnails (one per user) for the marker popup gallery.
        cur.execute('SELECT thumbnail_url FROM nyc_data WHERE loc_id IN (SELECT loc_id FROM top_places_nyc WHERE id = "%s") AND time > "%s" GROUP BY user ORDER BY time DESC' % (ID, datetime.utcnow() - timedelta(hours=hours_to_show)))
        results_3 = cur.fetchall()
        # Hourly counts over the past week for the popup time series.
        cur.execute('SELECT date_time, counts FROM hourly_counts WHERE loc_id IN (SELECT loc_id FROM top_places_nyc WHERE id = "%s") AND DATE(date_time) > "%s"' % (ID, date_start))
        results_4 = cur.fetchall()
        marker[ID] = {'lat': float(results_2[0][0]), 'lon': float(results_2[0][1]),\
                      'event_type': event_type, 'name': re.sub('[.!,;]', '', results_2[0][2].encode("ascii")),\
                      'url': [x[0] for x in results_3][:18],\
                      't_h': [x[0].strftime("%Y-%m-%d %H:%M:%S") for x in results_4],\
                      'x_h': [x[1] for x in results_4],\
                      'hashtags': 'empty'}
    return render_template("index.html", records=json.dumps(marker))
@app.route('/_query')
def _query():
    """AJAX endpoint: return anomaly markers for a user-chosen date.

    Returns the string "today" for the current date, an error string for
    out-of-range dates, else the per-day anomalies pickled by the offline job.
    """
    given_date = request.args.get('given_date')
    given_date = datetime.strptime(given_date, "%m/%d/%Y").date()
    if given_date == (datetime.utcnow() - timedelta(hours=dst)).date():
        marker = "today"
    elif given_date < datetime(2015, 1, 27).date():
        marker = "Please choose a date after Jan 27, 2015."
    elif given_date > (datetime.utcnow() - timedelta(hours=dst)).date():
        marker = "No prediction for the future events yet!"
    else:
        # Python 2 print statement; debug output of the working directory.
        print os.getcwd()
        filename = 'anomalies_' + given_date.strftime("%Y-%m-%d") + '.pkl'
        with open('app/static/pickles/' + filename, 'rb') as handle:
            marker = pickle.load(handle)
    return jsonify(result=marker)
@app.route('/slides')
def slides():
    """Static slide-deck page."""
    return render_template("slides.html")

@app.route('/about')
def about():
    """Static about page."""
    return render_template("about.html")

@app.route('/monitor')
def monitor():
    """Static monitoring page."""
    return render_template("monitor.html")

@app.route('/<path:filename>')
def return_image(filename):
    """Serve a static file with caching disabled (max-age=0)."""
    response = make_response(app.send_static_file(filename))
    response.cache_control.max_age = 0
    return response
from ethernet_servo.control import units
from ethernet_servo.api import api, BaseResource
from . import models
ns = api.namespace('devices', description='Configured servo controllers')
@ns.route('/<string:name>/goto')
@ns.param('name', 'The servo controller name as configured')
class DeviceGotoRaw(BaseResource):
    """Set the target position as a raw encoder value."""

    @ns.doc('Sets target position (raw encoder value)')
    @ns.marshal_with(models.DeviceStatus)
    @ns.expect(models.RawPosition)
    def put(self, name):
        dev = self.get_device(name)
        dev.controller.target_raw = api.payload['value']
        return dev
@ns.route('/<string:name>/goto/astronomical')
@ns.param('name', 'The servo controller name as configured')
class DeviceGotoAstronomical(BaseResource):
    """Set the target position in astronomical (hours/minutes/seconds) units."""

    @ns.doc('Sets target position (astronomical)')
    @ns.marshal_with(models.DeviceStatus)
    @ns.expect(models.AstronomicalPosition)
    def put(self, name):
        dev = self.get_device(name)
        goal = units.AstronomicalPosition()
        # Copy the three payload components onto the position object.
        for component in ('hours', 'minutes', 'seconds'):
            setattr(goal, component, api.payload[component])
        dev.controller.target_astronomical = goal
        return dev
@ns.route('/<string:name>/goto/angle')
@ns.param('name', 'The servo controller name as configured')
class DeviceGotoAngle(BaseResource):
    """Set the target position in angular (degrees/minutes/seconds) units."""

    @ns.doc('Sets target position (angle)')
    @ns.marshal_with(models.DeviceStatus)
    @ns.expect(models.AnglePosition)
    def put(self, name):
        dev = self.get_device(name)
        goal = units.AnglePosition()
        # Copy the three payload components onto the position object.
        for component in ('degrees', 'minutes', 'seconds'):
            setattr(goal, component, api.payload[component])
        # The controller stores the angular target as a decimal value.
        dev.controller.target_angle = goal.to_decimal()
        return dev
@ns.route('/<string:name>/goto/relative/angle')
@ns.param('name', 'The servo controller name as configured')
class DeviceGotoRelativeAngle(BaseResource):
    # Adjust the current angular target by the signed payload deltas.

    @ns.doc('Sets target position (angle) relative to current position')
    @ns.marshal_with(models.DeviceStatus)
    @ns.expect(models.AnglePosition)
    def put(self, name):
        device = self.get_device(name)
        current_target = device.controller.target_angle
        # NOTE(review): DeviceGotoAngle assigns ``target.to_decimal()`` to
        # ``controller.target_angle``, while this handler assigns the position
        # object itself and assumes the getter returned an object with
        # degrees/minutes/seconds attributes. One of the two is likely
        # inconsistent — confirm the controller property's expected type.
        current_target.degrees += api.payload['degrees']
        current_target.minutes += api.payload['minutes']
        current_target.seconds += api.payload['seconds']
        device.controller.target_angle = current_target
        return device
@ns.route('/<string:name>/goto/relative/astronomical')
@ns.param('name', 'The servo controller name as configured')
class DeviceGotoRelativeAstronomical(BaseResource):
    """Shift the current astronomical target by the signed payload deltas."""

    @ns.doc('Sets target position (astronomical) relative to current position')
    @ns.marshal_with(models.DeviceStatus)
    @ns.expect(models.AstronomicalPosition)
    def put(self, name):
        dev = self.get_device(name)
        goal = dev.controller.target_astronomical
        # Increment each component in place, then write the target back.
        for component in ('hours', 'minutes', 'seconds'):
            setattr(goal, component, getattr(goal, component) + api.payload[component])
        dev.controller.target_astronomical = goal
        return dev
|
from django.conf import settings
from django.contrib.auth.models import User
from django.test import override_settings
from model_mommy import mommy
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from .utils import (assign_user_to_role, create_default_roles,
remove_all_role_mappings)
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class TestProjectListAPI(APITestCase):
    """Project list endpoint: per-role visibility and creation rights."""

    @classmethod
    def setUpTestData(cls):
        # Actors: a member of the main project, a member of a second ("sub")
        # project, an annotation approver on the main project, and a superuser.
        cls.main_project_member_name = 'project_member_name'
        cls.main_project_member_pass = 'project_member_pass'
        cls.sub_project_member_name = 'sub_project_member_name'
        cls.sub_project_member_pass = 'sub_project_member_pass'
        cls.approver_name = 'approver_name_name'
        cls.approver_pass = 'approver_pass'
        cls.super_user_name = 'super_user_name'
        cls.super_user_pass = 'super_user_pass'
        create_default_roles()
        main_project_member = User.objects.create_user(username=cls.main_project_member_name,
                                                       password=cls.main_project_member_pass)
        sub_project_member = User.objects.create_user(username=cls.sub_project_member_name,
                                                      password=cls.sub_project_member_pass)
        approver = User.objects.create_user(username=cls.approver_name,
                                            password=cls.approver_pass)
        User.objects.create_superuser(username=cls.super_user_name,
                                      password=cls.super_user_pass,
                                      email='fizz@buzz.com')
        cls.main_project = mommy.make('TextClassificationProject', users=[main_project_member])
        cls.sub_project = mommy.make('TextClassificationProject', users=[sub_project_member])
        assign_user_to_role(project_member=main_project_member, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)
        assign_user_to_role(project_member=sub_project_member, project=cls.sub_project,
                            role_name=settings.ROLE_ANNOTATOR)
        assign_user_to_role(project_member=approver, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATION_APPROVER)
        cls.url = reverse(viewname='project_list')
        cls.data = {'name': 'example', 'project_type': 'DocumentClassification',
                    'description': 'example', 'guideline': 'example',
                    'resourcetype': 'TextClassificationProject'}
        cls.num_project = main_project_member.projects.count()

    def test_returns_main_project_to_approver(self):
        self.client.login(username=self.approver_name,
                          password=self.approver_pass)
        response = self.client.get(self.url, format='json')
        project = response.data[0]
        num_project = len(response.data)
        self.assertEqual(num_project, self.num_project)
        self.assertEqual(project['id'], self.main_project.id)

    def test_returns_main_project_to_main_project_member(self):
        self.client.login(username=self.main_project_member_name,
                          password=self.main_project_member_pass)
        response = self.client.get(self.url, format='json')
        project = response.data[0]
        num_project = len(response.data)
        self.assertEqual(num_project, self.num_project)
        self.assertEqual(project['id'], self.main_project.id)

    def test_do_not_return_main_project_to_sub_project_member(self):
        # A sub-project member sees only their own project, never the main one.
        self.client.login(username=self.sub_project_member_name,
                          password=self.sub_project_member_pass)
        response = self.client.get(self.url, format='json')
        project = response.data[0]
        num_project = len(response.data)
        self.assertEqual(num_project, self.num_project)
        self.assertNotEqual(project['id'], self.main_project.id)

    def test_allows_superuser_to_create_project(self):
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.post(self.url, format='json', data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Optional flags default to falsy when not supplied.
        self.assertFalse(response.json().get('collaborative_annotation'))
        self.assertFalse(response.json().get('randomize_document_order'))

    def test_allows_superuser_to_create_project_with_flags(self):
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        data = dict(self.data)
        data['collaborative_annotation'] = True
        data['randomize_document_order'] = True
        response = self.client.post(self.url, format='json', data=data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(response.json().get('collaborative_annotation'))
        self.assertTrue(response.json().get('randomize_document_order'))

    @classmethod
    def doCleanups(cls):
        # Role mappings are shared state; clear them for later test classes.
        remove_all_role_mappings()
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class TestProjectDetailAPI(APITestCase):
    """Project detail endpoint: read/update/delete permissions by role."""

    @classmethod
    def setUpTestData(cls):
        # Actors: a project member (annotator), a user on an unrelated project,
        # and a superuser who administers the main project.
        cls.project_member_name = 'project_member_name'
        cls.project_member_pass = 'project_member_pass'
        cls.non_project_member_name = 'non_project_member_name'
        cls.non_project_member_pass = 'non_project_member_pass'
        cls.admin_user_name = 'admin_user_name'
        cls.admin_user_pass = 'admin_user_pass'
        create_default_roles()
        cls.project_member = User.objects.create_user(username=cls.project_member_name,
                                                      password=cls.project_member_pass)
        non_project_member = User.objects.create_user(username=cls.non_project_member_name,
                                                      password=cls.non_project_member_pass)
        project_admin = User.objects.create_superuser(username=cls.admin_user_name,
                                                      password=cls.admin_user_pass,
                                                      email='fizz@buzz.com')
        cls.main_project = mommy.make('TextClassificationProject', users=[cls.project_member, project_admin])
        # Second project the non-member belongs to, proving isolation below.
        mommy.make('TextClassificationProject', users=[non_project_member])
        cls.url = reverse(viewname='project_detail', args=[cls.main_project.id])
        cls.data = {'description': 'lorem'}
        assign_user_to_role(project_member=cls.project_member, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)
        assign_user_to_role(project_member=project_admin, project=cls.main_project,
                            role_name=settings.ROLE_PROJECT_ADMIN)

    def test_returns_main_project_detail_to_main_project_member(self):
        self.client.login(username=self.project_member_name,
                          password=self.project_member_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.data['id'], self.main_project.id)

    def test_do_not_return_main_project_to_sub_project_member(self):
        self.client.login(username=self.non_project_member_name,
                          password=self.non_project_member_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_allows_admin_to_update_project(self):
        self.client.login(username=self.admin_user_name,
                          password=self.admin_user_pass)
        response = self.client.patch(self.url, format='json', data=self.data)
        self.assertEqual(response.data['description'], self.data['description'])

    def test_disallows_non_project_member_to_update_project(self):
        self.client.login(username=self.non_project_member_name,
                          password=self.non_project_member_pass)
        response = self.client.patch(self.url, format='json', data=self.data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_allows_admin_to_delete_project(self):
        self.client.login(username=self.admin_user_name,
                          password=self.admin_user_pass)
        response = self.client.delete(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_disallows_non_project_member_to_delete_project(self):
        self.client.login(username=self.non_project_member_name,
                          password=self.non_project_member_pass)
        response = self.client.delete(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @classmethod
    def doCleanups(cls):
        # Role mappings are shared state; clear them for later test classes.
        remove_all_role_mappings()
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class TestTagAPI(APITestCase):
    """Tag creation, listing, and deletion on a project.

    Consistency fix: the @override_settings decorator matches the other two
    test classes in this module, which override static-file storage so tests
    do not depend on collectstatic having run.
    """

    @classmethod
    def setUpTestData(cls):
        # Actors: an annotator project member and a superuser (tag mutations
        # are performed as the superuser).
        cls.main_project_member_name = 'project_member_name'
        cls.main_project_member_pass = 'project_member_pass'
        cls.super_user_name = 'super_user_name'
        cls.super_user_pass = 'super_user_pass'
        create_default_roles()
        main_project_member = User.objects.create_user(username=cls.main_project_member_name,
                                                       password=cls.main_project_member_pass)
        super_user = User.objects.create_superuser(username=cls.super_user_name,
                                                   password=cls.super_user_pass,
                                                   email='fizz@buzz.com')
        cls.main_project = mommy.make('TextClassificationProject', users=[main_project_member, super_user])
        assign_user_to_role(project_member=main_project_member, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)
        assign_user_to_role(project_member=super_user, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)
        # One pre-existing tag so list/delete tests have a fixture.
        cls.tag = mommy.make('Tag', project=cls.main_project, text='Tag 1')
        cls.url = reverse(viewname='tag_list', args=[cls.main_project.id])
        cls.project_url = reverse(viewname='project_list')
        cls.delete_url = reverse(viewname='tag_list', args=[cls.main_project.id])

    def test_create_tag(self):
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.post(self.url, data={'text': 'Tag 2'})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response = self.client.get(self.project_url, format='json')
        self.assertTrue(response.data[0]['tags'][1]['text'] == 'Tag 2' ,'Content of tags differs.')

    def test_tag_list(self):
        self.client.login(username=self.main_project_member_name,
                          password=self.main_project_member_pass)
        response = self.client.get(self.project_url, format='json')
        self.assertTrue(len(response.data[0]['tags']) == 1 ,'Number of tags differs expected amount.')
        self.assertTrue(response.data[0]['tags'][0]['text'] == 'Tag 1' ,'Content of tags differs.')

    def test_delete_tag(self):
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.delete(self.delete_url, data={'id': self.tag.id})
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        response = self.client.get(self.project_url, format='json')
        self.assertTrue(len(response.data[0]['tags']) == 0 ,'Number of tags differs expected amount.')

    @classmethod
    def doCleanups(cls):
        # Role mappings are shared state; clear them for later test classes.
        remove_all_role_mappings()
|
# vim: ai ts=4 sts=4 et sw=4
from mwana.apps.userverification.models import DeactivatedUser
from mwana.apps.userverification.models import UserVerification
from django.contrib import admin
from django.db.models import Max
from rapidsms.contrib.messagelog.models import Message
class UserVerificationAdmin(admin.ModelAdmin):
    """Admin listing for user-verification requests and responses."""

    list_display = ("facility", "contact", "is_active", "verification_freq", "request",
                    "response", "responded", "request_date", "response_date",
                    'date_of_most_recent_sms',)
    list_filter = ("responded", "request", "facility", )

    def is_active(self, obj):
        # Human-readable flag for the related contact's active state.
        return "Yes" if obj.contact.is_active else "No"

    def date_of_most_recent_sms(self, obj):
        # Date of the newest incoming ('I') message from this contact, if any.
        aggregate = Message.objects.filter(
            contact=obj.contact.id,
            direction='I',
        ).aggregate(date=Max('date'))
        most_recent = aggregate['date']
        if most_recent is None:
            return 'None'
        return most_recent.strftime('%d/%m/%Y %H:%M:%S')

admin.site.register(UserVerification, UserVerificationAdmin)
class DeactivatedUserAdmin(admin.ModelAdmin):
    """Admin listing for deactivated users."""

    list_display = ("district", "clinic", "contact", "connection", "deactivation_date",)

    def clinic(self, obj):
        # Clinic of the related contact.
        return obj.contact.clinic

    def district(self, obj):
        # District of the related contact.
        return obj.contact.district

admin.site.register(DeactivatedUser, DeactivatedUserAdmin)
|
# Generated by Django 3.1.7 on 2021-06-04 17:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds an optional ``title`` CharField (max 200 chars)
    # to the ``whatjob`` and ``whyus`` models. Do not edit by hand.

    dependencies = [
        ('page', '0006_auto_20210604_2027'),
    ]

    operations = [
        migrations.AddField(
            model_name='whatjob',
            name='title',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='whyus',
            name='title',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
|
from model.contact import Contact
from random import randrange
def test_modify_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname='test'))
old_contacts = db.get_contact_list()
index = randrange(len(old_contacts))
contact = Contact(firstname="New_name", lastname='New_lastname')
contact.id = old_contacts[index].id
app.contact.modify_contact_by_id(contact.id, contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
new_ui_contacts = app.contact.get_contact_list()
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(new_ui_contacts, key=Contact.id_or_max)
|
import math
import sys
# Read the whole problem from stdin; the first line is the case count.
fin = sys.stdin
num_cases = int(fin.readline().strip())
def solve(B, M):
    """Build a B-building slide matrix yielding exactly M paths 1 -> B.

    Buildings 1..B-1 are fully chained (edge i -> j for all i < j < B-1 side),
    then edges into the last building are added greedily: edge j -> B-1
    contributes 2**max(j-1, 0) paths. Returns the 0/1 adjacency matrix as a
    list of rows, or None when M exceeds the maximum of 2**(B-2) paths.
    """
    if M > 2 ** (B - 2):
        return None
    slides = [[0] * B for _ in range(B)]
    # Fully connect the first B-1 buildings (upper triangle).
    for i in range(B - 1):
        for j in range(i + 1, B - 1):
            slides[i][j] = 1
    # Greedily add edges into the last building, biggest contribution first.
    for j in range(B - 2, -1, -1):
        contribution = 2 ** max(j - 1, 0)
        if contribution <= M:
            slides[j][B - 1] = 1
            # Bug fix: the original subtracted 2 ** (j-1), which is the float
            # 0.5 when j == 0 (mismatching the test above and turning M into
            # a float); subtract the same value that was tested.
            M -= contribution
    return slides
# Driver: for each case "B M", print POSSIBLE plus the matrix, or IMPOSSIBLE.
for t in range(num_cases):
    B,M = (int(a) for a in fin.readline().strip().split())
    res = solve(B,M)
    if res is None:
        print("Case #{}: {}".format(t + 1, "IMPOSSIBLE"))
    else:
        print("Case #{}: {}".format(t + 1, "POSSIBLE"))
        for s in res:
            print("".join(str(a) for a in s))
|
from __future__ import annotations
from os import getcwd, path
from traceback import extract_tb, print_exception
from flask import Response, jsonify, current_app
from werkzeug.exceptions import HTTPException
from jsonclasses.excs import (ObjectNotFoundException,
ValidationException,
UniqueConstraintException,
UnauthorizedActionException)
from .remove_none import remove_none
def _http_status(exception: Exception) -> int:
    """Map an exception to its HTTP status code (specific types win over HTTPException)."""
    if isinstance(exception, ObjectNotFoundException):
        return 404
    if isinstance(exception, (ValidationException, UniqueConstraintException)):
        return 400
    if isinstance(exception, UnauthorizedActionException):
        return 401
    if isinstance(exception, HTTPException):
        return exception.code
    return 500


def _field_messages(exception: Exception):
    """Per-field messages for validation-style errors, else None."""
    if isinstance(exception, (ValidationException, UniqueConstraintException)):
        return exception.keypath_messages
    return None


def _format_traceback(exception: Exception) -> list[str]:
    """Render the exception's traceback as short 'file path:line in func' strings."""
    return [f'file {path.relpath(f.filename, getcwd())}:{f.lineno} in {f.name}'
            for f in extract_tb(exception.__traceback__)]


def exception_handler(exception: Exception) -> tuple[Response, int]:
    """Translate any exception into a JSON error response and status code.

    Debug mode includes error details and a traceback; production hides
    internals for 500s. Unexpected (500) errors are printed server-side.
    """
    code = _http_status(exception)
    if code == 500:
        # Bug fix: the original called print_exception(etype=type[exception], ...) —
        # subscription instead of a call — and the etype keyword is rejected on
        # Python 3.10+; the positional legacy form works everywhere.
        print_exception(type(exception), exception, exception.__traceback__)
    if current_app.debug:
        if code == 500:
            error = {
                'type': 'Internal Server Error',
                'message': 'There is an internal server error.',
                'error_type': exception.__class__.__name__,
                'error_message': str(exception),
                'fields': _field_messages(exception),
                'traceback': _format_traceback(exception),
            }
        else:
            error = {
                'type': exception.__class__.__name__,
                'message': str(exception),
                'fields': _field_messages(exception),
                'traceback': _format_traceback(exception),
            }
    else:
        if code == 500:
            # Hide internals from clients in production.
            error = {
                'type': 'Internal Server Error',
                'message': 'There is an internal server error.',
            }
        else:
            error = {
                'type': exception.__class__.__name__,
                'message': str(exception),
                'fields': _field_messages(exception),
            }
    return jsonify({'error': remove_none(error)}), code
|
# Copyright 2016-2022. Couchbase, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (TYPE_CHECKING,
Any,
Dict,
Optional)
from twisted.internet.defer import Deferred
from couchbase.logic.analytics import AnalyticsQuery
from couchbase.logic.n1ql import N1QLQuery
from couchbase.logic.search import SearchQueryBuilder
from couchbase.result import (AnalyticsResult,
QueryResult,
SearchResult)
from couchbase.transcoder import Transcoder
from txcouchbase.analytics import AnalyticsRequest
from txcouchbase.collection import Collection
from txcouchbase.n1ql import N1QLRequest
from txcouchbase.search import SearchRequest
if TYPE_CHECKING:
from couchbase.options import (AnalyticsOptions,
QueryOptions,
SearchOptions)
from couchbase.search import SearchQuery
class Scope:
    """A named scope within a bucket; entry point for collections and queries."""

    def __init__(self, bucket, scope_name):
        # bucket: owning bucket; scope_name: this scope's name within it.
        self._bucket = bucket
        # NOTE(review): _set_connection() is not defined in the visible class
        # body — presumably inherited/defined elsewhere; it must populate
        # self._connection before the `connection` property is used.
        self._set_connection()
        self._loop = bucket.loop
        self._scope_name = scope_name

    @property
    def connection(self):
        """
        **INTERNAL**
        """
        return self._connection

    @property
    def loop(self):
        """
        **INTERNAL**
        """
        return self._loop

    @property
    def default_transcoder(self) -> Optional[Transcoder]:
        # Delegates to the owning bucket's transcoder.
        return self._bucket.default_transcoder

    @property
    def name(self):
        # This scope's name within the bucket.
        return self._scope_name

    @property
    def bucket_name(self):
        # Name of the owning bucket.
        return self._bucket.name

    def collection(self, name  # type: str
                   ) -> Collection:
        # Obtain a Collection handle within this scope.
        return Collection(self, name)
def query(
self,
statement, # type: str
*options, # type: QueryOptions
**kwargs # type: Dict[str, Any]
) -> Deferred[QueryResult]:
opt = QueryOptions()
opts = list(options)
for o in opts:
if isinstance(o, QueryOptions):
opt = o
opts.remove(o)
# set the query context as this bucket and scope if not provided
if not ('query_context' in opt or 'query_context' in kwargs):
kwargs['query_context'] = '`{}`.`{}`'.format(self.bucket_name, self.name)
query = N1QLQuery.create_query_object(
statement, *options, **kwargs)
request = N1QLRequest.generate_n1ql_request(self.connection,
self.loop,
query.params,
default_serializer=self.default_serializer)
d = Deferred()
def _on_ok(_):
d.callback(QueryResult(request))
def _on_err(exc):
d.errback(exc)
query_d = request.execute_query()
query_d.addCallback(_on_ok)
query_d.addErrback(_on_err)
return d
def analytics_query(
self,
statement, # type: str
*options, # type: AnalyticsOptions
**kwargs # type: Dict[str, Any]
) -> Deferred[AnalyticsResult]:
opt = AnalyticsOptions()
opts = list(options)
for o in opts:
if isinstance(o, AnalyticsOptions):
opt = o
opts.remove(o)
# set the query context as this bucket and scope if not provided
if not ('query_context' in opt or 'query_context' in kwargs):
kwargs['query_context'] = 'default:`{}`.`{}`'.format(self.bucket_name, self.name)
query = AnalyticsQuery.execute_analytics_query(
statement, *options, **kwargs)
request = AnalyticsRequest.generate_analytics_request(self.connection,
self.loop,
query.params,
default_serializer=self.default_serializer)
d = Deferred()
def _on_ok(_):
d.callback(AnalyticsResult(request))
def _on_err(exc):
d.errback(exc)
query_d = request.execute_analytics_query()
query_d.addCallback(_on_ok)
query_d.addErrback(_on_err)
return d
def search_query(
self,
index, # type: str
query, # type: SearchQuery
*options, # type: SearchOptions
**kwargs # type: Dict[str, Any]
) -> Deferred[SearchResult]:
opt = SearchOptions()
opts = list(options)
for o in opts:
if isinstance(o, SearchOptions):
opt = o
opts.remove(o)
# set the scope_name as this scope if not provided
if not ('scope_name' in opt or 'scope_name' in kwargs):
kwargs['scope_name'] = f'{self.name}'
query = SearchQueryBuilder.create_search_query_object(
index, query, *options, **kwargs
)
request = SearchRequest.generate_search_request(self.connection,
self.loop,
query.as_encodable(),
default_serializer=self.default_serializer)
d = Deferred()
def _on_ok(_):
d.callback(SearchResult(request))
def _on_err(exc):
d.errback(exc)
query_d = request.execute_search_query()
query_d.addCallback(_on_ok)
query_d.addErrback(_on_err)
return d
def _connect_bucket(self):
"""
**INTERNAL**
"""
return self._bucket.on_connect()
def _set_connection(self):
"""
**INTERNAL**
"""
self._connection = self._bucket.connection
@staticmethod
def default_name():
return "_default"
|
from django.db import models
from django.contrib.auth import get_user_model
from patients.models import Patients
class Doctors(models.Model):
    """Doctor profile attached to an auth user."""
    # one profile row per user; cascade-deleted with the user
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='doctors_user')
    # NOTE(review): default 'PATIENT' for a doctor's specialisation looks
    # suspicious — confirm the intended default value.
    specialised = models.CharField(max_length=15, default='PATIENT')
    addr = models.CharField(max_length=155, null=True)
    qualification = models.CharField(max_length=100, blank=True, null=True)
    def __str__(self):
        # display as the underlying user's string form (admin/shell)
        return str(self.user)
class Appointments(models.Model):
    """An appointment request between a patient and a doctor."""
    # NOTE(review): both FKs target the user model rather than the
    # Patients/Doctors profile models imported above — confirm intentional.
    patients = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='appointments_patients')
    doctors = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='appointments_doctors')
    date = models.DateField(blank=True, null=True)
    time = models.TimeField(null=True, blank=True)
    # None = pending, True = accepted, False = rejected (tri-state flag)
    is_accepted = models.BooleanField(default=None, null=True)
    message = models.TextField(null=True)
    def __str__(self):
        # display as the doctor's string form (admin/shell)
        return str(self.doctors)
|
from ROAR.planning_module.local_planner.smooth_waypoint_following_local_planner import SmoothWaypointFollowingLocalPlanner
from functools import reduce
from ROAR.utilities_module.utilities import two_points_to_yaw_pitch
from ROAR.perception_module.lane_detector import LaneDetector
from ROAR.planning_module.local_planner.local_planner import LocalPlanner
from ROAR.utilities_module.data_structures_models import Location, Rotation, Transform
from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl
from ROAR.control_module.controller import Controller
from ROAR.planning_module.mission_planner.mission_planner import MissionPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
import logging
from typing import Union
from ROAR.utilities_module.errors import (
AgentException,
)
from ROAR.agent_module.agent import Agent
import json
from pathlib import Path
class LaneFollowingLocalPlanner(SmoothWaypointFollowingLocalPlanner):
    """Waypoint follower that blends detected lane-center targets into the
    smoothed waypoint-following target."""
    def run_in_series(self) -> VehicleControl:
        """
        Run step for the local planner
        Procedure:
            1. Sync data
            2. get the correct look ahead for current speed
            3. get the correct next waypoint
            4. feed waypoint into controller
            5. return result from controller
        Returns:
            next control that the local think the agent should execute.
        """
        # nothing left to follow -> neutral control
        if (
                len(self.mission_planner.mission_plan) == 0
                and len(self.way_points_queue) == 0
        ):
            return VehicleControl()
        # get vehicle's location
        vehicle_transform: Union[Transform, None] = self.agent.vehicle.transform
        if vehicle_transform is None:
            raise AgentException("I do not know where I am, I cannot proceed forward")
        # redefine closeness level based on speed
        # (method name typo "threhold" is defined in the base class)
        self.set_closeness_threhold(self.closeness_threshold_config)
        # get current lane center
        lane_detector: LaneDetector = self.agent.lane_detector
        lane_center = lane_detector.lane_center
        if lane_center is not None:
            # assumes lane_center holds at least two 3-d points — TODO confirm.
            # Target sits 90% toward the nearer lane-center point; orientation
            # is derived from the two points.
            next_location = Location.from_array(lane_center[0]*0.9+lane_center[1]*0.1)
            next_yaw, next_pitch = two_points_to_yaw_pitch(lane_center[0], lane_center[1])
            next_rotation = Rotation(yaw=next_yaw, pitch=next_pitch, roll=0)
            target_waypoint = self.target_waypoint = Transform(location=next_location, rotation=next_rotation)
        else:
            # no lane detected: fall back to the previously cached target
            target_waypoint = self.target_waypoint
        # pop waypoints we have already passed; stop at the first one that is
        # farther than the previous (i.e. still ahead of us)
        curr_closest_dist = float("inf")
        while True:
            if len(self.way_points_queue) == 0:
                self.logger.info("Destination reached")
                return VehicleControl()
            # waypoint: Transform = self.way_points_queue[0]
            waypoint: Transform = self.next_waypoint_smooth()
            curr_dist = vehicle_transform.location.distance(waypoint.location)
            if curr_dist < curr_closest_dist:
                # if i find a waypoint that is closer to me than before
                # note that i will always enter here to start the calculation for curr_closest_dist
                curr_closest_dist = curr_dist
            elif curr_dist < self.closeness_threshold:
                # i have moved onto a waypoint, remove that waypoint from the queue
                self.way_points_queue.popleft()
            else:
                break
        # final target: 40% lane-center target, 60% smoothed route waypoint
        target_waypoint = target_waypoint*0.4+self.next_waypoint_smooth()*0.6
        control: VehicleControl = self.controller.run_in_series(next_waypoint=target_waypoint)
        self.logger.debug(f"\n"
                          f"Curr Transform: {self.agent.vehicle.transform}\n"
                          f"Target Location: {target_waypoint.location}\n"
                          f"Control: {control} | Speed: {Vehicle.get_speed(self.agent.vehicle)}\n")
        return control
|
import flask
from simplejson import dumps
from .. import auth
@auth.bp.route("/register",methods=("GET","POST"))
def register():
    """Serve the registration form (GET) and create an unverified account (POST).

    POST expects a JSON body with ``username``, ``salt``, ``password_hash``
    and ``email``.  On success a verification email is sent (the code is
    ``session + salt``) and an empty JSON object with status 200 is returned.
    Returns 400 for malformed/invalid input and 403 with ``err_msg`` when the
    username/email is taken or the recipient address is rejected.
    """
    # front-end
    if flask.request.method=="GET":
        # already logged in -> nothing to register
        if auth.check_client_session():
            return flask.redirect("/")
        return flask.render_template(
            "auth.html",
            title="Register",
            action_name="register",
            ctrl_script_src="register.js"
        )
    # back-end
    elif flask.request.method=="POST":
        form=flask.request.get_json()
        try:
            username=str(form["username"])
            salt=str(form["salt"])
            password_hash=str(form["password_hash"])
            email=str(form["email"]).lower()
        except (KeyError,TypeError):
            # missing field or non-object payload
            return "{}",400,{"Content-Type":"application/json"}
        # reject syntactically invalid fields early
        if not(
            auth.check_username(username) and
            auth.check_salt(salt) and
            auth.check_response(password_hash) and
            auth.check_email(email)
        ):
            return "{}",400,{"Content-Type":"application/json"}
        conn=auth.connectDB()
        cur=conn.cursor()
        err=False
        err_msg=[]
        # uniqueness checks ignore rows that never completed verification
        cur.execute("select * from users where email=%s and status!=%s limit 1;",(email,"unverified"))
        if cur.fetchone() is not None:
            err=True
            err_msg.append("The email address you entered has already been used.")
        cur.execute("select * from users where username=%s and status!=%s limit 1;",(username,"unverified"))
        if cur.fetchone() is not None:
            err=True
            err_msg.append("The username you entered has already been used.")
        if err:
            conn.close()
            return dumps({"err_msg":err_msg}),403,{"Content-Type":"application/json"}
        session=auth.generate_salt()
        # drop any stale unverified rows before inserting the fresh one
        cur.execute("delete from users where username=%s or email=%s;",(username,email))
        cur.execute("insert into users(username,salt,password_hash,email,session,challenge) values(%s,%s,%s,%s,%s,%s);",(username,salt,password_hash,email,session,auth.generate_salt()))
        try:
            verify_url="https://%s/auth/verify.html"%(auth.DOMAIN)
            auth.send_email(auth.NOREPLY,email,"Verify your registration at %s"%auth.PROJECTNAME,"<p>Hello, dear %s:</p><p>Your verification code is:</p><p><code>%s</code></p>Please click <a href=\"%s\">here</a> or paste the following url to your web browser to verify your registration:</p><p>%s</p><br/><p>Best regards,</p><p>%s</p>"%(username,session+salt,verify_url,verify_url,auth.PROJECTNAME))
        except auth.smtplib.SMTPRecipientsRefused:
            # nothing was committed, so the inserted row is discarded
            conn.close()
            return dumps({"err_msg":["The email address you entered is invalid."]}),403,{"Content-Type":"application/json"}
        conn.commit()
        conn.close()
        # FIX: previously a (body, headers) 2-tuple; every other JSON response
        # in this view is (body, status, headers) — make the 200 explicit.
        return "{}",200,{"Content-Type":"application/json"}
import pandas as pd
import anndata as ad
from scipy.sparse import csr_matrix
from anndata import AnnData
def read_snv_genotyping(filename: str) -> AnnData:
    """ Read SNV genotyping into an AnnData
    Parameters
    ----------
    filename : str
        SNV genotyping filename
    Returns
    -------
    AnnData
        SNV matrix with alt_counts in X and ref_counts in layers['ref_counts']
    """
    calls = pd.read_csv(filename, dtype={
        'chrom': 'category',
        'ref': 'category',
        'alt': 'category',
        'cell_id': 'category'})
    # dense 0..n-1 column index, one per unique SNV site
    calls['snv_idx'] = calls.groupby(['chrom', 'pos', 'ref', 'alt'], observed=True).ngroup()
    n_cells = calls['cell_id'].cat.categories.size
    n_snvs = calls['snv_idx'].max() + 1
    row_idx = calls['cell_id'].cat.codes
    col_idx = calls['snv_idx'].values

    def _sparse_counts(column):
        # cell x snv sparse matrix built from the given count column
        return csr_matrix(
            (calls[column], (row_idx, col_idx)),
            shape=(n_cells, n_snvs))

    # obs: one row per cell, ordered by categorical code
    obs = calls[['cell_id']].drop_duplicates()
    obs['cell_idx'] = obs['cell_id'].cat.codes
    obs = obs.sort_values('cell_idx').set_index('cell_id').drop('cell_idx', axis=1)

    # var: one row per SNV with a composite chrom_pos_ref_alt identifier
    var = calls[['snv_idx', 'chrom', 'pos', 'ref', 'alt']].drop_duplicates()
    var['snv_id'] = (
        var['chrom'].astype(str) + '_' +
        var['pos'].astype(str) + '_' +
        var['ref'].astype(str) + '_' +
        var['alt'].astype(str))
    var = var.sort_values('snv_idx').set_index('snv_id')
    # var rows must line up with the matrix columns
    assert (var['snv_idx'].values == range(len(var.index))).all()

    return ad.AnnData(
        _sparse_counts('alt_counts'),
        obs,
        var,
        layers={
            'ref_counts': _sparse_counts('ref_counts'),
        }
    )
|
import streamlit as st
from multiapp import MultiApp
import home
from classification import ClassificationMain
from clustering import ClusteringMain
# import your app modules here
# Container that maps page names to render callbacks (see multiapp.MultiApp).
app = MultiApp()
# use the full browser width for all pages
st.set_page_config(layout="wide")
# Add all your application here
app.add_app("Home", home.app)
app.add_app("Predict Loan Risk", ClassificationMain.main)
app.add_app("Bank Marketing Analysis", ClusteringMain.main)
#app.add_app("Model", model.app)
# The main app
app.run()
|
import sqlite3

# One-off schema bootstrap for the local SQLite database.
# CREATE TABLE IF NOT EXISTS makes the script idempotent: previously a
# second run crashed with "table ... already exists".
# All resource tables share the same schema, so create them in a loop.
_RESOURCE_TABLES = ('food', 'hygiene', 'jobs', 'clothes', 'shelter', 'transportation')

conn = sqlite3.connect('database.db')
print("Database opened successfully")
conn.execute('CREATE TABLE IF NOT EXISTS allItems (name TEXT, addr TEXT, city TEXT, descrp TEXT, Type TEXT, aid TEXT)')
print ("Table created Successfully")
for _table in _RESOURCE_TABLES:
    # table name comes from the constant tuple above, never from user input,
    # so interpolating it into the DDL string is safe
    conn.execute(
        'CREATE TABLE IF NOT EXISTS ' + _table +
        ' (fileid INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, addr TEXT, city TEXT, descrp TEXT, Type TEXT, aid TEXT)'
    )
conn.execute('CREATE TABLE IF NOT EXISTS userId (wNum TEXT PRIMARY KEY, name TEXT, username TEXT, password TEXT, clubA INTEGER, clubB INTEGER)')
# explicit commit + close so the schema is durable regardless of the
# sqlite3 module's implicit-transaction behaviour
conn.commit()
conn.close()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 09:15:49 2017
@author: Administrator
"""
from pyquery import PyQuery as pq
import pandas as pd
# Scrape the "new stock investors" statistics table from eastmoney.com.
main_doc=pq('http://data.eastmoney.com/cjsj/yzgptjnew.html')
# data rows of the table carry an explicitly empty class attribute
table_doc=main_doc('tr[class=""]')
text_list=[]
for each in table_doc:
    # one whitespace-separated cell list per table row
    text=pq(each).text().split()
    text_list.append(text)
# column names: date plus this-week/peak figures for new, final, holding
# and trading accounts, and two ratio columns
df_new_investor=pd.DataFrame(text_list,columns=['date','new_thwk','new_peak','final_thwk','final_peak','hold_thwk','hold_peak','trade_thwk','trade_peak','per_thwk','per_peak'])
# NOTE(review): hard-coded absolute Windows path — confirm target location
df_new_investor.to_csv(u'E:\\work\\宏观数据\\新增投资者.csv',index=None)
|
from functions import *
def test_func():
    # func is expected to map 3 -> 4
    expected = 4
    assert func(3) == expected
def test_multiplication():
    # 4 * 8 must give 32
    expected = 32
    assert multiplication(4, 8) == expected
def test_price_calculation():
    """price_calculation maps age brackets to prices; invalid ages yield None."""
    assert price_calculation(19) == 100
    assert price_calculation(20) == 120
    assert price_calculation(40) == 150
    assert price_calculation(65) == 200
    assert price_calculation(68) == 200
    # FIX: compare to None with identity, not equality (PEP 8 / E711)
    assert price_calculation(-1) is None
def test_if_exists():
    """if_exists returns a truthy flag for known values, falsy otherwise."""
    # FIX: assert truthiness directly instead of == True / == False (E712)
    assert if_exists("Kacper")
    assert not if_exists(0)
    assert if_exists(125)
    assert not if_exists(False)
|
import os
import astropy.units as u
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 500)
from colorama import Fore
from tqdm import tqdm
from cta_plots import load_signal_events, load_background_events
from cta_plots.binning import make_default_cta_binning
from cta_plots.sensitivity.plotting import plot_crab_flux, plot_reference, plot_requirement, plot_sensitivity
from cta_plots.sensitivity import calculate_n_off, calculate_n_signal
from cta_plots.sensitivity.optimize import find_best_cuts
from cta_plots.coordinate_utils import calculate_distance_to_true_source_position
from cta_plots.spectrum import CrabSpectrum
from cta_plots.sensitivity import find_relative_sensitivity_poisson, check_validity, check_validity_counts
from scipy.ndimage import gaussian_filter1d
crab = CrabSpectrum()
def optimize_event_selection_fixed_theta(gammas, background, bin_edges, alpha=0.2, n_jobs=4):
    """Find the best prediction cut and multiplicity per energy bin.

    The theta cut is not scanned: it is pinned to the median (50th
    percentile) angular distance of the signal events in each bin.
    Returns a DataFrame with one row per bin plus ``e_min``/``e_max``.
    """
    signal_bins = gammas.groupby(
        pd.cut(gammas.gamma_energy_prediction_mean, bins=bin_edges))
    background_bins = background.groupby(
        pd.cut(background.gamma_energy_prediction_mean, bins=bin_edges))
    rows = []
    for (_, sig), (_, bkg) in tqdm(zip(signal_bins, background_bins), total=len(bin_edges) - 1):
        distance = calculate_distance_to_true_source_position(sig)
        # single fixed theta candidate: the median signal distance in this bin
        fixed_theta = np.array([np.nanpercentile(distance, 50)])
        _, prediction_cut, theta_cut, significance, multiplicity = find_best_cuts(
            fixed_theta, PREDICTION_CUTS, MULTIPLICITIES, sig, bkg, alpha=alpha, n_jobs=n_jobs
        )
        rows.append({
            'prediction_cut': prediction_cut,
            'significance': significance,
            'theta_cut': theta_cut,
            'multiplicity': multiplicity,
        })
    table = pd.DataFrame(rows)
    table['e_min'] = bin_edges[:-1]
    table['e_max'] = bin_edges[1:]
    return table
def optimize_event_selection(gammas, background, bin_edges, alpha=0.2, n_jobs=4):
    """Scan theta, prediction and multiplicity cuts per energy bin.

    Uses the module-level THETA_CUTS / PREDICTION_CUTS / MULTIPLICITIES
    grids and returns a DataFrame with the best cuts per bin plus
    ``e_min``/``e_max`` columns.
    """
    signal_bins = gammas.groupby(
        pd.cut(gammas.gamma_energy_prediction_mean, bins=bin_edges))
    background_bins = background.groupby(
        pd.cut(background.gamma_energy_prediction_mean, bins=bin_edges))
    rows = []
    for (_, sig), (_, bkg) in tqdm(zip(signal_bins, background_bins), total=len(bin_edges) - 1):
        _, prediction_cut, theta_cut, significance, multiplicity = find_best_cuts(
            THETA_CUTS, PREDICTION_CUTS, MULTIPLICITIES, sig, bkg, alpha=alpha, n_jobs=n_jobs
        )
        rows.append({
            'prediction_cut': prediction_cut,
            'significance': significance,
            'theta_cut': theta_cut,
            'multiplicity': multiplicity,
        })
    table = pd.DataFrame(rows)
    table['e_min'] = bin_edges[:-1]
    table['e_max'] = bin_edges[1:]
    return table
def calc_relative_sensitivity(gammas, background, cuts, alpha, sigma=0):
    """Compute the relative sensitivity per energy bin using the given cuts.

    ``cuts`` is the per-bin DataFrame produced by the optimize_* functions;
    its e_min/e_max columns define the binning.  With ``sigma > 0`` the cut
    curves are smoothed (note: ``cuts`` is modified in place).  Returns a
    DataFrame with sensitivity, counts, the applied cuts and a validity flag.
    """
    # reconstruct the bin edges from the cuts table
    bin_edges = list(cuts['e_min']) + [cuts['e_max'].iloc[-1]]
    results = []
    if sigma > 0:
        # smooth the cut curves across energy bins (mutates `cuts`)
        cuts.prediction_cut = gaussian_filter1d(cuts.prediction_cut, sigma=sigma)
        cuts.theta_cut = gaussian_filter1d(cuts.theta_cut, sigma=sigma)
        cuts.multiplicity = gaussian_filter1d(cuts.multiplicity, sigma=sigma)
    groups = pd.cut(gammas.gamma_energy_prediction_mean, bins=bin_edges)
    g = gammas.groupby(groups)
    groups = pd.cut(background.gamma_energy_prediction_mean, bins=bin_edges)
    b = background.groupby(groups)
    # g, b and cuts.iterrows() are aligned per energy bin
    for (_, signal_in_range), (_, background_in_range), (_, r) in tqdm(zip(g, b, cuts.iterrows()), total=len(bin_edges) - 1):
        best_mult = r.multiplicity
        best_prediction_cut = r.prediction_cut
        best_theta_cut = r.theta_cut
        best_significance = r.significance
        # apply prediction-score and multiplicity cuts to both samples
        gammas_gammalike = signal_in_range[
            (signal_in_range.gamma_prediction_mean >= best_prediction_cut)
            &
            (signal_in_range.num_triggered_telescopes >= best_mult)
        ]
        background_gammalike = background_in_range[
            (background_in_range.gamma_prediction_mean >= best_prediction_cut)
            &
            (background_in_range.num_triggered_telescopes >= best_mult)
        ]
        n_signal, n_signal_counts = calculate_n_signal(
            gammas_gammalike, best_theta_cut,
        )
        n_off, n_off_counts, total_bkg_counts = calculate_n_off(
            background_gammalike, best_theta_cut, alpha=alpha
        )
        # print('----------------')
        # valid = check_validity(n_signal_counts, n_off_counts, total_bkg_counts, alpha=alpha, silent=True)
        # print('----------------')
        # a bin is valid only if both weighted and raw counts pass the checks
        valid = check_validity(n_signal, n_off, alpha=alpha, silent=False)
        valid &= check_validity_counts(n_signal_counts, n_off_counts, total_bkg_counts, alpha=alpha, silent=False)
        # print('----------------')
        rs = find_relative_sensitivity_poisson(n_signal, n_off, n_signal_counts, n_off_counts, alpha=alpha)
        m, l, h = rs
        d = {
            'sensitivity': m,
            'sensitivity_low': l,
            'sensitivity_high': h,
            'prediction_cut': best_prediction_cut,
            'significance': best_significance,
            'signal_counts': n_signal_counts,
            'background_counts': n_off_counts,
            'weighted_signal_counts': n_signal,
            'weighted_background_counts': n_off,
            'theta_cut': best_theta_cut,
            'multiplicity': best_mult,
            'total_bkg_counts': total_bkg_counts,
            'valid': valid,
        }
        results.append(d)
    results_df = pd.DataFrame(results)
    results_df['e_min'] = bin_edges[:-1]
    results_df['e_max'] = bin_edges[1:]
    return results_df
# Candidate cut grids scanned by find_best_cuts in each energy bin.
THETA_CUTS = np.arange(0.01, 0.18, 0.01)  # angular distance cuts (presumably degrees — TODO confirm)
PREDICTION_CUTS = np.arange(0.3, 1.05, 0.05)  # gamma prediction-score thresholds
MULTIPLICITIES = np.arange(2, 11)  # minimum number of triggered telescopes
@click.command()
@click.argument('gammas_path', type=click.Path(exists=True))
@click.argument('protons_path', type=click.Path(exists=True))
@click.argument('electrons_path', type=click.Path(exists=True))
@click.option('-o', '--output', type=click.Path(exists=False))
@click.option('-m', '--multiplicity', default=2)
@click.option('-t', '--t_obs', default=50)
@click.option('-c', '--color', default='xkcd:purple')
@click.option('--n_jobs', default=4)
@click.option('--landscape/--no-landscape', default=False)
@click.option('--reference/--no-reference', default=False)
@click.option('--fix_theta/--no-fix_theta', default=False)
@click.option('--correct_bias/--no-correct_bias', default=True)
@click.option('--requirement/--no-requirement', default=False)
@click.option('--flux/--no-flux', default=True)
def main(
    gammas_path,
    protons_path,
    electrons_path,
    output,
    multiplicity,
    t_obs,
    color,
    n_jobs,
    landscape,
    reference,
    fix_theta,
    correct_bias,
    requirement,
    flux,
):
    """Compute and plot the differential sensitivity from simulated gamma,
    proton and electron event lists.

    Optionally corrects the reconstructed energy for its bias, optimizes
    the event-selection cuts per energy bin (theta fixed or scanned), and
    writes the plot plus a CSV / cut-description files when --output is set.
    """
    t_obs *= u.h
    gammas, source_alt, source_az = load_signal_events(gammas_path, assumed_obs_time=t_obs)
    background = load_background_events(
        protons_path, electrons_path, source_alt, source_az, assumed_obs_time=t_obs
    )
    e_min, e_max = 0.02 * u.TeV, 200 * u.TeV
    bin_edges, bin_center, _ = make_default_cta_binning(e_min=e_min, e_max=e_max)
    # smoothing width for the cut curves; 0 disables smoothing
    SIGMA = 0
    if correct_bias:
        from scipy.stats import binned_statistic
        from cta_plots import create_interpolated_function
        # estimate the median relative energy bias per energy bin and divide
        # it out of the reconstructed energy for signal and background alike
        e_reco = gammas.gamma_energy_prediction_mean
        e_true = gammas.mc_energy
        resolution = (e_reco - e_true) / e_true
        median, _, _ = binned_statistic(e_reco, resolution, statistic=np.nanmedian, bins=bin_edges)
        energy_bias = create_interpolated_function(bin_center, median, sigma=SIGMA)
        e_corrected = e_reco / (energy_bias(e_reco) + 1)
        gammas.gamma_energy_prediction_mean = e_corrected
        e_reco = background.gamma_energy_prediction_mean
        e_corrected = e_reco / (energy_bias(e_reco) + 1)
        background.gamma_energy_prediction_mean = e_corrected
    else:
        print(Fore.YELLOW + 'Not correcting for energy bias' + Fore.RESET)
    if fix_theta:
        print('Not optimizing theta!')
        df_cuts = optimize_event_selection_fixed_theta(gammas, background, bin_edges, alpha=0.2, n_jobs=n_jobs)
    else:
        df_cuts = optimize_event_selection(gammas, background, bin_edges, alpha=0.2, n_jobs=n_jobs)
    df_sensitivity = calc_relative_sensitivity(gammas, background, df_cuts, alpha=0.2, sigma=SIGMA)
    print(df_sensitivity)
    if landscape:
        size = plt.gcf().get_size_inches()
        plt.figure(figsize=(8.24, size[0] * 0.9))
    ax = plot_sensitivity(df_sensitivity, bin_edges, bin_center, color=color, lw=2)
    if reference:
        plot_reference(ax)
    if requirement:
        plot_requirement(ax)
    if flux:
        plot_crab_flux(bin_edges, ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim([1e-2, 10 ** (2.5)])
    ax.set_ylim([4e-14, 3E-10])
    ylabel = '$\\text{E}^2 \\frac{\\text{dN}}{\\text{dE}} / \\text{erg}\;\\text{cm}^{-2}\\text{s}^{-1}$'
    ax.set_ylabel(ylabel)
    ax.set_xlabel(r'Estimated Energy / TeV')
    # fix legend handles. The handle for the reference is different form a line2d handle. this makes it consistent.
    from matplotlib.lines import Line2D
    handles = ax.get_legend_handles_labels()[0]
    labels = ax.get_legend_handles_labels()[1]
    handles[2] = Line2D([0], [0], color=handles[2].lines[0].get_color())
    legend = ax.legend(handles, labels, framealpha=0, borderaxespad=0.025)
    # add meta information to legend title
    legend.set_title('CTA Prod3B (Paranal HB9)')
    legend._legend_box.align = "left"
    legend.get_title().set_alpha(0.5)
    # legend.get_title().set_linespacing(1.1)
    plt.tight_layout(pad=0, rect=(0, 0, 1, 1))
    if output:
        # write the CSV, the figure and text files describing the cut grids
        n, _ = os.path.splitext(output)
        print(f"writing csv to {n + '.csv'}")
        df_sensitivity.to_csv(n + '.csv', index=False, na_rep='NaN')
        plt.savefig(output)
        with open(f'{n}_theta_cuts.txt', 'w') as f:
            f.write(cuts_to_latex(THETA_CUTS))
        with open(f'{n}_prediction_cuts.txt', 'w') as f:
            f.write(cuts_to_latex(PREDICTION_CUTS))
        with open(f'{n}_multiplicities.txt', 'w') as f:
            f.write(cuts_to_latex(MULTIPLICITIES, integer=True))
    else:
        plt.show()
def cuts_to_latex(array, integer=False):
    """Format a cut grid as a LaTeX set, e.g. ``\\{ 0.30, 0.35, \\ldots, 1.00 \\}``.

    Parameters
    ----------
    array : sequence
        Cut values; only the first two and the last are shown.
    integer : bool
        Render values verbatim instead of with two decimals.
    """
    # FIX: use raw f-strings — the previous non-raw '\{{' produced an invalid
    # escape sequence (W605, a SyntaxWarning on modern Python). Output is
    # unchanged: '\{ a, b, \ldots, z \} '.
    if integer:
        return rf'\{{ {array[0]}, {array[1]}, \ldots, {array[-1]} \}} '
    else:
        return rf'\{{ {array[0]:.2f}, {array[1]:.2f}, \ldots, {array[-1]:.2f} \}} '


if __name__ == '__main__':
    # pylint: disable=no-value-for-parameter
    main()
|
# Copyright 2020 The Weakly-Supervised Control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generates a dataset of observations from a multiworld environment.
python -m scripts.generate_corrupted_dataset --input gs://weakly-supervised-control/datasets/SawyerPushRandomLightsEnv-v1-n256-hand_x-hand_y-obj_x-obj_y-light.npz --noise 0.1
python -m scripts.generate_corrupted_dataset --input gs://weakly-supervised-control/datasets/SawyerPushRandomLightsEnv-v1-n256-hand_x-hand_y-obj_x-obj_y-light.npz --noise 0.05
python -m scripts.generate_corrupted_dataset --input gs://weakly-supervised-control/datasets/SawyerPush2PucksRandomLightsEnv-v1-n512-hand_x-hand_y-obj1_x-obj1_y-obj2_x-obj2_y-light.npz --noise 0.05
python -m scripts.generate_corrupted_dataset --input gs://weakly-supervised-control/datasets/SawyerPush3PucksRandomLightsEnv-v1-n512-hand_x-hand_y-obj1_x-obj1_y-obj2_x-obj2_y-light.npz --noise 0.05
"""
import os
from typing import Any, Callable, Dict, List, Optional
import click
import gym
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.io import gfile
from weakly_supervised_control.envs import register_all_envs
from weakly_supervised_control.experiment_utils import load_dset
# from weakly_supervised_control.disentanglement.np_data import NpGroundTruthData
@click.command()
@click.option('--input', type=str, help="Input dataset path")
@click.option('--noise', type=float, default=0.05, help="Probability of corrupting a factor label")
@click.option('--num-output', type=int, default=5, help="Number of corrupted datasets to create")
def main(input: str, noise: float, num_output: int):
    """Create ``num_output`` label-corrupted copies of a factor dataset.

    Each copy independently replaces every factor label with a uniform
    random value with probability ``noise``, then uploads the result next
    to the input with a ``-noise{noise}-seed{n}`` suffix.
    """
    dset = load_dset(input)
    for n in range(num_output):
        num_corrupted_labels = 0
        for i in range(dset.size):
            # Corrupt each of the 5 factor labels independently with probability `noise`.
            one_hot = np.random.choice([False, True], size=5, p=[1 - noise, noise])
            for j, x in enumerate(one_hot):
                if x:
                    fake_factor_value = np.random.uniform(0, 1)
                    dset.factors[i, j] = fake_factor_value
                    num_corrupted_labels += 1
        # NOTE(review): counts corrupted *labels* but reports them against
        # dset.size (samples) — the printed ratio can exceed 1.
        print(f'Corrupted {num_corrupted_labels}/{dset.size} labels.')
        # Save to file.
        temp_file = f'/tmp/generate_corrupted_dataset_output-{n}.npz'
        dset.save(temp_file)
        print(f'Saved to: {temp_file}')
        # copy to the final (possibly remote, e.g. gs://) destination
        output_prefix = input.split('.npz')[0] + f"-noise{noise}-seed{n}"
        gfile.copy(temp_file, f'{output_prefix}.npz')
        gfile.remove(temp_file)
        print(f'Saved to: {output_prefix}.npz')


if __name__ == '__main__':
    main()
|
# Sieve of Eratosthenes: collect every prime below LIMIT, then check
# (trivially) that 4 is not among them.
# Fixes over the original: Python-3 print calls, no shadowing of the
# builtin `max`, unused variable `t` removed, dead commented-out code
# dropped, and the inner marking loop uses a C-speed range step.
LIMIT = 1000000  # exclusive upper bound for the sieve

# sieve[i] stays 1 while i is a prime candidate, 0 once marked composite
sieve = [1] * LIMIT
primes = []
c = 2
while c < LIMIT:
    # c is the smallest unmarked number >= 2, hence prime
    primes.append(c)
    # mark every multiple of c (including c itself) as composite
    for i in range(c, LIMIT, c):
        sieve[i] = 0
    # advance to the next unmarked number, the next prime
    while c < LIMIT and sieve[c] == 0:
        c += 1

if 4 in primes:
    print('hi')
else:
    print('no')
|
"""
The ledger_data method retrieves contents of
the specified ledger. You can iterate through
several calls to retrieve the entire contents
of a single ledger version.
`See ledger data <https://xrpl.org/ledger_data.html>`_
"""
from dataclasses import dataclass, field
from typing import Any, Optional, Union
from xrpl.models.requests.request import Request, RequestMethod
from xrpl.models.utils import require_kwargs_on_init
@require_kwargs_on_init
@dataclass(frozen=True)
class LedgerData(Request):
    """
    The ledger_data method retrieves contents of
    the specified ledger. You can iterate through
    several calls to retrieve the entire contents
    of a single ledger version.
    `See ledger data <https://xrpl.org/ledger_data.html>`_
    """
    # Fixed request method; init=False keeps it out of the constructor.
    method: RequestMethod = field(default=RequestMethod.LEDGER_DATA, init=False)
    # Ledger addressed either by its hash ...
    ledger_hash: Optional[str] = None
    # ... or by a ledger index / shortcut string (e.g. "validated").
    ledger_index: Optional[Union[str, int]] = None
    # Per the linked API docs: return ledger objects as binary blobs
    # instead of JSON when True.
    binary: bool = False
    # Server-side page size for a single call.
    limit: Optional[int] = None
    # marker data shape is actually undefined in the spec, up to the
    # implementation of an individual server
    marker: Optional[Any] = None
|
import numpy as np
from .module import Module
class MaxPool2d(Module):
    """2-D max pooling over (N, C, H, W) inputs, implemented in NumPy.

    ``forward`` caches the within-window argmax of every output cell so that
    ``backward`` can route the incoming gradient to exactly the element that
    produced each maximum.
    """
    def __init__(self, kernel_size, stride=(1, 1), padding=(0, 0)):
        # kernel_size / stride / padding are (height, width) pairs
        super(MaxPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def forward(self, inputs):
        """Return the max-pooled tensor of shape (N, C, H_out, W_out)."""
        inputs = self.pad(inputs)
        self.inputs_shape = inputs.shape
        self.batch_size, C, H_in, W_in = inputs.shape
        H_out = (inputs.shape[2] - self.kernel_size[0]) // self.stride[0] + 1
        W_out = (inputs.shape[3] - self.kernel_size[1]) // self.stride[1] + 1
        max_pool = np.empty((self.batch_size, C, H_out, W_out))
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # use an explicit np.int64 for the argmax index cache.
        self.max_flag = np.zeros_like(max_pool, dtype=np.int64)
        for ih in range(H_out):
            begin_h = ih * self.stride[0]
            for iw in range(W_out):
                begin_w = iw * self.stride[1]
                # flatten each (kh, kw) window so argmax yields one flat index
                temp = inputs[:, :, begin_h:(begin_h+self.kernel_size[0]), begin_w:(begin_w+self.kernel_size[1])].reshape(self.batch_size, C, -1)
                max_index = np.argmax(temp, axis=2)
                self.max_flag[:, :, ih, iw] = max_index
                max_val = np.take_along_axis(temp, max_index[:, :, np.newaxis], axis=2)
                max_pool[:, :, ih, iw] = max_val.squeeze()
        return max_pool

    def backward(self, grad=None):
        """Scatter ``grad`` back to the cached argmax positions.

        Returns a gradient of the caller's (unpadded) input shape.
        """
        grad_backward = np.zeros(self.inputs_shape)
        for ih in range(grad.shape[2]):
            begin_h = ih * self.stride[0]
            for iw in range(grad.shape[3]):
                begin_w = iw * self.stride[1]
                max_index = self.max_flag[:, :, ih, iw]
                # one-hot mask selecting the max element of each window
                temp = np.zeros((self.batch_size*grad.shape[1], self.kernel_size[0]*self.kernel_size[1]))
                temp[np.arange(len(temp)), max_index.reshape(-1)] = 1
                temp = temp.reshape(self.batch_size, grad.shape[1], self.kernel_size[0], self.kernel_size[1])
                temp *= grad[:, :, ih, iw][:, :, np.newaxis, np.newaxis]
                grad_backward[:, :, begin_h:(begin_h+self.kernel_size[0]), begin_w:(begin_w+self.kernel_size[1])] += temp
        # strip the zero padding so the gradient matches the original input
        return grad_backward[:, :, self.padding[0]:self.inputs_shape[2]-self.padding[0], self.padding[1]:self.inputs_shape[3]-self.padding[1]]

    def pad(self, inputs):
        """Zero-pad H and W symmetrically by self.padding."""
        padding_width = ((0, 0),
                         (0, 0),
                         (self.padding[0], self.padding[0]),
                         (self.padding[1], self.padding[1]))
        return np.pad(inputs, padding_width, mode='constant')
class AvgPool2d(Module):
    """2-D average pooling over (N, C, H, W) inputs, implemented in NumPy."""
    def __init__(self, kernel_size, stride=(1, 1), padding=(0, 0)):
        # kernel_size / stride / padding are (height, width) pairs
        super(AvgPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # elements per window; averaging is folded in as a pre-division
        self.num = np.prod(kernel_size)

    def forward(self, inputs):
        """Return the average-pooled tensor of shape (N, C, H_out, W_out)."""
        # dividing up front turns each output into a plain window sum below
        inputs = self.pad(inputs) / self.num
        self.inputs_shape = inputs.shape
        H_out = (inputs.shape[2] - self.kernel_size[0]) // self.stride[0] + 1
        W_out = (inputs.shape[3] - self.kernel_size[1]) // self.stride[1] + 1
        avg_pool = np.empty((inputs.shape[0], inputs.shape[1], H_out, W_out))
        for ih in range(H_out):
            begin_h = ih * self.stride[0]
            for iw in range(W_out):
                begin_w = iw * self.stride[1]
                avg_pool[:, :, ih, iw] = np.sum(inputs[:, :, begin_h:begin_h+self.kernel_size[0], begin_w:begin_w+self.kernel_size[1]], axis=(2, 3))
        return avg_pool

    def backward(self, grad=None):
        """Distribute ``grad`` uniformly across each pooling window.

        Returns a gradient of the caller's (unpadded) input shape.
        """
        grad_backward = np.zeros(self.inputs_shape)
        for ih in range(grad.shape[2]):
            begin_h = ih * self.stride[0]
            for iw in range(grad.shape[3]):
                begin_w = iw * self.stride[1]
                grad_backward[:, :, begin_h:begin_h+self.kernel_size[0], begin_w:begin_w+self.kernel_size[1]] += grad[:, :, ih, iw][:, :, np.newaxis, np.newaxis]
        grad_backward /= self.num
        # BUG FIX: the width crop previously ended at inputs_shape[3] instead
        # of inputs_shape[3] - padding[1], returning a too-wide gradient
        # whenever padding[1] > 0 (MaxPool2d.backward crops both sides).
        return grad_backward[:, :, self.padding[0]:self.inputs_shape[2]-self.padding[0], self.padding[1]:self.inputs_shape[3]-self.padding[1]]

    def pad(self, inputs):
        """Zero-pad H and W symmetrically; no-op for (0, 0) padding."""
        if tuple(self.padding) == (0, 0):
            return inputs
        padding_width = ((0, 0),
                         (0, 0),
                         (self.padding[0], self.padding[0]),
                         (self.padding[1], self.padding[1]))
        return np.pad(inputs, padding_width, mode='constant')
if __name__ == '__main__':
    # Manual sanity check of AvgPool2d against torch.nn.AvgPool2d.
    import torch
    from torch import nn
    import time
    x = np.random.randint(0, 10, size=(2, 3, 4, 4))
    # reference implementation
    model = nn.AvgPool2d(kernel_size=(2, 2), stride=(1, 1))
    y = model(torch.as_tensor(x, dtype=torch.float32)).detach().numpy()
    model_ = AvgPool2d(kernel_size=(2, 2), stride=(1, 1))
    y_ = model_.forward(x)
    # feed the forward output back as a dummy gradient to exercise backward
    z_ = model_.backward(y_)
    print(y_[0, 0])
    print(z_[0, 0])
|
from collections import defaultdict, Counter
def load_foods(input_filename):
    """Parse the puzzle input into (ingredients, allergens) set pairs.

    Each line has the form ``ing1 ing2 ... (contains a, b)``; every
    allergen token carries a trailing comma or closing paren, which is
    stripped.  Returns a list with one (set, set) tuple per line.
    """
    parsed = []
    with open(input_filename) as handle:
        for raw_line in handle:
            tokens = raw_line.rstrip("\n").split()
            ingredient_set = set()
            allergen_set = set()
            in_allergens = False
            for token in tokens:
                if token == "(contains":
                    # everything after this marker is an allergen
                    in_allergens = True
                elif in_allergens:
                    # drop the trailing ',' or ')'
                    allergen_set.add(token[:-1])
                else:
                    ingredient_set.add(token)
            parsed.append((ingredient_set, allergen_set))
    return parsed
def get_known_ingredient_allergens(ingredient_allergen):
    """Return allergen -> ingredient for allergens narrowed to one candidate.

    ``ingredient_allergen`` maps allergen -> set of candidate ingredients;
    an allergen is "known" once exactly one candidate remains.
    FIX: the original used defaultdict(list) although values are single
    strings and the default factory was never meaningfully used — a plain
    dict states the intent (and a missing key is now a loud KeyError).
    """
    known_ingredients = {}
    for allergen, ingredients in ingredient_allergen.items():
        if len(ingredients) == 1:
            # the sole remaining candidate is the resolved ingredient
            known_ingredients[allergen] = next(iter(ingredients))
    return known_ingredients
def gather_all_ingredients(foods):
    """Count how many foods each ingredient appears in.

    ``foods`` is a list of (ingredients, allergens) set pairs; returns a
    Counter mapping ingredient -> occurrence count.
    """
    ingredients_counter = Counter()
    for ingredients, _ in foods:
        # idiom: Counter.update adds 1 per element instead of a manual loop
        ingredients_counter.update(ingredients)
    return ingredients_counter
def get_non_allergen_ingredient_count(foods, known_ingredients):
    """Count total appearances of ingredients that carry no known allergen."""
    total = 0
    for ingredients, _ in foods:
        # tally every ingredient in this food that is not a known carrier
        total += sum(1 for ing in ingredients if ing not in known_ingredients)
    return total
if __name__ == "__main__":
    # Deduce which ingredient carries each allergen (allergen-list puzzle).
    foods = load_foods("input_day21.txt")
    ingredient_allergen = {}  # maps from allergen -> possible ingredients
    for food in foods:
        ingredients, allergens = food
        for allergen in allergens:
            # Candidate set = intersection of ingredient lists of all foods
            # that mention this allergen.
            if allergen in ingredient_allergen:
                ingredient_allergen[allergen] = ingredient_allergen[allergen].intersection(ingredients)
            else:
                ingredient_allergen[allergen] = ingredients
    # now iterate on removing known ingredients
    # Fixed-point loop: each pass removes already-identified ingredients from
    # every still-ambiguous candidate set, until all allergens are resolved.
    # NOTE(review): assumes the input guarantees a unique assignment,
    # otherwise this loop never terminates - confirm.
    known_ingredients = get_known_ingredient_allergens(ingredient_allergen)
    while len(known_ingredients.values()) < len(ingredient_allergen):
        for allergen, ingredients in ingredient_allergen.items():
            if len(ingredients) > 1:
                ingredient_allergen[allergen] = ingredients.difference(known_ingredients.values())
        known_ingredients = get_known_ingredient_allergens(ingredient_allergen)
    # Canonical answer: ingredients joined in alphabetical order of allergen.
    sorted_allergens = sorted(known_ingredients.keys())
    canonical_ingredient_list = ",".join([known_ingredients[allergen] for allergen in sorted_allergens])
    print(canonical_ingredient_list)
    # print(get_non_allergen_ingredient_count(foods, known_ingredients.values()))
|
from django.db import models
class State(models.Model):
    """A state record with its full name and abbreviation."""
    # NOTE(review): max_length is not enforced at the database level for
    # TextField; CharField would enforce it - confirm before changing.
    name = models.TextField(max_length=255)
    abbr = models.TextField(max_length=255)

    class Meta:
        db_table = 'states'  # explicit table name instead of the default

    def __unicode__(self):
        # Python-2-style display name (__str__ is the Python 3 equivalent).
        return self.name
|
#Henry Murillo
#11/30/2020
# Entry point: run the Flask application object defined in Project2_Flask.
from Project2_Flask import app
if __name__ == '__main__':
    # Debug mode enables the reloader and interactive traceback page;
    # not suitable for production deployment.
    app.run(debug=True)
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
class nnUNetTrainerCosAnneal(nnUNetTrainer):
    """nnU-Net trainer variant that uses a cosine-annealing LR schedule
    spanning the full training run (T_max = num_epochs)."""

    def configure_optimizers(self):
        # SGD with Nesterov momentum 0.99; the LR decays along a cosine
        # curve from self.initial_lr over self.num_epochs epochs.
        optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
                                    momentum=0.99, nesterov=True)
        lr_scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs)
        return optimizer, lr_scheduler
|
'''
author : teja
date : 10/8/2018
'''
def calculate_handlen(hand):
    """
    Returns the length (number of letters) in the current hand
    """
    # hand maps letter -> count; the hand length is the sum of all counts
    return sum(hand.values())
def main():
    '''
    Read a count N from stdin, then N "<letter> <count>" lines, and print
    the total of all counts via calculate_handlen.
    '''
    n_1 = input()
    adict = {}
    # Fix: the original incremented its loop variable (i_1 += 1) inside the
    # body, which is a no-op because range() reassigns it each iteration.
    for _ in range(int(n_1)):
        data = input()
        l_1 = data.split()
        adict[l_1[0]] = int(l_1[1])
    print(calculate_handlen(adict))


if __name__ == "__main__":
    main()
|
"""
基于Keras的LSTM多变量时间序列预测https://blog.csdn.net/qq_28031525/article/details/79046718
数据文件https://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv
目标:利用前N_in个时刻的pollution信息,预测下N_out个时刻的pollution
笔记:
1.将时序数据转化为有监督数据的方法,这里采用随机分离的方式获取train和test
2.回归问题,loss='mae', optimizer='adam',评估采用RMSE
3.采用tf.keras.models以及fit来训练
4.sklearn.preprocessing.MinMaxScaler的归一化与反归一化
5.LabelEncoder对字符型变量的整数编码
6.DataFrame存储数据的方式,以及命名columns、删除列、提取元素
7.改变epochs和batch_size提升训练效率
8.使用tf.keras.models.save和tf.keras.models.load_model来存贮和读取HDF5格式的网络模型
9.plot(figsize=(5,5)) # 500*500的图,同样适用svg
"""
# region import
import time
start = time.perf_counter()
from datetime import datetime
from math import sqrt
from numpy import concatenate
import matplotlib.pyplot as plt
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
import tensorflow as tf
import os
import numpy as np
# endregion
# Make sure the output and input directories exist before any file I/O.
for file_dir in ("./Output", "./Input"):
    if not os.path.isdir(file_dir):
        os.makedirs(file_dir)
N_in = 24   # number of past time steps used as model input
N_out = 3   # number of future time steps to predict
# region 1.读入数据并获取train和test
# 将时序数据集转化为有监督学习问题
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Columns are named var<j>(t-k) for the n_in input lags and var<j>(t),
    var<j>(t+k) for the n_out output steps. Rows containing NaN introduced
    by the shifting are dropped when dropnan is True.
    """
    num_vars = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    shifted, col_names = [], []
    # input columns: t-n_in ... t-1
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        col_names.extend('var%d(t-%d)' % (v + 1, lag) for v in range(num_vars))
    # output columns: t ... t+n_out-1
    for step in range(n_out):
        shifted.append(frame.shift(-step))
        if step == 0:
            col_names.extend('var%d(t)' % (v + 1) for v in range(num_vars))
        else:
            col_names.extend('var%d(t+%d)' % (v + 1, step) for v in range(num_vars))
    result = concat(shifted, axis=1)
    result.columns = col_names
    if dropnan:
        result.dropna(inplace=True)
    return result
def get_train_and_test_data():
    """Load the pollution series, normalise it, frame it as supervised data
    and split into train/test sets.

    Returns (train_X, train_y, test_X, test_y); the X arrays are reshaped to
    [samples, 1, features] for the LSTM. Uses the module-level `scaler`.
    """
    # Read the raw CSV and merge year/month/day/hour into one datetime index.
    parse = lambda x: datetime.strptime(x, '%Y %m %d %H')
    dataset = read_csv('./Input/PRSA_data_2010.1.1-2014.12.31.csv', parse_dates=[['year', 'month', 'day', 'hour']],
                       index_col=0, date_parser=parse)
    dataset.drop('No', axis=1, inplace=True)
    dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
    dataset.index.name = 'date'
    dataset.drop(['dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain'], axis=1, inplace=True)  # keep only the pollution column
    dataset['pollution'].fillna(0, inplace=True)
    dataset = dataset[24:]  # skip the first 24 hourly rows
    dataset.to_csv('./Input/pollution.csv')
    # Normalise to [0, 1] with the shared scaler.
    values = dataset.values
    values = values.astype('float32')
    scaled = scaler.fit_transform(values)
    # Plotting helper (disabled)
    # groups = [0]  # column indices to plot
    # i = 1
    # plt.figure(figsize=(10, 10))
    # for group in groups:
    #     plt.subplot(len(groups), 1, i)
    #     plt.plot(values[:, group])
    #     plt.title(dataset.columns[group])
    #     i += 1
    # Convert the series into a supervised-learning frame.
    reframed = series_to_supervised(scaled, N_in, N_out)
    # [Variant 1] first n_train_hours rows are train, the rest are test.
    # NOTE(review): everything computed here is immediately overwritten by
    # variant 2 below - dead code kept for reference.
    values = reframed.values
    n_train_hours = 365 * 24 * 3
    train = values[:n_train_hours, :]
    test = values[n_train_hours:, :]
    train_X, train_y = train[:, :-N_out], train[:, -N_out:]
    test_X, test_y = test[:, :-N_out], test[:, -N_out:]
    train_X = train_X.reshape(train_X.shape[0], 1, train_X.shape[1])
    test_X = test_X.reshape(test_X.shape[0], 1, test_X.shape[1])
    # [Variant 2] sample n_train_hours random rows as train, the rest as test.
    # NOTE(review): randint may repeat indices, so train can hold duplicates.
    values = reframed.values
    n_train_hours = 365 * 24 * 3
    ind_train = np.random.randint(0, len(values), n_train_hours)
    ind_test = np.delete(np.arange(len(values)), ind_train)
    train = values[ind_train]
    test = values[ind_test]
    train_X, train_y = train[:, :-N_out], train[:, -N_out:]
    test_X, test_y = test[:, :-N_out], test[:, -N_out:]
    train_X = train_X.reshape(train_X.shape[0], 1, train_X.shape[1])
    test_X = test_X.reshape(test_X.shape[0], 1, test_X.shape[1])
    return train_X, train_y, test_X, test_y
# endregion
scaler = MinMaxScaler(feature_range=(0, 1))  # shared scaler for normalisation and inverse-normalisation
train_X, train_y, test_X, test_y = get_train_and_test_data()
# region 2. Build the model and fit it on train/test
def disign_fit():
    """Build, train and save the LSTM model and its loss-history plot.

    NOTE(review): the name looks like a typo of 'design_fit'; kept as-is to
    avoid breaking the call below and any external callers.
    """
    # design network
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    # model.add(tf.keras.layers.Dense(10))  # an extra dense layer could be added here
    model.add(tf.keras.layers.Dense(N_out))
    model.compile(loss='mae', optimizer='adam')
    history = model.fit(train_X, train_y, epochs=300, batch_size=10000, validation_data=(test_X, test_y), verbose=2,
                        shuffle=False)
    # Plot train/validation loss curves, then persist the plot and the model.
    plt.figure(figsize=(7, 7))
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    plt.savefig('./Output/history.svg')
    model.save('./Output/my_model.h5')
# endregion
disign_fit()
# region 3. Evaluate the model
def eval(train_X, train_y, test_X, test_y):
    """Load the saved model, report test RMSE and plot predictions vs. truth.

    NOTE(review): this shadows the builtin eval(); consider renaming.
    """
    model = tf.keras.models.load_model('./Output/my_model.h5')  # reload the trained model from disk
    # Predictions and their inverse-scaled values
    yhat = model.predict(test_X)
    inv_yhat = scaler.inverse_transform(yhat)
    # Ground truth and its inverse-scaled values
    test_y = test_y.reshape(test_y.shape[0], N_out)
    inv_y = scaler.inverse_transform(test_y)
    # Root-mean-square error on the original scale
    rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
    print('test RMSE: %.3f' % rmse)
    # Compare prediction vs. truth on randomly sampled rows
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    ind = np.random.randint(0, len(test_y), 100)  # 100 random rows for the line plot
    plt.plot(inv_yhat[ind].flatten(), label='yhat')
    plt.plot(inv_y[ind].flatten(), label='y')
    plt.legend()
    plt.subplot(1, 2, 2)
    ind = np.random.randint(0, len(test_y), 200)  # 200 random rows for the histogram
    plt.hist(inv_yhat[ind].flatten(), bins=40, label='yhat')
    plt.hist(inv_y[ind].flatten(), bins=40, label='y')
    plt.legend()
    plt.savefig('./Output/y_vs_yhat.svg')
# endregion
eval(train_X, train_y, test_X, test_y)
plt.show()
end = time.perf_counter()
# Elapsed wall-clock time formatted as H:M:S.s
print("\nRunning time %d:%d:%.1f" % ((end - start) // 3600, ((end - start) % 3600) // 60, ((end - start) % 3600) % 60))
print("=============== Run to end successfully! ===============")
# region
# endregion
|
import networkx as nx
from file_operations import load_pickle, save_pickle
from settings import GRAPH_GML_FILENAME, LANGUAGE_MAP_FILENAME, GRAPH_LANGUAGE_GML_FILENAME
class LanguageGraphCreator:
    """Builds a copy of the full GML graph restricted to nodes tagged with a
    given language, then writes it out as GML and pickle."""

    def __init__(self, data_dir: str, language: str):
        # Load the full graph and the node -> metadata map from data_dir.
        self.data_dir = data_dir
        self.graph = nx.read_gml(self.data_dir + '/' + GRAPH_GML_FILENAME)
        self.language_map = load_pickle(self.data_dir + '/' + LANGUAGE_MAP_FILENAME)
        self.language = language

    def _filter_out_language_nodes(self):
        # Nodes whose metadata lists self.language among their 'languages'.
        filtered = filter(lambda x: self.language in self.language_map[x]['languages'], self.language_map)
        return set(filtered)

    def create_language_restricted_graph(self):
        """Drop non-matching nodes in place, then write GML + pickle outputs."""
        matching_nodes = self._filter_out_language_nodes()
        nodes_to_remove = []
        for node in self.graph.nodes():
            if node not in matching_nodes:
                nodes_to_remove.append(node)
        self.graph.remove_nodes_from(nodes_to_remove)
        # The output filename template is parameterised with the language.
        nx.write_gml(self.graph, self.data_dir + '/' + GRAPH_LANGUAGE_GML_FILENAME.format(self.language))
        save_pickle(self.graph, self.data_dir + '/' + GRAPH_LANGUAGE_GML_FILENAME.format(self.language) + '.pickle')
        print("Number of nodes:", self.graph.number_of_nodes())
        print("Number of edges:", self.graph.number_of_edges())
|
import setup_path
import airsim

import sys
import time

# Demo: take off, fly a rectangular path over the Neighborhood environment,
# and log timestamped positions to a file to check run-to-run reproducibility.
print("""This script is designed to fly on the streets of the Neighborhood environment
and assumes the unreal position of the drone is [160, -1500, 120].""")

client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)

print("arming the drone...")
client.armDisarm(True)

# Take off only if currently landed; otherwise just hold the hover.
state = client.getMultirotorState()
if state.landed_state == airsim.LandedState.Landed:
    print("taking off...")
    client.takeoffAsync().join()
else:
    client.hoverAsync().join()

time.sleep(1)

state = client.getMultirotorState()
if state.landed_state == airsim.LandedState.Landed:
    print("take off failed...")
    sys.exit(1)

# AirSim uses NED coordinates so negative axis is up.
# z of -7 is 7 meters above the original launch point.
z = -7
print("make sure we are hovering at 7 meters...")
client.moveToZAsync(z, 1).join()

# see https://github.com/Microsoft/AirSim/wiki/moveOnPath-demo
# this method is async and we are not waiting for the result since we are passing timeout_sec=0.
print("flying on path...")
result = client.moveOnPathAsync([airsim.Vector3r(125,0,z),
                                airsim.Vector3r(125,-130,z),
                                airsim.Vector3r(0,-130,z),
                                airsim.Vector3r(0,0,z)],
                        12, 120,
                        airsim.DrivetrainType.ForwardOnly, airsim.YawMode(False,0), 20, 1)

# Checking reproducabiltiy
# Sample the estimated position roughly every >= 20 ms for 10 s and append
# it to the log. Timestamps appear to be nanoseconds (divided by 1e9) -
# TODO confirm against the AirSim API.
# NOTE(review): f is opened but never closed.
f = open("Path_dataXYZ.txt","a+")
f.write("\n\nRunning multirotor: \n")
state = client.getMultirotorState()
startTime = state.timestamp
prevTime = startTime
print("Checking reproducability:")
while (state.timestamp - startTime)/1000000000 <=10 :
    state = client.getMultirotorState()
    currentTime = state.timestamp
    if (currentTime-prevTime)/1000000000 >= 0.02:
        pos = state.kinematics_estimated.position
        f.write("%f %f %f %f\n" %((currentTime-prevTime)/1000000000,
        pos.x_val, pos.y_val, pos.z_val))
        prevTime = currentTime
print("Ended checking")

# drone will over-shoot so we bring it back to the start point before landing.
#client.moveToPositionAsync(0,0,z,1).join()
print("landing...")
#client.landAsync().join()
print("disarming...")
#client.armDisarm(False)
client.enableApiControl(False)
print("done.")
|
from datetime import datetime, timezone, timedelta
def now():
    """Return the current time in the UTC-3 zone, formatted as
    "dd/mm/YYYY - HH:MM:SS"."""
    utc_minus_three = timezone(timedelta(hours=-3))
    current = datetime.now().astimezone(utc_minus_three)
    return current.strftime("%d/%m/%Y - %H:%M:%S")
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
from scrapy.exceptions import DropItem
from pymysql.err import IntegrityError
class MoocPipeline:
    """Default no-op pipeline: passes every item through unchanged."""
    def process_item(self, item, spider):
        return item
class Mysql_Pipeline(object):
    """Persists scraped items into MySQL; connection settings come from the
    Scrapy project settings (MYSQL_HOST / MYSQL_DBNAME / MYSQL_USER /
    MYSQL_PASSWD)."""

    def __init__(self, mysql_host, mysql_db, mysql_user, mysql_passwd):
        # Open the MySQL connection and create a cursor.
        self.connect = pymysql.connect(
            host=mysql_host,
            db=mysql_db,
            user=mysql_user,
            passwd=mysql_passwd,
        )
        self.cursor = self.connect.cursor()

    @classmethod
    def from_crawler(cls, crawler):
        # Read the MySQL configuration from the settings.
        return cls(
            mysql_host=crawler.settings.get('MYSQL_HOST'),
            mysql_db=crawler.settings.get('MYSQL_DBNAME'),
            mysql_user=crawler.settings.get('MYSQL_USER'),
            mysql_passwd=crawler.settings.get('MYSQL_PASSWD')
        )

    def process_item(self, item, spider):
        # Insert the item into the table named by item.table. Values are
        # parameterised; the table and column names are interpolated into
        # the SQL string (they come from the item class, not user input).
        data = dict(item)
        keys = ', '.join(data.keys())
        values = ', '.join(['%s'] * len(data))
        sql = 'insert into %s (%s) values(%s)' % (item.table, keys, values)
        try:
            if self.cursor.execute(sql, tuple(data.values())):
                self.connect.commit()
        except DropItem as e:
            # NOTE(review): cursor.execute does not raise DropItem, so this
            # branch looks unreachable - confirm the intended exception type.
            print(DropItem('ERROR!'))
            self.connect.rollback()
        except IntegrityError:
            # Duplicate key: this course URL is already stored.
            print("该课程courseURL已存在")
        return item

    def close_spider(self, spider):
        # Close the cursor and the connection when the spider finishes.
        self.cursor.close()
        self.connect.close()
|
class Prediction:
    """Rule-based classifier: maps input tokens through `mapper` and scores
    each category in `rareWords` by how many mapped tokens it contains."""

    def __init__(self, mapper=None, rareWords=None):
        self.rareWords = rareWords
        self.mapper = mapper

    def predictOne(self, data, confidence=False):
        """Classify one whitespace-separated string.

        Returns the winning category name, or - when `confidence` is true -
        a dict with the prediction and a confidence score in [0, 1].
        """
        # WordSwapperWithMapper: swap each token for its mapped value,
        # dropping tokens that are unknown or map to None.
        candidates = (self.mapper[token][1] if token in self.mapper else None
                      for token in data.split(' '))
        mapped = {value for value in candidates if value is not None}
        # The key type of the reference bag decides whether values must be
        # stringified before membership tests (hoisted out of the loops).
        keys_are_strings = type(list(self.rareWords['BILL'].keys())[0]) == str
        scores = {}
        for category, bag in self.rareWords.items():
            hits = 0
            for value in mapped:
                probe = str(value) if keys_are_strings else value
                if probe in bag:
                    hits += 1
            scores[category] = hits
        ranked = sorted(scores.items(), key=lambda pair: pair[1], reverse=True)
        if not confidence:
            return ranked[0][0]
        best = ranked[0]
        # Confidence compares the winner against the mean of the runners-up.
        runner_up_counts = [count for _, count in ranked[1:]]
        average = sum(runner_up_counts) / len(runner_up_counts)
        return {'prediction': best[0], 'confidence': round(best[1] / (best[1] + average), 2)}

    def predictMany(self, dataList, confidence=False):
        """Classify each string in dataList; returns a list of results."""
        return [self.predictOne(entry, confidence) for entry in dataList]
|
from django.conf.urls import url
from trndy_cleaners.accounts.views import (
ClientDetailView ,
EmployeeDetailView ,
ClientListView ,
EmployeeListView
)
app_name = "accounts"
# List and numeric-id detail routes for clients and employees.
urlpatterns = [
    url(r'^clients/$', ClientListView.as_view()),
    url(r'^clients/(?P<user_id>[\d]+)$', ClientDetailView.as_view()),
    url(r'^employees/$', EmployeeListView.as_view()),
    url(r'^employees/(?P<user_id>[\d]+)$', EmployeeDetailView.as_view()),
]
import torch
import torch.nn as nn
import torch.nn.functional as F
class ModGRU(nn.Module):
    """GRU cell unrolled over time, with the input combined with a
    missing-value indicator M and a per-step update mask.

    Fix: the original crashed with a TypeError when `mask` was omitted
    (mask[i] on None); it now defaults to "every step observed".
    """

    def __init__(self, input_dim, h_dim):
        super(ModGRU, self).__init__()
        self.cell = nn.GRUCell(input_dim, h_dim)
        # fuses the raw input with its indicator tensor
        self.comb = nn.Linear(input_dim * 2, input_dim)
        # projects [input, hidden] to the per-step output
        self.com2 = nn.Linear(input_dim + h_dim, h_dim)

    def forward(self, X, hx, M=None, mask=None):
        """
        X: [seqlen, batchsize, dim]; hx: [batchsize, h_dim].
        M (same shape as X) defaults to all-ones; mask [seqlen, batchsize]
        defaults to all-ones (update the hidden state at every step).
        Returns (output [seqlen, batchsize, h_dim], final hidden state).
        """
        if M is None:
            M = torch.ones_like(X)
        if mask is None:
            mask = torch.ones(X.size(0), X.size(1), device=X.device, dtype=X.dtype)
        output = []
        for i in range(X.size(0)):
            comb = self.comb(torch.cat([X[i], M[i]], dim=-1))
            _hx = self.cell(comb, hx)
            # masked steps keep the previous hidden state unchanged
            mask_i = mask[i].unsqueeze(-1).expand(hx.size())
            hx = _hx * mask_i + hx * (1 - mask_i)
            out = self.com2(torch.cat([comb, hx], dim=-1))
            output.append(out)
        output = torch.stack(output)
        return output, hx
class GRUv10(nn.Module):
    """Two stacked ModGRU layers with layer norm and a small 2-output head.

    When "src" is enabled (base_list is None or contains "src"), the last 6
    input channels are treated as static per-sequence info used only to
    initialise the hidden state.
    """

    def __init__(self, args, base_list):
        super(GRUv10, self).__init__()
        i_dim = args.i_dim
        self.h_dim = args.h_dim
        #self.use_src = (args.base_list is None or "src" in args.base_list)
        # NOTE(review): only the second element of base_list is consulted -
        # presumably it holds the feature-name list; confirm against callers.
        if base_list is not None: base_list = base_list[1]
        self.use_src = (base_list is None or "src" in base_list)
        if self.use_src:
            # hidden state is seeded from the 6 static channels
            self.i2h = nn.Linear(6, self.h_dim)
            self.rnn1 = ModGRU(i_dim-6, self.h_dim)
        else:
            self.i2h = nn.Linear(i_dim, self.h_dim)
            self.rnn1 = ModGRU(i_dim, self.h_dim)
        self.rnn2 = ModGRU(self.h_dim, self.h_dim)
        self.lnorm1 = nn.LayerNorm(self.h_dim)
        self.lnorm2 = nn.LayerNorm(self.h_dim)
        self.lnorm3 = nn.LayerNorm(8)
        self.nn1 = nn.Linear(self.h_dim, 8)
        self.nn2 = nn.Linear(8, 2)

    def forward(self, X, M, mask):
        """
        Input: [seqlen, batch, channel]
        Output: [seqlen, batch, channel]
        """
        bs = X.size(1)
        if self.use_src:
            # static info = last 6 channels at the first time step
            patient_info = X[:, :, -6:][0]
            Xin = X[:, :, :-6]
            Min = M[:, :, :-6]
        else:
            patient_info = X[0]
            Xin, Min = X, M
        h0 = self.i2h(patient_info)
        hx, _ = self.rnn1(Xin, h0, Min, mask)
        hx = self.lnorm1(hx)
        # second layer re-uses h0 as its initial hidden state, without M
        hx, _ = self.rnn2(hx, h0, None, mask)
        hx = self.lnorm2(hx)
        hx = F.relu(self.nn1(hx))
        hx = self.lnorm3(hx)
        hx = self.nn2(hx)
        return hx
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-2021 Pycord Development
Copyright (c) 2021-present Texus
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import Union, Dict, Callable
__all__ = (
"Permission",
"has_role",
"has_any_role",
"is_user",
"is_owner",
"permission",
)
class Permission:
    """One application-command permission entry, serialisable for the
    Discord API via :meth:`to_dict`.

    .. versionadded:: 2.0

    Attributes
    -----------
    id: Union[:class:`int`, :class:`str`]
        Id (or value used to resolve the id) of the user or role the
        permission is tied to.
    type: :class:`int`
        The permission type (1 is used for roles, 2 for users by the
        decorators in this module).
    permission: :class:`bool`
        The permission's value.
    guild_id: :class:`int`
        Id of the guild that the permission may be tied to.
    """

    def __init__(
        self,
        id: Union[int, str],
        type: int,
        permission: bool = True,
        guild_id: int = None,
    ):
        self.id = id
        self.type = type
        self.permission = permission
        self.guild_id = guild_id

    def to_dict(self) -> Dict[str, Union[int, bool]]:
        """Serialise to the API payload shape (guild_id is excluded)."""
        payload = {
            "id": self.id,
            "type": self.type,
            "permission": self.permission,
        }
        return payload
def permission(
    role_id: int = None,
    user_id: int = None,
    permission: bool = True,
    guild_id: int = None,
):
    """Decorator assigning an application-command permission to a specific
    role or user by id.

    .. versionadded:: 2.0

    Parameters
    -----------
    role_id: :class:`int`
        Id of the role the permission is tied to (use either this or
        user_id, not both).
    user_id: :class:`int`
        Id of the user the permission is tied to.
    permission: :class:`bool`
        The permission's value.
    guild_id: :class:`int`
        Id of the guild that the permission may be tied to.

    Raises
    -------
    ValueError
        If neither role_id nor user_id is given.
    """
    def decorator(func: Callable):
        # `x is not None` is the idiomatic comparison (was `not x is None`).
        if role_id is not None:
            app_cmd_perm = Permission(role_id, 1, permission, guild_id)
        elif user_id is not None:
            app_cmd_perm = Permission(user_id, 2, permission, guild_id)
        else:
            raise ValueError("role_id or user_id must be specified!")
        # Create __app_cmd_perms__ on first use, then append.
        if not hasattr(func, "__app_cmd_perms__"):
            func.__app_cmd_perms__ = []
        func.__app_cmd_perms__.append(app_cmd_perm)
        return func

    return decorator
def has_role(item: Union[int, str], guild_id: int = None):
    """Decorator restricting an application command to a single role.

    .. versionadded:: 2.0

    Parameters
    -----------
    item: Union[:class:`int`, :class:`str`]
        Id or name of the role the permission is tied to.
    guild_id: :class:`int`
        Id of the guild that the permission may be tied to.
    """
    def decorator(func: Callable):
        perms = getattr(func, "__app_cmd_perms__", None)
        if perms is None:
            # first permission decorator applied to this callable
            perms = []
            func.__app_cmd_perms__ = perms
        # Role names are converted to ids later in register_commands if needed.
        perms.append(Permission(item, 1, True, guild_id))
        return func

    return decorator
def has_any_role(*items: Union[int, str], guild_id: int = None):
    """Decorator restricting an application command to several roles; the
    command runs if the invoker has **any** of them.

    .. versionadded:: 2.0

    Parameters
    -----------
    *items: Union[:class:`int`, :class:`str`]
        Ids or names of the roles the permission is tied to.
    guild_id: :class:`int`
        Id of the guild that the permission may be tied to.
    """
    def decorator(func: Callable):
        perms = getattr(func, "__app_cmd_perms__", None)
        if perms is None:
            # first permission decorator applied to this callable
            perms = []
            func.__app_cmd_perms__ = perms
        # One permission entry per role; names are converted to ids later
        # in register_commands if needed.
        for role in items:
            perms.append(Permission(role, 1, True, guild_id))
        return func

    return decorator
def is_user(user: int, guild_id: int = None):
    """Decorator restricting an application command to a single user id.

    .. versionadded:: 2.0

    Parameters
    -----------
    user: :class:`int`
        Id of the user the permission is tied to.
    guild_id: :class:`int`
        Id of the guild that the permission may be tied to.
    """
    def decorator(func: Callable):
        perms = getattr(func, "__app_cmd_perms__", None)
        if perms is None:
            # first permission decorator applied to this callable
            perms = []
            func.__app_cmd_perms__ = perms
        perms.append(Permission(user, 2, True, guild_id))
        return func

    return decorator
def is_owner(guild_id: int = None):
    """Decorator limiting an application command to the bot's owner.

    .. versionadded:: 2.0

    Parameters
    -----------
    guild_id: :class:`int`
        Id of the guild that the permission may be tied to.
    """
    def decorator(func: Callable):
        perms = getattr(func, "__app_cmd_perms__", None)
        if perms is None:
            # first permission decorator applied to this callable
            perms = []
            func.__app_cmd_perms__ = perms
        # The sentinel id "owner" is resolved later in register_commands.
        perms.append(Permission("owner", 2, True, guild_id))
        return func

    return decorator
|
def is_well_formed1(nstr):
    """Return True iff all brackets in nstr are balanced and properly nested.

    Non-bracket characters are ignored. Fix: the original returned True even
    when openers were left unclosed (e.g. "((") because it never checked
    that the stack was empty at the end.
    """
    left_chars = []
    for ch in nstr:
        if ch == "(":
            left_chars.append(")")
        if ch == "{":
            left_chars.append("}")
        if ch == "[":
            left_chars.append("]")
        if ch == ")":
            if not left_chars or ch != left_chars.pop():
                return False
        if ch == "}":
            if not left_chars or ch != left_chars.pop():
                return False
        if ch == "]":
            if not left_chars or ch != left_chars.pop():
                return False
    # every opener must have been matched
    return not left_chars
def is_well_formed2(nstr):
    """Table-driven variant: True iff all brackets are balanced and nested.

    Fix: like is_well_formed1, the original returned True even when openers
    were left unclosed; we now require an empty stack at the end.
    """
    left_chars = []
    LOOKUP = {'{': '}', '[': ']', '(': ')'}
    for ch in nstr:
        if ch in LOOKUP:
            left_chars.append(ch)
        elif not left_chars or (ch != LOOKUP[left_chars.pop()]):
            return False
    # every opener must have been matched
    return not left_chars
# Quick demo: the first string has one unmatched '(' (5 opens vs 4 closes),
# the second string is fully balanced.
nstr = "(){}(((({[[[]]]})){{[]}}[])"
print(is_well_formed2(nstr))
print(is_well_formed2("[[[]{}]([])]"))
|
# A Viginere Cipher takes an input of text
# and a key that is used to encrypt it
# using the Vigenere table.
# Refer to the image
import argparse
import os
# CLI: -e encrypts, -d decrypts (encrypt takes priority in main), -k required.
parser = argparse.ArgumentParser(
    description = "Encrypt/Decrypt using Vigenere Cipher",
    usage = os.path.basename(__file__) + " -e <text> -k <key>",
    epilog = "Author: Billy Khaled, billy.codes@gmail.com"
)
parser.add_argument("-e", "--encrypt", metavar='', help="Encrypt Text")
parser.add_argument("-d", "--decrypt", metavar='', help="Decrypt Text")
parser.add_argument("-k", "--key", metavar='', required=True, help="Cipher Key")
# parse_known_args so stray extra arguments don't abort the script
args, unknown = parser.parse_known_args()
class Vigenere:
    """Classic Vigenere cipher over the uppercase alphabet A-Z.

    encrypt/decrypt pair characters of the text with characters of the key;
    zip() truncates to the shorter of the two strings.
    """

    def encrypt(self, text, key):
        """Return the ciphertext: c = (p + k) mod 26, over A-Z."""
        encoded = []
        for x, y in zip(text, key):
            shift = ord(x) - 65  # 65 is ascii for first letter 'A'
            ciphered = shift + ord(y)
            if ciphered > 90:
                # wrap past 'Z' back into the A-Z range
                ciphered -= 26
            encoded.append(chr(ciphered))
        return ''.join(encoded)

    def decrypt(self, encrypted, key):
        """Return the plaintext: p = (c - k) mod 26, over A-Z.

        Fix: the original counting loop was off by one (decrypt('A', 'A')
        returned 'B') and hung forever when a ciphertext character was 'Z'
        because it reset the counter at ord('Z') inside the loop. This
        closed-form version exactly inverts encrypt().
        """
        decoded = []
        for x, y in zip(encrypted, key):
            deciphered = 65 + (ord(x) - ord(y)) % 26
            decoded.append(chr(deciphered))
        return ''.join(decoded)
if __name__ == '__main__':
    en = Vigenere()
    # Encryption takes priority when both -e and -d are supplied.
    if(args.encrypt):
        print("Plain Text: ", args.encrypt)
        print("Key: ", args.key)
        # the cipher operates on A-Z only, so normalise case first
        print("Ciphered: ", en.encrypt(args.encrypt.upper(), args.key.upper()))
    elif(args.decrypt):
        print("Ciphered Text: ", args.decrypt)
        print("Key: ", args.key)
        print("Deciphered Text: ", en.decrypt(args.decrypt.upper(), args.key.upper()))
|
import asyncio
import datetime
import unittest
class Test(unittest.TestCase):
    """Unit tests for the command_echo parser."""

    def test_command_echo(self):
        raw_lines = ['echo Hello, Python!', 'echo Hello, World', 'echo very very very very long line']
        expected = ['Hello, Python!', 'Hello, World', 'very very very very long line']
        for raw, want in zip(raw_lines, expected):
            _, data = command_echo(raw)
            assert data == want
def command_echo(data):
    """Parse an 'echo' command line.

    Returns (is_echo, payload): is_echo is True when data starts with
    'echo'; payload is data with a single leading 'echo ' prefix removed.

    Fix: the original used str.replace, which removed *every* 'echo '
    occurrence, so 'echo echo hi' wrongly became 'hi' instead of 'echo hi'.
    """
    command_echo_ = data.startswith('echo')
    if command_echo_ and data.startswith('echo '):
        # strip only the one leading prefix
        data = data[len('echo '):]
    return command_echo_, data
async def handle_echo(reader, writer):
    """Per-connection handler supporting 'echo', 'calendar' and 'stop'."""
    addr = writer.get_extra_info('peername')
    print(f"Connected the client{addr!r}")
    while True:
        data = await reader.readline()
        # print(data.decode())
        data = data.decode()
        # Unknown command: reply with a usage message.
        if not data.startswith('echo'):
            if not data.startswith('calendar'):
                if not data.startswith('stop'):
                    info = str('Commands: *echo* message - returns the sent message,'
                               ' *calendar* - returns the current time in the '
                               'format dd.mm.YYYY HH:MM, *stop* - closes the server\n')
                    writer.write(info.encode())
                    writer.write(''.encode())  # NOTE(review): writes zero bytes - looks like a no-op
                    await writer.drain()
        # echo: send the payload back verbatim
        command_echo_, data = command_echo(data)
        if command_echo_:
            writer.write(data.encode())
            await writer.drain()
        # calendar: reply with the current local time
        if data.startswith('calendar'):
            writer.write((datetime.datetime.today().strftime('%d.%m.%Y %H:%M') + '\n').encode())
            await writer.drain()
        # stop: echo the command, close this connection and leave the loop
        if data.startswith('stop'):
            writer.write(data.encode())
            addr = writer.get_extra_info('peername')
            print(f"Disconnected the client{addr!r}")
            writer.close()
            break
async def main():
    """Start the echo server on 127.0.0.1:8889 and serve until cancelled."""
    server = await asyncio.start_server(handle_echo, '127.0.0.1', 8889)
    addr = server.sockets[0].getsockname()
    print(f'Serving on {addr}')
    async with server:
        await server.serve_forever()
if __name__ == '__main__':
    asyncio.run(main())
|
# Generated by Django 2.0.7 on 2018-08-27 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Location.foursquare_id; the one-off default backfills existing rows."""

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='location',
            name='foursquare_id',
            field=models.CharField(default='4b05886cf964a520fcc422e3', max_length=250),
            # default is used only during this migration, not kept on the model
            preserve_default=False,
        ),
    ]
|
class Solution:
    def solveSudoku(self, board: List[List[str]]) -> None:
        """
        Do not return anything, modify board in-place instead.

        Fix: the original was truncated/broken (unclosed parenthesis,
        reference to an undefined `d`, and a call to a missing build_dict).
        Replaced with a working constraint-set + backtracking solver.
        Empty cells are '.', filled cells are '1'-'9'.
        """
        if not board or not board[0]:
            return
        # candidate bookkeeping: digits already used per row/column/3x3 box
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        empties = []
        for i in range(9):
            for j in range(9):
                ch = board[i][j]
                if ch.isdigit():
                    rows[i].add(ch)
                    cols[j].add(ch)
                    boxes[(i // 3) * 3 + j // 3].add(ch)
                else:
                    empties.append((i, j))

        def backtrack(k: int) -> bool:
            # all empty cells filled consistently
            if k == len(empties):
                return True
            i, j = empties[k]
            b = (i // 3) * 3 + j // 3
            for ch in "123456789":
                if ch in rows[i] or ch in cols[j] or ch in boxes[b]:
                    continue
                board[i][j] = ch
                rows[i].add(ch); cols[j].add(ch); boxes[b].add(ch)
                if backtrack(k + 1):
                    return True
                # undo and try the next digit
                board[i][j] = '.'
                rows[i].remove(ch); cols[j].remove(ch); boxes[b].remove(ch)
            return False

        backtrack(0)
|
import glob  # file path globbing
import pandas as pd  # renders the HTML table
import datetime as dt  # timestamps for pandas / file names
import os  # opening the generated file
import last_zero

# Collect the contents of every in/*.txt and out/*.txt file, render them as
# an HTML table with pandas and save a timestamped backup page.
files_path = "in/*.txt"  # input files pattern
files_path2 = "out/*.txt"  # output files pattern
files_in = glob.glob(files_path)
files_out = glob.glob(files_path2)
in_stuff =[]  # contents of the input files
out_stuff = []  # contents of the output files
for name in files_in:  # iterate over the input files
    with open(name) as f:
        word = f.readlines()
        word = [x.strip() for x in word]  # strip needed, lines otherwise keep '\n'
        in_stuff.append(word)  # collect the data into in_stuff
        f.close()  # NOTE(review): redundant - the with-block already closes f
for name in files_out:
    with open(name) as f:
        word = f.readlines()  # collect the data into out_stuff
        word = [x.strip() for x in word]  # strip needed, lines otherwise keep '\n'
        out_stuff.append(word)
        f.close()  # NOTE(review): redundant - the with-block already closes f
s="," #separator
# flatten [[file1 lines], [file2 lines], ...] into ['file1', 'file2', ...]
flat_in_list = []
for list in in_stuff:  # NOTE(review): 'list' shadows the builtin
    flat_in_list.append(s.join(list))
flat_out_list = []  # same flattening for the output files
for list in out_stuff:
    flat_out_list.append(s.join(list))
data_frame = pd.DataFrame({  # pandas table rendered on the page
    'input' : flat_in_list,
    'output' : flat_out_list,
    'input created': last_zero.time_in,
    'output created': dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
})
pd.set_option('colheader_justify', 'center')  # centre the column headers
# HTML skeleton: page title, css stylesheet link and {data}/{table} placeholders
html_string ='''
<html>
<head><title> LastNonZeroDigit {data} </title></head>
<link rel="stylesheet" type="text/css" href="df_style.css"/>
<body>
<h1> BACKUP {data} </h1>
{table}
</body>
</html>.
'''
# timestamped backup file name
file_name= dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+".html"
# OUTPUT HTML
with open('backup\\'+file_name, "w") as f:  # write the backup html file
    f.write(html_string.format(data= dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), table=data_frame.to_html(classes='mystyle')))
os.startfile('backup\\'+file_name)  # open the generated file (Windows-only API)
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.exceptions import UserError, ValidationError
class PurchaseOrder(models.Model):
    """purchase.order extension linking each PO to an approved encumberance
    order and validating encumberance/budget availability."""
    _inherit = "purchase.order"

    # Only encumberance orders in the 'approve' state may be selected.
    encumb_id = fields.Many2one('encumb.order', string='Encumberance Order', copy=False,domain=[('state','=','approve')])

    @api.multi
    @api.constrains('encumb_id', 'order_line','order_line.account_analytic_id', 'order_line.price_subtotal_signed')
    def _check_encumb_id(self):
        """
        This constrain to ensure the encumberance is sufficient for all the purchase lines amount
        :return:
        """
        # Only enforced when the config flag is enabled.
        if self.env['ir.config_parameter'].sudo().get_param('budget_management.allow_encumber_and_funds_check'):
            for rec in self:
                if rec.encumb_id:
                    diff_currency = False
                    if rec.currency_id.id != self.env.user.company_id.currency_id.id:
                        diff_currency = True
                    for line in rec.order_line:
                        # Remaining encumbered amount available for this
                        # line's analytic account on the linked order.
                        encumb_line_ids = self.env['encumb.order.line'].search([('order_id','=',rec.encumb_id.id),('analytic_account_id','=',line.account_analytic_id.id)])
                        result = 0
                        for enc_line in encumb_line_ids:
                            result += enc_line.remaining_amount
                        amount = line.price_total
                        if diff_currency:
                            # Convert the line total to company currency at the approval date.
                            amount = rec.currency_id.with_context(date=rec.date_approve).compute(line.price_total,self.env.user.company_id.currency_id)
                        if result < amount:
                            raise ValidationError(
                                _('There is no enough encumbered amount for the line with analytic account %s') % (
                                    line.account_analytic_id.name))

    @api.multi
    def button_confirm(self):
        """Require - or auto-create - an encumberance order on confirmation."""
        res = super(PurchaseOrder, self).button_confirm()
        for rec in self:
            if not rec.encumb_id and not self.env['ir.config_parameter'].sudo().get_param('budget_management.module_bills_encumberance'):
                raise ValidationError(_('Please add Encumberance Order before confirming this purchase order'))
            if not rec.encumb_id and self.env['ir.config_parameter'].sudo().get_param('budget_management.module_bills_encumberance'):
                # Auto-create the encumberance order from this PO.
                rec.encumb_id = self.env['encumb.order'].create_po_encumb(rec)
        return res

    @api.multi
    def action_view_invoice(self):
        """Propagate the encumberance order as a default on created bills."""
        res = super(PurchaseOrder, self).action_view_invoice()
        res['context']['default_encumb_id'] = self.encumb_id.id
        return res

    @api.multi
    def po_fund_check(self):
        """Validate that every analytic account on this PO has enough
        remaining budget; raises ValidationError otherwise."""
        for rec in self:
            date_order = rec.date_order or fields.Date.today()
            # Line totals grouped per analytic account for this order.
            self.env.cr.execute("""
                SELECT SUM(price_total) , account_analytic_id
                FROM purchase_order_line WHERE order_id = %s GROUP BY account_analytic_id """, (rec.id,))
            result = self.env.cr.fetchall() or False
            for (amount, analytic_account_id) in result:
                # Remaining budget on lines whose period covers the order date.
                self.env.cr.execute("""
                    SELECT SUM(remain_amount)
                    FROM crossovered_budget_lines WHERE date_from < %s and date_to > %s and analytic_account_id = %s GROUP BY analytic_account_id """,
                                    (date_order, date_order, analytic_account_id))
                result = self.env.cr.fetchone()[0] or False
                # NOTE(review): result is False when no budget line matches;
                # `False < amount` relies on py2-style comparison - confirm.
                if result < amount:
                    analytic_account = self.env['account.analytic.account'].browse(analytic_account_id)
                    raise ValidationError(
                        _('No enough fund for lines with analytic account %s ') % (analytic_account.name))
        return True
class PurchaseOrderLine(models.Model):
    """Extension of purchase.order.line that makes the analytic account
    mandatory so encumberance/fund checks always have an account to match."""
    _inherit = 'purchase.order.line'

    # Required so _check_encumb_id / po_fund_check can group by analytic account.
    account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account', required=True)
|
from django.test.client import RequestFactory
from nose import tools
from .cases import ContestTestCase
class TestContestUrls(ContestTestCase):
    """URL-routing smoke tests for contest detail, wizard and result pages."""

    def setUp(self):
        super(TestContestUrls, self).setUp()
        self.rf = RequestFactory()
        self.url = self.contest.get_absolute_url()
        self.contest2_url = self.contest_question_less.get_absolute_url()

    def _assert_get_status(self, path, expected_status):
        # GET the path and compare the HTTP status with the expectation.
        response = self.client.get(path)
        tools.assert_equals(expected_status, response.status_code)

    def test_unregistered_custom_url(self):
        self._assert_get_status(self.url + 'results/', 404)
        self._assert_get_status(self.url + 'question/a/', 404)

    def test_custom_url_for_questions_wizard(self):
        for suffix, status in (
            ('question/1/', 200),
            ('question/2/', 302),
            ('question/3/', 302),
            ('question/0/', 302),
            ('question/4/', 302),
        ):
            self._assert_get_status(self.url + suffix, status)

    def test_custom_url_for_questions_wizard_contest_without_questions(self):
        for suffix, status in (
            ('question/1/', 404),
            ('question/0/', 302),
            ('question/4/', 302),
        ):
            self._assert_get_status(self.contest2_url + suffix, status)

    def test_detail_url_for_contest(self):
        self._assert_get_status(self.url, 200)

    def test_detail_url_for_contest_without_questions(self):
        self._assert_get_status(self.contest2_url, 200)

    def test_rest_custom_url_for_contest(self):
        self._assert_get_status(self.url + 'contestant/', 302)
        self._assert_get_status(self.url + 'result/', 200)
        self._assert_get_status(self.url + 'conditions/', 200)
|
import json
def get_file_data(file_name):
    """Parse a JSON file and return its deserialized content.

    Uses a context manager so the handle is closed even when json.load
    raises (the original leaked the descriptor on a parse error).
    """
    with open(file_name) as file:
        return json.load(file)
# Raw encyclopedia exports produced by the legacy database dump.
colors_data = get_file_data("colors.json")
families_data = get_file_data("families.json")
genders_data = get_file_data("genders.json")
life_forms_data = get_file_data("lifeForms.json")
photos_data = get_file_data("photos.json")
places_data = get_file_data("places.json")
species_data = get_file_data("species.json")
species_places_data = get_file_data("speciesPlaces.json")

# Accumulators filled while iterating over every species below.
species = []
all_colors = []
all_life_forms = []
all_places = []
# Per-language i18n dictionaries keyed by the generated species key.
en = {}
es = {}
def find_by_id(array, item_id):
    """Return the first dict in *array* whose 'id' equals *item_id*, else None."""
    return next((entry for entry in array if entry['id'] == item_id), None)
def find_for_species(array, item_id):
    """Return every dict whose 'especie_id' matches *item_id* (compared as str)."""
    wanted = str(item_id)
    return [entry for entry in array if entry["especie_id"] == wanted]
def get_photos(item_id):
    """Return the full-size image require()-paths for all photos of one species."""
    photos_raw = find_for_species(photos_data, item_id)
    photos = []
    for photo in photos_raw:
        # BUG FIX: a trailing comma here used to wrap each path in a 1-tuple.
        path = get_image_path('full_size', photo["path"])
        photos.append(path)
    return photos
def get_places(item_id):
    """Return place names for a species; records new names in all_places."""
    names = []
    for link in find_for_species(species_places_data, item_id):
        place = find_by_id(places_data, int(link["place_id"]))
        name = place["nombre_norm"]
        if name not in all_places:
            all_places.append(name)
        names.append(name)
    return names
def get_colors(item):
    """Return one or two color names for a species; records new names in all_colors.

    The second color is optional and encoded as the string 'null' when absent.
    """
    names = []
    for key in ("color1_id", "color2_id"):
        raw_id = item[key]
        if key == "color2_id" and raw_id == 'null':
            continue
        name = find_by_id(colors_data, int(raw_id))["nombre"]
        if name not in all_colors:
            all_colors.append(name)
        names.append(name)
    return names
def get_life_forms(item):
    """Return one or two life-form names for a species; records new names globally.

    The second life form is optional and encoded as the string 'null' when absent.
    """
    names = []
    for key in ("forma_vida1_id", "forma_vida2_id"):
        raw_id = item[key]
        if key == "forma_vida2_id" and raw_id == 'null':
            continue
        name = find_by_id(life_forms_data, int(raw_id))["nombre"]
        if name not in all_life_forms:
            all_life_forms.append(name)
        names.append(name)
    return names
def get_image_path(folder, image):
    """Build the require() expression the JS bundle uses to load an asset image.

    BUG FIX: the original return statement ended with a trailing comma, so the
    function returned a 1-tuple instead of the string.
    """
    return f'require("../assets/images/encyclopedia/{folder}/{image}")'
# Build the denormalized species records plus the per-language i18n entries.
for index, element in enumerate(species_data):
    genero_id = int(element["genero_id"])
    genero = find_by_id(genders_data, genero_id)
    familia_id = int(genero["familia_id"])
    familia = find_by_id(families_data, familia_id)
    name = element["nombre"]
    gender_name = genero["nombre"]
    family_name = familia["nombre"]
    # Stable lowercase key "family_gender_species" used to join i18n data.
    key = f'{family_name}_{gender_name}_{name}'
    key = key.lower()
    specie = {
        "id": index,
        "tropicosId": element["id_tropicos"],
        "key": key,
        "name": name,
        "gender": gender_name,
        "family": family_name,
        "colors": get_colors(element),
        "lifeForms": get_life_forms(element),
        "thumbnail": get_image_path('encyclopedia_thumbnails', element["thumbnail"]),
        "detailImage": get_image_path('detail_images', element["thumbnail"]),
        "photos": get_photos(element["id"]),
        "places": get_places(element["id"])
    }
    species.append(specie)
    # Spanish translation entry for this species.
    i18n_es = {
        "description": element["descripcion_es"],
        "distribution": element["distribucion_es"]
    }
    es[key] = i18n_es
    # English translation entry for this species.
    i18n_en = {
        "description": element["descripcion_en"],
        "distribution": element["distribucion_en"]
    }
    en[key] = i18n_en
# Sort the accumulated vocabularies so the generated JSON output is stable.
all_colors.sort()
all_life_forms.sort()
all_places.sort()

# Mirror every vocabulary into both locale dictionaries; the source name is
# used as placeholder in both languages.
es["colors"] = {}
en["colors"] = {}
for color in all_colors:
    es["colors"][color] = color
    en["colors"][color] = color
es["life_forms"] = {}
en["life_forms"] = {}
for life_form in all_life_forms:
    es["life_forms"][life_form] = life_form
    en["life_forms"][life_form] = life_form
es["places"] = {}
en["places"] = {}
for place in all_places:
    es["places"][place] = place
    # BUG FIX: places were previously written into en["colors"], leaving
    # en["places"] empty and polluting the English colors table.
    en["places"][place] = place

with open('out/en.json', 'w') as outfile:
    json.dump(en, outfile)
with open('out/es.json', 'w') as outfile:
    json.dump(es, outfile)
with open('out/species.json', 'w') as outfile:
    json.dump(species, outfile)
|
import random
def quicksort(list):
    """Return a sorted copy of *list* using randomized quicksort.

    BUG FIX: the original always partitioned list[1:] regardless of which
    index the random pivot came from, losing element 0 whenever the pivot
    index was non-zero and duplicating the pivot value.

    (The parameter keeps its original name `list` for call compatibility,
    even though it shadows the builtin.)
    """
    if len(list) < 2:
        return list
    pivot_index = random.randint(0, len(list) - 1)
    pivot = list[pivot_index]
    # Partition everything except the chosen pivot occurrence.
    rest = list[:pivot_index] + list[pivot_index + 1:]
    smaller = [x for x in rest if x < pivot]
    bigger = [x for x in rest if x >= pivot]
    return quicksort(smaller) + [pivot] + quicksort(bigger)
# Demo run on a small sample containing a duplicate value.
list1 = [4, 12, 33, 1, 55, 2, 7, 34, 4]
sorted_sample = quicksort(list1)
print(sorted_sample)
# NOTE: auto-generated IronPython stub for the Revit API NumberingSchema
# class; method bodies are intentionally empty (`pass`).
class NumberingSchema(Element,IDisposable):
    """ A class to support assigning numbers to elements of a particular kind for the purpose of tagging and scheduling them. """
    def AppendSequence(self,fromPartition,toPartition):
        """
        AppendSequence(self: NumberingSchema,fromPartition: str,toPartition: str)

        Appends all elements of one numbering sequence to the end of another sequence.

        fromPartition: Name of the partition that determines which numbering sequence to append.
            The sequence must exist already,otherwise an exception will be thrown.

        toPartition: Name of a partition into which the source sequence is going to be appended.
            The sequence must exist already,otherwise an exception will be thrown.
        """
        pass
    def AssignElementsToSequence(self,elementIds,partitionName):
        """ AssignElementsToSequence(self: NumberingSchema,elementIds: ISet[ElementId],partitionName: str) """
        pass
    def ChangeNumber(self,partition,fromNumber,toNumber):
        """
        ChangeNumber(self: NumberingSchema,partition: str,fromNumber: int,toNumber: int) -> IList[ElementId]

        Replaces an existing number with a new one (that does not exist yet).

        partition: Name of the partition that identifies the sequence containing the number to be
            changed.

        fromNumber: Number to be changed; there must already be an element with that number in the
            sequence.

        toNumber: Number to change to; no element must have this number yet in the sequence.

        Returns: A collection of elements affected by the change of the number
        """
        pass
    def Dispose(self):
        """ Dispose(self: Element,A_0: bool) """
        pass
    def getBoundingBox(self,*args):
        """ getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
        pass
    @staticmethod
    def GetMinimumNumberOfDigits(document):
        """
        GetMinimumNumberOfDigits(document: Document) -> int

        Returns the minimum number of digits to be used for formatting the Number
        parameter of all enumerable elements of the given document.

        document: The document this value is going to be applied to.

        Returns: The current number of formatting digits
        """
        pass
    @staticmethod
    def GetNumberingSchema(document,schemaType):
        """
        GetNumberingSchema(document: Document,schemaType: NumberingSchemaType) -> NumberingSchema

        Returns an instance of the specified Numbering Schema in the given document.

        document: A document to get the numbering schema from.

        schemaType: The type of a built-in schema to get.

        Returns: Instance of the specified schema.
        """
        pass
    def GetNumberingSequences(self):
        """
        GetNumberingSequences(self: NumberingSchema) -> IList[str]

        Returns all numbering sequences within this numbering schema.

        Returns: A collection of partition names of all numbering sequences currently present in
            this schema.
        """
        pass
    def GetNumbers(self,partition):
        """
        GetNumbers(self: NumberingSchema,partition: str) -> IList[IntegerRange]

        Returns all numbers currently used in the given numbering sequence

        partition: Name of the partition that identifies the sequence. The sequence must exist.

        Returns: A collection of integer ranges
        """
        pass
    @staticmethod
    def GetSchemasInDocument(document):
        """
        GetSchemasInDocument(document: Document) -> ISet[ElementId]

        Returns a set of Ids of all Numbering Schema elements for a given document.

        document: A document to get numbering schema from.

        Returns: Ids of NumberingSchema elements. An empty set if no schemas are found in the
            given document.
        """
        pass
    @staticmethod
    def IsValidPartitionName(name,message):
        """
        IsValidPartitionName(name: str) -> (bool,str)

        Tests if the given string can be used as a name for a numbering partition.

        name: A name to validate.

        Returns: Returns True if the name can be used; or False if the string contains invalid
            characters.
        """
        pass
    def MergeSequences(self,sourcePartitions,newPartition):
        """ MergeSequences(self: NumberingSchema,sourcePartitions: IList[str],newPartition: str) """
        pass
    def MoveSequence(self,fromPartition,newPartition):
        """
        MoveSequence(self: NumberingSchema,fromPartition: str,newPartition: str)

        Moves all elements of a numbering sequence from one partition to another.

        fromPartition: Name of the partition that determines which numbering sequence to move.
            The sequence must exist already,otherwise an exception will be thrown.

        newPartition: Name of a partition into which the source sequence is going to be moved.
            The schema must not have a sequence for this partition yet (i.e. the schema
            does not have an element that was assigned to such a partition.) Leading
            and trailing white space is ignored in the given string and will be removed
            automatically.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: Element,disposing: bool) """
        pass
    def RemoveGaps(self,partition):
        """
        RemoveGaps(self: NumberingSchema,partition: str)

        Removes gaps,if any,in a numbering sequence

        partition: Name of the partition that identifies the sequence. The sequence must exist.
        """
        pass
    def setElementType(self,*args):
        """ setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
        pass
    @staticmethod
    def SetMinimumNumberOfDigits(document,value):
        """
        SetMinimumNumberOfDigits(document: Document,value: int)

        Sets a new value for the minimum number of digits to be used for formatting
        the Number parameter of all numbered elements of the given document.

        document: The document in which the new value will be in applied.

        value: New value for the minimum number of digits.
        """
        pass
    def ShiftNumbers(self,partition,firstNumber):
        """
        ShiftNumbers(self: NumberingSchema,partition: str,firstNumber: int)

        Shifts all numbers in the sequence so the starting number has the given value.

        partition: Name of the partition that identifies the sequence. The sequence must exist.

        firstNumber: Value for the new first (lowest) number of the sequence.
        """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    NumberingParameterId=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Id of the parameter that stores values of the numbers on enumerated elements.

    Get: NumberingParameterId(self: NumberingSchema) -> ElementId
    """

    SchemaType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Identifies the kind of elements/objects this numbering schema is used for.

    Get: SchemaType(self: NumberingSchema) -> NumberingSchemaType
    """

    # Largest value accepted by ShiftNumbers/ChangeNumber per the API stub.
    MaximumStartingNumber=1073741822
|
# Use the Flask-SQLAlchemy bindings when the `web` package is importable,
# otherwise fall back to plain SQLAlchemy so the models work standalone.
# BUG FIX: `import importlib` does not reliably expose `importlib.util`;
# the submodule must be imported explicitly.
import importlib.util

if importlib.util.find_spec("web"):
    print("import flask_sqlalchemy for models")
    from web import db
    Base = db.Model
    orm = db
else:
    print("import sqlalchemy for models")
    import sqlalchemy as db
    import sqlalchemy.orm as orm
    from sqlalchemy.ext.declarative import declarative_base
    Base = declarative_base()

from datetime import datetime
class User(Base):
    """Account record: unique username plus (hashed?) password.

    NOTE(review): __tablename__ is commented out; Flask-SQLAlchemy derives it
    automatically, but plain declarative_base (the standalone branch) requires
    it — confirm the non-Flask path actually works.
    """
    # __tablename__ = 'user'
    # __table_args__ = {'mysql_collate': 'utf8_general_ci'}
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    def __repr__(self):
        return '<User %r:%r>' % (self.id, self.username)
class Post(Base):
    """Blog-style post authored by a User; `author.posts` backref is lazy."""
    id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    author = orm.relationship('User', backref=orm.backref('posts', lazy=True))
    title = db.Column(db.Text, nullable=False)
    body = db.Column(db.Text, nullable=False)
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    def __repr__(self):
        return '<Post %r:%r>' % (self.id, self.title)
class Task(Base):
    """A file-conversion job: source file, target extension, status and timing."""
    id = db.Column(db.Integer, primary_key=True)
    owner_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    owner = orm.relationship('User', backref=orm.backref('tasks', lazy=True))
    src_file_id = db.Column(db.Integer, db.ForeignKey('file.id'), nullable=False)
    # Target extension the source file should be converted to.
    dst_ext = db.Column(db.String(16), nullable=False)
    status = db.Column(db.String(64), nullable=False)
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    started = db.Column(db.DateTime, nullable=True)
    # NOTE(review): unit of elapsed_time (seconds? ms?) is not established here.
    elapsed_time = db.Column(db.Integer, nullable=True)
    # Result file; nullable until the conversion finishes.
    dst_file_id = db.Column(db.Integer, db.ForeignKey('file.id'), nullable=True)
    ended = db.Column(db.DateTime, nullable=True)

    def __repr__(self):
        return '<Task %r:%r>' % (self.id, self.status)
class File(Base):
    """Stored file metadata: display name, extension, size and on-disk path."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(1024), nullable=False)
    ext = db.Column(db.String(255), nullable=False)
    # NOTE(review): size unit (bytes?) is not established by this code.
    size = db.Column(db.Integer, nullable=True)
    path = db.Column(db.String(2048), nullable=False)
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    def __repr__(self):
        return '<File %r:%r>' % (self.id, self.name)
|
import os
def get_temp():
    """Read the DHT11 temperature by running the privileged helper binary.

    The sensor's character device cannot be read directly by this process,
    so we shell out to the helper via sudo and return its raw stdout string.
    (Earlier experiments reading /dev/DHT11_Device directly were removed.)
    """
    return os.popen("sudo /home/pi/ArturBot/bot3/temp").read()
if __name__ == '__main__':
    # Manual smoke test: print one raw sensor reading.
    print(get_temp())
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from lesson3.db.dbhelper import DBHelper
class Lesson3Pipeline(object):
    """Default pass-through pipeline; hands every item to the next stage unchanged."""

    def process_item(self, item, spider):
        # No transformation is applied at this stage.
        return item
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
class ImagePipeline(ImagesPipeline):
    """Downloads item images, naming each file after the last URL segment."""

    def file_path(self, request, response=None, info=None):
        # Use the final path component of the URL as the stored file name.
        return request.url.split('/')[-1]

    def item_completed(self, results, item, info):
        # Keep only the download results that succeeded.
        succeeded = [data['path'] for status, data in results if status]
        if not succeeded:
            raise DropItem('Image Downloaded Failed')
        return item

    def get_media_requests(self, item, info):
        # One download request per item, taken from its 'url' field.
        yield Request(item['url'])
class MysqlPipeline():
    """Persists scraped forum items into the MySQL `forum` table."""

    def open_spider(self, spider):
        """Open the database connection when the spider starts."""
        self.db = pymysql.connect("localhost","test","123qwe","tot" )
        self.cursor = self.db.cursor()

    def close_spider(self, spider):
        """Close the connection when the spider finishes."""
        self.db.close()

    def process_item(self, item, spider):
        """Insert one item; roll back on failure and pass the item downstream.

        SECURITY FIX: values are now passed as parameters to cursor.execute()
        instead of being %-interpolated into the SQL string, which was
        injection-prone and broke on values containing quotes.
        """
        values = (
            item['trade_id'],
            item['sold_num'],
            item['post_time'],
            item['area'],
            item['username'],
            item['userid'],
            item['reg_time'],
            item['title'],
            item['price'],
            item['content'],
        )
        sql = ("insert into forum(trade_id, sold_num, "
               "post_time, area, username, userid, reg_time, title, price, content) "
               "values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        try:
            self.cursor.execute(sql, values)
            self.db.commit()
            print('save to mysql!')
        except Exception:
            # Narrowed from a bare except; still best-effort so scraping continues.
            self.db.rollback()
            print('Failed to save!')
        # Scrapy contract: return the item so later pipelines receive it.
        return item
class topicpipeline():
    """Appends every item field to a flat text file as "key ==> value" lines."""

    def open_spider(self, spider):
        """Open (append mode) the dump file when the spider starts."""
        self.file = open('item', 'a')

    def close_spider(self, spider):
        """Close the dump file when the spider finishes."""
        self.file.close()

    def process_item(self, item, spider):
        """Write each field and pass the item downstream.

        BUG FIX: the original returned None, which drops the item for any
        pipeline stage configured after this one.
        """
        for i in item:
            self.file.write(i + ' ==> ' + item[i] + '\r\n')
        return item
from django.contrib import admin
from .models import Category,Image
# Expose the gallery models in the Django admin with default ModelAdmin options.
admin.site.register(Category)
admin.site.register(Image)
# Register your models here.
|
#! /usr/bin/env python
import os
import sys
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
stack = sys.argv[1]
first_bs_sec, last_bs_sec = section_range_lookup[stack]
dm = DataManager(stack=stack, labeling_dir='/home/yuncong/csd395/CSHL_data_labelings_losslessAlignCropped')
#########################
username = 'yuncong'
f = open(os.path.join(dm.root_labelings_dir, stack + '_' + username + '_latestAnnotationFilenames.txt'), 'w')
for sec in range(first_bs_sec, last_bs_sec + 1):
dm.set_slice(sec)
ret = dm.load_review_result_path(username, 'latest', suffix='consolidated')
if ret is not None:
fn = ret[0]
print fn
f.write(fn + '\n')
f.close()
#########################
username = 'autoAnnotate'
f = open(os.path.join(dm.root_labelings_dir, stack + '_' + username + '_latestAnnotationFilenames.txt'), 'w')
for sec in range(first_bs_sec, last_bs_sec + 1):
dm.set_slice(sec)
ret = dm.load_review_result_path(username, 'latest', suffix='consolidated')
if ret is not None:
fn = ret[0]
print fn
f.write(fn + '\n')
f.close() |
import os
# import nose
# from nose.tools import *
import logging
import shutil
LD = logging.Logger('root')
LD.setLevel(logging.INFO)
LD.addHandler(logging.StreamHandler())
class TestPlawLen(object):
    """Checks the scraped public-law count against the expected total."""

    @classmethod
    def setup_class(cls):
        num = 0
        with open("data/plaws.csv", 'rb') as source:
            # num ends up as the zero-based index of the last row, i.e. the
            # row count minus one — presumably compensating for a header
            # line; TODO confirm against the CSV layout.
            for num, _ in enumerate(source):
                pass
        # FIX: removed the redundant source.close() inside the with-block;
        # the context manager already closes the file.
        cls.lenplaw = num
        msg = "# plaws found: %s"%cls.lenplaw
        LD.info(msg)
        msg = "# plaws expected: 10861"
        LD.info(msg)

    def test_plawlen(self):
        # NOTE(review): the message says "within 20" but the tolerance is 2.
        assert abs(self.lenplaw - 10861) < 2, "Downloaded files not within 20 of sampled public laws"
        msg = "negligable diff of: %s"%abs(self.lenplaw - 10861)
        LD.info(msg)
class TestLen(object):
    """Checks the number of downloaded JSON bill files against the sampled law list."""

    @classmethod
    def setup_class(cls):
        # Every *.json file anywhere under the current directory counts as
        # one processed bill.
        cls.files = [os.path.join(paths[0], j) for paths in os.walk('.') \
            for j in paths[2] if j.endswith("json")]
        cls.lenfiles = len(cls.files)
        num = 0
        with open("data/plaws.csv", 'rb') as source:
            # As in TestPlawLen: num is the zero-based index of the last row.
            for num, _ in enumerate(source):
                pass
        # FIX: removed the redundant source.close() inside the with-block.
        cls.lenplaw = num
        msg = "# plaws selected: %s"%cls.lenplaw
        LD.info(msg)
        msg = "# of bills processed %s"%cls.lenfiles
        LD.info(msg)

    def test_close(self):
        # NOTE(review): the message says "within 2" but the tolerance is 3.
        assert abs(self.lenfiles-self.lenplaw) < 3, "Downloaded files not within 2 of sampled public laws"
|
import numpy as np
# Zadanie 1 - rozklad LU macierzy
def gauss(a, level=0):
    """In-place Gaussian elimination from column *level* onwards.

    After the call, a holds the U factor on and above the diagonal and the
    L multipliers (without the unit diagonal) below it. No pivoting is done,
    so a zero on the diagonal raises ZeroDivisionError.
    """
    n = a.shape[0]
    # Iterative form of the original tail recursion over elimination levels.
    for col in range(level, n - 1):
        for row in range(col + 1, n):
            a[row][col] /= a[col][col]
            for k in range(col + 1, n):
                a[row][k] = a[row][k] - a[col][k] * a[row][col]
def lu(a):
    """Return (L, U) with unit-diagonal L such that L @ U reproduces *a*.

    Works on a float32 working copy (as the original did via astype('f'))
    and returns float64 factor matrices.
    """
    work = a.astype('f')
    gauss(work)
    # Promote to float64 to mirror the original np.zeros-based factor dtype.
    work = work.astype(float)
    l = np.tril(work, -1) + np.eye(a.shape[0])
    u = np.triu(work)
    return l, u
|
if __name__ == '__main__':
    # Code Jam "Revenge of the Pancakes": the minimum number of flips equals
    # the number of side changes along the stack, plus one more flip when the
    # bottom-most run ends blank-side-up ('-').
    with open('input.txt', 'r') as f, open('output.txt', 'w') as o:
        cases = f.read().splitlines()[1:]
        for case_no, pancakes in enumerate(cases, start=1):
            flips = 0
            previous = pancakes[0]
            for side in pancakes:
                if side != previous:
                    previous = side
                    flips += 1
            if previous == '-':
                flips += 1
            o.write('Case #{}: {}\n'.format(case_no, flips))
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for displaying GradingSurveyGroups and records.
"""
from google.appengine.api import taskqueue
from soc.views import forms
from soc.views.helper import lists
from soc.views.helper import url_patterns
from soc.views.helper.access_checker import isSet
from soc.views.template import Template
from soc.modules.gsoc.logic import grading_record
from soc.modules.gsoc.models.grading_record import GSoCGradingRecord
from soc.modules.gsoc.views import forms as gsoc_forms
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import url_patterns as gsoc_url_patterns
from soc.modules.gsoc.views.helper.url_patterns import url
class GradingRecordsOverview(RequestHandler):
    """View to display all GradingRecords for a single group."""

    def djangoURLPatterns(self):
        """Returns the URL pattern served by this handler."""
        return [
            url(r'grading_records/overview/%s$' % url_patterns.ID,
                self, name='gsoc_grading_record_overview'),
        ]

    def checkAccess(self):
        """Only program hosts may view grading records."""
        self.mutator.surveyGroupFromKwargs()
        self.check.isHost()

    def templatePath(self):
        return 'v2/modules/gsoc/grading_record/overview.html'

    def context(self):
        """Template context: page title plus the records list template."""
        return {
            'page_name': 'Evaluation Group Overview',
            'record_list': GradingRecordsList(self.request, self.data)
        }

    def jsonContext(self):
        """Handler for JSON requests."""
        idx = lists.getListIndex(self.request)
        if idx == 0:
            return GradingRecordsList(self.request, self.data).listContent().content()
        else:
            # BUG FIX: the super call's result was previously discarded, so
            # any non-zero list index silently answered with None.
            return super(GradingRecordsOverview, self).jsonContext()

    def post(self):
        """Handles the POST request from the list and starts the appropriate task."""
        post_dict = self.data.POST
        if post_dict['button_id'] == 'update_records':
            task_params = {'group_key': self.data.survey_group.key().id_or_name()}
            task_url = '/tasks/gsoc/grading_record/update_records'
            task = taskqueue.Task(params=task_params, url=task_url)
            task.add()
        elif post_dict['button_id'] == 'update_projects':
            # Updating projects also triggers result mails to students.
            task_params = {'group_key': self.data.survey_group.key().id_or_name(),
                           'send_mail': 'true'}
            task_url = '/tasks/gsoc/grading_record/update_projects'
            task = taskqueue.Task(params=task_params, url=task_url)
            task.add()
class GradingRecordsList(Template):
    """Lists all GradingRecords for a single GradingSurveyGroup."""

    def __init__(self, request, data):
        """Initializes the template.

        Args:
            request: The HTTPRequest object
            data: The RequestData object
        """
        self.request = request
        self.data = data

        list_config = lists.ListConfiguration(add_key_column=False)
        # Hidden synthetic key column: "<program>/<group id>/<record id>".
        list_config.addColumn(
            'key', 'Key',
            (lambda ent, *args: "%s/%d/%d" % (
                ent.parent_key().parent().name(),
                ent.parent_key().id(),
                ent.key().id())),
            hidden=True)
        title_func = lambda rec, *args: rec.parent().title
        list_config.addColumn('project_title', 'Project Title', title_func)
        org_func = lambda rec, *args: rec.parent().org.name
        list_config.addColumn('org_name', 'Organization', org_func)
        stud_rec_func = lambda rec, *args: \
            'Present' if rec.student_record else 'Missing'
        list_config.addColumn('student_record', 'Survey by Student', stud_rec_func)
        stud_id_func = lambda rec, *args: rec.parent().parent().link_id
        list_config.addColumn('student_id', 'Student Link Id', stud_id_func, hidden=True)
        list_config.addPostButton('update_records', 'Update Records', '', [0,'all'], [])
        list_config.addPostButton('update_projects', 'Update Projects', '', [0,'all'], [])

        def mentorRecordInfo(rec, *args):
            """Displays information about a GradingRecord's mentor_record property."""
            if not rec.mentor_record:
                return 'Missing'
            if rec.mentor_record.grade:
                return 'Passing Grade'
            else:
                return 'Fail Grade'

        list_config.addColumn('mentor_record', 'Survey by Mentor', mentorRecordInfo)
        list_config.addSimpleColumn('grade_decision', 'Decision')
        r = data.redirect
        # Clicking a row opens the record's detail page.
        list_config.setRowAction(lambda e, *args:
            r.grading_record(e).urlOf('gsoc_grading_record_detail'))
        self._list_config = list_config

    def context(self):
        """Returns the context for the current template."""
        # Renamed local from `list` to avoid shadowing the builtin.
        config_response = lists.ListConfigurationResponse(
            self.data, self._list_config, idx=0)
        return {'lists': [config_response]}

    def listContent(self):
        """Returns the ListContentResponse object that is constructed from the data."""
        q = GSoCGradingRecord.all()
        q.filter('grading_survey_group', self.data.survey_group)

        starter = lists.keyStarter
        # Prefetch both survey-record references to avoid per-row datastore gets.
        prefetcher = lists.modelPrefetcher(
            GSoCGradingRecord, ['mentor_record', 'student_record'], parent=True)

        response_builder = lists.RawQueryContentResponseBuilder(
            self.request, self._list_config, q,
            starter, prefetcher=prefetcher)

        return response_builder.build()

    def templatePath(self):
        """Returns the path to the template that should be used in render()."""
        return 'v2/soc/list/lists.html'
class GradingRecordForm(gsoc_forms.GSoCModelForm):
    """Django form to edit a GradingRecord manually.

    Only the final decision and the lock flag are editable; everything else
    on the record is derived from the surveys.
    """

    class Meta:
        model = GSoCGradingRecord
        css_prefix = 'gsoc_grading_record'
        fields = ['grade_decision', 'locked']
        widgets = forms.choiceWidgets(GSoCGradingRecord, ['grade_decision'])
class GradingRecordDetails(RequestHandler):
    """View to display GradingRecord details."""

    def djangoURLPatterns(self):
        """Returns the URL pattern served by this handler."""
        return [
            url(r'grading_records/detail/%s$' % gsoc_url_patterns.GRADING_RECORD,
                self, name='gsoc_grading_record_detail'),
        ]

    def checkAccess(self):
        """Only program hosts may edit grading records."""
        self.mutator.gradingSurveyRecordFromKwargs()
        self.check.isHost()

    def context(self):
        """Builds the page context; rebinds the submitted form on POST."""
        assert isSet(self.data.record)

        record = self.data.record
        if self.data.POST:
            record_form = GradingRecordForm(self.data.POST)
        else:
            # locked is initially set to true because the user is editing it manually
            record_form = GradingRecordForm(instance=record, initial={'locked': True})

        return {
            'page_name': 'Grading Record Details',
            'record': record,
            'record_form': record_form,
        }

    def post(self):
        """Handles the POST request when editing a GradingRecord."""
        assert isSet(self.data.record)

        record_form = GradingRecordForm(self.data.POST)

        if not record_form.is_valid():
            # Re-render the page with validation errors.
            return self.get()

        decision = record_form.cleaned_data['grade_decision']
        locked = record_form.cleaned_data['locked']

        record = self.data.record
        record.grade_decision = decision
        record.locked = locked
        record.put()

        # Propagate the (possibly changed) decision to the project entity.
        grading_record.updateProjectsForGradingRecords([record])

        # pass along these params as POST to the new task
        task_params = {'record_key': str(record.key())}
        task_url = '/tasks/gsoc/grading_record/mail_result'
        mail_task = taskqueue.Task(params=task_params, url=task_url)
        mail_task.add('mail')

        self.redirect.id(record.grading_survey_group.key().id_or_name())
        self.redirect.to('gsoc_grading_record_overview')

    def templatePath(self):
        return 'v2/modules/gsoc/grading_record/details.html'
|
# NERD TRUE OR FALSE TEST
def nerd():
    """Ask whether the user is a nerd and print the verdict, re-asking on
    invalid input.

    BUG FIX: the prompt advertises [S/N], but the original only accepted
    lowercase answers; input is now trimmed and lowercased so 'S'/'N'
    (and 'SIM'/'NAO') work too. The local was also renamed so it no longer
    shadows the function name.
    """
    print("O usuário é nerd? [S/N]")
    answer = input().strip().lower()
    if answer == 's' or answer == "sim":
        print("O usuário É nerd, portanto, \nNão faltaras!!")
    elif answer == 'n' or answer == "nao":
        print("O usuário NAO É nerd, dessa forma, \nFaltarás!")
    else:
        print('Responda certo, cacete!')
        nerd()
|
# Master switch: when True the execution-status email is sent.
SEND_EMAIL = True
# SMTP host:port (works for yahoo, gmail, msn, outlook, etc.).
# NOTE(review): the constant name carries a typo ("SMPT"); kept because
# other modules may reference it by this name.
SMPT = "smtp.gmail.com:587"
# Subject line of the status email.
SUBJECT = "MyProject Automation Execution Status"
# Sender credentials (placeholders — supply real values via a secure channel,
# never commit actual passwords).
FROM = "XXXXX@gmail.com"
PASSWORD = "XXXXX"
# Primary and carbon-copy recipients.
TO = "XXXXX@gmail.com"
CC = "XXXXXX@gmail.com"
COMPANY_NAME = "My Company Title"
import pickle
import traceback
import glob
import os
import numpy as np
import modules.common_params.common_headless as c
import modules.memory_classes.memory_headless as m
import modules.queues.queue_headless as q
import modules.organisms.organism_headless as o
from conf.config import Config, default_ancestors
import math
import json
from tqdm import tqdm
from typing import Dict
from copy import deepcopy
from random import randint
import logging
# File-based logging for the headless build; there is no curses UI here, so
# c.screen is cleared to signal UI-free mode to the shared modules.
logging.basicConfig(
    level=logging.INFO,
    format="%(levelname)s|%(filename)s|%(lineno)s| %(asctime)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    filename='example.log',
)
logger = logging.getLogger(__name__)

c.screen = None
class FungeraHeadless:
    def __init__(self, no_mutations: bool = False):
        """Set up a headless simulation run.

        Seeds the RNG, loads the ancestor genome into the middle of memory,
        spawns the first organism, optionally restores a snapshot, and starts
        the autosave timer.

        Args:
            no_mutations: when True, downstream code should disable mutations.
        """
        np.random.seed(c.config['random_seed'])
        if not os.path.exists('snapshots'):
            os.makedirs('snapshots')
        self.cycle = 0
        self.is_minimal = True
        self.purges = 0
        self.no_mutations = no_mutations
        # Place the ancestor at the centre of the memory grid.
        coords = np.array(c.config['memory_size']) // 2
        ip = np.copy(coords)
        if c.instructions_set_name == 'error_correction':
            # NOTE(review): instruction pointer is offset by one for the
            # error-correction instruction set — confirm intent.
            ip = ip + 1
        genome_size = self.load_genome_into_memory(
            self.read_genome(default_ancestors[c.instructions_set_name]), coords
        )
        o.organism_class(coords, genome_size, ip=ip)
        if c.config['snapshot_to_load'] != 'new':
            self.load_state()
        self.information_per_site_tables = []
        self.entropy = 0.0
        # Periodic autosave; from_timer=True lets save_state skip saves while
        # the full (non-minimal) view is active.
        self.timer = c.RepeatedTimer(
            c.config['autosave_rate'], self.save_state, (True,)
        )
def run(self):
try:
self.input_stream()
except KeyboardInterrupt:
curses.endwin()
self.timer.cancel()
except Exception:
curses.endwin()
self.timer.cancel()
def read_genome(self, filename):
with open(filename) as genome_file:
genome = np.array([list(line.strip()) for line in genome_file])
return genome
def load_genome_into_memory(self, genome, address: np.array) -> np.array:
m.memory.load_genome(genome, address, genome.shape)
return genome.shape
def update_position(self, delta):
m.memory.scroll(delta)
q.queue.update_all()
def toogle_minimal(self, memory=None):
self.is_minimal = not self.is_minimal
m.memory.clear()
m.memory = m.memory.toogle() if memory is None else memory.toogle()
m.memory.update(refresh=True)
q.queue.toogle_minimal()
def save_state(self, from_timer=False):
return_to_full = False
if not self.is_minimal:
if from_timer:
return
self.toogle_minimal()
return_to_full = True
filename = 'snapshots/{}_cycle_{}.snapshot'.format(
c.config['simulation_name'].lower().replace(' ', '_'), self.cycle
)
if c.config['dump_full_snapshots']:
with open(filename, 'wb') as f:
state = {
'cycle': self.cycle,
'memory': m.memory,
'queue': q.queue,
'information_per_site': self.information_per_site_tables,
'entropy': self.entropy
}
pickle.dump(state, f)
metrics = {
'cycle': self.cycle,
'information_per_site': self.information_per_site_tables,
'entropy': self.entropy,
'number_of_organisms': len(q.queue.organisms),
'commands_distribution': self.get_commands_distribution(),
'sizes': self.get_organism_sizes()
}
metrics_file = 'snapshots/{}_cycle_{}.snapshot'.format(
c.config['simulation_name'].lower().replace(' ', '_'), self.cycle
) + '2'
with open(metrics_file, 'wb') as mf:
pickle.dump(metrics, mf)
def load_state(self):
return_to_full = False
if not self.is_minimal:
self.toogle_minimal()
return_to_full = True
try:
if (
c.config['snapshot_to_load'] == 'last'
or c.config['snapshot_to_load'] == 'new'
):
filename = max(glob.glob('snapshots/*'), key=os.path.getctime)
else:
filename = c.config['snapshot_to_load']
with open(filename, 'rb') as f:
state = pickle.load(f)
memory = state['memory']
q.queue = state['queue']
self.cycle = state['cycle']
except Exception as e:
print(e)
pass
if not self.is_minimal or return_to_full:
self.toogle_minimal(memory)
else:
m.memory = memory
# self.update_info_minimal()
def make_cycle(self):
m.memory.update(refresh=True)
if self.cycle % c.config['random_rate'] == 0 and not self.no_mutations:
m.memory.cycle()
# print('Mutation!')
if self.cycle % c.config['cycle_gap'] == 0:
if m.memory.is_time_to_kill():
q.queue.kill_organisms()
self.purges += 1
if not self.is_minimal:
q.queue.update_all()
self.cycle += 1
# self.update_info()
@staticmethod
def calculate_entropy(distribution, num_commands):
entropy = 0
for key in distribution:
p = distribution[key]
log_p = math.log(p, num_commands)
entropy -= p * log_p
return entropy
def get_commands_distribution(self) -> Dict:
organisms_commands = []
for organism in q.queue.organisms:
organism_commands = self.get_organism_commands(
organism.start,
organism.size
)
organisms_commands.append(organism_commands.flatten())
try:
organisms_commands = np.concatenate(organisms_commands)
commands, counts = np.unique(organisms_commands, return_counts=True)
command_counts = dict(zip(commands, counts))
return command_counts
except ValueError:
logger.info(f'{organisms_commands}')
raise ValueError
def get_organism_sizes(self):
sizes = []
for organism in q.queue.organisms:
sizes.append(organism.size)
return sizes
def get_entropy_score(self):
max_table_size = [max(q.queue.organisms, key=lambda x: x.size[0]).size[0],
max(q.queue.organisms, key=lambda x: x.size[1]).size[1]]
organisms_commands = []
# Getting command tables
for organism in q.queue.organisms:
organisms_commands.append(self.get_organism_commands(
organism.start,
organism.size
))
# Getting frequencies
values_distributions = [[0 for j in range(max_table_size[1])] for i in range(max_table_size[0])]
for i in range(max_table_size[0]):
for j in range(max_table_size[1]):
values = []
for commands in organisms_commands:
if i < commands.shape[0] and j < commands.shape[1]:
values.append(commands[i][j])
values = {x: values.count(x) / len(values) for x in values}
values_distributions[i][j] = values
per_site_entropy = np.zeros(max_table_size)
for i in range(max_table_size[0]):
for j in range(max_table_size[1]):
per_site_entropy[i, j] = self.calculate_entropy(values_distributions[i][j], len(c.instructions))
self.information_per_site_tables = np.array(per_site_entropy)
return np.sum(per_site_entropy)
# total_entropy = 0
# information_tables = []
# for organism_commands in organisms_commands:
# entropy = 0
# entropy_table = np.zeros(organism_commands.shape)
# max_entropy_per_site = math.log(len(c.instructions), len(c.instructions))
# information_per_site = max_entropy_per_site - entropy_table
# for i in range(organism_commands.shape[0]):
# for j in range(organism_commands.shape[1]):
# p = values_distributions[i][j][organism_commands[i][j]]
# entropy -= p * math.log(
# p, len(c.instructions)
# )
# entropy_table[i, j] = -p * math.log(
# p, len(c.instructions)
# )
#
# total_entropy += entropy
# information_tables.append(entropy_table)
# information_tables = np.array(information_tables)
# self.information_per_site_tables = information_tables
# return total_entropy
@staticmethod
def get_organism_commands(start, size):
return m.memory.memory_map[
start[0]: start[0] + size[0],
start[1]: start[1] + size[1],
]
def find_unqiue_genomes(self):
all_genomes = []
for organism in q.queue.organisms:
all_genomes.append(self.get_organism_commands(organism.start, organism.size))
unique_genomes = []
indices = set()
for i, genome in enumerate(all_genomes):
indentical_indices = set()
if i not in indices:
indentical_indices.add(i)
indices.add(i)
for j, another_genome in enumerate(all_genomes):
if i != j:
if another_genome.shape == genome.shape and (another_genome == genome).all():
indices.add(j)
indentical_indices.add(j)
unique_genomes.append((genome, len(indentical_indices)))
return unique_genomes
def input_stream(self):
for i in tqdm(range(100000)):
if len(q.queue.organisms) == 0:
break
q.queue.cycle_all()
self.make_cycle()
# print(len(q.queue.organisms))
if __name__ == '__main__':
    print(c.instructions)
    print(c.deltas)
    f = FungeraHeadless(no_mutations=not c.config['use_mutations'])
    cnt = 0
    # Run until every organism dies, recomputing the entropy score each cycle.
    while True:
        q.queue.cycle_all()
        f.make_cycle()
        if len(q.queue.organisms) == 0:
            print('iteration ended')
            f.timer.cancel()
            break
        f.entropy = f.get_entropy_score()
        # NOTE(review): `cnt % 10` is truthy on 9 cycles out of 10, so this
        # prints almost every cycle; `cnt % 10 == 0` was probably intended.
        if cnt % 10:
            print(f'Cycle: {f.cycle}')
            print(f'Entropy: {f.entropy}')
            print(f'Num_organims: {len(q.queue.organisms)}')
        cnt += 1
|
from django.urls import path
from . import views
# Route table for this app: plain function views carry URL names; the
# class-based Index and AllSpec views are registered without names.
urlpatterns = [
    path('', views.main, name='main'),
    path('index/', views.Index.as_view()),
    path('about_company/', views.about_company, name='about_company'),
    path('allspec/', views.AllSpec.as_view()),
    path('contacts/', views.contacts, name='contacts'),
    path('my_notes/', views.my_notes, name='my_notes'),
]
|
import torch
import torch.nn as nn
from torecsys.utils.decorator import jit_experimental, no_jit_experimental_by_namedtensor
from typing import Tuple
class PositionBiasAwareLearningFrameworkLayer(nn.Module):
    r"""Add a learned per-position bias vector to a session embedding.

    A lookup table holds one bias vector of size ``input_size`` for each of
    the ``max_num_position`` possible positions; the bias for each sample's
    position is added to its embedding.
    """
    def __init__(self,
                 input_size : int,
                 max_num_position : int):
        # refer to parent class
        super(PositionBiasAwareLearningFrameworkLayer, self).__init__()
        # one trainable bias vector per position index
        self.position_bias = nn.Embedding(max_num_position, input_size)
    def forward(self, position_embed_tensor: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        r"""Forward calculation of PositionBiasAwareLearningFrameworkLayer
        Args:
            position_embed_tensor ((T, T)), shape = ((B, E), (B, )), dtype = (torch.float, torch.long): Embedded feature tensors of session and Position of session in sequence.
        Returns:
            T, shape = (B, E), dtype = torch.float: Output of PositionBiasAwareLearningFrameworkLayer
        """
        session_embed, position = position_embed_tensor
        # look up the (B, E) bias for each sample's position index ...
        bias = self.position_bias(position)
        # ... and shift the session embedding by it
        return session_embed + bias
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiAdvertCommissionAdvertPurchaseModel(object):
    """Request model for the Koubei advert commission purchase API, with
    alipay-dict (de)serialization helpers."""
    # attribute names in serialization order
    _FIELDS = ('channel_id', 'out_unique_id', 'security_code', 'tag',
               'trigger_identifies', 'trigger_identify_type',
               'trigger_strategy', 'user_identify', 'user_identify_type')
    def __init__(self):
        for field in self._FIELDS:
            setattr(self, '_' + field, None)
    @property
    def channel_id(self):
        return self._channel_id
    @channel_id.setter
    def channel_id(self, value):
        self._channel_id = value
    @property
    def out_unique_id(self):
        return self._out_unique_id
    @out_unique_id.setter
    def out_unique_id(self, value):
        self._out_unique_id = value
    @property
    def security_code(self):
        return self._security_code
    @security_code.setter
    def security_code(self, value):
        self._security_code = value
    @property
    def tag(self):
        return self._tag
    @tag.setter
    def tag(self, value):
        self._tag = value
    @property
    def trigger_identifies(self):
        return self._trigger_identifies
    @trigger_identifies.setter
    def trigger_identifies(self, value):
        # only list values are accepted; the list is shallow-copied
        if isinstance(value, list):
            self._trigger_identifies = list(value)
    @property
    def trigger_identify_type(self):
        return self._trigger_identify_type
    @trigger_identify_type.setter
    def trigger_identify_type(self, value):
        self._trigger_identify_type = value
    @property
    def trigger_strategy(self):
        return self._trigger_strategy
    @trigger_strategy.setter
    def trigger_strategy(self, value):
        self._trigger_strategy = value
    @property
    def user_identify(self):
        return self._user_identify
    @user_identify.setter
    def user_identify(self, value):
        self._user_identify = value
    @property
    def user_identify_type(self):
        return self._user_identify_type
    @user_identify_type.setter
    def user_identify_type(self, value):
        self._user_identify_type = value
    def to_alipay_dict(self):
        """Serialize every set (truthy) field into a plain dict for the gateway."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if key == 'trigger_identifies' and isinstance(value, list):
                # convert nested model elements in place, as the generated code does
                for idx, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[idx] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for a falsy input."""
        if not d:
            return None
        o = KoubeiAdvertCommissionAdvertPurchaseModel()
        for key in KoubeiAdvertCommissionAdvertPurchaseModel._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
|
from model import Model;
# 2014.06.19 01:18:18 EDT
class SuperGraph:
    """Graph over "super nodes" whose edge list may contain duplicate
    (i, j, ...) tuples; duplicates are merged (summed) when printed.
    Python 2 code (print statements)."""
    # class-level defaults, shadowed by the instance attributes set in __init__
    numNodes = 0
    numEdges = 0
    edges = []
    def __init__(self, numStructs):
        # numStructs: number of structures, one super-graph node each
        self.numNodes = numStructs
        self.numEdges = 0
        self.edges = []
    def hasEdge(self, i, j):
        # NOTE(review): indexes self.edges as a per-node adjacency container,
        # but createSuperEdges stores a flat sorted list of 6-tuples; confirm
        # which representation callers of hasEdge expect.
        return max(i, j) - 1 in self.edges[(min(i, j) - 1)]
    def createSuperEdges(self, edgeList, membership):
        # Lift structure-level edges (i, j) to all cluster pairs (x, y) with
        # x containing i and y containing j, skipping pairs that share a
        # member.  Tuple layout: (src, dst, weight, norm_weight, overlap,
        # norm_overlap), with weight 1 and the rest zeroed.
        superEdgeList = []
        for edge in edgeList:
            (i, j,) = edge
            for x in membership[(i - 1)]:
                for y in membership[(j - 1)]:
                    if x != y and y not in membership[(i - 1)] and x not in membership[(j - 1)]:
                        superEdgeList.append((min(x, y), max(x, y),1,0,0,0))
        self.edges = sorted(superEdgeList)
    def plot(self, outpath, gdfhandle, gmlhandle, overlapsTuples):
        # in the gml file we need to print the overlaps between structures as attributes
        # Output:
        # edge [
        #	 source S
        #	 target T
        #	 interaction 5
        #	 normalized_interaction 0.3
        #	 overlap 4
        #	 normalized_overlap 0.7
        # ]
        print "Printing the graph out"
        #print overlapsTuples
        all_tuples = self.edges + overlapsTuples;
        #print all_tuples
        edge_tuples = sorted( all_tuples );
        #print edge_tuples
        # example: [(0, 1, 0, 0.05, 1, 0.1111111111111111), (0, 2, 0, 0.03333333333333333, 2, 0.18181818181818182), (1, 2, 0, 0.041666666666666664, 0, 0.0), (1, 2, 1, 0, 0, 0), (1, 2, 1, 0, 0, 0)]
        fout = open(outpath, 'w')
        (ip, jp, wp, nwp, ovp, novp) = (-1, -1, 0, 0.0, 0, 0.0)
        (i, j) = (-1, -1)
        time = 0;
        print 'number of edges with duplicates = %.0f' % len(self.edges)
        # Merge consecutive runs of the same (i, j) in the sorted list by
        # summing their weight/overlap fields; emit an edge when a new pair
        # starts (and only if the accumulated weight is positive).
        for e in edge_tuples:
            (i, j, w, nw, ov, nov) = e
            if i == ip and j == jp:
                wp += w;
                nwp += nw;
                ovp += ov;
                novp += nov;
            else:
                if time != 0 :
                    if wp > 0 :
                        fout.write('%.0f,' % (ip + 1) + '%.0f' % (jp + 1) + ',%.0f,' % wp + '%.3f\n' % (wp * nwp))
                        gdfhandle.write('%.0f,' % (ip + 1) + '%.0f' % (jp + 1) + ',%.0f,interaction\n' % wp)
                        gmlhandle.write('\tedge [\n\t source %.0f\n' % (ip + 1) + '\t target %.0f\n' % (jp + 1) + '\t weight %.0f\n' % wp);
                        gmlhandle.write('\t norm_interaction %.3f\n' % (wp * nwp) + '\t overlap %.0f\n' % ovp + '\t norm_overlap %.3f\n' % float(novp));
                        gmlhandle.write('\t]\n');
                (ip, jp, wp, nwp, ovp, novp) = (i, j, w, nw, ov, nov);
            time += 1;
        # Flush the last accumulated edge.
        # NOTE(review): this branch labels the normalized interaction
        # 'weight' in the GML, unlike the loop branch's 'norm_interaction'
        # -- looks like a copy/paste slip; confirm intended attribute name.
        if i == ip and j == jp:
            if wp > 0 :
                fout.write('%.0f' % (ip + 1) + ',%.0f' % (jp + 1) + ',%.0f,' % wp + '%.3f\n' % (wp * nwp))
                gdfhandle.write('%.0f' % (ip + 1) + ',%.0f' % (jp + 1) + ',%.0f,interaction\n' % wp)
                gmlhandle.write('\tedge [\n\t source %.0f\n' % (ip + 1) + '\t target %.0f\n' % (jp + 1) + '\t weight %.0f\n' % wp);
                gmlhandle.write('\t weight %.3f\n' % (wp * nwp) + '\t overlap %.0f\n' % ovp + '\t norm_overlap %.3f\n' % float(novp));
                gmlhandle.write('\t]\n');
        fout.close()
        print 'SuperGraph edges printed out'
    def plotOld(self, outpath, gdfhandle):
        # Older variant: edges are bare (i, j) pairs; duplicates are counted
        # into a weight w instead of summing per-field values.
        fout = open(outpath, 'w')
        (ip, jp,) = (-1, -1)
        (i, j,) = (-1, -1)
        w = 1
        print 'number of edges with duplicates = %.0f' % len(self.edges)
        for e in self.edges:
            (i, j,) = e
            if i == ip and j == jp:
                w += 1
            else:
                if ip != -1:
                    fout.write('%.0f,' % (ip + 1) + '%.0f' % (jp + 1) + ',%.0f\n' % w)
                    gdfhandle.write('%.0f,' % (ip + 1) + '%.0f' % (jp + 1) + ',%.0f,interaction\n' % w)
                w = 1
            (ip, jp,) = (i, j)
        # flush the final run
        if i == ip and j == jp:
            fout.write('%.0f' % (ip + 1) + ',%.0f' % (jp + 1) + ',%.0f\n' % w)
            gdfhandle.write('%.0f' % (ip + 1) + ',%.0f' % (jp + 1) + ',%.0f,interaction\n' % w)
        fout.close()
        print 'SuperGraph edges printed out'
|
# Find the largest element of list3.
# The original scanned manually while appending visited elements back onto
# the very list it was iterating (it only terminated because the appended
# values never exceed the running maximum) and contained an unreachable
# `elif x == i` branch; max() does the same job safely.
list3 = [1, 2, 3, 2, 4, 10]
x = max(list3)
print(x)
|
import matplotlib.pyplot as plt
import numpy as np
from .config_helper import CYCLES_LOG
def plot():
    """Plot the per-cycle loss and accuracy curves recorded in CYCLES_LOG
    on a continuous x-axis, printing each cycle's mean loss and accuracy."""
    fig, axes = plt.subplots(2, figsize=(10, 10))
    axes[0].set_title("Loss")
    axes[1].set_title("Accuracy")
    start = 0
    for idx, (losses, accuracies) in enumerate(CYCLES_LOG):
        xs = range(start, start + len(losses))
        axes[0].plot(xs, losses)
        axes[1].plot(xs, accuracies)
        start += len(losses)
        print(f"[INFO]\tCycle {idx + 1}:\t\tLoss: {np.mean(losses)}\tAcc: {np.mean(accuracies)}")
    plt.show(block=True)
|
import re
import os
def main():
    """Filter extracted_info.txt down to reference-looking lines, dedupe
    them, and write the result to ref.txt (via intermediate ref_temp.txt)."""
    os.chdir('/photoanalysistool0/sliding_window_approach')
    filename = "extracted_info.txt"
    file_ptr = open(filename, "r")
    write_temp_ptr = open("ref_temp.txt","w")
    write_ptr = open("ref.txt","w")
    line_count = 0
    line_list = []
    # characters that disqualify a line from being treated as a reference
    regex = re.compile('[@_!#$%^&*()<>?/\|}{~:©«¥\`~:;.,]')
    # Pass 1: collect line numbers of candidate lines (and the line before each).
    for line in file_ptr:
        try:
            line_count += 1
            line_text = line.split("\n")
            # NOTE(review): line.isdigit() can never be true for a line that
            # starts with letters, so the first clause of this `or` never
            # matches; only the digit/quote pattern branch selects lines.
            # Confirm the intended condition.
            if(((re.match('^[a-zA-Z]+', line) is not None) and (regex.search(line) is None) and (line.isdigit())) or ((line[0].isdigit()) and (line[2] == "\'"))):
                if(line[-2]=="\""):
                    line_list = [line_count-1] + [line_count] + line_list
                #write_ptr.write(line_text[0]+'\n')
        except IndexError:
            # short lines make line[2]/line[-2] fail; skip them
            continue
    file_ptr.close()
    # Pass 2: copy the selected lines into the temp file.
    file_ptr = open(filename, "r")
    line_count = 0
    for line in file_ptr:
        line_count += 1
        #line = line.replace("\'","")
        #line = line.replace("\"","")
        if(line_count in line_list):
            write_temp_ptr.write(line)
    line_list = []
    file_ptr.close()
    write_temp_ptr.close()
    # Pass 3: dedupe while preserving order, write the final file, clean up.
    write_temp_ptr = open("ref_temp.txt","r")
    for line in write_temp_ptr:
        if(line in line_list):
            continue
        line_list += [line]
        write_ptr.write(line)
    print(line_list)
    write_ptr.close()
    write_temp_ptr.close()
    os.remove("ref_temp.txt")
    #print(line_list, len(line_list))
if __name__ == '__main__':
    main()
|
#coding:utf-8
import web
import subprocess
import re
render = web.template.render('templates/')
# Route table: both '' and '/del' map to the hello handler.
urls = (
    '','hello',
    '/del','hello',
)
app = web.application(urls,globals())
# List current md RAID devices once at import time.
# NOTE(review): `list` shadows the builtin; on Python 3 stdout.read() returns
# bytes and re.findall with a str pattern would raise -- this targets Python 2.
look = subprocess.Popen("cat /proc/mdstat|grep '^md'|awk '{print $1}'",shell=True,stdout=subprocess.PIPE)
info = look.stdout.read()
list = re.findall('md\d+',info)
class hello:
    def GET(self):
        # Re-scan /proc/mdstat on each request so the page shows current state.
        look = subprocess.Popen("cat /proc/mdstat|grep '^md'|awk '{print $1}'",shell=True,stdout=subprocess.PIPE)
        info = look.stdout.read()
        list = re.findall('md\d+',info)
        return render.delete(list)
    def POST(self):
        f = web.input()
        g = f.get('id')
        # SECURITY: 'id' comes straight from the request and is interpolated
        # into a shell command (shell injection risk); it should be validated
        # against the known md devices or passed as an argument list.
        h = 'mdadm -S /dev/' + g
        shanchu = subprocess.Popen(h,shell=True,stdout=subprocess.PIPE)
        return render.del_s(g)
|
# Demonstrate runtime type checks on a few sample values.
string_value = 'hogehoge'
int_value = 123
int_list = [1, 2, 3, 'str']
dict_sample = {'foo':1, 'bar':2}
# isinstance() is the idiomatic type test (it also honours subclasses);
# comparing `type(x) == T` works here but is discouraged.
if isinstance(string_value, str):
    print("string_value is str")
if isinstance(int_value, int):
    print("int_value is int")
if isinstance(int_list, list):
    print("int_list is list")
if isinstance(int_list[0], int):
    print("int_list[0] is int")
if isinstance(int_list[3], str):
    print("int_list[3] is str")
if isinstance(dict_sample, dict):
    print("dict_sample is dict")
print(str(type(int_list)))
print(str(type(int_list[0])))
print(str(type(int_list[3])))
print(str(type(dict_sample)))
# The output of the lines above is:
# <class 'list'>
# <class 'int'>
# <class 'str'>
# <class 'dict'>
|
import Setup
#Save dictionary to json file.
def UCLAClean():
    """Cleans the scraped data from the UCLAScraper. returns the results in a dictionary containing the "columns"
    'name','description' and 'preqName' and the "rows" with the course number labels. The function returns the
    dictionary and saves a copy of it to a json file.
    """
    import json
    #Open the json file containing the raw scraped data.
    jsonFile = Setup.UCLA
    with open(jsonFile, 'r') as inFile:
        myDictionary = json.load(inFile)
    print("Load dictionary as Json")
    #The UCLA course data usually has a sentence at the beginning of the course description describing how long
    #the course is and a sentence at the end describing grading. This is irrelevant to the true course description
    #and must be removed.
    stopPhrases = ["hour","grading"]
    outDict = {}
    #Common words and characters to remove.
    removals = [".",";",":","/",' a ',' of ',' the '," or "," at "," to "," will "," are "," be ","(",")"]
    for i in range(len(myDictionary['description'])):
        #convert course descriptions to lowercase
        item = myDictionary['description'][i]
        item = item.lower()
        #Split the sentences
        sentences = item.split(".")
        preqs = ''
        cleanDes = ''
        #Go through each sentence. Only consider sentences that do not contain "stopPhrases"
        #NOTE(review): preqs accumulates exactly the same sentences as cleanDes
        #(just without the character removal below) -- confirm 'preqName' was
        #meant to hold something different.
        for sentence in sentences:
            if stopPhrases[0] not in sentence and stopPhrases[1] not in sentence:
                #Append the sentences to the proper string.
                cleanDes += sentence
                preqs += sentence
        #Remove unwanted characters/words from description.
        for removal in removals:
            cleanDes = cleanDes.replace(removal,'')
        #Convert the name to lower case
        tag = myDictionary['name'][i].lower()
        #The course names are actually the course number and course name separated by a period. Split these by
        #the period.
        tags = tag.split(".")
        number = tags[0]
        #Start with a blank name and build up the string.
        name = ''
        #If the course name has periods in it, the name will split. Append everything from here onwards to a single
        #string.
        #NOTE(review): the inner loop reuses `i`, shadowing the outer index;
        #harmless only because the outer loop reassigns `i` before its next use.
        for i in range(len(tags)-1):
            name+=tags[i+1]
        #If a space is the first or last character, remove the space.
        #NOTE(review): name[0] raises IndexError when there is no text after
        #the course number -- confirm the inputs guarantee a non-empty name.
        if name[0] == ' ':
            name = name[1:]
        if name[-1] == ' ':
            name = name[:-1]
        if len(cleanDes) >1:
            if cleanDes[0] == ' ':
                cleanDes = cleanDes[1:]
            if cleanDes[-1] == ' ':
                cleanDes = cleanDes[:-1]
        #Save the dictionary portion.
        outDict[number] = {'name':name,'description':cleanDes,'preqName':preqs}
    #Save dictionary to json file.
    import json
    jsonFile = Setup.UCLAClean
    with open(jsonFile, 'w') as outfile:
        json.dump(outDict, outfile)
    print("Save dictionary as Json")
    return outDict
#UCLAClean()
|
from time import time
n_max = 1000000
# Sieve of Eratosthenes over [0, n_max): composites (plus 0 and 1) are
# zeroed out in place, so the survivors are exactly the primes below n_max.
integers = list(range(n_max))
integers[1] = 0
i = 2
while i * i <= n_max:
    if integers[i]:
        # i is prime: strike out its multiples from i*i upward in one
        # slice assignment (smaller multiples were already struck out).
        integers[i * i::i] = [0] * len(range(i * i, n_max, i))
    i += 1
primes = [u for u in integers if u != 0]
t = time()
def Rmod(k, n):
    """Return R(k) mod n, where R(k) = 111...1 is the k-digit repunit.

    Accumulates digits until the running power of ten cycles back to 1
    modulo n, then repeats that cycle arithmetically and recurses on the
    leftover length instead of looping all k digits.
    """
    acc = 0    # repunit built so far (not reduced; only powers are reduced mod n)
    power = 1  # 10**digits mod n
    for digits in range(k):
        acc += power
        power = power * 10 % n
        if power == 1:
            # full cycle of length digits+1 found: repeat it, recurse on the rest
            return (k // (digits + 1) * acc + Rmod(k % (digits + 1), n)) % n
    return acc % n
# Sum the first 40 primes p that divide the repunit R(10**9).
# primes[1:2]+primes[3:] keeps 3 and everything from 7 upward, i.e. skips
# 2 and 5 -- a repunit is odd and never ends in 0 or 5, so neither divides it.
N=10**9
s=0
count = 0
for p in primes[1:2]+primes[3:] :
    if Rmod(N,p)==0 :
        s+=p
        count+=1
        print(count,":",p)
        if count==40 :
            break
print(s)
print(time()-t)
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from oslo_log import log as logging
from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import disk_api
from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception
from gceapi.i18n import _
# module logger
LOG = logging.getLogger(__name__)
# 1 GiB in bytes
GB = 1024 ** 3
class API(base_api.API):
    """GCE Attached disk API.

    Bridges GCE attached-disk semantics onto Nova: keeps attachment records
    in the local DB and drives attach/detach through the Nova volumes client,
    reporting progress through the operation API.
    """
    KIND = "attached_disk"
    # fields persisted per attachment record in the local DB
    PERSISTENT_ATTRIBUTES = ["id", "instance_name", "volume_id", "name",
                             "auto_delete"]
    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        # delete operations are polled via this progress callback
        operation_api.API().register_get_progress_method(
            "attached_disk-delete",
            self._get_delete_item_progress)
    def _get_type(self):
        return self.KIND
    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES
    def get_item(self, context, instance_name, name):
        """Return the single attachment (instance_name, name); raise NotFound otherwise."""
        items = self._get_db_items(context)
        items = [i for i in items
                 if i["instance_name"] == instance_name and i["name"] == name]
        if len(items) != 1:
            raise exception.NotFound
        return items[0]
    def get_items(self, context, instance_name):
        """Return every attachment of the given instance (auto_delete defaults False)."""
        items = self._get_db_items(context)
        for item in items:
            item.setdefault("auto_delete", False)
        return [i for i in items if i["instance_name"] == instance_name]
    def add_item(self, context, instance_name, params, source, name,
                 auto_delete, scope):
        """Start attaching a disk (existing 'source' or new via 'params') to an instance.

        Stores everything the asynchronous _attach_volume step needs in
        context.operation_data, then kicks off a continued operation.
        """
        # NOTE(apavlov): name is a 'device_name' here
        if not name:
            msg = _("There is no name to assign.")
            raise exception.InvalidRequest(msg)
        nova_client = clients.nova(context)
        instances = nova_client.servers.list(
            search_opts={"name": instance_name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]
        devices = list()
        volumes_client = nova_client.volumes
        for server_volume in volumes_client.get_server_volumes(instance.id):
            devices.append(server_volume.device)
        # Pick the first free device name vdb..vdz; give up when all are taken.
        device_name = None
        for letter in string.ascii_lowercase[1:]:
            device_name = "vd" + letter
            for device in devices:
                if device_name in device:
                    break
            else:
                break
        else:
            raise exception.OverQuota
        context.operation_data["device_name"] = device_name
        if source:
            # attach an existing volume referenced by URL
            volume_name = utils._extract_name_from_url(source)
            if not volume_name:
                msg = _("There is no volume to assign.")
                raise exception.NotFound(msg)
            volume = disk_api.API().get_item(context, volume_name, scope)
            context.operation_data["volume_id"] = volume["id"]
        elif params:
            # create a new volume first; _attach_volume will drive creation
            params.setdefault("diskName", instance_name)
            context.operation_data["params"] = params
            context.operation_data["scope"] = scope
        else:
            msg = _('Disk config must contain either "source" or '
                    '"initializeParams".')
            raise exception.InvalidRequest(msg)
        context.operation_data["instance_id"] = instance.id
        context.operation_data["register_args"] = [instance_name, name,
                                                   auto_delete]
        operation_util.start_operation(
            context, base_api.API._get_complex_operation_progress)
        operation_util.continue_operation(
            context, lambda: self._attach_volume(context), timeout=0)
    def _attach_volume(self, context):
        """Continued-operation step: create the volume if needed, wait for it,
        then attach it and register the attachment record.

        Returns None while still in progress, final progress when done.
        """
        params = context.operation_data.get("params")
        if params:
            # step 1: request volume creation, then yield
            scope = context.operation_data["scope"]
            context.operation_data.pop("params")
            body = {"sizeGb": params.get("diskSizeGb"),
                    "sourceImage": params["sourceImage"]}
            volume = disk_api.API().add_item(context, params.get("diskName"),
                                             body, scope=scope)
            context.operation_data["disk"] = volume
            return None
        disk = context.operation_data.get("disk")
        if disk:
            # step 2: poll until the new volume is ready
            volume_id = disk["id"]
            item_progress = disk_api.API()._get_add_item_progress(context,
                                                                  volume_id)
            if not operation_util.is_final_progress(item_progress):
                return None
            context.operation_data.pop("disk")
            context.operation_data["volume_id"] = volume_id
        # step 3: attach and register
        instance_id = context.operation_data["instance_id"]
        device_name = context.operation_data["device_name"]
        volume_id = context.operation_data["volume_id"]
        volumes_client = clients.nova(context).volumes
        volumes_client.create_server_volume(
            instance_id, volume_id, "/dev/" + device_name)
        args = context.operation_data["register_args"]
        self.register_item(context, args[0], volume_id, args[1], args[2])
        return operation_util.get_final_progress()
    def register_item(self, context, instance_name, volume_id, name,
                      auto_delete):
        """Persist an attachment record in the local DB and return it."""
        if not name:
            msg = _("There is no name to assign.")
            raise exception.InvalidRequest(msg)
        if not volume_id:
            msg = _("There is no volume_id to assign.")
            raise exception.InvalidRequest(msg)
        new_item = {
            "id": instance_name + "-" + volume_id,
            "instance_name": instance_name,
            "volume_id": volume_id,
            "name": name,
            "auto_delete": auto_delete
        }
        new_item = self._add_db_item(context, new_item)
        return new_item
    def delete_item(self, context, instance_name, name):
        """Detach the volume from the instance and drop the DB record."""
        item = self.get_item(context, instance_name, name)
        volume_id = item["volume_id"]
        nova_client = clients.nova(context)
        instances = nova_client.servers.list(
            search_opts={"name": instance_name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]
        operation_util.start_operation(context,
                                       self._get_delete_item_progress,
                                       item["id"])
        nova_client.volumes.delete_server_volume(instance.id, volume_id)
        self._delete_db_item(context, item)
    def set_disk_auto_delete(self, context, instance_name, name, auto_delete):
        """Update the auto_delete flag of an existing attachment record."""
        item = self.get_item(context, instance_name, name)
        item["auto_delete"] = auto_delete
        self._update_db_item(context, item)
    def unregister_item(self, context, instance_name, name):
        """Remove the attachment record without touching the actual volume."""
        item = self.get_item(context, instance_name, name)
        self._delete_db_item(context, item)
    def _get_add_item_progress(self, context, dummy_id):
        # attach itself is synchronous from the operation API's viewpoint
        return operation_util.get_final_progress()
    def _get_delete_item_progress(self, context, dummy_id):
        # detach likewise reports completion immediately
        return operation_util.get_final_progress()
|
from Grafica import Grafica
import random
def GNP(n,p):
    """Sample an Erdős–Rényi G(n, p) graph: each of the n*(n-1)/2 possible
    edges is included independently with probability p."""
    grafica = Grafica(n)
    for u in range(n):
        for v in range(u + 1, n):
            if random.random() < p:
                grafica.conectar(u, v)
    return grafica
def generar_parejas(n):
    """Return every unordered pair [u, v] with 0 <= u < v < n, in order."""
    return [[u, v] for u in range(n) for v in range(u + 1, n)]
def generar_grafica(n, A):
    """Build a graph on n vertices containing exactly the edges listed in A."""
    grafica = Grafica(n)
    for u, v in A:
        grafica.conectar(u, v)
    return grafica
def GNM(n,m):
    """Sample an Erdős–Rényi G(n, m) graph: m edges drawn uniformly at
    random (without replacement) from all possible pairs."""
    parejas = generar_parejas(n)
    random.shuffle(parejas)
    return generar_grafica(n, parejas[:m])
if __name__ == "__main__":
G=GNM(20,40)
for a in G.aristas():
print(a)
|
import matplotlib.pyplot as plt
import numpy as np
# Plot two symbols of a 2 MHz sine wave sampled at 100 MS/s.
freq = 2e6
t_symbol = 1e-6
n_symbols = 2
sampling_rate = 1e8
# Fix: np.linspace requires an integer sample count; the float product
# (200.0) raises a TypeError on modern NumPy.
n_samples = int(sampling_rate * t_symbol * n_symbols)
dt = 1/sampling_rate
t = np.linspace(0, dt * (n_samples - 1), n_samples)
y = np.sin(2 * np.pi * freq * t )
plt.plot(t, y)
plt.show()
|
from Domain.Repository import IRoiSeriesRepository
from Domain.Model import ZRoiSeries
from Infrastructure.Repository import connection_to_db
from typing import List
class ImpSqliteRoiSeriesRepository(IRoiSeriesRepository):
    """SQLite-backed implementation of IRoiSeriesRepository over the
    Z_ROI_SERIES table."""
    def get_all_roi_series(self) -> List[ZRoiSeries]:
        # not implemented yet
        pass
    def get_roi_series_from_pk(self, z_pk) -> ZRoiSeries:
        # not implemented yet
        pass
    def get_roi_series_from_series_pk(self, z_series_pk) -> List[ZRoiSeries]:
        """Return every ROI series that belongs to the series with PK z_series_pk."""
        return self._get_roi_series_from_series_pk_with_connection(z_series_pk)
    @connection_to_db
    def _get_roi_series_from_series_pk_with_connection(self, z_series_pk, **kwargs) -> List[ZRoiSeries]:
        # the connection_to_db decorator supplies kwargs['cursor']
        roi_series_list = []
        cursor = kwargs['cursor']
        query = '''SELECT Z_PK, Z_SERIES, Z_NAME, Z_COLOR
                    FROM Z_ROI_SERIES
                    WHERE Z_SERIES LIKE ?'''
        cursor.execute(query, (str(z_series_pk),))
        records = cursor.fetchall()
        for row in records:
            roi_series_pk = int(row[0])
            # NOTE(review): z_series_fk is parsed but never passed to
            # ZRoiSeries -- confirm it is intentionally dropped.
            z_series_fk = int(row[1])
            z_name = row[2]
            z_color = row[3]
            new_row_series = ZRoiSeries(roi_series_pk, z_name, z_color)
            roi_series_list.append(new_row_series)
        return roi_series_list
    def add_new_roi_series_in_series(self, z_series_pk, z_name, z_color) -> ZRoiSeries:
        """Insert a new ROI series row for a series and return it as a model."""
        return self._add_new_roi_series_in_series_with_connection(z_series_pk, z_name, z_color)
    @connection_to_db
    def _add_new_roi_series_in_series_with_connection(self, z_series_pk, z_name, z_color, **kwargs) -> ZRoiSeries:
        cursor = kwargs['cursor']
        sql = ''' INSERT INTO Z_ROI_SERIES (Z_SERIES, Z_NAME, Z_COLOR)
                  VALUES(?,?,?) '''
        cursor.execute(sql, (str(z_series_pk), str(z_name), str(z_color),))
        # lastrowid is the autogenerated Z_PK of the inserted row
        roi_series_pk = cursor.lastrowid
        new_roi_series = ZRoiSeries(roi_series_pk, z_name, z_color)
        return new_roi_series
|
# Snakes Cafe ordering loop.
# Fixes vs. the original:
#  * 'quit' is checked before the order is recorded (it used to be stored
#    like any other order and fall through to the menu lookup);
#  * .title() instead of .capitalize(), so multi-word items such as
#    "spring rolls" can actually match the menu;
#  * orders are counted per menu item; the old counter mixed every item
#    (and misspelled orders) into a single running number.
order_list = ['Wings', 'Cookies', 'Spring Rolls', 'Salmon', 'Steak', 'Meat Tornado',
              'A Literal Garden', 'Ice Cream', 'Cake', 'Pie', 'Coffee', 'Tea', 'Unicorn Tears']
print("""
**************************************
** Welcome to the Snakes Cafe! **
** Please see our menu below. **
**
** To quit at any time, type "quit" **
**************************************
Appetizers
----------
Wings
Cookies
Spring Rolls
Entrees
-------
Salmon
Steak
Meat Tornado
A Literal Garden
Desserts
--------
Ice Cream
Cake
Pie
Drinks
------
Coffee
Tea
Unicorn Tears
""")
order_counts = {}
while True:
    order = input("""
***********************************
** What would you like to order? **
***********************************
> """)
    if order == 'quit':
        break
    item = order.title()
    if item in order_list:
        order_counts[item] = order_counts.get(item, 0) + 1
        print(f'** {order_counts[item]} order of {order} have been added to your meal **')
    else:
        print(f'sorry {order} is not in the menu')
|
# Generated by Django 3.0.4 on 2020-06-06 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``educacion.fechafin`` optional (blank in forms, NULL in the DB)."""

    dependencies = [
        ('gestionAsociados', '0018_auto_20200606_1245'),
    ]

    operations = [
        # End date of an education entry may legitimately be absent
        # (e.g. studies still in progress).
        migrations.AlterField(
            model_name='educacion',
            name='fechafin',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: file
Description :
Author : ybw
date: 2020/11/16
-------------------------------------------------
Change Activity:
2020/11/16:
-------------------------------------------------
"""
import os
def mkdir(path):
    """Create directory *path* (including parents) exactly once.

    Bug fix: the original check-then-create (``os.path.exists`` followed by
    ``os.makedirs``) had a TOCTOU race — a concurrent creator between the
    two calls made ``makedirs`` raise.  ``exist_ok=True`` makes the
    operation idempotent and race-free.

    :param path: directory path to create
    :return: None
    """
    os.makedirs(path, exist_ok=True)
def getFile(dir_path):
    """Recursively collect every file under *dir_path*.

    :param dir_path: root directory to walk
    :return: ``(paths, sizes)`` — a list of all file paths in walk order,
             and a dict mapping each path to its size in bytes.
    """
    sizes = {}
    paths = []
    for root, _dirs, names in os.walk(dir_path):
        for name in names:
            full = os.path.join(root, name)
            paths.append(full)
            sizes[full] = os.path.getsize(full)
    return paths, sizes
|
import newt,csv,unicodedata,os
import networkx as nx
import newtx as nwx
api=newt.getTwitterAPI()  # authenticated Twitter API handle (project helper)
users=['lfeatherstone','jamesgraymp','davidevennett','mike_fabricant']  # screen names to analyse
projpath='test'  # root output directory; a per-user subdirectory is created below
sampleSize=997  # presumably max followers fetched per user -- confirm against newt API
def checkDir(dirpath):
    """Ensure *dirpath* exists, creating it (with parents) on first use."""
    if os.path.exists(dirpath):
        return
    os.makedirs(dirpath)
def outputter(fn,twd):
    # Write the Twitter user records in `twd` (dict keyed by user id) to the
    # CSV file `fn`.  NOTE: Python 2 code ('wb+' csv mode, `unicode` builtin).
    f=open(fn,'wb+')
    writer=csv.writer(f,quoting=csv.QUOTE_ALL)
    # Header: 'source' plus one column per profile field written below.
    k=[ 'source','screen_name','name','description','location','time_zone','created_at','contributors_enabled','url','listed_count','friends_count','followers_count','statuses_count','favourites_count','id_str','id','verified','utc_offset','profile_image_url','protected']
    writer.writerow(k)
    for uu in twd:
        u=twd[uu]
        # NOTE(review): `user` is the module-level loop variable (the account
        # currently being processed), not the dict key `uu`.  Presumably
        # intentional, tagging every row with its source account -- confirm,
        # since `uu` would otherwise be the natural choice here.
        ux=[user]
        # Free-text fields: normalise to NFKD and strip non-ASCII characters.
        for x in [u.screen_name,u.name,u.description,u.location,u.time_zone]:
            if x != None:
                ux.append(unicodedata.normalize('NFKD', unicode(x)).encode('ascii','ignore'))
            else: ux.append('')
        # Numeric/boolean/simple fields are written through unchanged.
        for x in [u.created_at,u.contributors_enabled,u.url,u.listed_count,u.friends_count,u.followers_count,u.statuses_count,u.favourites_count,u.id_str,u.id,u.verified,u.utc_offset,u.profile_image_url,u.protected]:
            ux.append(x)
        try:
            writer.writerow(ux)
        except: pass  # best-effort: silently skip rows that fail to serialise
    f.close()
#get friends of followers of user
def getFriendsProjection(tw={},maxf=5000):
    # Dump the inner/extra/outer friend-network projections of the users in
    # `tw` to GDF files under the current project directory.
    # NOTE(review): mutable default `tw={}` -- harmless only if newt never
    # mutates its argument; worth confirming.
    # Relies on module globals `api` and `projname`.
    newt.gephiOutputFileByName(api,projname+'/friends_innerfriends.gdf', tw,maxf=maxf)
    newt.gephiOutputFileByName(api,projname+'/friends__extrafriends.gdf', tw,'extrafriends',maxf=maxf)
    newt.gephiOutputFileByName(api,projname+'/friends__outerfriends.gdf', tw,'outerfriends',maxf=maxf)
def labelGraph(LG,idlist):
    # Annotate the nodes listed in `idlist` with profile details and degree
    # statistics, and dump the details to CSV.  Returns the annotated graph.
    idlabels=newt.twDetailsFromIds(api,idlist)  # id -> user-details record
    outputter(projname+'/followersCommonFriends.csv',idlabels)
    for id in idlabels:
        # `LG.node` is the pre-2.0 networkx node-attribute dict API.
        if str(id) in LG.node:
            LG.node[str(id)]['label']=idlabels[id].screen_name
            LG.node[str(id)]['fo_count']=idlabels[id].followers_count
            LG.node[str(id)]['fr_count']=idlabels[id].friends_count
            LG.node[str(id)]['updates']=idlabels[id].statuses_count
            LG.node[str(id)]['indegree']=LG.in_degree(str(id))
            # Fraction of this account's total followers that appear in the
            # sampled graph (guard against divide-by-zero).
            if idlabels[id].followers_count>0:
                LG.node[str(id)]['fo_prop']=1.0*LG.in_degree(str(id))/idlabels[id].followers_count
            else:
                LG.node[str(id)]['fo_prop']=0.0
    return LG
def filterNet(DG,mindegree=None,indegree=100,outdegree=50,outdegreemax=9999999,indegreemax=999999):
    # Reduce `DG` to the interesting core, label it, and write graphml/edgelist
    # outputs.  NOTE: Python 2 print statements; NOTE(review): `indegreemax`
    # is accepted but never used in the body.
    print 'In filterNet'
    filter=[]
    for n in DG:
        # Nodes above the out-degree cap are excluded outright.
        if outdegreemax==None or DG.out_degree(n)<=outdegreemax:
            if mindegree!=None:
                # Total-degree threshold takes precedence when supplied.
                if DG.degree(n)>=mindegree:
                    filter.append(n)
            else:
                # Otherwise keep nodes passing either directional threshold.
                if indegree!=None:
                    if DG.in_degree(n)>=indegree:
                        filter.append(n)
                if outdegree!=None:
                    if DG.out_degree(n)>=outdegree:
                        filter.append(n)
    #the filter represents the intersect of the *degreesets
    #indegree and outdegree values are ignored if mindegree is set
    filter=set(filter)
    H=DG.subgraph(filter)
    #Superstitiously, perhaps, make sure we only grab nodes that project edges...
    filter= [n for n in H if H.degree(n)>0]
    L=H.subgraph(filter)
    print "Filter set:",filter
    print L.order(),L.size()
    L=labelGraph(L,filter)
    nx.write_graphml(L, projname+"/followersCommonFriends.graphml")
    nx.write_edgelist(L, projname+"/followersCommonFriends.txt",data=False)
def handleUser(user):
    # Build follower/friend network outputs for one screen name into the
    # per-user directory `projname` (set by the caller loop below).
    #get details of followers of user
    twd=newt.getTwitterFollowersDetailsByIDs(api,user,sampleSize)
    twc={}
    twDetails={}
    # Count occurrences per follower id and keep one details record each.
    for t in twd:
        if t in twc:
            twc[t]=twc[t]+1
        else:
            twc[t]=1
            twDetails[t]=twd[t]
    outputter(projname+'/followers.csv',twd)
    getFriendsProjection(twDetails)
    # Load the outer-friends projection written above as a directed graph.
    fn=projname+'/friends__outerfriends.gdf'
    DG=nwx.directedNetworkFromGDF(fn)
    print DG.order(),DG.size()
    # Degree stats over the whole network.
    fn=projname+'/big_netstats.csv'
    f=open(fn,'wb+')
    writer=csv.writer(f,quoting=csv.QUOTE_ALL)
    writer.writerow(['user','indegree','outdegree'])
    for n in DG:
        writer.writerow( [n, DG.in_degree(n), DG.out_degree(n) ] )
    f.close()
    # Stats restricted to nodes with outgoing edges (the sampled followers),
    # alongside their degrees in the full network.
    fn=projname+'/followers_netstats.csv'
    f=open(fn,'wb+')
    writer=csv.writer(f,quoting=csv.QUOTE_ALL)
    writer.writerow(['user','indegree','outdegree','fullindegree','fulloutdegree'])
    filter= [n for n in DG if DG.out_degree(n)>0]
    L=DG.subgraph(filter)
    for n in L:
        writer.writerow( [n, L.in_degree(n), L.out_degree(n),DG.in_degree(n), DG.out_degree(n) ] )
    f.close()
    # Stats restricted to nodes with at least one incoming edge
    # (accounts followed by someone in the sample).
    fn=projname+'/followersfriends_netstats.csv'
    f=open(fn,'wb+')
    writer=csv.writer(f,quoting=csv.QUOTE_ALL)
    writer.writerow(['user','indegree','outdegree','fullindegree','fulloutdegree'])
    filter= [n for n in DG if DG.in_degree(n)>0]
    L=DG.subgraph(filter)
    for n in L:
        writer.writerow( [n,L.in_degree(n), L.out_degree(n),DG.in_degree(n), DG.out_degree(n)] )
    f.close()
    # Same again, but only accounts followed by more than 10 sampled users.
    fn=projname+'/followersfriends10_netstats.csv'
    f=open(fn,'wb+')
    writer=csv.writer(f,quoting=csv.QUOTE_ALL)
    writer.writerow(['user','indegree','outdegree','fullindegree','fulloutdegree'])
    filter= [n for n in DG if DG.in_degree(n)>10]
    L=DG.subgraph(filter)
    for n in L:
        writer.writerow( [n, L.in_degree(n), L.out_degree(n),DG.in_degree(n), DG.out_degree(n)] )
    f.close()
    filterNet(DG)
# Drive the whole pipeline once per target account, each into its own
# subdirectory of `projpath`.
for user in users:
    projname=projpath+'/'+user
    checkDir(projname)
    handleUser(user)
#add count of inner friends to followers details
#find common outerfriends
#get details of common outerfriends
#find indegree of common outerfriends
|
import os
import random
import tensorflow as tf
from datasets.tf_datasets import create_ucf101_data_feed_for_k_sample_per_action_iterative_dataset, \
create_data_feed_for_train, create_diva_data_feed_for_k_sample_per_action_iterative_dataset_unique_class_each_batch
from models import ModelAgnosticMetaLearning, C3DNetwork
import settings
from experiment_settings import RANDOM_SEED, DATASET, N, K, BATCH_SIZE, NUM_GPUS, NUM_ITERATIONS, META_LEARNING_RATE, \
LEARNING_RATE, META_TRAIN, test_actions, diva_test_actions, STARTING_POINT_MODEL_ADDRESS, REPORT_AFTER_STEP, \
SAVE_AFTER_STEP, META_TEST_STARTING_MODEL, NUM_META_TEST_ITERATIONS, SAVE_AFTER_META_TEST_STEP
def initialize():
    """Build and return a ModelAgnosticMetaLearning instance configured from
    experiment_settings, with its data feed iterator already initialized.

    Behaviour branches on META_TRAIN: meta-training gets a train/val feed and
    a settings-derived log/model directory; meta-testing gets a k-shot
    iterative feed (plus a lookup `table` that is printed for inspection).
    """
    # Seed python and TF RNGs for reproducibility (-1 means "don't seed").
    if RANDOM_SEED != -1:
        random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
    # Encode every hyperparameter into the directory name so each experiment
    # configuration logs/saves to a distinct location.
    model_dir = os.path.join(
        DATASET,
        'meta-train',
        '{}-way-classifier'.format(N),
        '{}-shot'.format(K),
        'batch-size-{}'.format(BATCH_SIZE),
        'num-gpus-{}'.format(NUM_GPUS),
        'random-seed-{}'.format(RANDOM_SEED),
        'num-iterations-{}'.format(NUM_ITERATIONS),
        'meta-learning-rate-{}'.format(META_LEARNING_RATE),
        'learning-rate-{}'.format(LEARNING_RATE),
    )
    if META_TRAIN:
        log_dir = os.path.join(settings.BASE_LOG_ADDRESS, model_dir)
        saving_path = os.path.join(settings.SAVED_MODELS_ADDRESS, model_dir)
    else:
        # Meta-test runs share a single fixed location.
        log_dir = os.path.join(settings.BASE_LOG_ADDRESS, 'meta-test')
        saving_path = os.path.join(settings.SAVED_MODELS_ADDRESS, 'meta-test', 'model')
    # Pick the TFRecords root for the selected dataset.
    if DATASET == 'ucf-101':
        base_address = settings.UCF101_TF_RECORDS_ADDRESS
        # '/home/siavash/programming/FewShotLearning/ucf101_tfrecords/'
    elif DATASET == 'diva':
        base_address = settings.DIVA_TRAIN_TF_RECORDS_ADDRESS
    else:
        base_address = settings.KINETICS_TF_RECORDS_ADDRESS
    if META_TRAIN:
        input_data_ph, input_labels_ph, val_data_ph, val_labels_ph, iterator = create_data_feed_for_train(
            base_address=base_address,
            test_actions=test_actions,
            batch_size=BATCH_SIZE * NUM_GPUS,
            k=K,
            n=N,
            random_labels=False
        )
    else:
        # Meta-test: k samples per action; validation placeholders alias the
        # input placeholders since there is no separate validation feed.
        if DATASET == 'ucf-101' or DATASET == 'kinetics':
            print(test_actions[:BATCH_SIZE * NUM_GPUS])
            input_data_ph, input_labels_ph, iterator, table = \
                create_ucf101_data_feed_for_k_sample_per_action_iterative_dataset(
                    dataset_address=base_address,
                    k=K,
                    batch_size=BATCH_SIZE * NUM_GPUS,
                    actions_include=test_actions[:BATCH_SIZE * NUM_GPUS],
                )
            val_data_ph = input_data_ph
            val_labels_ph = input_labels_ph
        else:
            # input_data_ph, input_labels_ph, iterator = create_diva_data_feed_for_k_sample_per_action_iterative_dataset(
            #     dataset_address=base_address,
            #     k=K,
            #     batch_size=BATCH_SIZE * NUM_GPUS,
            # )
            input_data_ph, input_labels_ph, iterator, table = \
                create_diva_data_feed_for_k_sample_per_action_iterative_dataset_unique_class_each_batch(
                    dataset_address=base_address,
                    actions_include=None
                )
            val_data_ph = input_data_ph
            val_labels_ph = input_labels_ph
    maml = ModelAgnosticMetaLearning(
        C3DNetwork,
        input_data_ph,
        input_labels_ph,
        val_data_ph,
        val_labels_ph,
        log_dir=log_dir,
        saving_path=saving_path,
        num_gpu_devices=NUM_GPUS,
        meta_learn_rate=META_LEARNING_RATE,
        learning_rate=LEARNING_RATE,
        log_device_placement=False,
        num_classes=N
    )
    # Initialize lookup tables and the dataset iterator inside the model's
    # session before returning it.
    maml.sess.run(tf.tables_initializer())
    maml.sess.run(iterator.initializer)
    if not META_TRAIN:
        # `table` only exists on the meta-test branches above.
        print(maml.sess.run(table.export()))
    return maml
if __name__ == '__main__':
    maml = initialize()
    if META_TRAIN:
        # Warm-start from the pre-trained model but not its last layer
        # (load_last_layer=False), then meta-train.
        maml.load_model(path=STARTING_POINT_MODEL_ADDRESS, load_last_layer=False)
        maml.meta_train(
            num_iterations=NUM_ITERATIONS + 1,
            report_after_x_step=REPORT_AFTER_STEP,
            save_after_x_step=SAVE_AFTER_STEP
        )
    else:
        # Meta-test: load the full checkpoint and adapt/evaluate.
        maml.load_model(META_TEST_STARTING_MODEL)
        maml.meta_test(NUM_META_TEST_ITERATIONS, save_model_per_x_iterations=SAVE_AFTER_META_TEST_STEP)
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Bug fix: `time.sleep(7)` below was called without importing `time`,
# raising NameError at runtime.
import time

# Open YouTube in Firefox and run a search typed by the user.
driver = webdriver.Firefox()
driver.get("http://youtube.com")
search = input("What to search in youtube: ")
try:
    ytbSearch = driver.find_element_by_id("masthead-search-term")
    ytbSearch.send_keys(str(search))
    vidSearch = driver.find_element_by_id("search-btn")
    vidSearch.click()
    # Give the results page time to load before the script ends.
    time.sleep(7)
finally:
    # NOTE(review): the browser is intentionally left open (original
    # behaviour); add driver.quit() here if cleanup is wanted.
    pass
|
#A9_Q1
class Animal:
    """Base class for the A9_Q1 inheritance demo."""

    def animal_attribute(self):
        # Report the trait shared by every animal in this example.
        message = "IT has 4 legs"
        print(message)
class Tiger(Animal):
    """Tiger adds its own trait on top of what Animal provides."""

    def properties(self):
        text = "IT has a tail"
        print(text)
# Demo: the subclass instance has both its own method and the inherited one.
y=Tiger()
y.properties()
y.animal_attribute()
#A9_Q2
#OUTPUT-A,B
#A9_Q3
class Cop:
    """Interactive record of a police officer (A9_Q3 exercise)."""

    def __init__(self, name, age, workexp, desg):
        self.name = name
        self.age = age
        self.workexp = workexp
        self.desg = desg

    @classmethod
    def add(cls):
        # Prompt for all fields.  NOTE(review): this assigns to *class*
        # attributes, so the captured data is shared by every instance and
        # subclass; `display` below reads those same class attributes.  The
        # fresh instance returned here is discarded by the callers in this
        # script -- presumably intentional for the exercise, but fragile.
        cls.name = input("ENTER THE NAME")
        cls.age = int(input("ENTER THE AGE"))
        cls.workexp = int(input("ENTER THE WORK EXPERIENCE"))
        cls.desg = input("ENTER THE DESIGNATION")
        return Cop(cls.name, cls.age, cls.workexp, cls.desg)

    @classmethod
    def display(cls):
        # Print the class-level details last captured by `add`.
        print("")
        print("DETAILS ARE-->")
        print("NAME-> " + cls.name)
        print("AGE-> %d" % cls.age)
        print("WORK EXPERIENCE--> %d" % cls.workexp)
        print("DESIGNATION-->" + cls.desg)

    def update(self):
        # Re-prompt for every field, then show the result.
        print("UPDATE DETAILS-->")
        self.add()
        self.display()
class Mission(Cop):
    """Cop subclass that attaches mission details."""

    def __init__(self, mission_details):
        # NOTE(review): does not call Cop.__init__, so instance-level
        # name/age/... are never set; `display()` still works because Cop.add
        # stores those values as class attributes.
        self.md =mission_details

    def add_mission_details(self):
        # Prompt for the mission text, then show the officer + mission.
        self.md=input("ENTER MISSION DETAILS--> ")
        print("")
        self.display()
        print("MISSION DETAILS-->"+self.md)
# Interactive driver: collect details, optionally update them, optionally
# attach a mission.  NOTE(review): the instance returned by obj1.add() is
# discarded -- obj1 keeps working only via Cop's class attributes.
x = Mission("")
obj1 = Cop("",0,0,"")
obj1.add()
obj1.display()
ch = input("DO YOU WANT TO UPDATE THE DETAILS?(Y/N)")
if ch == 'y' or ch == 'Y':
    obj1.update()
ch = input("DO YOU WANT TO RUN MISSION CLASS?(Y/N)")
if ch == 'y' or ch == 'Y':
    x.add_mission_details()
#A9_Q4
class Shape:
    """Container for a length and breadth.

    Bug fix: the constructor was misspelled ``__init`` (missing trailing
    underscores), so it was never invoked and ``len``/``bre`` were never
    stored.  Renamed to ``__init__`` with zero defaults so the existing
    no-argument instantiations (``Shape()``, ``Rectangle()``, ``Square()``)
    keep working.
    """

    def __init__(self, len=0, bre=0):
        self.len = len  # length (name kept from original; shadows builtin)
        self.bre = bre  # breadth
class Rectangle(Shape):
    """Rectangle area helper; dimensions are passed in, not read from self."""

    def area(self, len, bre):
        product = len * bre
        return product
class Square(Shape):
    """Square area helper; the side length is passed in."""

    def area(self, len):
        side = len
        return side * side
# Driver: read dimensions and print both areas.
# NOTE(review): Shape's constructor is misspelled (`__init`), so these
# no-argument instantiations only work because no __init__ runs at all.
obj=Shape()
obj1=Rectangle()
obj2=Square()
len=int(input("enter length"))  # NOTE: shadows the builtin `len`
bre=int(input("enter breadth"))
print("area of a rectangle",(obj1.area(len,bre)))
print("area of square",(obj2.area(len)))
|
import random
import matplotlib.pyplot as plt
import numpy as np
import copy
gt = np.array([1/6, 2/6, 3/6, 4/6, 5/6], dtype = np.float)
def get_sequence():
    """Simulate one random walk starting from the centre state (index 2).

    States 0..4 are non-terminal; -1 and 5 are absorbing edges.  Returns
    ``(states, rewards)``: the non-terminal states visited and the reward
    observed after leaving each one (1 only when the walk terminates on the
    right edge, otherwise 0).
    """
    visited = []
    step_rewards = []
    state = 2
    while state not in (-1, 5):
        visited.append(state)
        state += random.choice([1, -1])
        step_rewards.append(1 if state == 5 else 0)
    return visited, step_rewards
def td0_fig1(num_episodes, alpha, state_value):
    """Run TD(0) updates in place on *state_value* for Figure 1.

    Bug fix: the original guard ``if next_state:`` treated state 0 ('A') as
    terminal because ``0`` is falsy, so any transition into state 0 ignored
    its estimated value and bootstrapped from 0 instead.  The comparison is
    now an explicit ``is not None``.

    :param num_episodes: number of random-walk episodes to run
    :param alpha: TD step size
    :param state_value: mutable sequence of 5 value estimates (updated in place)
    :return: the same *state_value* object, for chaining
    """
    for _ in range(num_episodes):
        states, rewards = get_sequence()
        for i, st in enumerate(states):
            next_state = states[i + 1] if i < len(states) - 1 else None
            reward = rewards[i]
            if next_state is not None:
                st_ = state_value[next_state]
            else:
                st_ = 0  # terminal states have value 0
            state_value[st] = state_value[st] + alpha * (reward + st_ - state_value[st])
    return state_value
def temporal_difference(num_episodes, alpha, num_runs):
    """Average RMS error of TD(0) value estimates, per episode, over runs.

    Values start at 0.5; errors are measured against the true values ``gt``.
    Returns an array of length *num_episodes*.
    """
    rms_per_episode = [0.0] * num_episodes
    for _ in range(num_runs):
        values = [0.5] * 5
        for episode in range(num_episodes):
            states, rewards = get_sequence()
            for idx, state in enumerate(states):
                if idx < len(states) - 1:
                    bootstrap = values[states[idx + 1]]
                else:
                    bootstrap = 0
                values[state] += alpha * (rewards[idx] + bootstrap - values[state])
            rms_per_episode[episode] += np.mean((np.array(values) - gt) ** 2) ** 0.5
    return np.array(rms_per_episode) / num_runs
def monte_carlo(num_episodes, alpha, num_runs):
    """Average RMS error of every-visit constant-alpha MC estimates.

    Returns follow each episode backwards; each visited state is nudged
    toward the return observed from it.  Errors are measured against ``gt``.
    """
    rms_per_episode = [0.0] * num_episodes
    for _ in range(num_runs):
        values = [0.5] * 5
        for episode in range(num_episodes):
            states, rewards = get_sequence()
            ret = 0
            for step in reversed(range(len(states))):
                ret += rewards[step]
                s = states[step]
                values[s] += alpha * (ret - values[s])
            rms_per_episode[episode] += np.mean((np.array(values) - gt) ** 2) ** 0.5
    return np.array(rms_per_episode) / num_runs
#Figure 1
state_val = np.array([0.5 for _ in range(5)])
plt.plot(['A', 'B', 'C', 'D', 'E'], state_val, label = "Initial")
# td0_fig1 mutates state_val in place, so the episode counts accumulate:
# 1 episode, then +9 = 10 total, then +90 = 100 total.
state_val = td0_fig1(1, 0.1, state_val)
plt.plot(['A', 'B', 'C', 'D', 'E'], state_val, label = "1 Episode")
state_val = td0_fig1(9, 0.1, state_val)
plt.plot(['A', 'B', 'C', 'D', 'E'], state_val, label = "10 Episodes")
state_val = td0_fig1(90, 0.1, state_val)
plt.plot(['A', 'B', 'C', 'D', 'E'], state_val, label = "100 Episodes")
plt.plot(['A', 'B', 'C', 'D', 'E'], gt, label = "True Value")
plt.legend()
plt.xlabel("State")
plt.ylabel("Value")
plt.show()
#Figure 2
# Compare learning curves (RMS error vs episodes) of TD(0) and MC for
# several step sizes, each averaged over 100 runs of 100 episodes.
mse_td_005 = temporal_difference(100, 0.05 ,100)
mse_td_010 = temporal_difference(100, 0.1 ,100)
mse_td_015 = temporal_difference(100, 0.15 ,100)
mse_mc_001 = monte_carlo(100, 0.01 ,100)
mse_mc_002 = monte_carlo(100, 0.02 ,100)
mse_mc_003 = monte_carlo(100, 0.03 ,100)
mse_mc_004 = monte_carlo(100, 0.04 ,100)
plt.plot(list(range(100)), mse_td_005, label = "TD(alpha = 0.05)")
plt.plot(list(range(100)), mse_td_010, label = "TD(alpha = 0.1)")
plt.plot(list(range(100)), mse_td_015, label = "TD(alpha = 0.15)")
plt.plot(list(range(100)), mse_mc_001, label = "MC(alpha = 0.01)")
plt.plot(list(range(100)), mse_mc_002, label = "MC(alpha = 0.02)")
plt.plot(list(range(100)), mse_mc_003, label = "MC(alpha = 0.03)")
plt.plot(list(range(100)), mse_mc_004, label = "MC(alpha = 0.04)")
plt.legend()
# Fix: axis label was misspelled "Emprirical".
plt.ylabel("Empirical RMS Error over Runs")
plt.xlabel("Walks/Episodes")
plt.show()
import numpy as np
import cv2 as cv
import glob
if __name__ == "__main__":
    # Stitch 484 (22 x 22) 256x256 prediction tiles back into one large image.
    image_file_names = glob.glob("/home/vignesh/Documents/COS700/Code/models/research/deeplab/datasets/potsdam/exp/train_on_trainval_set_mobilenetv2/vis/segmentation_results/*prediction.png")
    image_count = 0
    # Lexicographic sort -- assumes tile filenames sort in raster order
    # (left-to-right, top-to-bottom); TODO confirm the naming scheme.
    image_file_names.sort()
    # NOTE(review): the canvas is 5888x5888 = 23x23 tiles, but only 484
    # (22x22) tiles are placed below, so the last row/column stays zeroed --
    # confirm this is intended.
    new_img = np.zeros((5888, 5888, 3))
    y = 0
    x = 0
    while image_count < 484:
        file_name = image_file_names[image_count]
        img = cv.imread(file_name)
        new_img[y: y + 256, x: x + 256] = img
        # Advance left-to-right; wrap to the next tile row after column 5632.
        if x == 5632:
            x = 0
            y = y + 256
        else:
            x = x + 256
        image_count = image_count + 1
    cv.imwrite("image.png", new_img)
|
# -*- coding:utf-8 -*-
import pymysql.cursors
from config import settings
from base.base_log import BaseLogger
logger = BaseLogger(__name__).get_logger()
def execute(sql, params=None, db=settings.TEST_DEFAULT_DB, is_fetchone=True):
    """Run *sql* against the configured test MySQL database.

    :param sql: SQL statement (driver placeholders allowed)
    :param params: parameters bound by the driver
    :param db: schema to connect to (defaults to the test default DB)
    :param is_fetchone: True -> return one row as a dict, else all rows
    :return: the fetched row(s), or None if execution failed
    """
    # Connect to the database (autocommit on, rows returned as dicts).
    connection = pymysql.connect(host=settings.TEST_MYSQL_CONFIG['host'],
                                 port=settings.TEST_MYSQL_CONFIG['port'],
                                 user=settings.TEST_MYSQL_CONFIG['user'],
                                 password=settings.TEST_MYSQL_CONFIG['password'],
                                 db=db,
                                 autocommit=True,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    logger.info('Executing SQL:' + str(sql) + ',params:' + str(params))
    try:
        with connection.cursor() as cursor:
            cursor.execute(sql, params)
            if is_fetchone:
                return cursor.fetchone()
            else:
                return cursor.fetchall()
    except Exception:
        # Bug fix: the original bare `except:` silently swallowed every error
        # (including KeyboardInterrupt/SystemExit).  Keep the best-effort
        # contract (return None) but log the failure and catch only Exception.
        logger.exception('SQL execution failed: %s', sql)
        connection.rollback()
    finally:
        connection.close()
def local_execute(sql, params=None, db='auto_test', is_fetchone=True):
    """Run *sql* against the test-report database (default schema 'auto_test').

    :param sql: SQL statement (driver placeholders allowed)
    :param params: parameters bound by the driver
    :param db: schema to connect to
    :param is_fetchone: True -> return one row as a dict, else all rows
    :return: the fetched row(s), or None if execution failed
    """
    # Connect to the test-report database (autocommit on, dict rows).
    connection = pymysql.connect(host=settings.TEST_MYSQL_CONFIG['host'],
                                 port=settings.TEST_MYSQL_CONFIG['port'],
                                 user=settings.TEST_MYSQL_CONFIG['user'],
                                 password=settings.TEST_MYSQL_CONFIG['password'],
                                 db=db,
                                 autocommit=True,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    logger.info('Executing SQL:' + str(sql) + ',params:' + str(params))
    try:
        with connection.cursor() as cursor:
            cursor.execute(sql, params)
            if is_fetchone:
                return cursor.fetchone()
            else:
                return cursor.fetchall()
    except Exception:
        # Bug fix: the original bare `except:` silently swallowed every error
        # (including KeyboardInterrupt/SystemExit).  Keep the best-effort
        # contract (return None) but log the failure and catch only Exception.
        logger.exception('SQL execution failed: %s', sql)
        connection.rollback()
    finally:
        connection.close()
"""
This type stub file was generated by pyright.
"""
from sqlite3 import dbapi2 as Database
from typing import Any, Callable
from django.db.backends.base.base import BaseDatabaseWrapper
# Type stubs only: bodies are intentionally elided with `...`; the real
# implementations live in django.db.backends.sqlite3.base.
def decoder(conv_func: Callable) -> Callable:
    ...

class DatabaseWrapper(BaseDatabaseWrapper):
    ...

FORMAT_QMARK_REGEX: Any

class SQLiteCursorWrapper(Database.Cursor):
    ...

def check_sqlite_version() -> None:
    ...
|
from distutils.core import setup
# Packaging metadata for the TasksToPipeline project.
# NOTE(review): version/license/description are empty, and vendored
# third-party packages (oauth2, cssutils, requests, httplib2, ...) are
# listed in `packages`, so installing this will install those copies into
# site-packages -- confirm that is intended.
setup(
    name='TasksToPipeline',
    version='',
    packages=['oauth2', 'cssutils', 'cssutils.css', 'cssutils.tests', 'cssutils.tests.test_encutils',
              'cssutils.scripts', 'cssutils.stylesheets', 'httplib2', 'requests', 'requests.packages',
              'requests.packages.chardet', 'requests.packages.urllib3', 'requests.packages.urllib3.packages',
              'requests.packages.urllib3.packages.ssl_match_hostname', 'requests.packages.chardet2',
              'requests.packages.oauthlib', 'requests.packages.oauthlib.oauth1',
              'requests.packages.oauthlib.oauth1.rfc5849', 'requests.packages.oauthlib.oauth2',
              'requests.packages.oauthlib.oauth2.draft25', 'apiclient', 'premailer', 'uritemplate', 'oauth2client',
              'taskstopipeline', 'taskstopipeline.controllers'],
    url='http://www.taskstopipeline.com',
    license='',
    author='Alain Kramar',
    author_email='alain.kramar@gmail.com',
    description=''
)
|
# Repeatedly replace the number with the sum of its decimal digits until a
# single digit remains, counting the iterations (the additive persistence).
# NOTE: Python 2 code (`raw_input`, print statement).
n = raw_input()
nL = len(n)  # NOTE(review): computed but never used
cnt = 0
while len(n) != 1:
    s = 0
    for i in n:
        s += int(i)
    n = str(s)
    cnt += 1
print cnt
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.