text stringlengths 38 1.54M |
|---|
from office365.runtime.client_value import ClientValue
class Location(ClientValue):
    """Location details attached to an event."""

    def __init__(self, displayName=None):
        """
        :param str displayName: Human-readable name of the location.
        """
        super(Location, self).__init__()
        self.displayName = displayName
|
from bigml.api import BigML
# Build an anomaly detector from the iris CSV: source -> dataset -> anomaly.
api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)  # block until the source finishes processing

dataset1 = api.create_dataset(source1, {'name': u'iris'})
api.ok(dataset1)

anomaly1 = api.create_anomaly(
    dataset1,
    {'anomaly_seed': u'2c249dda00fbf54ab4cdd850532a584f286af5b6',
     'name': u'my_anomaly_name'})
api.ok(anomaly1)
|
# Generated by Django 3.1.3 on 2020-11-26 06:34
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: re-adds a unique, name-derived slug to Show."""

    dependencies = [
        ('core', '0006_remove_show_slug'),
    ]

    operations = [
        migrations.AddField(
            model_name='show',
            name='slug',
            # Auto-populated from 'name'; nullable so existing rows migrate cleanly.
            field=autoslug.fields.AutoSlugField(editable=False, null=True, populate_from='name', unique=True),
        ),
    ]
|
class Dog:
    """Minimal demo class with two printing behaviors."""

    def eat(self):
        print("狗吃屎")

    def sleep(self):
        print("狗睡觉")
# Create a Dog instance and exercise both behaviors.
邢凯 = Dog()
邢凯.eat()
邢凯.sleep()
|
'''Demonstration of Python name scoping: global vs. local vs. nonlocal.'''
X = 11
def g1():
    # No assignment to X in this scope, so the module-level X is read.
    print(X)
def g2():
    # 'global' makes the assignment rebind the module-level X.
    global X
    X = 22
def h1():
    # This X is local to h1; the module-level X is untouched.
    X = 33
    def nested():
        # Reads h1's X through the enclosing scope (nested is never called here).
        print(X)
def h2():
    X = 33
    def nested():
        # 'nonlocal' rebinds h2's local X, not the global one.
        nonlocal X
        X = 44
# id() shows which int object the global X is bound to at each step.
print(X, id(X))
g2()  # rebinds the global X to 22
print(X, id(X))
h1()  # no effect on the global X
h2()  # no effect on the global X
print(X, id(X))
# Turta Helper for Raspbian
# Distributed under the terms of the MIT license.
# Python Driver for Maxim DS18B20 Temperature Sensor
# Version 1.00 (Initial Release)
# Updated: July 14th, 2018
# For hardware info, visit www.turta.io
# For questions e-mail turta@turta.io
# You'll need to add the following line to the /boot/config.txt
# dtoverlay=w1-gpio,gpiopin=21
# Then change the gpiopin parameter to the relevant BCM GPIO number.
# If you're using more than one GPIO pin for OneWire,
# add multiple dtoverlay lines and modify the gpiopin parameters.
# A reboot will be required after config.txt modification.
import glob
import os
from enum import IntEnum
#Enumerations
class TempUnits(IntEnum):
    """Temperature units supported by the driver."""
    Celcius = 1     # historical misspelling, kept for backward compatibility
    Celsius = 1     # correctly spelled alias of the same member
    Fahrenheit = 2
class DS18B20Sensor:
    """DS18B20 OneWire temperature sensor driver."""

    # Class-level default: report Celsius unless Fahrenheit is requested.
    convertToFahrenheit = False

    def __init__(self, temp_unit):
        """Initiates the OneWire bus to get temperature from DS18B20 sensors.

        :param temp_unit: TempUnits member selecting Celsius or Fahrenheit.
        :raises TypeError: If temp_unit is not a TempUnits member.
        """
        if not isinstance(temp_unit, TempUnits):
            raise TypeError('temp_unit must be an instance of TempUnits Enum')
        # Compare against the enum member rather than the bare int 2.
        self.convertToFahrenheit = (temp_unit == TempUnits.Fahrenheit)
        # Load the OneWire kernel modules (no-op if already loaded).
        os.system("modprobe w1-gpio")
        os.system("modprobe w1-therm")

    def list_sensors(self):
        """Returns available DS18B20 sensor serial numbers on the OneWire bus."""
        sensors = glob.glob('/sys/bus/w1/devices/28-*')
        # Strip the '/sys/bus/w1/devices/' prefix (20 chars); serials keep "28-".
        return [sensor[20:] for sensor in sensors]

    def _c_to_f(self, celcius):
        """Converts the given Celsius value to Fahrenheit."""
        return celcius * 1.8 + 32

    def _read_sensor(self, sensor):
        """Reads one sensor file; returns the temperature in the configured
        unit, or None when the CRC check fails or no 't=' field is present."""
        with open("/sys/bus/w1/devices/" + sensor + "/w1_slave") as sf:
            response = sf.readlines()
        # First response line ends with "YES" when the CRC check passed.
        if response[0].strip()[-3:] != "YES":
            return None
        tempPosition = response[1].find("t=")
        if tempPosition == -1:
            return None
        temperature = float(response[1][tempPosition + 2:]) / 1000.0
        return self._c_to_f(temperature) if self.convertToFahrenheit else temperature

    def read_temp_from_first_sensor(self):
        """Returns temperature from the first readable DS18B20 sensor, or -100
        when no sensor is available."""
        for sensor in self.list_sensors():
            temperature = self._read_sensor(sensor)
            if temperature is not None:
                return temperature
        return -100

    def read_temp_from_all_sensors(self):
        """Returns [serial, temperature] pairs for all readable sensors."""
        results = []
        for sensor in self.list_sensors():
            temperature = self._read_sensor(sensor)
            if temperature is not None:
                results.append([sensor, temperature])
        return results

    def read_temp_by_serial(self, serial_number):
        """Returns temperature from the queried sensor, or -100 when no sensor
        with the given serial number is found."""
        for serial, temperature in self.read_temp_from_all_sensors():
            if serial == serial_number:
                return temperature
        return -100

    def __del__(self):
        """Releases the resources."""
        # Kept for parity with the original; there is nothing OS-level to free.
        del self.convertToFahrenheit
        return
#encoding:utf-8
'''
定义查询指定终端参数应答消息
'''
from lib.protocol.message.MessageBase import MessageBase
from lib.protocol.messagePlateform.ResponseBase import ResponseBase
class QueryTheTerminalParam_res(MessageBase, ResponseBase):
    """Builds the response message (msg ID 0x0104) to a 'query terminal
    parameters' request, echoing a fixed value for every queried parameter id.
    """

    # Fixed parameter table: (parameter id, value kind, value). Parameters are
    # always emitted in this order — matching the original if-chain —
    # regardless of the order in which they were queried.
    PARAM_TABLE = (
        ("00000010", "str", "tnet"),
        ("00000011", "str", "yuanhong"),
        ("00000012", "str", "123456"),
        ("00000013", "str", "10.100.12.30"),
        ("00000014", "str", "CDMA"),
        ("00000015", "str", "yuanhong2"),
        ("00000016", "str", "1234567"),
        ("00000017", "str", "10.100.12.31"),
        ("00000018", "int", 9001),
        ("00000019", "int", 9002),
    )

    def __init__(self):
        super().__init__()  # required so the parent classes' attributes exist
        self.msgRes = ""  # hex string of the request message being answered

    def setMsgRes(self, data):
        """Stores the hex string of the request message to answer."""
        self.msgRes = data

    def getMsgResBody(self):
        """Returns the request's message body: skips the 28-hex-char header,
        drops the 4-char trailer, and restores escaped 0x7e/0x7d bytes."""
        data = self.msgRes[28:][:-4]
        data = self.restore_7e7d(data)
        return data

    def getQueryWaterCode(self):
        """Returns the request's message serial number (header chars 22-26)."""
        return self.msgRes[22:26]

    def getQueryPhoneNum(self):
        """Returns the request's terminal phone number (header chars 10-22)."""
        return self.msgRes[10:22]

    def getQueryParams(self):
        """Splits the request body into the queried parameter ids (8 hex chars
        each); a trailing partial chunk is kept, as in the original."""
        body = self.getMsgResBody()
        params = []
        while body[0:8] != "":
            params.append(body[0:8])
            body = body[8:]
        return params

    def generateMsg(self):
        """Assembles a complete framed message:
        identifier + escape(header + body + checksum) + identifier."""
        msgHeader = self.getMsgHeader()
        msgBody = self.getMsgBody()
        checkCode = self.getCheckCode(msgHeader + msgBody)
        info = self.replace7e7d(msgHeader + msgBody + checkCode)
        return self.IDENTIFY + info + self.IDENTIFY

    def getMsgBody(self):
        """Builds the response body: request serial number + answered
        parameter count + parameter item list."""
        resWaterCode = self.getQueryWaterCode()
        resParamCounts = self.int2hexStringByBytes(len(self.getQueryParams()))
        paramList = self.getParamList()
        return resWaterCode + resParamCounts + paramList

    def getMsgHeader(self):
        """Builds the response header: msg id + body-property word + BCD phone
        number + serial number (+ sub-package info when fragmented)."""
        msgID = "0104"
        subPkg = 0  # pre-shifted sub-package flag: 0 (none) or 8192 (bit 13 set)
        msgBodyProperty = self.getMsgBodyProperty(msgBodyLen=int(len(self.getMsgBody()) / 2), subPkg=subPkg)
        phoneNum = self.int2BCD(self.getQueryPhoneNum())  # terminal phone number
        msgWaterCode = self.int2hexStringByBytes(1, 2)  # message serial number
        if subPkg != 8192:
            subPkgContent = ""  # no fragmentation info needed
        else:
            subPkgContent = self.getMsgPackage()
        return msgID + msgBodyProperty + phoneNum + msgWaterCode + subPkgContent

    def getMsgBodyProperty(self, msgBodyLen=128, encryptionType=0, subPkg=0):
        """Packs the 2-byte body-property word. All inputs are expected to be
        pre-shifted into their bit positions (e.g. subPkg is 0 or 8192).

        :raises RuntimeError: When the body length exceeds the 512-byte field.
        """
        if msgBodyLen >= 512:
            raise RuntimeError('消息体长度超长!')
        retain = 0  # reserved bits
        data = msgBodyLen + encryptionType + subPkg + retain
        return self.int2hexStringByBytes(data, 2)

    def getParamList(self):
        """Builds the parameter item list: one count byte followed by
        (id + value-length + value) for every queried, known parameter."""
        queryParams = self.getQueryParams()
        paramNums = 0  # number of parameter items actually emitted
        data = ""
        for paramId, kind, value in self.PARAM_TABLE:
            if paramId in queryParams:
                if kind == "str":
                    content = self.str2Hex(value)
                else:
                    content = self.int2hexStringByBytes(value, 4)
                data = data + paramId + self.int2hexStringByBytes(int(len(content) / 2)) + content
                paramNums = paramNums + 1
        return self.int2hexStringByBytes(paramNums) + data
if __name__ == "__main__":
    # Smoke test: parse a captured 0x8106 query frame and print the derived reply.
    obj = QueryTheTerminalParam_res()
    obj.setMsgRes("7e8106002901220150001000060a00000010000000110000001200000013000000180000001400000015000000160000001700000019c17e")
    body = obj.getMsgResBody()  # NOTE(review): unused beyond acting as a parse check
    print(obj.getQueryParams())
    print(obj.getQueryWaterCode())
    print(obj.generateMsg())
'''
CS122 Group Project: COVID-19 and Food Insecurity in Chicago
Sophia Mlawer, Mariel Wiechers, Valeria Balza, and Gabriela Palacios
This module manages all the sources of data.
'''
import covid
import food_swamp
import acs_data
import regress
import food_banks
import create_databases
def run():
    '''
    Retrieves and processes each data source in dependency order. Output CSVs
    are saved in the output_data folder; the final steps generate the SQL
    database and shapefiles required by the Django web application.
    '''
    # Saves covid_data.csv to output_data folder
    covid.go()
    # Saves acs_data.csv to output_data folder
    acs_data.go('input_data/ACS_demographic.csv', 'input_data/ACS_employment.csv',
                'input_data/ACS_housing.csv', 'input_data/zctatozip.csv')
    # Saves food_swamp_zip.csv to output_data folder
    food_swamp.go()
    # Regression generates the predicted 'food swamp' indicator and the two
    # tables consumed by the database-construction steps below.
    table_data, map_data = regress.model("output_data/food_swamp_zip",
                                         "output_data/acs_data",
                                         "output_data/covid_data")
    # Saves food_banks.csv to output_data folder
    food_banks_df = food_banks.go()
    # Creates databases and saves them to the Django directory (CS_covid_food)
    create_databases.gen_sqlite(table_data)
    create_databases.gen_shapefiles(map_data, food_banks_df)
if __name__ == "__main__":
    # Run the full pipeline only when executed directly (not on import).
    run()
# -*- coding: utf-8 -*-
"""
Created on Fri May 8 11:34:44 2020
@author: Eier
"""
import pymysql
# Connect by keyword: positional host/user/password arguments were removed
# from recent PyMySQL releases.
connection = pymysql.connect(host="IP-Adress", user="Username", password="Password")
try:
    cursor = connection.cursor()
    cursor.execute("use mandatory2")  # selecting database
    cursor.execute("select * from Observations;")
    # Column names come from the cursor metadata of the last SELECT.
    names = [i[0] for i in cursor.description]
    data = cursor.fetchall()
    print(len(data))
    # For every column, report the percentage of zero values and NULL values.
    for i in range(len(names)):
        missing_number = 0
        NULL_number = 0
        for row in data:
            if row[i] == 0:
                missing_number += 1
            if row[i] is None:  # identity check; '== None' is unidiomatic
                NULL_number += 1
        Zero_Missing_Percent = (missing_number / len(data)) * 100
        Null_Missing_Percent = (NULL_number / len(data)) * 100
        print("KolonneIndeks: {} VariableName: {} Prosent zero-missing verdier: {} "
              "Prosent NULL-missing verdier: {}".format(i, names[i],
                                                        Zero_Missing_Percent,
                                                        Null_Missing_Percent))
finally:
    # The original leaked the connection; always release it.
    connection.close()
|
import sqlite3
import QueryConstructor
import Sync
def get_first_word_after_filter(text, filter_name):
    """Returns the first whitespace-delimited word following each occurrence
    of filter_name in text.

    :param text: String to scan.
    :param filter_name: Substring whose following word is extracted.
    :return: List of first-words, one per occurrence (empty when none).
    """
    # Element 0 of the split precedes the first occurrence and is skipped,
    # exactly as the original index-1-based while loop did.
    return [segment.split()[0] for segment in text.split(filter_name)[1:]]
def run_query(query_text, configuration):
    """Executes the constructed EC2 query against the local SQLite cache and
    prints one formatted record per matching instance.

    On an empty 'describe' result the EC2 resources are synced into the cache
    and the query is retried once.

    :param query_text: Raw query text entered by the user.
    :param configuration: Dict holding the SQLite database path under 'db'.
    """
    query, query_type = QueryConstructor.construct_query(query_text, "ec2")
    connection = sqlite3.connect(configuration['db'])
    try:
        cursor = connection.cursor()
        cursor.execute(query)
        result = cursor.fetchall()
        if len(result) == 0 and query_type == "describe":
            # Cache miss: populate the local db from EC2, then retry once.
            Sync.populate_db_with_ec2_resources(query_text.split()[2])
            cursor.execute(query)
            result = cursor.fetchall()
        for items in result:
            res = ("InstanceId : " + str(items[0]) + "\nInstanceName : " + str(items[3])
                   + "\nSystemtag : " + str(items[5]) + "\nClienttag : " + str(items[4])
                   + "\nEnvtag : " + str(items[6]) + "\nPublicIp : " + str(items[1])
                   + "\nPrivateIp : " + str(items[2]) + "\n")
            # print() call form: the original used the Python 2 print statement,
            # which is a syntax error on Python 3.
            print(res)
    finally:
        # The original never closed the connection.
        connection.close()
|
# Group the letters 'a'-'j' into buckets keyed by their ordinal modulo 3.
letters = {}
for ch in 'abcdefghij':
    bucket = ord(ch) % 3
    if bucket not in letters:
        letters[bucket] = []
    letters[bucket].append(ch)
print(letters)
|
def solution(s):
    """Alternates character case within each space-separated word: even
    indexes (within the word) become uppercase, odd indexes lowercase."""
    transformed_words = []
    for word in s.split(' '):
        chars = [
            c.upper() if idx % 2 == 0 else c.lower()
            for idx, c in enumerate(word)
        ]
        transformed_words.append(''.join(chars))
    return ' '.join(transformed_words)
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
# reverse(문자열, args=튜플)
# 문자열에 해당하는 URL별칭을 찾고, 매개변수가 필요한 URL일 경우 args 매개변수에 있는 튜플값으로 자동 매핑
from .models import Question, Choice
from django.http.response import HttpResponseRedirect
import datetime # 파이썬 내장모듈, 시간정보를 얻을 때 사용
from .forms import * # QuestionForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
#from . import forms # forms.QuestionForm
# �Լ� or Ŭ����
# views.py : 내부적으로 동작할 행동들을 정의
# HTML 파일 전달,검색,등록,삭제,수정
# 함수 or 클래스 형태로 뷰 구현
# 함수형태로 구현시 반드시 첫번째 매개변수로 request 사용
# request : 웹 클라이언트의 요청에 대한 정보를 담고 있는 변수
def index(request):
    """Renders the main page with every registered Question."""
    print("index")
    # Renamed from 'list', which shadowed the builtin list().
    question_list = Question.objects.all()
    # NOTE(review): the template path includes 'templates/' explicitly; confirm
    # the project's TEMPLATES setting expects this (convention is "vote/index.html").
    return render(request, "vote/templates/index.html", {'question_list': question_list})
# render(request, HTML 파일경로, HTML���Ͽ� ������ ������-������)
#
def detail(request, question_id):
    """Shows one Question; responds with HTTP 404 when the id does not exist."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, "vote/templates/detail.html", {'question': question})
#
def vote(request, question_id):
    """Registers one vote for the submitted Choice, then redirects to the
    result page for question_id.

    NOTE(review): a non-POST request falls through and returns None (HTTP 500);
    confirm whether GET should redirect instead.
    """
    if request.method == "POST":
        # request.method holds the HTTP method string ("GET", "POST");
        # the comparison is case-sensitive.
        id = request.POST.get('choice')  # value of the 'choice' input from detail.html; always a string
        obj = get_object_or_404(Choice, pk=id)
        obj.votes += 1
        obj.save()  # persist the incremented vote count
        #return HttpResponseRedirect(reverse('result', args=(question_id,)))
        return HttpResponseRedirect(reverse('vote:result', args=(question_id,)))
        # args must be a tuple; a one-element tuple needs the trailing comma,
        # otherwise the parentheses are read as plain grouping.
        #return redirect('/result/%s/' %(question_id))
        # redirect(url) would send the client straight to another URL, so no
        # template would be needed here.
#
def result(request, question_id):
    """Renders the vote-tally page for one Question."""
    question = Question.objects.get(id=question_id)
    return render(request, "vote/templates/result.html", {'obj': question})
#
# 뷰 함수 정의시 위에 @함수명 작성하면, 해당 뷰를 호출하기 전에 함수명에 해당하는 함수가 먼저 호출됨
@login_required
def registerQ(request):
    """GET: shows an empty QuestionForm. POST: validates and saves a new
    Question stamped with the current time and the logged-in user as author."""
    if request.method == "GET":
        form = QuestionForm()  # unbound form: all fields rendered empty
        return render(request, "vote/templates/registerQ.html", {'form':form})
    elif request.method == "POST":
        #name = request.POST.get('question_text')
        #obj = Question()
        #obj.question_text = name
        form = QuestionForm(request.POST)
        if form.is_valid():  # must be called before using the form; True when no field errors
            obj = form.save(commit=False)
            # commit=False returns an unsaved model instance so the remaining
            # required fields can be filled in before saving.
            user = User.objects.get(username=request.user.get_username())
            obj.pub_date = datetime.datetime.now()
            obj.author = user
            obj.save()
            return HttpResponseRedirect(reverse('vote:detail', args=(obj.id,)))
        else:
            # Invalid input: re-render with the user's previous input plus an error.
            return render(request, "vote/templates/registerQ.html", {'form':form, 'error':"입력이 잘못됐습니다."})
#
@login_required
def deleteQ(request, question_id):
    """Deletes a Question; only its author may delete it."""
    obj = get_object_or_404(Question, pk=question_id)
    if obj.author.username != request.user.get_username():
        # Not the author: show an error page linking back to the detail view.
        return render(request, "vote/templates/error.html",
                      {'error':"잘못된 접근입니다", 'returnURL':reverse('vote:detail', args=(question_id,))})
    obj.delete()  # remove the object from the database
    return HttpResponseRedirect(reverse('vote:index'))
#
def registerC(request,question_id):
    """GET: shows an empty ChoiceForm for the given Question. POST: validates
    and attaches a new Choice to it. Only the Question's author may add choices.

    NOTE(review): unlike the other mutating views this one lacks
    @login_required — confirm whether that is intentional.
    """
    obj = get_object_or_404(Question, pk=question_id)
    if request.user.get_username() != obj.author.username:
        return render(request, "vote/templates/error.html",
                      {'error':"본인이 작성한 글이 아닙니다", 'returnURL':reverse('vote:detail', args=(question_id,))})
    if request.method == "GET":
        # Unbound Choice form, rendered next to the question text.
        form = ChoiceForm()
        return render(request, "vote/templates/registerC.html", {'form':form, 'name':obj.question_text})
    elif request.method == "POST":
        # Bind the submitted data to the form.
        form = ChoiceForm(request.POST)
        if form.is_valid():
            # Fill in the owning Question before saving.
            obj1 = form.save(commit=False)
            obj1.question = obj
            obj1.save()
            return HttpResponseRedirect(reverse('vote:detail', args=(obj1.question.id, )))
        else:
            # Invalid input: re-render with the previous input plus an error.
            return render(request, "vote/templates/registerC.html",
                          {'form':form, 'error':"입력 오류", 'name':obj.question_text})
@login_required
def deleteC(request, choice_id):
    """Deletes one Choice; only the author of its parent Question may do so,
    then redirects back to that Question's detail page."""
    obj = get_object_or_404(Choice, pk=choice_id)
    if request.user.get_username() != obj.question.author.username:
        return render(request, "vote/templates/error.html",
                      {'error':"잘못된 접근입니다", 'returnURL':reverse('vote:detail', args=(obj.question.id,))})
    # Keep the parent Question's id before the Choice row disappears.
    id = obj.question.id
    obj.delete()
    return HttpResponseRedirect(reverse('vote:detail', args=(id,)))
#
@login_required
def updateQ(request, question_id):
    """GET: shows the Question pre-filled in a form. POST: validates and saves
    the edited Question with a refreshed pub_date. Author-only."""
    obj = get_object_or_404(Question, pk=question_id)
    if request.user.get_username() != obj.author.username:
        # obj.author is the User who created this Question.
        return render(request, "vote/templates/error.html",
                      {'error':"본인이 작성한 글이 아닙니다", 'returnURL':reverse('vote:detail', args=(question_id,))})
    if request.method == "GET":
        # instance=obj pre-populates the form with the existing Question's values.
        form = QuestionForm(instance = obj)
        return render(request, "vote/templates/updateQ.html", {'form':form})
    elif request.method == "POST":
        # Overwrite obj's fields with the submitted values.
        form = QuestionForm(request.POST, instance=obj)
        if form.is_valid():
            question = form.save(commit=False)  # pub_date still needs filling in
            question.pub_date = datetime.datetime.now()
            question.save()
            return HttpResponseRedirect(reverse('vote:detail', args=(question_id,)))
        else:
            return render(request, "vote/templates/updateQ.html", {'form':form, 'error':"유효하지 않은 데이터"})
|
def forming_teams(players, k):
    """Counts (and prints) the triplets of sorted player values that sum
    exactly to k, using the classic two-pointer scan.

    Note: sorts `players` in place, so callers see the list reordered.

    :param players: List of comparable numbers.
    :param k: Target sum.
    :return: Number of triplets found.
    """
    players.sort()
    n = len(players)
    ans = 0
    for i in range(n - 2):
        # players[i] is invariant in the inner loop, so check it once here.
        # Because the list is sorted, every later anchor is also >= k, making
        # it safe to stop the whole scan (the original re-checked this on
        # every inner iteration and only broke the inner loop).
        if players[i] >= k:
            break
        left, right = i + 1, n - 1
        while left < right:
            cur_sum = players[i] + players[left] + players[right]
            if cur_sum == k:
                print(players[i], players[left], players[right])
                ans += 1
                left += 1
                right -= 1
            elif cur_sum < k:
                left += 1
            else:
                right -= 1
    return ans
# Demo: expects 4 triplets from 1..10 summing to 10.
print(forming_teams([1,2,3,4,5,6,7,8,9,10], 10))
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 14:31:16 2015
@author: Olivier + modif MJL+MR 140316
Modified by Dickson Owuor Sat Feb 23 18:17:35 2019
"""
import csv
import numpy as np
import gc
import sys
import ntpath
from .mbdll_border import *
def Trad(fileName):
    """Loads a CSV file and transposes it into a list of columns of floats.

    Returns either:
      * columns only, when the first row is fully numeric (no header);
      * columns skipping column 0, when only the first cell is non-numeric;
      * (titles, columns) with titles formatted "n : name", when the first
        row is a header.

    :param fileName: Path of the CSV file to read.
    """
    # 'rU' (universal newlines) was removed in Python 3.11; newline='' is the
    # csv-module-documented way to open input files. 'with' also makes the
    # explicit f.close() of the original redundant.
    with open(fileName, 'r', newline='') as f:
        dialect = csv.Sniffer().sniff(f.read(1024), delimiters=";,' '\t")
        f.seek(0)
        temp = list(csv.reader(f, dialect))
    if temp[0][0].replace('.', '', 1).isdigit() or temp[0][0].isdigit():
        # Fully numeric first row: transpose everything.
        return [[float(temp[j][i]) for j in range(len(temp))] for i in range(len(temp[0]))]
    if temp[0][1].replace('.', '', 1).isdigit() or temp[0][1].isdigit():
        # First column holds labels: transpose, skipping column 0.
        return [[float(temp[j][i]) for j in range(len(temp))] for i in range(1, len(temp[0]))]
    # Header row: build "n : name" titles and transpose the data rows.
    title = [str(i + 1) + ' : ' + temp[0][i] for i in range(len(temp[0]))]
    return title, [[float(temp[j][i]) for j in range(1, len(temp))] for i in range(len(temp[0]))]
def GraankInit(T, eq=False):
    """Builds the initial GRAANK candidate set: for every attribute column,
    a pair of n-by-n boolean matrices recording the pairwise '>' ('+') and
    '<' ('-') relations between the column's values.

    :param T: List of attribute columns of equal length n.
    :param eq: When True, ties set both direction bits for the pair.
    :return: List of (label-set, bool matrix) tuples, two per attribute.
    """
    candidates = []
    n = len(T[0])
    for attr, column in enumerate(T):
        plus_label = str(attr + 1) + '+'
        minus_label = str(attr + 1) + '-'
        gt = np.zeros((n, n), dtype='bool')
        lt = np.zeros((n, n), dtype='bool')
        for j in range(n):
            for k in range(j + 1, n):
                if column[j] > column[k]:
                    gt[j][k] = 1
                    lt[k][j] = 1
                elif column[j] < column[k]:
                    lt[j][k] = 1
                    gt[k][j] = 1
                elif eq:
                    # Ties count in both directions when eq is enabled.
                    lt[j][k] = 1
                    gt[k][j] = 1
                    gt[j][k] = 1
                    lt[k][j] = 1
        candidates.append((set([plus_label]), gt))
        candidates.append((set([minus_label]), lt))
    return candidates
def SetMax(R):
    """Reduces R (a list of sets) in place to its maximal elements, deleting
    every set that is contained in — or equal to — another. Returns the same
    list object.

    Bug fix: the original incremented k even after `del Cb[k]`, skipping the
    element that shifted into position k and leaving some subsets behind.
    """
    Cb = R
    i = 0
    while i < len(Cb) - 1:
        removed_i = False
        k = i + 1
        while k < len(Cb):
            if Cb[i].issuperset(Cb[k]) or Cb[i] == Cb[k]:
                # Do not advance k: the next element shifted into this slot.
                del Cb[k]
            elif Cb[i].issubset(Cb[k]):
                # Cb[i] is dominated: drop it and re-examine the same index.
                del Cb[i]
                removed_i = True
                break
            else:
                k += 1
        if not removed_i:
            i += 1
    return Cb
def inv(s):
    """Flips the trailing direction sign of a pattern label: '3+' <-> '3-'."""
    body, sign = s[:-1], s[-1]
    return body + ('-' if sign == '+' else '+')
def APRIORIgen(R,a,n):
    """Apriori-style candidate generation: joins every pair of current
    candidates whose label-set union grows by exactly one, prunes unions
    whose sub-patterns (or their inverses) are not all present at the current
    level, and keeps those whose joint support exceeds the threshold a.

    :param R: Current level as (label-set, bool matrix) tuples.
    :param a: Minimum support threshold in [0, 1].
    :param n: Number of data rows behind the matrices.
    :return: Next level of (label-set, matrix) candidates.
    """
    res=[]
    test=1
    temp=set()
    temp2=set()
    I=[]
    if(len(R)<2):
        return []
    # Label sets of the current level, used by the subset-pruning test below.
    Ck=[x[0] for x in R]
    for i in range(len(R)-1):
        for j in range(i+1,len(R)):
            temp=R[i][0]|R[j][0]
            invtemp={inv(x) for x in temp}
            # NOTE(review): the size check uses len(R[0][0]) (the first
            # candidate's label count) rather than len(R[i][0]); this is only
            # correct if every candidate at one level has the same size —
            # confirm.
            if ((len(temp)==len(R[0][0])+1) and (not (I!=[] and temp in I)) and (not (I!=[] and invtemp in I))):
                test=1
                # Apriori pruning: every (size-1) sub-pattern, or its inverse,
                # must already be present in the current level.
                for k in temp:
                    temp2=temp-set([k])
                    invtemp2={inv(x) for x in temp2}
                    if not temp2 in Ck and not invtemp2 in Ck:
                        test=0
                        break
                if test==1:
                    # Element-wise AND of boolean matrices = joint pattern.
                    m=R[i][1]*R[j][1]
                    # Support: fraction of the n*(n-1)/2 pairs covered.
                    t=float(np.sum(m))/float(n*(n-1.0)/2.0)
                    if t >a:
                        res.append((temp,m))
                I.append(temp)
    gc.collect()
    return res
def Graank(D_in, a, eq=False):
    """Runs the GRAANK gradual-pattern mining algorithm.

    :param D_in: (titles, columns) pair as produced by Trad.
    :param a: Minimum support threshold in [0, 1].
    :param eq: Treat ties as satisfying both directions (see GraankInit).
    :return: (titles, patterns, supports) where patterns are label sets and
        supports hold the corresponding support values.
    """
    title = D_in[0]
    T = D_in[1]
    res = []
    res2 = []
    n = len(T[0])
    # Bug fix: the original called G.remove(i) while iterating over G, which
    # skips the element following each removal. Filter into a new list instead.
    G = [c for c in GraankInit(T, eq)
         if float(np.sum(c[1])) / float(n * (n - 1.0) / 2.0) >= a]
    while G != []:
        G = APRIORIgen(G, a, n)
        i = 0
        while i < len(G) and G != []:
            temp = float(np.sum(G[i][1])) / float(n * (n - 1.0) / 2.0)
            if temp < a:
                del G[i]
            else:
                # Drop previously kept patterns subsumed by this larger one.
                z = 0
                while z < (len(res) - 1):
                    if res[z].issubset(G[i][0]):
                        del res[z]
                        del res2[z]
                    else:
                        z = z + 1
                res.append(G[i][0])
                res2.append(temp)
                i += 1
    return title, res, res2
def fuse(L):
    """Concatenates the datasets in L column-wise: starting from a copy of the
    first dataset (capped at 4000 columns), appends each further dataset's
    matching column, itself truncated to its first 4000 entries."""
    merged = list(L[0])[:4000]
    for col in range(len(L[0])):
        for src in range(1, len(L)):
            merged[col] = merged[col] + L[src][col][:4000]
    return merged
def fuseTrad(L):
    """Loads every CSV file named in L via Trad and fuses the results
    column-wise with fuse."""
    return fuse([Trad(fileName) for fileName in L])
def getSupp(T,s,eq=False):
    """Computes the support of gradual pattern s over columns T: the fraction
    of row pairs that satisfy the pattern in either the forward or the
    inverse direction.

    :param T: List of attribute columns of equal length.
    :param s: Pattern as a set of direction labels such as '3+' or '2-'.
    :param eq: When False, a tie on any attribute disqualifies the pair.
    """
    n=len(T[0])
    res=0
    for i in range(len(T[0])):
        for j in range(i+1,len(T[0])):
            # temp / tempinv: pair (i, j) still satisfies the pattern in the
            # forward / inverse direction respectively.
            temp=1
            tempinv=1
            for k in s:
                # NOTE(review): both slices below use len(s) (the pattern's
                # size) rather than len(k) (the label's length); confirm this
                # still parses the attribute number and sign correctly for
                # multi-digit attributes or pattern sizes other than 2.
                x=int(k[0:(len(s)-1)])-1
                if(k[len(s)-1]=='+'):
                    if(T[x][i]>T[x][j]):
                        tempinv=0
                    else:
                        if(T[x][i]<T[x][j]):
                            temp=0
                else:
                    if(T[x][i]<T[x][j]):
                        tempinv=0
                    else:
                        if(T[x][i]>T[x][j]):
                            temp=0
                if(T[x][i]==T[x][j] and not eq):
                    temp=0
                    tempinv=0
            res=res+temp+tempinv
    return float(res)/float(n*(n-1.0)/2.0)
#def main(filename1,supmin1,eq=False):
# D1,S1=Graank(Trad(filename1),supmin1,eq)
# print('D1 : '+filename1)
# for i in range(len(D1)):
# print(str(D1[i])+' : '+str(S1[i]))
#main('FluTopicData-testsansdate-blank.csv',0.5,False)
#main('ndvi_file.csv',0.5,False)
# --------------------- CODE FOR EMERGING PATTERNS -------------------------------------------
def get_maximal_items(init_list):
    """Returns the maximal item-sets among the candidates produced by
    gen_set: every item that is a strict subset of another is removed.

    NOTE(review): gen_set comes from the wildcard import of .mbdll_border;
    the bare 'except: continue' silences any unexpected error — consider
    narrowing it once gen_set's contract is known.
    """
    # comb = list((zip(init_list, tlag_list)))
    max_items = gen_set(tuple(init_list))
    temp = list(max_items)
    for item_i in max_items:
        for item_j in max_items:
            if set(item_i).issubset(set(item_j)) and set(item_i) != (set(item_j)):
                try:
                    if item_i in temp:
                        temp.remove(item_i)
                except:
                    continue
    return temp
# ------------------------- main method --------------------------------
def algorithm_gradual(file_name, min_sup):
    """Mines gradual patterns from one CSV file and prints the column titles
    followed by each pattern with its support (HTML-flavored output)."""
    title, patterns, supports = Graank(Trad(file_name), min_sup, False)
    for heading in title:
        print(heading)
    print('<h5>Pattern : Support</h5>')
    if patterns:
        for pattern, support in zip(patterns, supports):
            print(str(tuple(pattern)) + ' : ' + str("%.2f" % support) + "<br>")
        sys.stdout.flush()
    else:
        print("<h5>Oops! no gradual patterns found</h5>")
        sys.stdout.flush()
def algorithm_ep_gradual(file_path_1, file_path_2, min_sup):
    """Mines emerging gradual patterns between two datasets: extracts gradual
    patterns from each, reduces them to maximal item-sets, and reports the
    emerging patterns computed by mbdll_border.

    :param file_path_1: CSV path of the reference dataset.
    :param file_path_2: CSV path of the dataset compared against it.
    :param min_sup: Minimum support threshold passed to Graank.
    """
    try:
        # 1. get Gradual patterns for dataset 1 and 2
        title_1, gp_list_1, S1 = Graank(Trad(file_path_1), min_sup, False)
        title_2, gp_list_2, S2 = Graank(Trad(file_path_2), min_sup, False)
        # 2. check if data-sets have matching columns
        if title_1 == title_2:
            if gp_list_1 and gp_list_2:
                # 3. get maximal item-sets
                freq_pattern_1 = get_maximal_items(gp_list_1)
                freq_pattern_2 = get_maximal_items(gp_list_2)
                # 4. get emerging gradual patterns
                ep = mbdll_border(tuple(freq_pattern_1), tuple(freq_pattern_2))
                if not ep:
                    print("Oops! no relevant emerging pattern was found")
                    print("-------------------------------------------")
                else:
                    for line in title_1:
                        print(line)
                    file_1 = ntpath.basename(file_path_1)
                    file_2 = ntpath.basename(file_path_2)
                    print(str(file_2) + " opposing " + str(file_1))
                    print("---------------------------------------")
                    print(str(ep))
            else:
                print("Oops! no frequent patterns were found")
                print("---------------------------------------")
        else:
            print("Data-sets do not match")
            print("-----------------------")
    except Exception as error:
        # Boundary handler: report any failure instead of crashing the caller.
        print(error)
# Runs on import: compares the dataset against itself (placeholder paths).
algorithm_ep_gradual('../../../data/FluTopicData.csv', '../../../data/FluTopicData.csv', .1)
|
from django import db
from django.conf import settings
from django.core.management.base import NoArgsCommand
from data.models import MedianHouseholdIncome4Member
import csv
# National Priorities Project Data Repository
# import_mhi_4_member.py
# Updated 6/29/2010, Joshua Ruihley, Sunlight Foundation
# Imports Census Median Household Income for 4 Persons
# source info: http://www.census.gov/hhes/www/income/4person.html (accurate as of 6/29/2010)
# npp csv: http://assets.nationalpriorities.org/raw_data/census.gov/income/mhi_4_member.csv (updated 6/29/2010)
# destination model: MedianHouseholdIncome4Member
# HOWTO:
# 1) Download source files from url listed above
# 2) Convert source file to .csv with same formatting as npp csv
# 3) change SOURCE_FILE variable to the the path of the source file you just created
# 5) Run as Django management command from your project path "python manage.py import_mhi_4_member"
# Path to the NPP-formatted CSV under the local data root (see HOWTO above).
SOURCE_FILE = '%s/census.gov/income/mhi_4_member.csv' % (settings.LOCAL_DATA_ROOT)
class Command(NoArgsCommand):
    """Imports census median-household-income (4-person family) rows from
    SOURCE_FILE into the MedianHouseholdIncome4Member model.

    The CSV's first row lists the years; each following row is a state name
    followed by one value per year.
    """

    def handle_noargs(self, **options):
        # 'with' guarantees the file handle is closed (the original leaked it).
        with open(SOURCE_FILE) as source:
            data_reader = csv.reader(source)
            for i, row in enumerate(data_reader):
                if i == 0:
                    # Header row: remember the year for each column index.
                    year_row = row
                else:
                    for j, col in enumerate(row):
                        if j == 0:
                            state = col
                        else:
                            # One record per (state, year) cell.
                            year = year_row[j]
                            record = MedianHouseholdIncome4Member(state=state, year=year, value=col)
                            record.save()
__author__ = 'anilpa'
from pylab import *
# Experiment grid: every combination below yields one synthetic dataset file.
noise_vars = [0,1,3,5]       # scales for the additive Gaussian noise term
input_scales = [10,50,100]   # upper bound of the uniform input range
sizes = [2000] # always even!
discontinuity = True   # use a different target function on each side of a hyperplane
stationary = False     # when False, coefficients are redrawn mid-stream (concept drift)
if discontinuity:
    func_types = [[1,2],[1,3],[1,4],[2,3],[2,4]]
else:
    func_types = [[1,1],[2,2],[3,3],[4,4]]
# 16 coefficients: 4 per target-function family, refreshed by draw_coeffs().
coeffs = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
def draw_coeffs():
    """Redraws every entry of the global coeffs list uniformly from [0, 10)."""
    for idx in range(len(coeffs)):
        coeffs[idx] = np.random.random() * 10
def func(x1, x2, x3, x4, func_type, noise_var):
    """Evaluates one of four synthetic target families on (x1..x4) and adds
    Gaussian noise scaled by noise_var.

    Families: 1 linear, 2 n*log2(n) of a linear sum, 3 sum of squares,
    4 square of a linear sum. Each family uses its own slice of coeffs.
    """
    target = 0
    if func_type == 1:
        target = coeffs[0]*x1 + coeffs[1]*x2 + coeffs[2]*x3 + coeffs[3]*x4  # linear
    elif func_type == 2:
        # 'linear_sum' replaces the original local name 'sum', which shadowed
        # the builtin.
        linear_sum = coeffs[4]*x1 + coeffs[5]*x2 + coeffs[6]*x3 + coeffs[7]*x4
        target = linear_sum*log2(linear_sum)  # n*logn
    elif func_type == 3:
        target = coeffs[8]*(x1**2.0) + coeffs[9]*(x2**2.0) + coeffs[10]*(x3**2.0) + coeffs[11]*(x4**2.0)  # quad
    elif func_type == 4:
        linear_sum = coeffs[12]*x1 + coeffs[13]*x2 + coeffs[14]*x3 + coeffs[15]*x4
        target = linear_sum**2.0  # quad_sum
    # Additive Gaussian noise; exactly zero when noise_var == 0.
    noise = np.random.randn()*noise_var
    target = target + noise
    return target
import csv
# Generate one tab-separated dataset per grid combination.
for cur_inp_scale in input_scales:
    for cur_noise_var in noise_vars:
        for size in sizes:
            for cur_func_types in func_types:
                draw_coeffs()
                # Encode the configuration into the file name:
                # SYNTH_[D|ND]_[CD|NCD]_<size>_4_<scale>_<noise>_<funcpair>
                name = 'SYNTH'
                if discontinuity:
                    name += '_D_'
                else:
                    name += '_ND_'
                if stationary:
                    name += 'NCD_'
                else:
                    name += 'CD_'
                name += str(size) + '_4_' + str(cur_inp_scale) + '_' + str(cur_noise_var) + '_' + str(cur_func_types[0]) + str(cur_func_types[1])
                with open('/Users/anilpa/Desktop/GitHub/OnlineRegression/data/input/' + name + '.csv', 'w') as csvfile:
                    opdata = csv.writer(csvfile, delimiter='\t')
                    # data generation
                    for i in range(0, size):
                        if not stationary and 2*i == size:
                            draw_coeffs() # concept drift!
                        target = 0
                        inp1 = np.random.random()*cur_inp_scale
                        inp2 = np.random.random()*cur_inp_scale
                        inp3 = np.random.random()*cur_inp_scale
                        inp4 = np.random.random()*cur_inp_scale
                        # Which side of the hyperplane x1+x2+x3+x4 = 2*scale
                        # decides the target function (the discontinuity).
                        if inp1+inp2+inp3+inp4 < 2*cur_inp_scale:
                            target = func(inp1, inp2, inp3, inp4, cur_func_types[0], cur_noise_var)
                        else:
                            target = func(inp1, inp2, inp3, inp4, cur_func_types[1], cur_noise_var)
                        opdata.writerow([inp1, inp2, inp3, inp4, "|" + str(target)])
                print(name)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 13:05:02 2019
@author: paulo
"""
#DATA AUGMENTATION
import os
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import os
from PIL import Image
from skimage.color import rgb2gray
from keras.models import load_model
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from keras.layers import InputLayer
from keras.models import Sequential
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#import cv2
from keras import layers
from keras import models
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.applications.vgg16 import VGG16
from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm
from vis.input_modifiers import Jitter
from vis.optimizer import Optimizer
from vis.callbacks import GifGenerator
import cv2
from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm
from vis.input_modifiers import Jitter
from vis.optimizer import Optimizer
from vis.callbacks import GifGenerator
from vis.utils import utils
#from keras import activations
from keras.preprocessing import image
import keras
from keras.layers import Dropout
from keras import backend as K
from keras import regularizers
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
#get the folders
SERS_train_dir = r'/home/newuser/Desktop/emily try/Data/SERS/'
NOENH_train_dir = r'/home/newuser/Desktop/emily try/Data/nonSERS/'
gen_dir_tra = r'/home/newuser/Desktop/emily try/Data/Training/'
gen_dir_val = r'/home/newuser/Desktop/emily try/Data/Validation/'
#get the files inside the folders
SERS_train = os.listdir(SERS_train_dir)
NOENH_train = os.listdir(NOENH_train_dir)
all_dir = [SERS_train_dir, NOENH_train_dir]
all_data = [SERS_train, NOENH_train]
shape = []
for dire,file in zip(all_dir, all_data):
folder = dire.split('/')[-2]+'/'
for f in file:
ima = cv2.imread(dire+f)
shape.append([ima.shape[0],ima.shape[1]])
max_shape = np.max(shape,axis=0)
min_shape = np.min(shape,axis=0)
# image = cap[0]
path = r'/home/newuser/Desktop/alex/'
input_frames = 'test for big area image classification.png'
size = cv2.imread(path+input_frames).shape[0:2]
division = np.mean(size)//np.mean([max_shape,min_shape])
from PIL import Image
# Tile size = the largest training image; crop the big image into a grid of
# such tiles, scanning left-to-right then top-to-bottom.
height = max_shape[0]
width = max_shape[1]
im = Image.open(path+input_frames)
imgwidth, imgheight = im.size
test_image = []
for i in range(0,imgheight,height):
    for j in range(0,imgwidth,width):
        # Edge tiles may extend past the image border; PIL pads the crop.
        box = (j, i, j+width, i+height)
        a = im.crop(box)
        test_image.append(a)
#        try:
#            a.save(os.path.join(path,'img'+str(i)+'_'+str(j)+'.png'))
#        except:
#            pass
# --- Load the trained SERS/no-SERS classifier and freeze it for inference ---
classifier = load_model(r'/home/newuser/Desktop/alex/SERS_NOSERS_pillars_v05.h5')
model = Sequential()
# model.add(InputLayer(input_shape=(150,150)))
# Re-wrap the loaded layers in a fresh Sequential model, then freeze all
# weights: the model is only used for prediction and Grad-CAM gradients.
for layer in classifier.layers:
    model.add(layer)
for layer in model.layers:
    layer.trainable = False
labels = ['No Enhancement', 'SERS']
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions)
heatmaps = []
from keras.preprocessing import image
# --- Grad-CAM: one class-activation heat map per tile, blended on top ------
for f in test_image:
    img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)
#
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
#    x = preprocess_input(x)
    preds = model.predict(x)
    color = []  # NOTE(review): built but never used afterwards
    num = np.argmax(preds)
    if num == 0:
        color.append(['r','k'])
    if num == 1:
        color.append(['k','r'])
    #output from the conv net and not from the pooling...
    # Score of the predicted class; assumes the wrapped model's first layer
    # is itself a (VGG16-like) model -- TODO confirm for other checkpoints.
    img_output = model.layers[0].layers[-1].output[:,num]
    last_conv_layer = model.layers[0].get_layer('block5_conv3')
    # Gradient of the class score w.r.t. the last conv feature map,
    # averaged over batch and spatial axes (one weight per channel).
    grads = K.gradients(img_output, last_conv_layer.output)[0]
    pooled_grads = K.mean(grads, axis= (0,1,2))
#    pooled_grads = K.mean(grads, axis= (0,1,2))
    iterate = K.function([ model.layers[0].layers[0].input],
                          [pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value , conv_layer_output_value = iterate([x])
    # Weight each of the 512 block5_conv3 channels by its pooled gradient.
    for j in range(512):
        conv_layer_output_value[:,:,j] *= pooled_grads_value[j]
    heatmap = np.mean(conv_layer_output_value , axis=-1)
    heatmap = np.maximum(heatmap,0)          # ReLU: keep positive evidence
    heatmap /= np.max(heatmap)               # normalize to [0, 1]
    img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)
    heatmap = cv2.resize(heatmap, (img.size[1],img.size[0]))
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
    # NOTE(review): multiplying a PIL Image by a numpy scalar relies on
    # numpy coercing the image to an ndarray (contrast stretch to 0..255);
    # confirm this behaves as intended on your PIL/numpy versions.
    img *= np.uint8(255.0/max(max(img.getextrema())))
    blend = cv2.addWeighted(img,0.5, heatmap,0.5, 0)
    heatmaps.append(blend)
# --- Show each blended tile with a Low/Medium/High colour bar --------------
for h in heatmaps:
    fig, ax = plt.subplots()
    plt.axis('off')
    im = ax.imshow(h,interpolation='lanczos',cmap='hot')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.25)
    # NOTE(review): mixes blend.min() (last tile of the previous loop) with
    # h.max()/h.min(); presumably all three should come from h -- confirm.
    range_b = [blend.min(), np.mean([h.max(),h.min()]), h.max()]
    cbar = fig.colorbar(im, cax=cax, ticks=range_b, orientation='vertical')
    cbar.ax.set_yticklabels(['Low', 'Medium', 'High'], fontdict={'fontsize': 18, 'fontweight': 'medium'}) # horizontal colorbar
# NOTE(review): `i` and `j` below are leftover values from earlier loops;
# this indexing into heatmaps is fragile -- verify which tile is intended.
w,h,c=heatmaps[i*(int(division)+1)+j].shape
itera = int(division)+1
final_heat = np.zeros(shape=(w*itera,h*itera,c))
# Stitch the per-tile heat maps back into one large mosaic, row-major.
for i in range(itera):
    for j in range(itera):
        final_heat[i*w:(i+1)*w,j*h:(j+1)*h,:] = heatmaps[i*(int(division)+1)+j]
|
from django.shortcuts import render
from django.views import View
import json, datetime
from .models import User, Order
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth import login,logout,authenticate,update_session_auth_hash
import time
import smtplib, getpass
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
import string, secrets, ast
# Create your views here.
class RegisterView(View):
    """Create a new User from a JSON POST body.

    Expected JSON keys: email, password, banner_id, first_name, last_name,
    date_of_birth ("%Y-%m-%d", optional).  Responds with the literal body
    "200 SUCCESS" or "400 FAILURE" (HTTP status itself is always 200).
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled: the endpoint is called by a non-browser client.
        return super(RegisterView, self).dispatch(request, *args, **kwargs)

    def post(self, request, **kwargs):
        try:
            unicode_body = request.body.decode('utf-8')
            body = json.loads(unicode_body)
            email = body.get('email','')
            password = body.get('password','')
            banner_id = body.get('banner_id','')
            first_name = body.get('first_name','')
            last_name = body.get('last_name','')
            try:
                # date_of_birth is optional; an absent (None -> TypeError) or
                # malformed (ValueError) value leaves the field unset.
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.
                date_of_birth = datetime.datetime.strptime(body.get('date_of_birth',None),"%Y-%m-%d")
            except (TypeError, ValueError):
                date_of_birth = None
            user = User.objects.create_user(email, banner_id, password)
            user.first_name = first_name
            user.last_name = last_name
            user.date_of_birth = date_of_birth
            user.save()
            return HttpResponse("200 SUCCESS")
        except Exception:
            # Any failure (bad JSON, duplicate user, DB error) -> generic 400.
            return HttpResponse("400 FAILURE")
class LoginView(View):
    """Authenticate a user from a JSON POST body and open a session.

    Responds with the literal body "200 SUCCESS" on success and
    "401 FAILURE" on any authentication failure.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled: the endpoint is called by a non-browser client.
        return super(LoginView, self).dispatch(request, *args, **kwargs)

    def post(self, request, **kwargs):
        unicode_body = request.body.decode('utf-8')
        body = json.loads(unicode_body)
        email = body.get('email','')
        password = body.get('password','')
        user = authenticate(email=email, password=password)
        if user is not None and user.is_active:
            login(request, user)
            return HttpResponse("200 SUCCESS")
        # BUG FIX: the original returned None (a server error) for a valid
        # but inactive account; now every failure path returns 401 FAILURE.
        return HttpResponse("401 FAILURE")
class GetUserView(View):
    """Return the e-mail address of the currently authenticated user."""

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF exemption mirrors the other API views in this module.
        return super(GetUserView, self).dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        current_email = request.user.email
        return HttpResponse(current_email)
class PlaceOrderView(View):
    """Persist a new Order posted as JSON (banner_id, outlet_name,
    order_details); always responds with the body "200 SUCCESS"."""

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(PlaceOrderView, self).dispatch(request, *args, **kwargs)

    def post(self, request, **kwargs):
        payload = json.loads(request.body.decode('utf-8'))
        banner_id = payload.get('banner_id','')
        # Resolve the user's e-mail from the banner id so both identifiers
        # are denormalized onto the order row.
        email = User.objects.get(banner_id=banner_id).email
        #order.save() - this save would have been done if it involved adding foreign keys and many to many field - check docs for future reference
        order = Order()
        order.banner_id = banner_id
        order.email = email
        order.outlet_name = payload.get('outlet_name','')
        order.order_details = str(payload.get('order_details',''))
        order.save()
        # Deliberate pause carried over from the original implementation.
        time.sleep(3)
        return HttpResponse("200 SUCCESS")
class ChangePasswordView(View):
    """Change the logged-in user's password after verifying the old one.

    Body: "200 SUCCESS" when the old password matches, "401 FAILURE" when
    it does not.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(ChangePasswordView, self).dispatch(request, *args, **kwargs)

    def post(self, request, **kwargs):
        payload = json.loads(request.body.decode('utf-8'))
        user = User.objects.get(email=request.user.email)
        # Guard clause: reject immediately when the old password is wrong.
        if not user.check_password(payload.get('old_password','')):
            return HttpResponse("401 FAILURE")
        user.set_password(payload.get('new_password',''))
        user.save()
        # Keep the current session valid after the password change.
        update_session_auth_hash(request, user)
        return HttpResponse("200 SUCCESS")
class ResetPasswordView(View):
    """Reset a user's password to a random value and e-mail it to them."""

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(ResetPasswordView, self).dispatch(request, *args, **kwargs)

    def post(self, request, **kwargs):
        unicode_body = request.body.decode('utf-8')
        body = json.loads(unicode_body)
        email = body.get("email","")
        # 8 random alphanumeric characters prefixed with '#' so the password
        # satisfies a "special character" rule; secrets = CSPRNG.
        alphabet = string.ascii_letters + string.digits
        password = '#' + ''.join(secrets.choice(alphabet) for i in range(8))
        user = User.objects.get(email=email)
        user.set_password(password)
        user.save()
        ### update session hash may be needed - RAJESH to check later ###
        # SECURITY(review): SMTP credentials are hard-coded in source; they
        # should be moved to settings/environment variables.
        self.send_mail("foodatdalteam@gmail.com",email,"Password reset for your FoodAtDal account","Your request to change password has been processed.\nThis is your new password: {}".format(password),server="smtp.gmail.com",username="foodatdalteam@gmail.com",password="foodatdal")
        return HttpResponse("200 SUCCESS")

    def send_mail(self, send_from, send_to, subject, body_of_msg, files=None,
                  server="localhost", port=587, username='', password='',
                  use_tls=True):
        """Compose and send email with provided info and attachments.
        Args:
            send_from (str): from name
            send_to (str): to name
            subject (str): message title
            body_of_msg (str): message body
            files (list[str]): list of file paths to be attached to email
            server (str): mail server host name
            port (int): port number
            username (str): server auth username
            password (str): server auth password
            use_tls (bool): use TLS mode
        """
        # BUG FIX: `files=[]` was a mutable default argument shared across
        # calls; use None as the sentinel instead.
        files = [] if files is None else files
        message = MIMEMultipart()
        message['From'] = send_from
        message['To'] = send_to
        message['Date'] = formatdate(localtime=True)
        message['Subject'] = subject
        message.attach(MIMEText(body_of_msg))
        smtp = smtplib.SMTP(server, port)
        try:
            if use_tls:
                smtp.starttls()
            smtp.login(username, password)
            smtp.sendmail(send_from, send_to, message.as_string())
        finally:
            # BUG FIX: always close the connection, even when login/send
            # raises; previously a failure leaked the socket.
            smtp.quit()
class GetUserDetailsView(View):
    """Return the authenticated user's profile as a stringified dict.

    The response body is ``str(dict)`` (not JSON) with keys email,
    banner_id, first_name, last_name, date_of_birth and response_code.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(GetUserDetailsView, self).dispatch(request, *args, **kwargs)

    def get(self, request, **kwargs):
        # BUG FIX: removed a dead `request.body.decode('utf-8')` whose
        # result was never used.
        user_details = {}
        try:
            user = User.objects.get(email=request.user.email)
            user_details["email"] = user.email
            user_details["banner_id"] = user.banner_id
            user_details["first_name"] = user.first_name
            user_details["last_name"] = user.last_name
            try:
                user_details["date_of_birth"] = user.date_of_birth.strftime("%Y-%m-%d")
            except Exception:
                # date_of_birth may be None/unset.
                # BUG FIX: bare `except:` narrowed to `except Exception:`.
                user_details["date_of_birth"] = "null"
            user_details["response_code"] = "200"
        except Exception:
            # User lookup failed (e.g. anonymous session).
            # NOTE(review): "404S" looks like a typo for "404" -- kept as-is
            # because clients may already match on the exact string.
            user_details = {}
            user_details["response_code"] = "404S"
        return HttpResponse(str(user_details))
class PopulateOrdersView(View):
    """List all orders for the e-mail in the JSON POST body, split into
    'current_orders' and 'previous_orders' by pickup status.  The response
    body is ``str(dict)`` (not JSON)."""

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(PopulateOrdersView, self).dispatch(request, *args, **kwargs)

    def post(self, request, **kwargs):
        payload = json.loads(request.body.decode('utf-8'))
        email = payload.get("email","")
        current, previous = [], []
        for order in Order.objects.filter(email=email):
            info = {
                'outlet_name': order.outlet_name,
                # order_details is stored as a Python repr string; parse it
                # back into a structure with literal_eval (safe: no code).
                'order_details': ast.literal_eval(order.order_details),
                'order_datetime': order.order_datetime.strftime("%Y-%m-%d %H:%M:%S"),
                'picked_up': order.picked_up,
            }
            if info['picked_up']:
                info['picked_up_time'] = order.picked_up_time.strftime("%Y-%m-%d %H:%M:%S")
                current.append(info)
            else:
                previous.append(info)
        all_orders = {'current_orders': current, 'previous_orders': previous}
        return HttpResponse(str(all_orders))
|
from django.db import models
class BlogPost(models.Model):
    """ Keeps track of which blog posts have already been imported
        from the Posterous site
    """
    # Numeric id of the post on the Posterous side (not this model's pk).
    posterous_id = models.IntegerField()

    def __unicode__(self):
        # NOTE(review): __unicode__ is the Python 2 / old-Django convention;
        # under Python 3 this should be __str__ -- confirm the project's
        # Python version before changing.
        return 'Posterous Post #{0}'.format(self.posterous_id)
|
import math

# Read the two inputs (y first, then x, matching the original prompts).
y = float(input("Write y"))
x = float(input("Write x"))

# i = 2.33 * ln(sqrt(1 + cos(y)^2)) / (e^y + sin(x)^2),
# split into numerator and denominator for readability.
numerator = 2.33 * math.log(math.sqrt(1 + math.cos(y) ** 2))
denominator = math.e ** y + math.sin(x) ** 2
i = numerator / denominator
print(i)
from distutils.core import setup

# Minimal packaging script: installs the single `excel_validation` module.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools.
setup(
    name='excel-validation',
    version='1.0',
    py_modules=['excel_validation']
)
#!/usr/bin/env python3
"""Module for application setup/install
This module is pretty standard for python applications that you wish to install
via the pip module. It basically lets you do things like "pip install -e ." and
"pip install ."
"""
import setuptools
# NOTE: this file is a code-generator template -- the <%= ... %> markers are
# substituted (EJS-style) when the project is scaffolded; it is not runnable
# Python as-is.
setuptools.setup(
    name="<%= consoleCommand %>",
    author="<%= author %>",
    description="",
    url="",
    version="0.0.1",
    # Ship every package except the test tree.
    packages=setuptools.find_packages(exclude=["tests"]),
    include_package_data=True,
    package_dir={"<%= sourceFolder %>": "<%= sourceFolder %>"},
    install_requires=[
        # NOTE: List your dependencies here. If you are accustom to using
        # requirements.txt and dev_requirements.txt this would be your
        # requirements.txt items without the versions. Requirements.txt and
        # dev_requirements.txt will be built automatically via a Makefile
        # target.
    ],
    # Expose `<%= consoleCommand %>` as a CLI entry point bound to app:main.
    entry_points={"console_scripts": ["<%= consoleCommand %> = <%= sourceFolder %>.app:main"]},
)
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# prueba para la transferencia de logs desde equipo remoto a local
from datetime import date
import test_helper
from helpers.logging_helper import init_logger
import transfer_log
init_logger("transfer_log")

# Download logs for three fixed dates (2013-01-01, -02 and -05).
# BUG FIX: the original used zero-padded literals (01, 02, 05); those are
# octal literals (equal to 1, 2, 5) in Python 2 and outright syntax errors
# in Python 3.  Plain decimal literals behave identically and stay portable;
# the duplicated statements are folded into one loop.
for day_of_month in (1, 2, 5):
    day = date.today().replace(year=2013, month=1, day=day_of_month)
    transfer_log.run(day)
|
import os, sys, datetime
import numpy as np
import os.path as osp
import albumentations as A
from albumentations.core.transforms_interface import ImageOnlyTransform
from .face_analysis import FaceAnalysis
from ..utils import get_model_dir
from ..thirdparty import face3d
from ..data import get_image as ins_get_image
from ..utils import DEFAULT_MP_NAME
import cv2
class MaskRenderer:
    """Render a 2-D face-mask texture onto a face photo.

    A Basel Face Model (3DMM) is fitted to the 68 3-D landmarks produced by
    FaceAnalysis; the mask image is placed into the model's UV texture and
    re-rendered through the fitted mesh so it follows the face pose.
    """
    def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', insfa=None):
        #if insfa is None, enter render_only mode
        self.mp_name = name
        self.root = root
        self.insfa = insfa
        model_dir = get_model_dir(name, root)
        bfm_file = osp.join(model_dir, 'BFM.mat')
        assert osp.exists(bfm_file), 'should contains BFM.mat in your model directory'
        self.bfm = face3d.morphable_model.MorphabelModel(bfm_file)
        self.index_ind = self.bfm.kpt_ind
        bfm_uv_file = osp.join(model_dir, 'BFM_UV.mat')
        assert osp.exists(bfm_uv_file), 'should contains BFM_UV.mat in your model directory'
        uv_coords = face3d.morphable_model.load.load_uv_coords(bfm_uv_file)
        # Fixed UV texture resolution used for the rendered mask.
        self.uv_size = (224,224)
        # Default normalized UV rectangle the mask occupies (x0, y0, x1, y1).
        self.mask_stxr = 0.1
        self.mask_styr = 0.33
        self.mask_etxr = 0.9
        self.mask_etyr = 0.7
        self.tex_h , self.tex_w, self.tex_c = self.uv_size[1] , self.uv_size[0],3
        # Convert normalized UV coordinates to texture pixel coordinates,
        # flipping the v axis into image convention.
        texcoord = np.zeros_like(uv_coords)
        texcoord[:, 0] = uv_coords[:, 0] * (self.tex_h - 1)
        texcoord[:, 1] = uv_coords[:, 1] * (self.tex_w - 1)
        texcoord[:, 1] = self.tex_w - texcoord[:, 1] - 1
        self.texcoord = np.hstack((texcoord, np.zeros((texcoord.shape[0], 1))))
        self.X_ind = self.bfm.kpt_ind
        # Built-in mask textures and the probabilities used by augmentation.
        self.mask_image_names = ['mask_white', 'mask_blue', 'mask_black', 'mask_green']
        self.mask_aug_probs = [0.4, 0.4, 0.1, 0.1]
        #self.mask_images = []
        #self.mask_images_rgb = []
        #for image_name in mask_image_names:
        #    mask_image = ins_get_image(image_name)
        #    self.mask_images.append(mask_image)
        #    mask_image_rgb = mask_image[:,:,::-1]
        #    self.mask_images_rgb.append(mask_image_rgb)

    def prepare(self, ctx_id=0, det_thresh=0.5, det_size=(128, 128)):
        """Store detector settings; the FaceAnalysis instance itself is
        created lazily in build_params()."""
        self.pre_ctx_id = ctx_id
        self.pre_det_thresh = det_thresh
        self.pre_det_size = det_size

    def transform(self, shape3D, R):
        """Apply rotation matrix R to a 3xN vertex array (scale fixed at 1)."""
        s = 1.0
        shape3D[:2, :] = shape3D[:2, :]
        shape3D = s * np.dot(R, shape3D)
        return shape3D

    def preprocess(self, vertices, w, h):
        """Center the Nx3 vertices on the image and rotate them 180 degrees
        about y and z into the renderer's coordinate frame."""
        R1 = face3d.mesh.transform.angle2matrix([0, 180, 180])
        t = np.array([-w // 2, -h // 2, 0])
        vertices = vertices.T
        vertices += t
        vertices = self.transform(vertices.T, R1).T
        return vertices

    def project_to_2d(self,vertices,s,angles,t):
        """Transform model vertices with pose (s, angles, t) and return the
        2-D positions of the 68 keypoints (orthographic projection)."""
        transformed_vertices = self.bfm.transform(vertices, s, angles, t)
        projected_vertices = transformed_vertices.copy() # using stantard camera & orth projection
        return projected_vertices[self.bfm.kpt_ind, :2]

    def params_to_vertices(self,params , H , W):
        """Rebuild image-space mesh vertices from fitted BFM parameters
        [shape, expression, scale, angles, translation]."""
        fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = params
        fitted_vertices = self.bfm.generate_vertices(fitted_sp, fitted_ep)
        transformed_vertices = self.bfm.transform(fitted_vertices, fitted_s, fitted_angles,
                                                  fitted_t)
        transformed_vertices = self.preprocess(transformed_vertices.T, W, H)
        image_vertices = face3d.mesh.transform.to_image(transformed_vertices, H, W)
        return image_vertices

    def draw_lmk(self, face_image):
        """Debug helper: draw the detected landmarks on face_image (returns
        the unchanged image when no face is found)."""
        faces = self.insfa.get(face_image, max_num=1)
        if len(faces)==0:
            return face_image
        return self.insfa.draw_on(face_image, faces)

    def build_params(self, face_image):
        """Fit the BFM to the first detected face in face_image and return
        the parameter list [sp, ep, s, angles, t], or None if no face."""
        #landmark = self.if3d68_handler.get(face_image)
        #if landmark is None:
        #    return None #face not found
        if self.insfa is None:
            # Lazy construction: only detection + 3-D landmarks are needed.
            self.insfa = FaceAnalysis(name=self.mp_name, root=self.root, allowed_modules=['detection', 'landmark_3d_68'])
            self.insfa.prepare(ctx_id=self.pre_ctx_id, det_thresh=self.pre_det_thresh, det_size=self.pre_det_size)
        faces = self.insfa.get(face_image, max_num=1)
        if len(faces)==0:
            return None
        landmark = faces[0].landmark_3d_68[:,:2]
        fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = self.bfm.fit(landmark, self.X_ind, max_iter = 3)
        return [fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t]

    def generate_mask_uv(self,mask, positions):
        """Place the mask image into an otherwise-black UV texture at the
        normalized rectangle positions = (x0, y0, x1, y1)."""
        uv_size = (self.uv_size[1], self.uv_size[0], 3)
        h, w, c = uv_size
        uv = np.zeros(shape=(self.uv_size[1],self.uv_size[0], 3), dtype=np.uint8)
        stxr, styr = positions[0], positions[1]
        etxr, etyr = positions[2], positions[3]
        stx, sty = int(w * stxr), int(h * styr)
        etx, ety = int(w * etxr), int(h * etyr)
        height = ety - sty
        width = etx - stx
        mask = cv2.resize(mask, (width, height))
        uv[sty:ety, stx:etx] = mask
        return uv

    def render_mask(self,face_image, mask_image, params, input_is_rgb=False, auto_blend = True, positions=[0.1, 0.33, 0.9, 0.7]):
        """Render mask_image (array, or the name of a built-in mask image)
        onto face_image using previously fitted params; returns the
        composited image (or the raw render when auto_blend is False).

        NOTE(review): `positions` is a mutable default argument; safe only
        because it is never mutated here.
        """
        if isinstance(mask_image, str):
            to_rgb = True if input_is_rgb else False
            mask_image = ins_get_image(mask_image, to_rgb=to_rgb)
        uv_mask_image = self.generate_mask_uv(mask_image, positions)
        h,w,c = face_image.shape
        image_vertices = self.params_to_vertices(params ,h,w)
        # Render the mask texture through the fitted mesh; the (1 - x)*255
        # inversion makes non-mask pixels come out as 255 (white).
        output = (1-face3d.mesh.render.render_texture(image_vertices, self.bfm.full_triangles , uv_mask_image, self.texcoord, self.bfm.full_triangles, h , w ))*255
        output = output.astype(np.uint8)
        if auto_blend:
            # Pixels left at 255 belong to the original face; everything
            # else is replaced by the rendered mask.
            mask_bd = (output==255).astype(np.uint8)
            final = face_image*mask_bd + (1-mask_bd)*output
            return final
        return output
    #def mask_augmentation(self, face_image, label, input_is_rgb=False, p=0.1):
    #    if np.random.random()<p:
    #        assert isinstance(label, (list, np.ndarray)), 'make sure the rec dataset includes mask params'
    #        assert len(label)==237 or len(lable)==235, 'make sure the rec dataset includes mask params'
    #        if len(label)==237:
    #            if label[1]<0.0: #invalid label for mask aug
    #                return face_image
    #            label = label[2:]
    #        params = self.decode_params(label)
    #        mask_image_name = np.random.choice(self.mask_image_names, p=self.mask_aug_probs)
    #        pos = np.random.uniform(0.33, 0.5)
    #        face_image = self.render_mask(face_image, mask_image_name, params, input_is_rgb=input_is_rgb, positions=[0.1, pos, 0.9, 0.7])
    #    return face_image

    @staticmethod
    def encode_params(params):
        """Flatten [sp, ep, s, angles, t] into one list (199+29+1+3+3)."""
        p0 = list(params[0])
        p1 = list(params[1])
        p2 = [float(params[2])]
        p3 = list(params[3])
        p4 = list(params[4])
        return p0+p1+p2+p3+p4

    @staticmethod
    def decode_params(params):
        """Inverse of encode_params: rebuild the five parameter arrays from
        a flat 235-element sequence."""
        p0 = params[0:199]
        p0 = np.array(p0, dtype=np.float32).reshape( (-1, 1))
        p1 = params[199:228]
        p1 = np.array(p1, dtype=np.float32).reshape( (-1, 1))
        p2 = params[228]
        p3 = tuple(params[229:232])
        p4 = params[232:235]
        p4 = np.array(p4, dtype=np.float32).reshape( (-1, 1))
        return p0, p1, p2, p3, p4
class MaskAugmentation(ImageOnlyTransform):
    """Albumentations transform that renders a random face mask onto the
    image, using per-sample BFM parameters supplied through the extra
    'hlabel' target (encoded as in MaskRenderer.encode_params)."""
    def __init__(
        self,
        mask_names=['mask_white', 'mask_blue', 'mask_black', 'mask_green'],
        mask_probs=[0.4,0.4,0.1,0.1],
        h_low = 0.33,
        h_high = 0.35,
        always_apply=False,
        p=1.0,
    ):
        super(MaskAugmentation, self).__init__(always_apply, p)
        self.renderer = MaskRenderer()
        assert len(mask_names)>0
        assert len(mask_names)==len(mask_probs)
        self.mask_names = mask_names
        self.mask_probs = mask_probs
        # Random vertical placement range for the mask's top edge (UV space).
        self.h_low = h_low
        self.h_high = h_high
        #self.hlabel = None
    def apply(self, image, hlabel, mask_name, h_pos, **params):
        """Render `mask_name` onto `image` at vertical position `h_pos`,
        using the BFM parameters packed in `hlabel`."""
        #print(params.keys())
        #hlabel = params.get('hlabel')
        assert len(hlabel)==237 or len(hlabel)==235, 'make sure the rec dataset includes mask params'
        if len(hlabel)==237:
            # 237-long labels carry a 2-value prefix; hlabel[1] < 0 marks a
            # sample without valid mask params -> return unchanged.
            if hlabel[1]<0.0:
                return image
            hlabel = hlabel[2:]
        #print(len(hlabel))
        mask_params = self.renderer.decode_params(hlabel)
        image = self.renderer.render_mask(image, mask_name, mask_params, input_is_rgb=True, positions=[0.1, h_pos, 0.9, 0.7])
        return image
    @property
    def targets_as_params(self):
        # Ask albumentations to pass the image and the extra 'hlabel' target
        # into get_params_dependent_on_targets.
        return ["image", "hlabel"]
    def get_params_dependent_on_targets(self, params):
        """Sample the per-call randomness: mask texture and vertical offset."""
        hlabel = params['hlabel']
        mask_name = np.random.choice(self.mask_names, p=self.mask_probs)
        h_pos = np.random.uniform(self.h_low, self.h_high)
        return {'hlabel': hlabel, 'mask_name': mask_name, 'h_pos': h_pos}
    def get_transform_init_args_names(self):
        #return ("hlabel", 'mask_names', 'mask_probs', 'h_low', 'h_high')
        return ('mask_names', 'mask_probs', 'h_low', 'h_high')
if __name__ == "__main__":
    # Smoke test: fit BFM params on a sample photo and render a blue mask.
    tool = MaskRenderer('antelope')
    tool.prepare(det_size=(128,128))
    image = cv2.imread("Tom_Hanks_54745.png")
    params = tool.build_params(image)
    #out = tool.draw_lmk(image)
    #cv2.imwrite('output_lmk.jpg', out)
    #mask_image = cv2.imread("masks/mask1.jpg")
    #mask_image = cv2.imread("masks/black-mask.png")
    #mask_image = cv2.imread("masks/mask2.jpg")
    mask_out = tool.render_mask(image, 'mask_blue', params)# use single thread to test the time cost
    cv2.imwrite('output_mask.jpg', mask_out)
|
'''
Given a binary tree, return the postorder traversal of its nodes' values.
Example
Given binary tree {1,#,2,3},
1
\
2
/
3
return [3,2,1].
Challenge
Can you do it without recursion?
'''
class Solution:
    """Binary-tree post-order traversal (left, right, root)."""

    def postorderTraversal(self, root):
        """Return the post-order traversal of *root*'s values as a list.

        Iterative implementation (reversed "root, right, left" pre-order),
        which answers the stated challenge and — unlike the recursive
        version — does not hit Python's recursion limit on deep trees.
        """
        result = []
        stack = [root] if root else []
        while stack:
            node = stack.pop()
            result.append(node.val)
            # Push left before right so that after reversal the order is
            # left, right, root.
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)
        result.reverse()
        return result

    def posttraverse(self, root, result):
        """Recursive helper kept for backward compatibility: appends the
        post-order traversal of *root* onto *result* in place."""
        if not root:
            return
        self.posttraverse(root.left, result)
        self.posttraverse(root.right, result)
        result.append(root.val)
#*-* coding:UTF-8 *-*
'''
Created on 2016��4��21��
@author: xsx
'''
import unittest
from common import browserClass
from common import baseClass
import traceback
import time
from selenium.webdriver.common.keys import Keys #需要引入keys包
base=baseClass.base()
browser=browserClass.browser()
class createTest(unittest.TestCase):
    """Selenium UI tests (Python 2) for the beefun.wsgjp.com order pages:
    downloading web-shop orders and submitting raw orders for shipping."""
    def setUp(self):
        # Open Chrome, log in, and navigate to the orders module.
        self.driver=browser.startBrowser('chrome')
        self.url="http://beefun.wsgjp.com/"
        self.driver.get(self.url)
        self.driver.maximize_window()
        self.driver.implicitly_wait(10)
        # Element ids of the login form, plus the test account credentials.
        name="$1b8bb415$corpName"
        user="$1b8bb415$userName"
        pwd="$1b8bb415$pwdEdit"
        login="$1b8bb415$btnLogin"
        loginname="xsx123456"
        username="xsx"
        password="xsx123456."
        browser.loginUser(self.driver,name ,user,pwd,login,loginname,username,password)
        '''
        #切换到自义定流程
        btn=".//*[@id='$3327be68$linkButton1']"
        browser.findXpath(self.driver,btn)
        '''
        # Open the target module from the manager menu bar.
        module=".//*[@id='$80d499b2$ManagerMenuBar3']/div"
        modulename=".//*[@id='$80d499b2$ManagerMenuBar3_14']/td[3]"
        browser.openModule2(self.driver, module, modulename)
        pass
    def tearDown(self):
        print "test over"
        #self.driver.close()
        pass
    def testCreateorder(self):
        u'''Download orders from the web shop.'''
        # "More" button and the "download orders" menu entry.
        morebtn=".//*[@id='$dea0a8b3$button10']"
        downorder=".//*[@id='$dea0a8b3$synTrade0']/td[3]"
        browser.openModule2(self.driver, morebtn, downorder)
        begin=9
        end=13
        shop='微店一号'
        okbtn=".//*[@id='$8bee40ab$btnOk']"
        cancelbtn=".//*[@id='$8bee40ab$button1']"
        try:
            # Locate the shop entry in the dynamically numbered dropdown.
            str1='html/body/div['
            str2=']/table/tbody/tr[2]/td/div'
            btn1="/html/body/table[8]/tbody/tr[2]/td/div/div/div/table/tbody/tr[3]/td[2]/table/tbody/tr/td[2]/div/div"
            a=browser.getdtnamicElement(self.driver, str1, str2, btn1, shop, begin, end)
            try:
                # Download orders once for each of the three status options.
                for n in range(1,4):
                    time.sleep(1)
                    btn2="/html/body/table[8]/tbody/tr[2]/td/div/div/div/table/tbody/tr[3]/td[4]/table/tbody/tr/td[2]/div/div"
                    base.findXpath(self.driver,btn2).click()
                    time.sleep(1)
                    xpath2='html/body/div['+str(a+1)+']/table/tbody/tr['+str(n)+']/td/div'
                    #print base.findXpath(self.driver,xpath2).text
                    base.findXpath(self.driver,xpath2).click()
                    print(u"获取状态"+str(n)+"元素成功")
                    base.findXpath(self.driver,okbtn).click()
                    time.sleep(10)
            except:
                # Failed to pick an order-status entry.
                print(u"获取订单状态失败")
                print(traceback.format_exc())
            try:
                base.findXpath(self.driver,cancelbtn).click()
                #base.accalert(self.driver)
                time.sleep(2)
            except:
                # Download succeeded but the follow-up query failed.
                print(u"下载成功,查询失败")
                print(traceback.format_exc())
        except:
            # Order creation failed outright.
            print(u"订单创建失败")
            print(traceback.format_exc())
        print ('testover')
        pass
    def testSubmitorder(self):
        u'''Submit orders from the raw-orders page.'''
        submit=".//*[@id='$dea0a8b3$button9']"
        all=".//*[@id='$dea0a8b3$quickStatus']/table/tbody/tr[1]/td/div"
        browser.findXpath(self.driver,all).click()
        try:
            # Determine how many order rows there are to submit.
            str1=".//*[@id='$dea0a8b3$c_grid_Audit']/div[2]/table/tbody/tr["
            str2="]/td[4]/div"
            n=browser.getlines(self.driver, str1, str2)
            # Confirm button shown after a successful submit.
            okbtn="html/body/table[7]/tbody/tr[2]/td/div/table/tbody/tr/td/table/tbody/tr[2]/td/button"
            # "Continue" button in the warning dialog.
            contin=".//*[@id='$1c0ec6ae$canadd']"
            # Message text inside the warning dialog.
            winbtn=".//*[@id='$1c0ec6ae$grid']/div[2]/table/tbody/tr/td[3]/div"
            # Close button of the warning dialog.
            winclose=".//*[@id='$1c0ec6ae$btnClose']"
            trueorfalse=".//*[@id='$1c0ec6ae$grid']/div[2]/table/tbody/tr[2]/td[3]/div"
            # Submit each row in turn.
            for i in range(1,n):
                checkbtn=".//*[@id='$dea0a8b3$c_grid_Audit']/div[2]/table/tbody/tr["+str(i)+"]/td[2]/input"
                checkbtn2=".//*[@id='$dea0a8b3$c_grid_Audit']/div[2]/table/tbody/tr["+str(i-1)+"]/td[2]/input"
                # Processing-status cell of the row.
                subcon=".//*[@id='$dea0a8b3$c_grid_Audit']/div[2]/table/tbody/tr["+str(i)+"]/td[5]/div"
                subtext=browser.findXpath(self.driver,subcon).text
                # Warning marker (e.g. closed order); at most one per row.
                flash=".//*[@id='$dea0a8b3$c_grid_Audit']/div[2]/table/tbody/tr["+str(i)+"]/td[3]/div/font"
                # Whether a second dialog row exists (row already selected).
                con=browser.elementisexist(self.driver,trueorfalse)
                # Select the row and click submit.
                browser.findXpath(self.driver,checkbtn).click()
                browser.findXpath(self.driver,submit).click()
                if con==True:
                    browser.findXpath(self.driver,winclose).click()
                    time.sleep(2)
                    browser.findXpath(self.driver,checkbtn2).click()
                if subtext=='未提交':
                    # Row not yet submitted: check for a warning marker.
                    if browser.elementisexist(self.driver, flash)==True:
                        try:
                            # Case: the order's transaction was closed.
                            flashtext=browser.findXpath(self.driver,flash).text
                            #winbtn=".//*[@id='$1c0ec6ae$grid']/div[2]/table/tbody/tr/td[3]/div"
                            if flashtext==u"闭":
                                if browser.findXpath(self.driver,winbtn).text==u'过滤交易关闭的订单':
                                    browser.findXpath(self.driver,winclose).click()
                                    print "次订单已经关闭,测试成功"
                                else:
                                    print "此订单提示是闭,但弹出窗口提示不正常,测试失败,弹出框信息为:"+browser.findXpath(self.driver,winbtn).text
                        except:
                            print(u"原始订单页面关闭交易订单提交失败")
                            print(traceback.format_exc())
                        try:
                            # Case: the order is unpaid.
                            flashtext=browser.findXpath(self.driver,flash).text
                            #winbtn=".//*[@id='$1c0ec6ae$grid']/div[2]/table/tbody/tr/td[3]/div"
                            if flashtext==u"未付":
                                if browser.findXpath(self.driver,winbtn).text==u'未付款的订单':
                                    browser.findXpath(self.driver,contin).click()
                                    browser.findXpath(self.driver, okbtn).click()
                                    print "次订单是未支付订单,测试成功"
                                else:
                                    print "此订单提示是未付,但弹出窗口提示不正常,测试失败"
                                    print browser.findXpath(self.driver,winbtn).text
                        except:
                            print(u"原始订单页面未支付订单提交失败")
                            print(traceback.format_exc())
                        try:
                            # Case: goods not mapped to local items
                            # (not supported in Chrome).
                            flashtext=browser.findXpath(self.driver,flash).text
                            #winbtn=".//*[@id='$1c0ec6ae$grid']/div[2]/table/tbody/tr/td[3]/div"
                            if flashtext==u"未对":
                                time.sleep(5)
                                #browser.findXpath(self.driver,all).send_keys(Keys.ENTER)
                                if browser.findXpath(self.driver,winbtn).text==u'过滤未对应本地商品的订单':
                                    browser.findXpath(self.driver,winclose).click()
                                    print "次订单是未未对应订单,测试成功"
                                else:
                                    print "此订单提示是未对,但弹出窗口提示不正常,测试失败"
                                    print browser.findXpath(self.driver,winbtn).text
                        except:
                            print(u"原始订单页面未对应订单处理失败")
                            print(traceback.format_exc())
                        # Case: a refund is in progress.
                        try:
                            flashtext=browser.findXpath(self.driver,flash).text
                            if flashtext==u'退':
                                browser.findXpath(self.driver,contin).click()
                                browser.findXpath(self.driver, okbtn).click()
                                print u"退款中订单提交发货成功"
                        except :
                            print(u"原始订单页面退款中订单提交失败")
                            print(traceback.format_exc())
                    # No warning marker: submit for shipping directly.
                    if browser.elementisexist(self.driver, flash)==False:
                        try:
                            # Normal submit.
                            browser.findXpath(self.driver, okbtn).click()
                        except:
                            print(u"原始订单页面订单未提交订单失败")
                            print(traceback.format_exc())
                elif subtext=='已提交发货':
                    # Row already submitted for shipping: expect the filter
                    # message and close the dialog.
                    try:
                        if browser.findXpath(self.driver,winbtn).text=='过滤已提交发货的订单':
                            browser.findXpath(self.driver,winclose).click()
                            print "该订单提交发货,测试成功"
                        else:
                            print "该订单提交发货,提示框信息显示失败,测试失败"
                    except:
                        print(u"原始订单页面订单提交发货失败")
                        print(traceback.format_exc())
                else :
                    # Unknown processing status: the test data lacks a case.
                    print "测试用例不含处理状态,请添加"
                    print subtext
                time.sleep(2)
                browser.findXpath(self.driver,checkbtn).click()
        except:
            print(u"原始订单页面订单提交失败")
            print(traceback.format_exc())
if __name__ == "__main__":
    # Run all tests in this module when executed directly.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
import pandas as pd
import numpy as np
class DataLoader:
    """Load the Story Cloze CSV data sets (training, validation, test)."""

    @staticmethod
    def load_validation_data():
        """Return (right_ending_nr, context_sentences, ending1, ending2)
        arrays from the validation CSV."""
        val_data = pd.read_csv("../data/cloze_test_val__spring2016 - cloze_test_ALL_val.csv")
        print("Labels: ", val_data.columns.tolist())
        val_right_ending_nr = val_data[['AnswerRightEnding']].values
        # Columns 1..4 are the four context sentences.
        val_context_sentences = val_data.iloc[:, 1:5].values
        val_ending_sentence1 = val_data[['RandomFifthSentenceQuiz1']].values
        val_ending_sentence2 = val_data[['RandomFifthSentenceQuiz2']].values
        return val_right_ending_nr, val_context_sentences, val_ending_sentence1, val_ending_sentence2

    @staticmethod
    def load_test_data_with_right_ending_nr():
        """Return the labeled test set (used for the report's experimental
        section); same tuple layout as load_validation_data."""
        val_data = pd.read_csv("../data/test_for_report-stories_labels.csv")
        print("Labels: ", val_data.columns.tolist())
        val_right_ending_nr = val_data[['AnswerRightEnding']].values
        val_context_sentences = val_data.iloc[:, 1:5].values
        val_ending_sentence1 = val_data[['RandomFifthSentenceQuiz1']].values
        val_ending_sentence2 = val_data[['RandomFifthSentenceQuiz2']].values
        return val_right_ending_nr, val_context_sentences, val_ending_sentence1, val_ending_sentence2

    @staticmethod
    def load_test_data_to_make_predictions():
        """Return the unlabeled test set (context, ending1, ending2) used to
        produce the hand-in predictions CSV."""
        val_data = pd.read_csv("../data/test-stories.csv")
        print("Labels: ", val_data.columns.tolist())
        # No label column here, so the context starts at column 0.
        val_context_sentences = val_data.iloc[:, 0:4].values
        val_ending_sentence1 = val_data[['RandomFifthSentenceQuiz1']].values
        val_ending_sentence2 = val_data[['RandomFifthSentenceQuiz2']].values
        return val_context_sentences, val_ending_sentence1, val_ending_sentence2

    @staticmethod
    def load_training_data():
        """Return (context_sentences, ending_sentence, story_title) from the
        training CSV."""
        train_data = pd.read_csv("../data/train_stories.csv")
        print("Training data: ", train_data.head())
        print("Labels: ", train_data.columns.tolist())
        train_context_sentences = train_data.iloc[:, 2:6].values
        train_ending_sentence = train_data[['sentence5']].values
        train_story_title = train_data[['storytitle']].values
        return train_context_sentences, train_ending_sentence, train_story_title

    def load_data_with_fake_endings(self, file_name):
        """Pair each training story with a generated fake ending read from
        *file_name* (one ending per line); returns the same layout as
        load_validation_data, with the fake endings as ending2.

        Lines shorter than 10 characters are treated as generation failures
        and the corresponding training sample is discarded.
        """
        train_context_sentences, train_ending_sentence, _ = self.load_training_data()
        counter = 0
        discarded = 0
        fake_endings = []
        new_train_context_sentences, new_train_ending_sentence = [], []
        # BUG FIX: the file handle was opened and never closed; use a
        # context manager so it is released even on error.
        with open(file_name, "r") as sentences:
            for sentence in sentences:
                if len(sentence) < 10:
                    # don't include the sample in the new training data
                    discarded += 1
                else:
                    # append data
                    new_train_context_sentences.append(train_context_sentences[counter])
                    new_train_ending_sentence.append(train_ending_sentence[counter])
                    fake_endings.append(sentence)
                counter += 1
        # Strip the trailing newline from each ending and shape to (N, 1).
        fake_endings = np.array(list(map(lambda i: i[:-1], fake_endings)))
        fake_endings = fake_endings.reshape(len(fake_endings), 1)
        # Ending 1 (the real ending) is always the right one -> label 1.
        right_endings = np.ones(len(fake_endings))
        return right_endings, np.array(new_train_context_sentences), np.array(new_train_ending_sentence), fake_endings
import os
import pytest
import re
import subprocess
import textwrap
import time
@pytest.fixture
def qs_path():
    """Absolute path to BSDploy's quickstart.rst; skip the test when the
    docs are not present (e.g. an installed package without a docs tree)."""
    from bsdploy import bsdploy_path
    qs_path = os.path.abspath(os.path.join(bsdploy_path, '..', 'docs', 'quickstart.rst'))
    if not os.path.exists(qs_path):
        pytest.skip("Can't access quickstart.rst")
    return qs_path
def strip_block(block):
    """Drop leading/trailing empty lines from *block*, dedent the rest, and
    return it as a list of lines.

    An all-empty (or empty) block collapses to [''], matching
    ''.split('\\n').
    """
    trimmed = list(block)
    # Remove empty lines from the front, then from the back.
    while trimmed and not trimmed[0]:
        del trimmed[0]
    while trimmed and not trimmed[-1]:
        del trimmed[-1]
    return textwrap.dedent("\n".join(trimmed)).split("\n")
def iter_blocks(lines):
    """Yield (text, block) pairs from reStructuredText *lines*.

    *text* is the prose preceding an indented literal block, split into
    paragraphs (hard-wrapped lines re-joined first); *block* is the
    dedented literal block.  A block starts after a line ending in '::' or
    a '.. code-block::' directive and ends at the first non-empty line
    back at column 0.

    NOTE(review): a literal block that runs to end-of-input is never
    yielded (no final flush) -- confirm the parsed documents always end
    with prose.
    """
    inindent = False
    text = []
    block = []
    for line in lines:
        line = line.rstrip()
        if inindent and line and line.strip() == line:
            # Non-empty line at column 0: the literal block has ended.
            inindent = False
            text = '\n'.join(text)
            # Re-join hard-wrapped lines, then split on blank lines
            # to get one string per paragraph.
            text = re.sub('([^\n])\n([^\n])', '\\1\\2', text)
            text = re.split('\\n+', text)
            yield text, strip_block(block)
            text = []
            block = []
        if inindent:
            block.append(line)
        else:
            text.append(line)
            if line.endswith('::') or line.startswith('.. code-block::'):
                inindent = True
def parse_qs(qs_path):
    """Parse the quickstart document into a list of actions.

    Each action is one of:
      ('execute', block)        - a shell session to run
      ('create', names, block)  - a file to create (names from ``...`` text)
      ('add', names, block)     - content to append to an existing file
      ('expect', block)         - output the docs say should appear
    """
    with open(qs_path) as f:
        lines = f.read().splitlines()
    actions = []
    for text, block in iter_blocks(lines):
        text = '\n'.join(text)
        if block[0].startswith('%'):
            actions.append(('execute', block))
        elif '``' in text:
            names = re.findall('``(.+?)``', text)
            lowered = text.lower()
            # A paragraph may both create and add (two separate actions).
            if 'create' in lowered:
                actions.append(('create', names, block))
            if 'add' in lowered:
                actions.append(('add', names, block))
        elif 'completed' in text:
            actions.append(('expect', block))
    return actions
def iter_quickstart_calls(actions, confext, ployconf, tempdir):
    """Translate parsed quickstart *actions* into (action, func, args, kw)
    tuples: shell commands to execute and config files to create/extend.

    NOTE(review): *confext* is not referenced in this body - presumably kept
    so the signature mirrors the callers' fixture set; confirm.
    """
    # Map file names mentioned in the quickstart prose to writable fixtures.
    paths = {
        'ploy.conf': ployconf,
        'etc/ploy.conf': ployconf,
        'ploy.yml': ployconf,
        'etc/ploy.yml': ployconf,
        'files.yml': tempdir['bootstrap-files/files.yml'],
        'jailhost.yml': tempdir['host_vars/jailhost.yml'],
        'jailhost-demo_jail.yml': tempdir['jailhost-demo_jail.yml']}
    for action in actions:
        if action[0] == 'execute':
            for line in action[1]:
                if line.startswith('%'):
                    line = line[1:].strip()
                    parts = line.split()
                    # 'ploy ssh <host>' is interactive - skip it here.
                    if len(parts) == 3 and parts[:2] == ['ploy', 'ssh']:
                        continue
                    bootstrap = line.endswith('bootstrap')
                    if bootstrap:
                        # Wait for the VM's forwarded SSH port first and
                        # answer the bootstrap prompt with -y.
                        yield (action[0], wait_for_ssh, ('localhost', 44003), {})
                        line = '%s -y' % line
                    yield (action[0], subprocess.check_call, (line,), dict(shell=True))
                    if bootstrap:
                        # SSH comes back up after the reboot bootstrap does.
                        yield (action[0], wait_for_ssh, ('localhost', 44003), {})
        elif action[0] == 'create':
            # Overwrite the target file with the block's content.
            name = action[1][-1]
            content = list(action[2])
            content.append('')
            yield (action[0], paths[name].fill, (content,), {})
        elif action[0] == 'add':
            # Append the block's content to the target file.
            name = action[1][-1]
            content = paths[name].content().split('\n')
            content.extend(action[2])
            content.append('')
            yield (action[0], paths[name].fill, (content,), {})
        elif action[0] == 'expect':
            pass
        else:
            pytest.fail("Unknown action %s" % action[0])
def test_quickstart_calls(confext, qs_path, ployconf, tempdir):
    """Dry-run the quickstart: perform only the file writes, record every
    call the docs would trigger, and compare the call plan plus the
    resulting config files against the expected literals below."""
    calls = []
    for action, func, args, kw in iter_quickstart_calls(parse_qs(qs_path), confext, ployconf, tempdir):
        if action in ('add', 'create'):
            # Actually write the file so later 'add' steps can read it back.
            func(*args, **kw)
            calls.append((action, func.__self__.path))
        else:
            calls.append((func, args))
    # Expected sequence of shell commands and file operations.
    assert calls == [
        (subprocess.check_call, ('pip install "ploy_virtualbox>=2.0.0b1"',)),
        (subprocess.check_call, ('mkdir ploy-quickstart',)),
        (subprocess.check_call, ('cd ploy-quickstart',)),
        (subprocess.check_call, ('mkdir etc',)),
        ('create', ('%s/etc/ploy.conf' % tempdir.directory).replace('.conf', confext)),
        (subprocess.check_call, ('ploy start ploy-demo',)),
        ('add', ('%s/etc/ploy.conf' % tempdir.directory).replace('.conf', confext)),
        (wait_for_ssh, ('localhost', 44003)),
        (subprocess.check_call, ('ploy bootstrap -y',)),
        (wait_for_ssh, ('localhost', 44003)),
        ('add', ('%s/etc/ploy.conf' % tempdir.directory).replace('.conf', confext)),
        (subprocess.check_call, ('ploy configure jailhost',)),
        ('add', ('%s/etc/ploy.conf' % tempdir.directory).replace('.conf', confext)),
        (subprocess.check_call, ('ploy start demo_jail',)),
        ('create', '%s/jailhost-demo_jail.yml' % tempdir.directory),
        (subprocess.check_call, ('ploy configure demo_jail',)),
        (subprocess.check_call, ('mkdir host_vars',)),
        ('create', '%s/host_vars/jailhost.yml' % tempdir.directory),
        (subprocess.check_call, ('ploy configure jailhost -t pf-conf',)),
        (subprocess.check_call, ("ploy ssh jailhost 'ifconfig em0'",))]
    # The ploy config is built up by one 'create' and three 'add' steps.
    assert ployconf.content().splitlines() == [
        '[vb-instance:ploy-demo]',
        'vm-nic2 = nat',
        'vm-natpf2 = ssh,tcp,,44003,,22',
        'storage =',
        '    --medium vb-disk:defaultdisk',
        '    --type dvddrive --medium https://mfsbsd.vx.sk/files/iso/12/amd64/mfsbsd-se-12.0-RELEASE-amd64.iso --medium_sha1 2fbf2be5a79cc8081d918475400581bd54bb30ae',
        '',
        '[ez-master:jailhost]',
        'instance = ploy-demo',
        '',
        '[ez-master:jailhost]',
        'instance = ploy-demo',
        'roles =',
        '    dhcp_host',
        '    jails_host',
        '',
        '[ez-instance:demo_jail]',
        'ip = 10.0.0.1']
    # Ansible playbook created for the demo jail.
    assert tempdir['jailhost-demo_jail.yml'].content().splitlines() == [
        '---',
        '- hosts: jailhost-demo_jail',
        '  tasks:',
        '    - name: install nginx',
        '      pkgng:',
        '        name: "nginx"',
        '        state: "present"',
        '    - name: Setup nginx to start immediately and on boot',
        '      service: name=nginx enabled=yes state=started']
    # Host vars with the pf NAT rule forwarding port 80 into the jail.
    assert tempdir['host_vars/jailhost.yml'].content().splitlines() == [
        'pf_nat_rules:',
        '    - "rdr on em0 proto tcp from any to em0 port 80 -> {{ hostvars[\'jailhost-demo_jail\'][\'ploy_ip\'] }} port 80"']
@pytest.fixture
def virtualenv(monkeypatch, tempdir):
    """Create and activate a virtualenv inside *tempdir* for the duration of
    a test; on teardown restore the cwd and destroy the 'ploy-demo'
    VirtualBox VM the quickstart run creates.

    Uses @pytest.fixture: pytest.yield_fixture is deprecated and was removed
    in pytest 6; plain fixtures have supported yield since pytest 3.0.
    """
    origdir = os.getcwd()
    os.chdir(tempdir.directory)
    subprocess.check_output(['virtualenv', '.'])
    # Activate the virtualenv via environment variables only.
    monkeypatch.delenv('PYTHONHOME', raising=False)
    monkeypatch.setenv('VIRTUAL_ENV', tempdir.directory)
    monkeypatch.setenv('PATH', '%s/bin:%s' % (tempdir.directory, os.environ['PATH']))
    yield tempdir.directory
    os.chdir(origdir)
    # Best-effort VM cleanup: calls are not checked because the VM may never
    # have been created if the test failed early.
    subprocess.call(['VBoxManage', 'controlvm', 'ploy-demo', 'poweroff'])
    time.sleep(5)
    subprocess.call(['VBoxManage', 'unregistervm', '--delete', 'ploy-demo'])
def wait_for_ssh(host, port, timeout=90):
    """Poll *host*:*port* until an SSH banner ('SSH-2...') is received.

    The budget counts down roughly one unit per probing second; when it is
    exhausted without seeing a banner a RuntimeError is raised.
    """
    import socket
    remaining = timeout
    while remaining > 0:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.settimeout(1)
            try:
                if probe.connect_ex((host, port)) == 0 \
                        and probe.recv(128).startswith(b'SSH-2'):
                    return
            except socket.timeout:
                remaining -= 1
                continue
        finally:
            probe.close()
        time.sleep(1)
        remaining -= 1
    raise RuntimeError(
        "SSH at %s:%s didn't become accessible" % (host, port))
@pytest.mark.skipif("not config.option.quickstart_bsdploy")
def test_quickstart_functional(request, qs_path, confext, ployconf, tempdir, virtualenv):
    """Actually execute every step of the quickstart end-to-end inside a
    fresh virtualenv (only when --quickstart-bsdploy points at a bsdploy
    distribution to install)."""
    if confext == '.yml':
        pytest.xfail("No YML config file support yet")
    if not os.path.isabs(request.config.option.quickstart_bsdploy):
        pytest.fail("The path given by --quickstart-bsdploy needs to be absolute.")
    # Install ansible (optionally pinned via --ansible-version).
    if request.config.option.ansible_version:
        subprocess.check_call(['pip', 'install', 'ansible==%s' % request.config.option.ansible_version])
    else:
        subprocess.check_call(['pip', 'install', 'ansible'])
    # NOTE(review): '-i' 'https://...' are adjacent string literals, so they
    # concatenate into the single argument '-ihttps://...' - confirm that is
    # intentional (pip does accept short options glued to their value).
    subprocess.check_call(['pip', 'install', '-i' 'https://d.rzon.de:8141/fschulze/dev/', '--pre', request.config.option.quickstart_bsdploy])
    for action, func, args, kw in iter_quickstart_calls(parse_qs(qs_path), confext, ployconf, tempdir):
        func(*args, **kw)
|
# coding=utf-8
# coding: utf-8
import random
import codecs
from tkinter import *
import winsound
import time
# Prize base class: subclasses supply the sound and image for each prize.
class Nagroda:
    """Loads the prize database and builds the Tk window for a prize.

    NOTE(review): the database is read in the class body, i.e. at import
    time - importing this module fails if 'nagrody.txt' is missing.
    """

    # Prize database: one 'number|value|text' record per non-blank line.
    file_name = "nagrody.txt"
    nagroda_1 = []
    f = codecs.open(file_name, "r", encoding="utf-8")
    lines = f.readlines()
    for line in lines:
        if line != "\n":
            # Strip the trailing newline from each record.
            nagroda_1.append(line[0:-1])
    f.close()

    def okno(self, obraz):
        """Build the prize window showing image file *obraz*.

        Picks a random record, stores its value in self.wartosc and sets up
        the canvas, label, OK button and the animation state used by the
        subclasses' draw() loops.
        """
        # Draw uniformly from the whole database (the upper bound was
        # previously hard-coded to 8 records).
        losowa_zmienna = random.randint(0, len(self.nagroda_1) - 1)
        data_string = self.nagroda_1[losowa_zmienna]
        # Record layout: number|value|text, separated by '|'.
        tmp_1 = data_string.split("|")
        nr_nagrody = tmp_1[0]
        self.wartosc = tmp_1[1]
        nagrody = tmp_1[2]
        self.tk = Tk()
        # BUGFIX: title() is a method; the original assigned to it
        # (self.tk.title = "Game"), which never set the window title.
        self.tk.title("Game")
        self.canvas = Canvas(self.tk, width=500, height=300, bd=0, highlightthickness=0)
        self.canvas.pack()
        self.tekstura = PhotoImage(file=obraz)
        # OK button closes the window; the label shows the prize text.
        self.przycisk = Button(self.tk, text="OK", command=self.tk.destroy)
        self.label = Label(self.tk, text=nagrody)
        # The prize image, anchored at the top-left corner.
        self.postac = self.canvas.create_image(1, 1, image=self.tekstura, anchor=NW)
        self.label.pack(side="top", fill=X, expand=True)
        self.przycisk.pack(expand=False)
        self.canvas.move(self.postac, 0, 0)
        # Animation state: position, speed and timer interval (ms).
        self.pozycja = 0
        self.velocity = 0.5
        self.time = 1
class krolik(Nagroda):
    """Rabbit prize: plays a sound and bounces the rabbit image."""

    def glos(self):
        # Start the rabbit sound without blocking the GUI.
        winsound.PlaySound("krolik.wav", winsound.SND_ASYNC | winsound.SND_ALIAS)

    def draw(self):
        # Bounce horizontally: reverse direction at either edge.
        if self.pozycja > 300 or self.pozycja < 0:
            self.velocity = -self.velocity
        self.canvas.move(self.postac, self.velocity, 0)
        # Re-schedule this callback after self.time milliseconds.
        self.id = self.canvas.after(self.time, self.draw)
        self.pozycja += self.velocity

    def animacja(self):
        # Build the prize window with the rabbit picture.
        return self.okno("krolik.gif")

    def nagroda_krolika(self):
        """Play sound and animation until the window closes; return the prize value."""
        self.glos()
        self.animacja()
        self.draw()
        mainloop()
        # Window closed: cancel the pending animation callback and stop audio.
        self.canvas.after_cancel(self.id)
        winsound.PlaySound(None, winsound.SND_PURGE)
        return self.wartosc
class swMikolaj(Nagroda):
    """Santa Claus prize: plays a sound and bounces the Santa image."""

    def glos(self):
        # Start the Santa sound without blocking the GUI.
        winsound.PlaySound("swMikolaj.wav", winsound.SND_ASYNC | winsound.SND_ALIAS)

    def draw(self):
        # Bounce horizontally: reverse direction at either edge.
        if self.pozycja > 300 or self.pozycja < 0:
            self.velocity = -self.velocity
        self.canvas.move(self.postac, self.velocity, 0)
        # Re-schedule this callback after self.time milliseconds.
        self.id = self.canvas.after(self.time, self.draw)
        self.pozycja += self.velocity

    def animacja(self):
        # Build the prize window with the Santa picture.
        return self.okno("swMikolaj.gif")

    def nagroda_swMikolaja(self):
        """Play sound and animation until the window closes; return the prize value."""
        self.glos()
        self.animacja()
        self.draw()
        mainloop()
        # Window closed: cancel the pending animation callback and stop audio.
        self.canvas.after_cancel(self.id)
        winsound.PlaySound(None, winsound.SND_PURGE)
        return self.wartosc
class kotek(Nagroda):
    """Kitten prize: plays a sound and bounces the kitten image."""

    def glos(self):
        # Start the kitten sound without blocking the GUI.
        winsound.PlaySound("kotek.wav", winsound.SND_ASYNC | winsound.SND_ALIAS)

    def draw(self):
        # Bounce horizontally: reverse direction at either edge.
        if self.pozycja > 300 or self.pozycja < 0:
            self.velocity = -self.velocity
        self.canvas.move(self.postac, self.velocity, 0)
        # Re-schedule this callback after self.time milliseconds.
        self.id = self.canvas.after(self.time, self.draw)
        self.pozycja += self.velocity

    def animacja(self):
        # Build the prize window with the kitten picture.
        return self.okno("kotek.gif")

    def nagroda_kotka(self):
        """Play sound and animation until the window closes; return the prize value."""
        self.glos()
        self.animacja()
        self.draw()
        mainloop()
        # Window closed: cancel the pending animation callback and stop audio.
        self.canvas.after_cancel(self.id)
        winsound.PlaySound(None, winsound.SND_PURGE)
        return self.wartosc
if __name__ == "__main__":
    # Demo run: show each prize window in turn and print its value.
    # (Distinct instance names avoid shadowing the class names.)
    rabbit = krolik()
    print(rabbit.nagroda_krolika())
    santa = swMikolaj()
    print(santa.nagroda_swMikolaja())
    cat = kotek()
    print(cat.nagroda_kotka())
# Project Euler 17: count the letters used when writing out 1..1000 in
# English, using "and" (British usage) and ignoring spaces and hyphens.
numbers = ['one', 'two', 'three', 'four', 'five',
           'six', 'seven', 'eight', 'nine', 'ten',
           'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
           'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty',
           'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']

# "hundred" appears in every number 100-999 (900 times); "and" in every such
# number that is not an exact hundred (891 times).
hundreds = len('hundred') * 900
ands = len('and') * 891

sum1to9 = sum(len(n) for n in numbers[:9])
sum10to19 = sum(len(n) for n in numbers[9:19])
tens_words = sum(len(n) for n in numbers[19:])  # twenty .. ninety
# 1-99: 1-19 appear once each; each tens word heads ten numbers (20-29, ...,
# 90-99), and the ones words recur once per decade over the eight decades.
# (BUGFIX: the tens words were previously counted once instead of ten times.)
sum1to99 = sum1to9 + sum10to19 + tens_words * 10 + sum1to9 * 8
# 1-999: the 1-99 pattern repeats in each of the ten hundreds blocks, plus
# "<one..nine> hundred" (each ones name 100 more times) and the "and"s.
sum1to999 = sum1to99 * 10 + hundreds + ands + sum1to9 * 100
# BUGFIX: 'one thousand' contained a space, which must not be counted.
total = sum1to999 + len('onethousand')
print(total)
import base
import getters
ALL_CLASSES= base.BaseIpGetter.__subclasses__()
ALL= [x() for x in ALL_CLASSES]
def get_ip():
    """Return an IP address from a randomly chosen getter.

    Getters are tried in random order; each getter that raises
    base.GetIpFailed is discarded until one succeeds.  Raises
    base.GetIpFailed when every getter has failed.
    """
    import random
    candidates = ALL[:]
    while candidates:
        getter = random.choice(candidates)
        try:
            return getter.get_ip()
        except base.GetIpFailed:
            # This getter is broken right now - drop it and try another.
            candidates.remove(getter)
    raise base.GetIpFailed("None of the ip_getters returned a good ip")
|
# Copyright (C) 2020 Klika Tech, Inc. or its affiliates. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE file or at https://opensource.org/licenses/MIT.
from configparser import ConfigParser
from json import load
from os import remove, path, environ
from pathlib import Path
from re import match
from subprocess import run
from typing import Union
from pytest import mark
# JSON report produced by the plugin under test.
report_fname = '.report.json'


def teardown_module():
    """Remove the generated report after the tests in this module finish."""
    # EAFP: an absent file simply means there is nothing to clean up, and
    # avoids the check-then-remove race of the isfile() variant.
    try:
        remove(report_fname)
    except FileNotFoundError:
        pass
def get_plugin_cfg(key: str) -> str:
    """Return the value of *key* from the [pytest] section of ./pytest.ini."""
    parser = ConfigParser()
    parser.read('./pytest.ini')
    section = parser['pytest']
    return section[key]
def run_test(exp_rc: int = 0, environment: Union[dict, None] = None, publish=True) -> str:
    """
    Run the sample suite (common/report_tests.py) under pytest with the TM4J
    plugin enabled, in a subprocess, and return its stdout.

    :param publish: publish results to TM4J (adds --tm4j-no-publish if False)
    :param exp_rc: expected pytest return code
    :param environment: extra sys env vars merged over the current environment
    :return: decoded stdout of the pytest run
    :raises AssertionError: if the child wrote to stderr or its return code
        differs from *exp_rc*
    """
    # Start from a clean slate: the plugin writes a fresh JSON report.
    if path.isfile(report_fname):
        remove(report_fname)
    cmd = 'python -m pytest -p no:cacheprovider --tm4j'.split()
    if not publish:
        cmd.append('--tm4j-no-publish')
    cmd.append('common/report_tests.py')
    new_env = environ.copy()
    # Make the plugin under development importable by the child interpreter.
    plugin_location = Path.cwd().parent.as_posix()
    print('plugin location:', plugin_location)
    new_env['PYTHONPATH'] = plugin_location
    new_env['PYTHONDONTWRITEBYTECODE'] = '1'
    new_env['PYTEST_PLUGINS'] = 'pytest_tm4j_reporter.reporter'
    if environment:
        new_env.update(environment)
    cmd_run = run(cmd, capture_output=True, env=new_env)
    output = cmd_run.stdout.decode()
    err = cmd_run.stderr.decode()
    assert err == '', print(err)
    assert cmd_run.returncode == exp_rc, f'got stdout:\n{output}\ngot stderr:\n{err}'
    return output
def test_verify_output_json_structure():
    """Run the sample suite without publishing; verify the warning about
    tests lacking a TM4J ID and the structure of the JSON report."""
    output = run_test(exp_rc=1, publish=False)
    print('CHECK: tests without a TM4J ID are listed as warning in stdout')
    expected_ptrn = 'tests affected:.*report_tests.py::test_withoutTm4jId_two'
    for line in output.split('\n'):
        if match(expected_ptrn, line):
            break
    else:
        # for/else: no stdout line matched the warning pattern.
        raise AssertionError('a test without TM4J ID is not listed in warning message')
    # Compare the produced report against the checked-in reference JSON.
    with open('test_data/report.json') as orig_obj:
        orig = load(orig_obj)
    with open(report_fname) as rcvd_obj:
        rcvd = load(rcvd_obj)
    print('CHECK: JSON-output matches expected')
    assert orig == rcvd, f'result json does not match\n' \
                         f'original: {orig}\n' \
                         f'received: {rcvd}'
def test_publish_existing_testcycle():
    """Publishing into an existing test cycle should report that the cycle
    was reused and that the report was published to it."""
    project_prefix = get_plugin_cfg('tm4j_project_prefix')
    tcycle_key = 'R40'  # pre-existing cycle in the target TM4J project
    env = {'tm4j_testcycle_key': tcycle_key}
    output = run_test(exp_rc=1, environment=env)
    print('CHECK: publish result reported')
    # no real check because the api client does not return result
    # todo: api client to return result
    expected = f'[TM4J] Using existing test cycle: key={project_prefix}-{tcycle_key}'
    assert expected in output, f'got: {output}'
    expected = f'[TM4J] Report published. Project: {project_prefix}. Test cycle key: {tcycle_key}'
    assert expected in output, f'got: {output}'
def test_publish_create_testcycle():
    """Publishing without a cycle key should create a new test cycle with
    the configured description and report its generated key."""
    project_prefix = get_plugin_cfg('tm4j_project_prefix')
    tcycle_desc = get_plugin_cfg('tm4j_testcycle_description')
    output = run_test(exp_rc=1)
    exp1 = '[TM4J] Created a new test cycle'
    # The generated cycle key is numeric, so match it with a regex.
    exp2_raw = r'\[TM4J\] Report published\. Project: project_prefix\. Test cycle key: R\d+'
    exp2 = exp2_raw.replace('project_prefix', project_prefix)
    assert exp1 in output, f'\nexpected: {exp1}\ngot: {output}'
    for line in output.split('\n'):
        if match(exp2, line):
            break
    else:
        # for/else: no stdout line matched the publish pattern.
        raise AssertionError(f'\n{exp2} not found in output:\n{output}')
    exp3 = f'[TM4J] Test cycle description: {tcycle_desc}'
    assert exp3 in output, f'got: {output}'
# Placeholder tests for API-client error handling that is not implemented
# yet; they are expected to fail (xfail) until the client grows the feature.
@mark.xfail
def test_tm4j_unavailable():
    print('handling not implemented in API client')
    assert False


@mark.xfail
def test_tm4j_api_key_invalid():
    print('handling not implemented in API client')
    assert False


@mark.xfail
def test_tm4j_project_not_exist():
    print('handling not implemented in API client')
    assert False


@mark.xfail
def test_tm4j_test_cycle_specified_but_not_exist():
    print('handling not implemented in API client')
    assert False
|
import torch
import numpy as np
import sys, os
root_dir = os.path.join(os.path.dirname(__file__),'..')
if root_dir not in sys.path:
sys.path.insert(0, root_dir)
import constants
from config import args
def convert_kp2d_from_input_to_orgimg(kp2ds, offsets):
    """Map normalized ([-1, 1]) 2D keypoints back to original-image pixels.

    offsets columns: [0:2] padded-image size, [2:6] crop TRBL, [6:10] pad TRBL.
    """
    offsets = offsets.float().to(kp2ds.device)
    pad_size = offsets[:, :2]
    crop_trbl = offsets[:, 2:6]
    pad_trbl = offsets[:, 6:10]
    # Top-left corner of the crop expressed in original-image coordinates.
    left_top = torch.stack(
        [crop_trbl[:, 3] - pad_trbl[:, 3], crop_trbl[:, 0] - pad_trbl[:, 0]], 1)
    # Denormalize into the padded crop, then shift by the crop origin.
    return (kp2ds + 1) * pad_size.unsqueeze(1) / 2 + left_top.unsqueeze(1)
def vertices_kp3d_projection(outputs, meta_data=None, presp=args().model_version>3):
    """Project predicted vertices and 3D joints to the image plane.

    Returns a dict with camera-space vertices ('verts_camed'), normalized 2D
    joints ('pj2d') and - when *meta_data* with crop/pad offsets is given -
    the joints mapped back to the original image ('pj2d_org').
    (*presp* is unused in this body; kept for interface compatibility.)
    """
    cam = outputs['params']['cam']
    verts_camed = batch_orth_proj(outputs['verts'], cam, mode='3d', keep_dim=True)
    pj3d = batch_orth_proj(outputs['j3d'], cam, mode='2d')
    projected = {'verts_camed': verts_camed, 'pj2d': pj3d[:, :, :2]}
    if meta_data is not None:
        projected['pj2d_org'] = convert_kp2d_from_input_to_orgimg(
            projected['pj2d'], meta_data['offsets'])
    return projected
def batch_orth_proj(X, camera, mode='2d', keep_dim=False):
    """Weak-perspective (orthographic) projection.

    *camera* reshapes to (N, 1, 3) = [scale, tx, ty]; X's first two
    coordinates are scaled and translated.  With keep_dim=True the original
    z coordinate is re-attached.  (*mode* is unused; kept for compatibility.)
    """
    cam = camera.view(-1, 1, 3)
    scale = cam[:, :, 0].unsqueeze(-1)
    trans = cam[:, :, 1:]
    projected = X[:, :, :2] * scale + trans
    if keep_dim:
        projected = torch.cat([projected, X[:, :, 2].unsqueeze(-1)], -1)
    return projected
def project_2D(kp3d, cams, keep_dim=False):
    """Perspective projection of (N, 3) keypoints.

    cams layout: [depth offset d, focal f, tx, ty].
    NOTE: with keep_dim=True the input *kp3d* is modified in place (its x/y
    are replaced by the projection) and returned.
    """
    depth_off, focal, trans = cams[0], cams[1], cams[2:].unsqueeze(0)
    projected = kp3d[:, :2] / (kp3d[:, 2][:, None] + depth_off)
    projected = projected * focal + trans
    if not keep_dim:
        return projected
    kp3d[:, :2] = projected
    return kp3d
import imp
import sys
import os
import numpy as np
import torch
import pandas as pd
from chemprop.data import get_data, get_data_from_smiles, MoleculeDataLoader,MoleculeDataset
from chemprop.utils import load_args, load_checkpoint, load_scalers, makedirs, timeit
from chemprop.train.predict import predict
from rdkit.Chem import RDConfig
from rdkit import Chem
from rdkit import DataStructs
sys.path.append(os.path.join(RDConfig.RDContribDir, 'SA_Score'))
import sascorer
from g2g_optimization.train.metrics import *
def evaluate_chemprop(decoded_path, fold_path, chemprop_path):
    """Score both molecule columns of a decoded pair file with a chemprop model.

    decoded_path: whitespace-separated file with two SMILES columns, no header.
    fold_path: chemprop checkpoint directory.
    chemprop_path: chemprop repository root (its predict.py is shelled out to).

    Returns (statistics, predictions): *predictions* has columns
    Mol1/Target1/Mol2/Target2; *statistics* comes from sum_statistics.

    (A dead, fully commented-out in-process prediction path that duplicated
    this logic via chemprop's Python API was removed.)
    """
    data = pd.read_csv(decoded_path, header=None, delimiter=' ')
    # Each SMILES column is written to its own temp csv and scored by a
    # separate predict.py invocation.
    temp_folder = 'tmp'
    if not os.path.isdir(temp_folder):
        os.mkdir(temp_folder)
    data[0].to_csv(os.path.join(temp_folder, 'col1.csv'), index=False)
    data[1].to_csv(os.path.join(temp_folder, 'col2.csv'), index=False)
    for col in ('col1', 'col2'):
        os.system('python ' + os.path.join(chemprop_path, 'predict.py')
                  + ' --test_path ' + os.path.join(temp_folder, col + '.csv')
                  + ' --batch_size 16 --checkpoint_dir ' + fold_path
                  + ' --preds_path ' + os.path.join(temp_folder, 'preds_' + col + '.csv'))
    preds1 = pd.read_csv(os.path.join(temp_folder, 'preds_col1.csv'))
    preds1 = preds1.rename(columns={"0": "Mol1", preds1.columns[1]: "Target1"})
    preds2 = pd.read_csv(os.path.join(temp_folder, 'preds_col2.csv'))
    preds2 = preds2.rename(columns={"1": "Mol2", preds2.columns[1]: "Target2"})
    preds_tot = pd.concat((preds1, preds2), axis=1)
    statistics = sum_statistics(preds_tot)
    return statistics, preds_tot
def evaluate_chemprop_onecol(data, fold_path, chemprop_path):
    """Score a single-column SMILES dataframe via chemprop's predict.py.

    Writes *data* to tmp/temp.csv, runs predict.py (stdout discarded) and
    returns the predictions read back as a dataframe.
    """
    temp_folder = 'tmp'
    if not os.path.isdir(temp_folder):
        os.mkdir(temp_folder)
    in_csv = os.path.join(temp_folder, 'temp.csv')
    out_csv = os.path.join(temp_folder, 'preds_temp.csv')
    data.to_csv(in_csv, index=False)
    os.system('python ' + os.path.join(chemprop_path, 'predict.py')
              + ' --test_path ' + in_csv
              + ' --checkpoint_dir ' + fold_path
              + ' --preds_path ' + out_csv + ' > /dev/null')
    return pd.read_csv(out_csv)
def evaluate_chemprop_sol(decoded_path,solvent,fold_path,chemprop_path):
    """Like evaluate_chemprop, but for a two-molecule (solvation) model:
    every SMILES column is paired with the fixed *solvent* string and scored
    via chemprop's predict.py with --number_of_molecules 2.

    Returns (statistics, predictions) with Mol1/Target1/Mol2/Target2 columns.
    """
    data = pd.read_csv(decoded_path,header=None,delimiter=' ')
    temp_folder='tmp'
    if not os.path.isdir(temp_folder):
        os.mkdir(temp_folder)
    # Same solvent for every row; each column goes out with it as a pair.
    data['sol'] = solvent
    data[[0,'sol']].to_csv(os.path.join(temp_folder,'col1.csv'),index=False)
    data[[1,'sol']].to_csv(os.path.join(temp_folder,'col2.csv'),index=False)
    os.system('python '+os.path.join(chemprop_path,'predict.py')+' --test_path '+os.path.join(temp_folder,'col1.csv')+' --checkpoint_dir '+fold_path+' --preds_path '+os.path.join(temp_folder,'preds_col1.csv')+' --number_of_molecules 2')
    os.system('python '+os.path.join(chemprop_path,'predict.py')+' --test_path '+os.path.join(temp_folder,'col2.csv')+' --checkpoint_dir '+fold_path+' --preds_path '+os.path.join(temp_folder,'preds_col2.csv')+' --number_of_molecules 2')
    # Column index 2 holds the prediction (0/1 are the two input molecules).
    preds1 = pd.read_csv(os.path.join(temp_folder,'preds_col1.csv'))
    preds1 = preds1.rename(columns={"0":"Mol1",preds1.columns[2]:"Target1"})
    preds2 = pd.read_csv(os.path.join(temp_folder,'preds_col2.csv'))
    preds2 = preds2.rename(columns={"1":"Mol2",preds2.columns[2]:"Target2"})
    preds_tot = pd.concat((preds1,preds2),axis=1)
    statistics = sum_statistics(preds_tot)
    return statistics,preds_tot
|
#!/usr/bin/env python
'''
Algorithms calculating the offer quality with respect to different perspectives
'''
import numpy as np
def q_extreme(scores):
    '''
    Normalize scores relative to Tukey's fences.

    Input:
        scores - array of unsorted scores
    Output:
        list of scores scaled by the upper fence (q3 + 1.5*iqr): values at
        or above the upper fence map to 1, values at or below the lower
        fence map to max(lower, 0)/upper, the rest to max(v, 0)/upper.

    0.7% of scores are expected out of the range between (q1 - 1.5 * iqr)
    and (q3 + 1.5 * iqr), if the distribution is near Gaussian.
    '''
    tmp = np.sort(scores)
    n = len(tmp)
    # BUGFIX: integer (floor) division - plain '/' yields a float index and
    # breaks under Python 3; '//' keeps the original Python 2 behavior.
    q1 = tmp[n // 4]
    q3 = tmp[3 * n // 4]
    iqr = q3 - q1
    upper = q3 + 1.5 * iqr
    lower = q1 - 1.5 * iqr
    func = lambda v: max(lower, 0) / upper if v <= lower else (1 if v >= upper else max(v, 0) / upper)
    return [func(s) for s in scores]
def q_percentile(scores):
    '''
    Map each score to its percentile rank (fraction of sorted positions at
    or below it).

    Input:
        scores - array of unsorted scores
    '''
    ordered = np.sort(scores)
    n = len(ordered)
    # Duplicates keep the highest rank: later positions overwrite earlier.
    rank = {}
    for position, value in enumerate(ordered):
        rank[value] = (position + 1.0) / n
    return [rank[value] for value in scores]
if __name__ == '__main__':
    import unittest

    # Smoke tests: print inputs and outputs for visual inspection.
    class TestFixture(unittest.TestCase):
        def setUp(self):
            pass

        def tearDown(self):
            pass

        def test_q_extreme(self):
            scores = np.random.randn(10)
            # BUGFIX: print() calls replace the Python-2-only 'print x'
            # statement form (print(x) is also valid on Python 2).
            print(scores)
            print(q_extreme(scores))

        def test_q_percentile(self):
            scores = np.random.randn(10)
            print(scores)
            print(q_percentile(scores))

    unittest.main()
|
# Generated by Django 2.1.7 on 2019-03-16 18:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the choices of Vidicap.vidicap_model.

    NOTE(review): Django choice pairs are (stored value, human-readable
    label); here the pairs store the model name and display a digit, which
    looks inverted - confirm against the model definition before touching.
    """

    dependencies = [
        ('app', '0006_auto_20190316_1529'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vidicap',
            name='vidicap_model',
            field=models.CharField(choices=[('Vidicap Mini', '2'), ('Vidicap Stream', '3'), ('Vidicap HD', '1'), ('Vidicap Touch', '4')], max_length=15, verbose_name='Модель Vidicap'),
        ),
    ]
|
import pytest
from puzzles.increasing_decreasing_string import sort_string
def test_sort_string():
    # (input, expected) pairs covering repeats, short strings and a
    # single-character alphabet.
    cases = [
        ("aaaabbbbcccc", "abccbaabccba"),
        ("rat", "art"),
        ("leetcode", "cdelotee"),
        ("ggggggg", "ggggggg"),
        ("spo", "ops"),
    ]
    for text, expected in cases:
        assert sort_string(text) == expected
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import re
import os
def alphabetizer(string):
    """Return *string* with its ';'-separated elements sorted alphabetically."""
    return ';'.join(sorted(string.split(';')))
# Dimension -> feature mappings (UniMorph Schema User Guide, Appendix 1).
# Module scope so sibling helpers in this module (e.g. dim_pop) can use them.
mappings = {
    'Aktionsart': ['accmp', 'ach', 'acty', 'atel', 'dur', 'dyn', 'pct', 'semel', 'stat', 'tel'],
    'Animacy': ['anim', 'hum', 'inan', 'nhum'],
    'Argument': ['argac3s'],
    'Aspect': ['hab', 'ipfv', 'iter', 'pfv', 'prf', 'prog', 'prosp'],
    'Case': ['abl', 'abs', 'acc', 'all', 'ante', 'apprx', 'apud', 'at', 'avr',
             'ben', 'byway', 'circ', 'com', 'compv', 'dat', 'eqtv', 'erg', 'ess',
             'frml', 'gen', 'in', 'ins', 'inter', 'nom', 'noms', 'on', 'onhr',
             'onvr', 'post', 'priv', 'prol', 'propr', 'prox', 'prp', 'prt', 'rel',
             'rem', 'sub', 'term', 'trans', 'vers', 'voc'],
    # BUGFIX: these five entries were missing commas, so the adjacent string
    # literals concatenated into the single feature 'abcmpreqtrlsprl'.
    'Comparison': ['ab', 'cmpr', 'eqt', 'rl', 'sprl'],
    # NOTE(review): 'f' looks truncated (definite?) - confirm against schema.
    'Definiteness': ['f', 'indf', 'nspec', 'spec'],
    'Deixis': ['abv', 'bel', 'even', 'med', 'noref', 'nvis',
               'phor', 'prox', 'ref1', 'ref2', 'remt', 'vis'],
    'Evidentiality': ['assum', 'aud', 'drct', 'fh', 'hrsy', 'infer', 'nfh', 'nvsen', 'quot', 'rprt', 'sen'],
    'Finiteness': ['fin', 'nfin'],
    'Gender': ['bantu1-23', 'fem', 'masc', 'nakh1-8', 'neut'],
    'Information Structure': ['foc', 'top'],
    'Interrogativity': ['decl', 'int'],
    'Language-Specific Features': ['lgspec1', 'lgspec2'],
    'Mood': ['adm', 'aunprp', 'auprp', 'cond', 'deb', 'ded',
             'imp', 'ind', 'inten', 'irr', 'lkly', 'oblig',
             'opt', 'perm', 'pot', 'purp', 'real', 'sbjv', 'sim'],
    'Number': ['du', 'gpauc', 'grpl', 'invn', 'pauc', 'pl', 'sg', 'tri'],
    'Part of Speech': ['adj', 'adp', 'adv', 'art', 'aux', 'clf', 'comp', 'conj',
                       'det', 'intj', 'n', 'num', 'part', 'pro', 'propn', 'v',
                       'v.cvb', 'v.msdr', 'v.ptcp'],
    'Person': ['0', '1', '2', '3', '4', 'excl', 'incl', 'obv', 'prx'],
    'Polarity': ['pos', 'neg'],
    'Politeness': ['avoid', 'col', 'elev', 'foreg', 'form', 'high', 'humb', 'infm', 'lit',
                   'low', 'pol', 'stelev', 'stsupr'],
    'Possession': ['aln', 'naln', 'pss1d', 'pss1de', 'pss1di', 'pss1p', 'pss1pe',
                   'pss1pi', 'pss1s', 'pss2d', 'pss2df', 'pss2dm', 'pss2p', 'pss2pf',
                   'pss2pm', 'pss2s', 'pss2sf', 'pss2sform', 'pss2sinfm', 'pss2sm', 'pss3d',
                   'pss3df', 'pss3dm', 'pss3p', 'pss3pf', 'pss3pm', 'pss3s', 'pss3sf', 'pss3sm', 'pssd'],
    'Switch-Reference': ['cn_r_mn', 'ds', 'dsadv', 'log', 'or', 'seqma', 'simma', 'ss', 'ssadv'],
    'Tense': ['1day', 'fut', 'hod', 'immed', 'prs', 'pst', 'rct', 'rmt'],
    'Valency': ['appl', 'caus', 'ditr', 'imprs', 'intr', 'recp', 'refl', 'tr'],
    'Voice': ['acfoc', 'act', 'agfoc', 'antip', 'bfoc', 'cfoc', 'dir', 'ifoc', 'inv', 'lfoc',
              'mid', 'pass', 'pfoc']}

# All dimension names, in the schema's (insertion) order.
dimlst = list(mappings.keys())


def dim_ord(string, dim1 = False, dim2 = False, dim3 = False):
    '''
    PURPOSE
    -------
    Order the ';'-separated features of *string* by dimension.

    Features of up to three caller-chosen dimensions come first (dim1's
    features, then dim2's, then dim3's, each group alphabetized); all other
    recognized features follow, ordered by dimension name.  Matching the
    original behavior: dim2 is honored only together with dim1, dim3 only
    together with both, and features not found under any dimension are
    silently dropped.

    PARAMETERS
    ----------
    string | A string containing features separated by semicolons.
    dim1, dim2, dim3 | (Optional) dimension names as worded in the UniMorph
        Schema User Guide Appendix 1.

    RETURNS
    -------
    A ';'-joined string of the reordered features.
    '''
    # Reproduce the original gating of the optional dimensions exactly.
    if dim1 and dim2 and dim3:
        active = [dim1, dim2, dim3]
    elif dim1 and dim2:
        active = [dim1, dim2]
    elif dim1 and not dim3:
        active = [dim1]
    else:
        active = []
    front = [[] for _ in active]  # one bucket per requested dimension
    rest = {}  # feature -> its dimension, for the remaining features
    for feat in string.split(';'):
        key = feat.lower()
        for i, dim in enumerate(active):
            if key in mappings[dim]:
                front[i].append(feat)
                break
        else:
            # No early break here: as in the original, a feature listed
            # under several dimensions keeps the LAST matching dimension.
            for dim in dimlst:
                if key in mappings[dim]:
                    rest[feat] = dim
    # Stable sort by dimension name; insertion order breaks ties.
    ordered = [f for bucket in front for f in sorted(bucket)]
    ordered += sorted(rest, key=rest.get)
    return ';'.join(ordered)
def pov(array):
    '''
    PURPOSE
    -------
    Flag entries that are tagged for both first and second person.

    PARAMETERS
    ----------
    array | An array (or dataframe column) of feature strings.

    RETURNS
    --------
    A list of booleans, one per entry: True when the entry contains both a
    '1' and a '2' person tag after cleaning.
    '''
    first_tags = ['1;', ';1;', ';1']
    second_tags = ['2;', ';2;', ';2']
    flags = []
    for entry in array:
        # Blank out strings containing a lowercase letter followed by digits
        # so those digits are not mistaken for person features.
        cleaned = re.sub('(.*[a-z]\d+.*)|(.*d+\[a-z].*)', '', entry)
        has_first = any(tag in cleaned for tag in first_tags)
        has_second = any(tag in cleaned for tag in second_tags)
        flags.append(has_first and has_second)
    return flags
def dim_pop(df, column = 'feature'):
    '''
    PURPOSE
    -------
    - Populates a dataframe with one boolean column per dimension
    - A row's column is True when its feature string contains a feature of
      that dimension, False otherwise

    PARAMETERS
    ----------
    df | Pandas.DataFrame | Dataframe to be populated with dimension columns
    column | Str | (Optional). Name of the dataframe column holding the
        ';'-separated feature strings. 'feature' by default.

    RETURNS
    -------
    The same dataframe, extended with one boolean column per dimension.

    NOTE(review): relies on module-level ``mappings`` and ``dimlst``; in the
    notebook this file was exported from they were defined in the dim_ord
    cell - as a plain module this raises NameError unless those names exist
    at module scope. Confirm before use.
    '''
    # One list of booleans per dimension, filled row by row below.
    res = {dimlst[i]: [] for i in range(len(dimlst))}
    # For each row: the list of dimensions its features belong to.
    word_dims = []
    for string in df[column]:
        inner = []
        split_str = string.split(';')
        # Map each feature to every dimension whose feature list contains it
        # (a feature under several dimensions contributes all of them).
        for feat in split_str:
            [inner.append(a) for a, b in mappings.items() if feat.lower() in b]
        word_dims.append(inner)
    for val in word_dims:
        for dim in dimlst:
            # Membership test collapses duplicates: a dimension appearing
            # several times in a row still yields a single True.
            if dim in val:
                res[dim].append(True)
            else:
                res[dim].append(False)
    # Attach the boolean columns to the dataframe.
    for dim in dimlst:
        df[dim] = res[dim]
    return df
def master(filename, directory, save_dir, dim1 = False, dim2 = False, dim3 = False):
    '''
    PURPOSE
    -------
    Takes in a file, and applies all ordering functions and creates columns denoting dimension
    and 1st/2nd person co-occurence.
    PARAMETERS
    ----------
    filename | str | a .txt file from unimorph in the form 'eng.txt'.
    directory | str | A directory for which to LOOK FOR the file in the form
                      'C:\\---\\--\\folder_name' or '\\folder', depending on your working directory.
    save_dir | str | A directory for which to SAVE the output csv file to in the form
                     'C:\\---\\--\\folder_name' or '\\folder', depending on your working directory.
    dim1, dim2, dim3 | (Optional) dimension names forwarded to dim_ord; dim2 is
                       only honoured when dim1 is set, dim3 only when dim1 and
                       dim2 are set (mirrors the original branch chain).
    RETURNS
    -------
    A csv file containing an ordered feature column, and columns denoting dimensions and
    1st/2nd person dimension co-occurence
    '''
    name = filename.replace('.txt', '')
    df = pd.read_csv(directory + '\\' + filename, delimiter="\t", names=['word', 'form', 'feature'])
    df['feature'] = df['feature'].apply(alphabetizer)
    # Collect the leading run of truthy dims.
    dims = []
    for dim in (dim1, dim2, dim3):
        if not dim:
            break
        dims.append(dim)
    # Bug fix: Series.apply does not accept bare positional arguments for the
    # applied function -- extra arguments must go through the `args` tuple.
    df['feature'] = df['feature'].apply(dim_ord, args=tuple(dims))
    df['pov'] = pov(df['feature'])
    df = dim_pop(df, 'feature')
    # '\\mod_' (was '\mod_'): identical path string, without the
    # invalid-escape-sequence warning.
    return df.to_csv(save_dir + '\\mod_' + name + '.csv', index=False)
|
import sys
import argparse
from iclientpy.rest.api.updatetileset import update_smtilestileset, recache_tileset
from iclientpy.rest.api.cache import cache_workspace, cache_service
def cache_local_workspace(args):
    """Run a local-workspace tiling job from parsed CLI arguments.

    Keeps username/password even when None (the API expects them explicitly),
    drops every other None-valued option, parses the quoted coordinate/scale
    strings into numeric tuples/lists, and forwards the rest to cache_workspace.
    """
    opts = vars(args)
    # dict comprehension instead of dict(generator) -- same filtering rule
    opts = {k: v for k, v in opts.items()
            if k in ('username', 'password') or v is not None}
    # "'-180,90'" (possibly quoted) -> (-180.0, 90.0)
    opts['original_point'] = tuple(
        float(part) for part in opts['original_point'].strip("'").strip('"').split(','))
    opts['cache_bounds'] = tuple(
        float(part) for part in opts['cache_bounds'].strip("'").strip('"').split(','))
    if 'scale' in opts:
        opts['scale'] = [float(part) for part in opts['scale'].strip("'").strip('"').split(',')]
    del opts['func']  # the argparse dispatch callback is not an API argument
    cache_workspace(**opts)
def cache_remote_service(args):
    """Run a tiling job against an existing remote service from parsed CLI args.

    Same option massaging as cache_local_workspace (keep username/password even
    when None, drop other Nones, parse coordinates/scale), then delegates to
    cache_service.
    """
    opts = vars(args)
    # dict comprehension instead of dict(generator) -- same filtering rule
    opts = {k: v for k, v in opts.items()
            if k in ('username', 'password') or v is not None}
    opts['original_point'] = tuple(
        float(part) for part in opts['original_point'].strip("'").strip('"').split(','))
    opts['cache_bounds'] = tuple(
        float(part) for part in opts['cache_bounds'].strip("'").strip('"').split(','))
    if 'scale' in opts:
        opts['scale'] = [float(part) for part in opts['scale'].strip("'").strip('"').split(',')]
    del opts['func']  # the argparse dispatch callback is not an API argument
    cache_service(**opts)
def recache(args):
    """Re-run tiling for an existing tileset from parsed CLI arguments.

    Keeps username/password even when None, drops other None-valued options,
    and forwards everything (minus the dispatch callback) to recache_tileset.
    """
    opts = vars(args)
    opts = {k: v for k, v in opts.items()
            if k in ('username', 'password') or v is not None}
    del opts['func']  # the argparse dispatch callback is not an API argument
    recache_tileset(**opts)
def update_cache(args):
    """Update an SMTiles tileset from parsed CLI arguments.

    Same option massaging as the other sub-commands, then delegates to
    update_smtilestileset.
    """
    opts = vars(args)
    opts = {k: v for k, v in opts.items()
            if k in ('username', 'password') or v is not None}
    opts['original_point'] = tuple(
        float(part) for part in opts['original_point'].strip("'").strip('"').split(','))
    opts['cache_bounds'] = tuple(
        float(part) for part in opts['cache_bounds'].strip("'").strip('"').split(','))
    if 'scale' in opts:
        opts['scale'] = [float(part) for part in opts['scale'].strip("'").strip('"').split(',')]
    del opts['func']  # the argparse dispatch callback is not an API argument
    update_smtilestileset(**opts)
def get_parser():
    """Build the argparse parser for the tiling CLI.

    Four sub-commands, each dispatching through ``set_defaults(func=...)``:
    recache, updatecache, cacheworkspace, cacheservice.  Help strings are in
    Chinese and are part of the user-facing CLI output; they are left as-is.
    """
    parser = argparse.ArgumentParser(epilog='for more information , visit<http://iclientpy.supermap.io/>.', description="""
    切图,更新切片命令行工具
    """)
    sub_parsers = parser.add_subparsers()
    # --- recache: re-run tiling on an existing tileset --------------------
    recache_parser = sub_parsers.add_parser('recache')  # type: argparse.ArgumentParser
    recache_parser.set_defaults(func=recache)
    recache_require_group = recache_parser.add_argument_group('必选参数')
    recache_require_group.add_argument('-l', '--uri', dest='address', help='服务地址,如:http://localhost:8090/iserver')
    recache_require_group.add_argument('-u', '--user', dest='username', help='用户名', default=None)
    recache_require_group.add_argument('-p', '--password', dest='password', help='密码', default=None)
    recache_require_group.add_argument('-t', '--token', dest='token', help='用于身份验证的token')
    recache_require_group.add_argument('-c', '--component-name', dest='component_name', help='待更新缓存服务名称')
    recache_require_group.add_argument('-m', '--map-name', dest='map_name', help='切图地图名称')
    recache_require_group.add_argument('-s', '--storageid', dest='storageid', help='存储的id')
    recache_optional_group = recache_parser.add_argument_group('可选参数')
    recache_optional_group.add_argument('-n', '--tileset_name', dest='tileset_name', help='存储切片集名称')
    # --- updatecache: refresh tiles for a running service -----------------
    updatecache_parser = sub_parsers.add_parser('updatecache')  # type: argparse.ArgumentParser
    updatecache_parser.set_defaults(func=update_cache)
    updatecache_require_group = updatecache_parser.add_argument_group('必选参数')
    updatecache_require_group.add_argument('-l', '--uri', dest='address', help='服务地址,如:http://localhost:8090/iserver')
    updatecache_require_group.add_argument('-u', '--user', dest='username', help='用户名', default=None)
    updatecache_require_group.add_argument('-p', '--password', dest='password', help='密码', default=None)
    updatecache_require_group.add_argument('-t', '--token', dest='token', help='用于身份验证的token')
    updatecache_require_group.add_argument('-c', '--component-name', dest='component_name', help='待更新缓存服务名称')
    updatecache_require_group.add_argument('-w', '--w-loc', dest='w_loc', help='工作空间路径')
    updatecache_require_group.add_argument('-m', '--map-name', dest='map_name', help='切图地图名称')
    updatecache_require_group.add_argument('-o', '--original-point', dest='original_point',
                                           help='切图原点,需以单引号开始和结束,如:\'-180,90\'')
    updatecache_require_group.add_argument('-b', '--bounds', dest='cache_bounds',
                                           help='缓存范围,需以单引号开始和结束,如:\'-180,-90,0,0\'')
    updatecache_optional_group = updatecache_parser.add_argument_group('可选参数')
    updatecache_optional_group.add_argument('-s', '--scale', dest='scale', help='缓存比例尺分母,如:8000000,4000000,2000000')
    updatecache_optional_group.add_argument('--service-type', dest='w_servicetype', help='工作空间服务类型')
    updatecache_optional_group.add_argument('--tile-size', dest='tile_size', help='切片大小')
    updatecache_optional_group.add_argument('--tile-type', dest='tile_type', help='切片类型')
    updatecache_optional_group.add_argument('--format', dest='format', help='切片输出格式')
    updatecache_optional_group.add_argument('--epsgcode', dest='epsg_code', help='投影')
    updatecache_optional_group.add_argument('--storageid', dest='storageid', help='存储id')
    updatecache_optional_group.add_argument('-rw', dest='remote_workspace', action='store_true',
                                            help='输入的工作空间地址是远程iServer所在服务器上的地址,不需要上传工作空间。')
    updatecache_optional_group.add_argument('--quiet', dest='quiet', action='store_true', help='不需要确认,直接运行')
    updatecache_optional_group.add_argument('--source-component', dest='source_component_name', help='缓存更新数据来源服务')
    updatecache_optional_group.add_argument('--update', dest='update', action='store_true', help='更新服务缓存')
    # --- cacheworkspace: tile from a (possibly uploaded) workspace --------
    cache_workspace_parser = sub_parsers.add_parser('cacheworkspace')  # type: argparse.ArgumentParser
    cache_workspace_parser.set_defaults(func=cache_local_workspace)
    cache_workspace_require_group = cache_workspace_parser.add_argument_group('必选参数')
    cache_workspace_require_group.add_argument('-l', '--uri', dest='address',
                                               help='服务地址,如:http://localhost:8090/iserver')
    cache_workspace_require_group.add_argument('-u', '--user', dest='username', help='用户名', default=None)
    cache_workspace_require_group.add_argument('-p', '--password', dest='password', help='密码', default=None)
    cache_workspace_require_group.add_argument('-t', '--token', dest='token', help='用于身份验证的token')
    cache_workspace_require_group.add_argument('-w', '--w-loc', dest='w_loc', help='工作空间路径')
    cache_workspace_require_group.add_argument('-m', '--map-name', dest='map_name', help='切图地图名称')
    cache_workspace_require_group.add_argument('-s', '--scale', dest='scale', help='缓存比例尺分母,如:8000000,4000000,2000000')
    cache_workspace_require_group.add_argument('-o', '--original-point', dest='original_point',
                                               help='切图原点,需以单引号开始和结束,如:\'-180,90\'')
    cache_workspace_require_group.add_argument('-b', '--bounds', dest='cache_bounds',
                                               help='缓存范围,需以单引号开始和结束,如:\'-180,-90,0,0\'')
    cache_workspace_optional_group = cache_workspace_parser.add_argument_group('可选参数')
    cache_workspace_optional_group.add_argument('--tile-size', dest='tile_size', help='切片大小')
    cache_workspace_optional_group.add_argument('--tile-type', dest='tile_type', help='切片类型')
    cache_workspace_optional_group.add_argument('--format', dest='format', help='切片输出格式')
    cache_workspace_optional_group.add_argument('--epsgcode', dest='epsg_code', help='投影')
    cache_workspace_optional_group.add_argument('--storageid', dest='storageid', help='存储的id')
    cache_workspace_optional_group.add_argument('--output', dest='output', help='结果输出路径')
    cache_workspace_optional_group.add_argument('--remote-workspace', dest='remote_workspace', action='store_true',
                                                help='是否是远程工作空间路径')
    cache_workspace_optional_group.add_argument('--quiet', dest='quiet', action='store_true', help='不需要确认,直接运行')
    cache_workspace_optional_group.add_argument('--jobtilesourcetype', dest='job_tile_source_type',
                                                choices=['SMTiles', 'MBTiles', 'UGCV5', 'GeoPackage'],
                                                default='SMTiles',
                                                help='存储类型,仅在输出到本地存储路径时生效,Mongo,OTS与FastDFS时不生效,Mongo,OTS与FastDFS应直接设置storageid')
    # --- cacheservice: tile from an already-published service -------------
    cache_service_parser = sub_parsers.add_parser('cacheservice')  # type: argparse.ArgumentParser
    cache_service_parser.set_defaults(func=cache_remote_service)
    cache_service_require_group = cache_service_parser.add_argument_group('必选参数')
    cache_service_require_group.add_argument('-l', '--uri', dest='address', help='服务地址,如:http://localhost:8090/iserver')
    cache_service_require_group.add_argument('-u', '--user', dest='username', help='用户名', default=None)
    cache_service_require_group.add_argument('-p', '--password', dest='password', help='密码', default=None)
    cache_service_require_group.add_argument('-t', '--token', dest='token', help='用于身份验证的token')
    cache_service_require_group.add_argument('-c', '--component-name', dest='component_name', help='服务名称')
    cache_service_require_group.add_argument('-m', '--map-name', dest='map_name', help='切图地图名称')
    cache_service_require_group.add_argument('-o', '--original-point', dest='original_point',
                                             help='切图原点,需以单引号开始和结束,如:\'-180,90\'')
    cache_service_require_group.add_argument('-b', '--bounds', dest='cache_bounds',
                                             help='缓存范围,需以单引号开始和结束,如:\'-180,-90,0,0\'')
    cache_service_require_group.add_argument('-s', '--scale', dest='scale', help='缓存比例尺分母,如:8000000,4000000,2000000')
    cache_service_optional_group = cache_service_parser.add_argument_group('可选参数')
    cache_service_optional_group.add_argument('--tile-size', dest='tile_size', help='切片大小')
    cache_service_optional_group.add_argument('--tile-type', dest='tile_type', help='切片类型')
    cache_service_optional_group.add_argument('--format', dest='format', help='切片输出格式')
    cache_service_optional_group.add_argument('--epsgcode', dest='epsg_code', help='投影')
    cache_service_optional_group.add_argument('--storageid', dest='storageid', help='存储id')
    cache_service_optional_group.add_argument('--output', dest='output', help='结果输出路径')
    cache_service_optional_group.add_argument('--quiet', dest='quiet', action='store_true', help='不需要确认,直接运行')
    cache_service_optional_group.add_argument('--jobtilesourcetype', dest='job_tile_source_type',
                                              choices=['SMTiles', 'MBTiles', 'UGCV5', 'GeoPackage'], default='SMTiles',
                                              help='存储类型,仅在输出到本地存储路径时生效,Mongo,OTS与FastDFS时不生效,Mongo,OTS与FastDFS应直接设置storageid')
    return parser
def main(argv=None):
    """Parse *argv* and dispatch the matching sub-command.

    :param argv: argument list; defaults to the current ``sys.argv[1:]``.
    :return: process exit code (0 on success, argparse's code otherwise).
    """
    if argv is None:
        # Fixed: the default was previously `argv=sys.argv[1:]`, which is
        # evaluated once at import time and freezes whatever sys.argv held then.
        argv = sys.argv[1:]
    parser = get_parser()
    try:
        if not argv:
            parser.print_usage()
            parser.exit(1)
        # parse_known_args tolerates extra flags; only the known ones dispatch
        args = parser.parse_known_args(argv)[0]
        args.func(args)
    except SystemExit as err:
        return err.code
    return 0
if __name__ == '__main__':
    # NOTE(review): main()'s return code is discarded here; wrap in
    # sys.exit(main()) if the shell should observe non-zero on failure.
    main()
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
# Run-length encode the input string plus a '#' sentinel: b collects each
# run's character, c the run lengths (the sentinel itself is never appended;
# it only forces the final run to be flushed).
a=input()+'#'
l,b,c=-1,'',[]
for i,x in enumerate(zip(a,a[1:])):
    if x[0]!=x[1]:
        b+=x[0]
        c+=[i-l]
        l=i
y=len(b)
# Golfed select: [false_value, true_value][bool] -- prints the middle run
# length + 1 only when b is an odd-length palindrome, the middle run has
# length >= 2, and every symmetric run pair sums to >= 3; otherwise prints 0.
print([c[y//2]+1,0][y%2==0 or c[y//2]<2 or b!=b[::-1] or any(c[y//2-i]+c[y//2+i]<3 for i in range(1,y//2+1))])
# coding:utf-8
import requests, json, datetime
from account.models import Account
API_USER = 'shine_forever_test_zFZOBK'  # input your apt_user
API_KEY = 'su9zZf98O0ooT5i8'  # input your spi_key
url = "http://www.sendcloud.net/webapi/mail.send_template.json"
# Bug fix: the scheme separator '//' was missing ("http:127.0.0.1..."), which
# produced an unusable verification link in the activation e-mail.
# NOTE(review): 'do_verificatin' looks like a typo, but it must match the URL
# route defined in the Django app -- confirm before renaming.
base_link = "http://127.0.0.1:8000/account/do_verificatin?"
# NOTE(review): 5184000 s is 60 days, not one day -- the name is misleading,
# but it is kept because other modules may import it by this name.
one_day_in_second = 5184000
def send_email(name, email,token,authcode):
    # Python 2 module (bare print statements) -- do not run under Python 3.
    # Sends the SendCloud "test_template_active" template mail containing the
    # account-activation link built from (token, authcode).
    # Returns True when the API answers HTTP 200 with message == "success",
    # False otherwise.
    print "send_email......."
    link = base_link + 'token=%s&authcode=%s' % (token, authcode)
    # Per-recipient template substitutions for the %name% / %url% placeholders.
    sub_vars = {
        'to': [email],
        'sub': {
            '%name%': [name],
            '%url%': [link],
        }
    }
    params = {
        "api_user": API_USER,
        "api_key": API_KEY,
        "template_invoke_name": "test_template_active",
        "substitution_vars": json.dumps(sub_vars),
        "from": "service@sendcloud.im",
        "fromname": "shiyanlou",
        "subject": "Welcome to Shiyanlou",
        "resp_email_id": "true",
    }
    r = requests.post(url, data=params)
    print r.content
    if r.status_code == 200 and json.loads(r.content)["message"] == "success":
        return True
    else:
        return False
def verify_email(token, authcode):
    """Mark the account matching (token, authcode) as verified.

    Returns True on success, False when no matching account exists or the
    update cannot be saved.
    """
    print("verify_email..")
    try:
        account = Account.objects.get(token=token,authcode=authcode)
        account.verification_status = 1
        account.save()
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; lookup/save failures still return False.
        return False
|
# Read ten integers from the user, then report the average of the odd ones.
v = []
impares = []
for c in range(0, 10):
    x = int(input("Digite o " + str(c+1) + "º número: "))
    v.append(x)
for c in v:
    if c % 2 == 1:
        impares.append(c)
# Bug fix: guard against ZeroDivisionError when no odd number was entered.
if impares:
    print("A média dos números ímpares é: ", sum(impares)/len(impares))
else:
    print("Nenhum número ímpar foi digitado.")
|
from django.contrib import admin
# Register your models here.
from notification.models import Notification
class NotificationAdmin(admin.ModelAdmin):
    """Admin configuration for Notification: changelist columns plus
    search and sidebar filtering by notification type."""
    list_display = ('title', 'sub_title', 'type')  # changelist columns
    search_fields = ('type',)
    list_filter = ('type',)
admin.site.register(Notification, NotificationAdmin)
|
import sys
import json
import gzip
from tabulate import tabulate
def friendly_size(n):
    """Return *n* bytes expressed as decimal kilobytes, as a string."""
    kilobytes = n / 1000.
    return str(kilobytes)
def both_sizes(j):
    """Return (raw_length, gzipped_length) of *j* serialized as JSON bytes."""
    encoded = json.dumps(j).encode()
    return (len(encoded), len(gzip.compress(encoded)))
# Script body: load the JSON file named on the command line and print a table
# of each top-level key's size, raw and gzipped (decimal kB), plus its raw
# fraction of the total.  Requires the third-party `tabulate` package.
with open(sys.argv[1], 'r') as f:
    j = json.load(f)
tab = []
(a, b) = both_sizes(j)
total_bs = a  # raw byte size of the whole document, used for the fractions
tab.append(("total", friendly_size(a), friendly_size(b), 1))
subs = []
for k in j.keys():
    (a, b) = both_sizes(j[k])
    subs.append((k, a, b))
# largest raw size first
subs = sorted(subs, key=lambda x: -x[1])
for (k, n, n2) in subs:
    tab.append((k, friendly_size(n), friendly_size(n2), "{0:.2f}".format(n / float(total_bs))))
print(tabulate(tab, headers=("key", "kb", "gzip", "frac")))
|
import os
from juliabox.jbox_util import ensure_delete, make_sure_path_exists, unique_sessname, JBoxCfg
from juliabox.vol import JBoxVol
class JBoxDefaultConfigVol(JBoxVol):
    """JBoxVol implementation that keeps per-user configuration folders
    under a single host directory (FS_LOC)."""
    provides = [JBoxVol.JBP_CONFIG]
    # Host directory that holds one config folder per user session;
    # populated lazily by configure().
    FS_LOC = None

    @staticmethod
    def configure():
        """Resolve 'cfg_location' from JBoxCfg, create it if needed, cache it."""
        cfg_location = os.path.expanduser(JBoxCfg.get('cfg_location'))
        make_sure_path_exists(cfg_location)
        JBoxDefaultConfigVol.FS_LOC = cfg_location

    @staticmethod
    def _get_config_mounts_used(cid):
        """Return the config folder names mounted into container *cid*.

        Best effort: returns [] when mount inspection fails.
        """
        used = []
        props = JBoxDefaultConfigVol.dckr().inspect_container(cid)
        try:
            for _cpath, hpath in JBoxVol.extract_mounts(props):
                if hpath.startswith(JBoxDefaultConfigVol.FS_LOC):
                    used.append(hpath.split('/')[-1])
        except Exception:
            # Narrowed from a bare `except:`; deliberately non-fatal.
            JBoxDefaultConfigVol.log_error("error finding config mount points used in " + cid)
            return []
        return used

    @staticmethod
    def refresh_disk_use_status(container_id_list=None):
        """No-op: config volumes are not tracked for disk usage."""
        pass

    @staticmethod
    def get_disk_for_user(user_email):
        """Create (or recreate) the config volume for *user_email*."""
        JBoxDefaultConfigVol.log_debug("creating configs disk for %s", user_email)
        if JBoxDefaultConfigVol.FS_LOC is None:
            JBoxDefaultConfigVol.configure()
        disk_path = os.path.join(JBoxDefaultConfigVol.FS_LOC, unique_sessname(user_email))
        cfgvol = JBoxDefaultConfigVol(disk_path, user_email=user_email)
        cfgvol._unpack_config()
        return cfgvol

    @staticmethod
    def is_mount_path(fs_path):
        """True when *fs_path* lives under the config area.

        NOTE(review): raises TypeError if called before configure() sets
        FS_LOC -- confirm callers always configure first.
        """
        return fs_path.startswith(JBoxDefaultConfigVol.FS_LOC)

    @staticmethod
    def get_disk_from_container(cid):
        """Reconstruct the config volume mounted into container *cid*,
        or None when the container uses no config mount."""
        mounts_used = JBoxDefaultConfigVol._get_config_mounts_used(cid)
        if len(mounts_used) == 0:
            return None
        mount_used = mounts_used[0]
        disk_path = os.path.join(JBoxDefaultConfigVol.FS_LOC, str(mount_used))
        container_name = JBoxVol.get_cname(cid)
        # container names carry a leading '/'; strip it to get the session name
        sessname = container_name[1:]
        return JBoxDefaultConfigVol(disk_path, sessname=sessname)

    @staticmethod
    def refresh_user_home_image():
        """No-op: config volumes carry no user home image."""
        pass

    def release(self, backup=False):
        """Delete the config folder.  *backup* is accepted for interface
        compatibility but ignored -- config folders are never backed up."""
        ensure_delete(self.disk_path, include_itself=True)

    @staticmethod
    def disk_ids_used_pct():
        """Config volumes do not consume tracked disk ids."""
        return 0

    def _unpack_config(self):
        """Wipe any stale folder, then restore config files and the
        per-instance configuration into a fresh disk_path."""
        if os.path.exists(self.disk_path):
            JBoxDefaultConfigVol.log_debug("Config folder exists %s. Deleting...", self.disk_path)
            ensure_delete(self.disk_path, include_itself=True)
            JBoxDefaultConfigVol.log_debug("Config folder deleted %s", self.disk_path)
        JBoxDefaultConfigVol.log_debug("Will unpack config to %s", self.disk_path)
        os.mkdir(self.disk_path)
        JBoxDefaultConfigVol.log_debug("Created config folder %s", self.disk_path)
        self.restore_user_home(True)
        JBoxDefaultConfigVol.log_debug("Restored config files to %s", self.disk_path)
        self.setup_instance_config()
        JBoxDefaultConfigVol.log_debug("Setup instance config at %s", self.disk_path)
from app import app
from domain.Project import Project
from domain.ProjectDao import ProjectDao
from persistent.ProjectDaoImpl import ProjectDaoImpl
import unittest
class AppTests(unittest.TestCase):
    """Smoke tests for the project's HTTP API."""

    def setUp(self):
        # Flask test client; testing=True makes handler exceptions propagate.
        self.app = app.test_client()
        self.app.testing = True

    def test_project(self):
        result = self.app.get('/api/v1/project/7')
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12;
        # the canonical method is assertEqual.
        self.assertEqual(result.status_code, 200)
|
import mysql.connector
import sys
from datetime import datetime
# Interactive console todo app backed by a local MySQL database.
# NOTE(review): the connection is opened at import time and never closed;
# the process exit releases it, but an explicit cnx.close() would be cleaner.
cnx = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="",
    database="todo_app"
)
cursor = cnx.cursor()
ts = datetime.now()  # timestamp stored with newly created todos
print("Welcome to your todo app!")
username = input("May I know your name? ")
if username:
    print(f"What shall we do, {username}?")
    print("")
    print("1. View Todos \n2. Create new Todo \n3. Delete a Todo \n4. Alter a Todo")
    print("")
    user_choice = input()
    print("")
    # Choice 1: list every todo row (id, text, timestamp).
    if user_choice == '1':
        cursor.execute("SELECT * FROM todos")
        result = cursor.fetchall()
        print(f"This is all of your todos, {username}")
        for row in result:
            print(row[0], row[1], row[2])
    # Choice 2: insert a new todo (parameterized query -- injection safe).
    if user_choice == '2':
        todo = input("What do you have to do? ")
        cursor.execute("INSERT INTO todos (todo, timestamp) VALUES (%s, %s)", (todo, ts))
        cnx.commit()
        print(cursor.rowcount, "record(s) added")
    # Choice 3: show all todos, then delete the one with the entered id.
    if user_choice == '3':
        cursor.execute("SELECT * FROM todos")
        result = cursor.fetchall()
        for row in result:
            print(row[0], row[1], row[2])
        delete = input(f"What record would you like deleted, {username}? ")
        cursor.execute("DELETE FROM todos WHERE id = %s",(delete,))
        cnx.commit()
        print(cursor.rowcount, "record(s) deleted")
    # Choice 4: show all todos, then replace the text of the chosen id.
    if user_choice == '4':
        cursor.execute("SELECT * FROM todos")
        result = cursor.fetchall()
        for row in result:
            print(row[0], row[1], row[2])
        number = input(f"What record should be changed, {username}? ")
        data = input(f"What should it become, {username}? ")
        cursor.execute("UPDATE todos SET todo = %s WHERE id = %s", (data, number))
        cnx.commit()
        print(cursor.rowcount, "record(s) affected")
else:
    print("You didn't specify your name!")
    sys.exit()
|
# Generated by Django 3.0.3 on 2020-03-20 02:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.3): re-points question.user at the custom
    # user model's unique 'email' column (to_field) instead of its primary
    # key, keeping the one-to-one relation and CASCADE deletes.
    dependencies = [
        ('applyapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='question',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='email'),
        ),
    ]
|
from collections import defaultdict, Counter, OrderedDict
from nltk import induce_pcfg, treetransforms
from nltk.corpus import ptb, treebank
from nltk.grammar import CFG, Nonterminal
from nltk.parse import ShiftReduceParser
from nltk.parse.viterbi import ViterbiParser
from torch.autograd import Variable
import nltk
import numpy as np
import time
import torch
import pickle
# http://www.surdeanu.info/mihai/teaching/ista555-fall13/readings/PennTreebankConstituents.html
PHRASE_TAGS = ['SBAR', 'PP', 'ADJP', 'QP', 'WHNP' , 'ADVP']
class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers the order in which elements are first seen."""

    def __repr__(self):
        return f"{type(self).__name__}({OrderedDict(self)!r})"

    def __reduce__(self):
        # Pickle as (class, (ordered mapping,)) so element order survives
        # a dump/load round-trip.
        return type(self), (OrderedDict(self),)
def pickle_it(data, filename):
    """Serialize *data* to *filename* using the highest pickle protocol."""
    with open(filename, 'wb') as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_pickle(filename):
    """Deserialize and return the object stored in *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def preprocess_nt(item):
    """Return the base parse tag for a single nonterminal of a CFG,
    stripping everything after the first '-', '|', '+' or '='."""
    label = item.unicode_repr()
    for separator in ('-', '|', '+', '='):
        label = label.split(separator)[0]
    return Nonterminal(label)
def to_var(x, volatile=False):
    """Move *x* to the GPU when one is available and wrap it in a
    legacy autograd Variable."""
    device_x = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_x, volatile=volatile)
def idx2word(idx, i2w, pad_idx):
    """Convert a batch of word-id tensors into space-joined word strings.

    Each row of *idx* is decoded through the string-keyed vocabulary *i2w*
    until the first *pad_idx* is met (ids after the pad are ignored).
    """
    sentences = []
    for sent in idx:
        words = []
        for word_id in sent:
            if word_id == pad_idx:
                break
            # proper conversion into str requires word_id.item()
            words.append(i2w[str(word_id.item())])
        sentences.append(" ".join(words))
    return sentences
def interpolate(start, end, steps):
    """Linearly interpolate between *start* and *end* per dimension.

    Returns an array of shape (steps + 2, len(start)): the endpoints plus
    *steps* evenly spaced intermediate points, one column per dimension.
    """
    result = np.zeros((start.shape[0], steps + 2))
    for row, (lo, hi) in enumerate(zip(start, end)):
        result[row] = np.linspace(lo, hi, steps + 2)
    return result.T
def expierment_name(args, ts):
    """Build a run identifier string encoding the hyperparameters in *args*
    and the timestamp *ts*.

    (The misspelled name is kept as-is -- callers import it by this name.)
    """
    parts = [
        "BS=%i_" % args.batch_size,
        "LR={}_".format(args.learning_rate),
        "EB=%i_" % args.embedding_size,
        "%s_" % args.rnn_type.upper(),
        "HS=%i_" % args.hidden_size,
        "L=%i_" % args.num_layers,
        "BI=%i_" % args.bidirectional,
        "LS=%i_" % args.latent_size,
        "WD={}_".format(args.word_dropout),
        "ANN=%s_" % args.anneal_function.upper(),
        "K={}_".format(args.k),
        "X0=%i_" % args.x0,
        "TS=%s" % ts,
    ]
    return "".join(parts)
def get_parse(idx):
    """Pretty-print the *idx*-th parsed sentence of the PTB corpus."""
    ptb.parsed_sents()[idx].pprint()
def load_parser(filename):
    """Load and return a pickled parser object from *filename*."""
    with open(filename, 'rb') as handle:
        loaded = pickle.load(handle)
    return loaded
def find_parse_tag(tag):
    """TODO: locate occurrences of a parse tag; not yet implemented."""
    pass
def generate_parse_tree(sentence):
    """TODO: build a parse tree for *sentence*; not yet implemented."""
    pass
def evaluate_parse_quality(parse):
    """TODO: score the quality of *parse*; not yet implemented."""
    pass
def check_grammar(grammar, sentence):
    """Delegate coverage checking of the whitespace-tokenized *sentence*
    to *grammar*."""
    tokens = sentence.split()
    grammar.check_coverage(tokens)
|
import numpy as np
import os
import pickle
import open3d as o3d
from collections import OrderedDict
from utils import image_utils
from utils.transformations import rotation_matrix
from action_relation.utils.open3d_utils import read_point_cloud, make_pcd
import typing
def convert_voxel_index_to_3d_index(xyz_arr, min_xyz, xyz_size, voxel_size,
                                    validate=True):
    """Map metric coordinates to integer voxel indices.

    Offsets *xyz_arr* by *min_xyz*, divides by *voxel_size*, and rounds to
    the nearest integer (via a 1-decimal round to absorb float noise).
    With validate=True, asserts that every index lies within [0, xyz_size].
    Accepts either a single (3,) point or a batch of shape (N, 3).
    """
    scaled = (xyz_arr - min_xyz) / voxel_size
    voxel_idx = np.around(scaled, decimals=1).astype(np.int32)
    if validate:
        is_batched = type(voxel_idx) is np.ndarray and len(voxel_idx.shape) > 1
        for axis in range(3):
            if is_batched:
                assert np.all(voxel_idx >= 0) and np.all(voxel_idx[:, axis] <= xyz_size[axis])
            else:
                assert voxel_idx[axis] >= 0 and voxel_idx[axis] <= xyz_size[axis]
    return voxel_idx
class SceneVoxels(object):
    '''Create a single 3D representation for the entire scene. This is used
    for training a precond classifier directly from scene representation.
    What happens when the entire scene does not fit in the voxel space?
    '''
    def __init__(self, pcd_path_list, scene_type):
        """Load every point cloud, validate the scene layout, and move the
        scene into a canonical frame (Z flipped by a 180-degree rotation
        about X, origin translated by min_z).

        :param pcd_path_list: paths of per-object point-cloud files.
        :param scene_type: 'data_in_line', 'cut_food' or 'box_stacking'.
        """
        self.pcd_path_list = pcd_path_list
        self.scene_type = scene_type
        self.voxel_size: float = 0.01
        self.min_xyz = np.array([-0.5, -0.5, -0.25])
        self.max_xyz = np.array([0.5, 0.5, 0.5])
        # 1-decimal round absorbs float noise before the int cast
        self.xyz_size = np.around(
            (self.max_xyz-self.min_xyz)/self.voxel_size, decimals=1)
        self.xyz_size = self.xyz_size.astype(np.int32)
        self.full_3d = None
        self.voxel_index_int = None
        self.save_full_3d = True
        pcd_list, pcd_points_arr_list = [], []
        min_z_per_pcd_list = []
        for pcd_path in pcd_path_list:
            pcd = read_point_cloud(pcd_path)
            pcd_list.append(pcd)
            pcd_points_arr = np.asarray(pcd.points)
            pcd_points_arr_list.append(pcd_points_arr)
            [_, _, min_z] = pcd_points_arr.min(axis=0)
            min_z_per_pcd_list.append(min_z)
        min_z_per_pcd_list = sorted(min_z_per_pcd_list)
        if scene_type == "data_in_line":
            # Make sure that the objects are in a plane ?
            assert min_z_per_pcd_list[1] - min_z_per_pcd_list[0] <= 0.01
        elif scene_type == "cut_food":
            pass
        elif scene_type == "box_stacking":
            pass
        else:
            raise ValueError(f"Invalid scene type {scene_type}")
        # NOTE(review): min_z here is the loop leftover -- the minimum of the
        # LAST point cloud, not of the whole scene. Confirm this is intended.
        new_world_origin = [0, 0, min_z]
        x_axis, rot_angle = [1, 0, 0], np.deg2rad(180.0)
        self.T = rotation_matrix(rot_angle, x_axis)
        self.T[:3, 3] = new_world_origin
        self.pcd_points_arr_list = []
        for pcd in pcd_list:
            pcd.transform(self.T)
            pcd_arr = np.asarray(pcd.points)
            self.pcd_points_arr_list.append(pcd_arr)

    def init_voxel_index(self) -> bool:
        """Quantize every object's points into voxel indices.

        Returns False (without raising) when any object falls outside the
        configured voxel bounds; on success optionally caches the dense grid.
        """
        self.object_voxel_index_int_list = []
        for pcd_points in self.pcd_points_arr_list:
            pcd_idx = (pcd_points- self.min_xyz) / self.voxel_size
            pcd_index_int = np.around(pcd_idx, decimals=1).astype(np.int32)
            if np.any(pcd_index_int.max(axis=0) >= self.xyz_size):
                print("==== ERROR: Object out of bounds (above max) ====")
                return False
            if np.any(pcd_index_int.min(axis=0) < 0):
                print("==== ERROR: Object out of bounds (below min) ====")
                return False
            self.object_voxel_index_int_list.append(pcd_index_int)
        if self.save_full_3d:
            self.status_3d, self.full_3d = self.parse()
            # Remove the other data to make up space
            self.anchor_index_int = None
            self.other_index_int = None
            self.other_obj_voxels_idx = None
        return True

    def convert_voxel_index_to_3d_index(self, xyz_arr, validate=True):
        """Convert metric coordinates to voxel indices within this scene."""
        # Bug fix: `validate` was previously dropped instead of forwarded,
        # so callers could not disable validation.
        return convert_voxel_index_to_3d_index(
            xyz_arr, self.min_xyz, self.xyz_size, self.voxel_size,
            validate=validate)

    def parse(self):
        """Return (status, dense grid) of shape [2, X, Y, Z]: channel 0 is
        occupancy, channel 1 carries a 1-based object id per voxel."""
        if self.save_full_3d and self.full_3d is not None:
            return self.status_3d, self.full_3d
        if self.full_3d is None:
            full_3d = np.zeros([2] + self.xyz_size.tolist())
            object_idx = 1
            for object_index_int in self.object_voxel_index_int_list:
                full_3d[0,
                        object_index_int[:, 0],
                        object_index_int[:, 1],
                        object_index_int[:, 2]] = 1
                full_3d[1,
                        object_index_int[:, 0],
                        object_index_int[:, 1],
                        object_index_int[:, 2]] = object_idx
                object_idx += 1
        return True, full_3d

    def convert_full3d_arr_to_open3d(self) -> dict:
        """Return {'scene': point cloud of occupied voxel indices}, or {}
        when no dense grid is available."""
        status, full_3d = self.parse()
        if full_3d is None:
            return {}
        ax_x, ax_y, ax_z = np.where(full_3d[0, ...] != 0)
        ax = np.vstack([ax_x, ax_y, ax_z]).T
        scene_pcd = make_pcd(ax, color=[1, 0, 0])
        return {'scene': scene_pcd}

    def visualize_full3d(self) -> None:
        """Scatter-plot the occupied voxels with matplotlib (lazy import)."""
        status, voxels_arr = self.parse()
        if voxels_arr is None:
            return
        import matplotlib
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x,y,z = voxels_arr[0, ...].nonzero()
        ax.scatter(x, y, z)
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        plt.show()
class RobotAllPairVoxels(object):
    """Builds and caches RobotVoxels for (anchor, other) point-cloud pairs
    taken from a shared list of point-cloud files."""
    def __init__(self, pcd_path_list):
        self.pcd_path_list = pcd_path_list
        self.voxel_size: float = 0.01
        self.min_xyz = np.array([-0.25, -0.25, -0.25])
        self.max_xyz = np.array([0.25, 0.25, 0.25])
        self.xyz_size = np.around(
            (self.max_xyz-self.min_xyz)/self.voxel_size, decimals=1)
        self.xyz_size = self.xyz_size.astype(np.int32)
        # Fixed: every file was previously read twice -- a throwaway list
        # comprehension immediately overwritten by the loop below. Read once.
        self.pcd_list, self.pcd_points_arr_list = [], []
        self.min_z_per_pcd_list = []
        self.obj_center_per_pcd_list = []
        for pcd_path in pcd_path_list:
            pcd = read_point_cloud(pcd_path)
            self.pcd_list.append(pcd)
            pcd_points_arr = np.asarray(pcd.points)
            self.pcd_points_arr_list.append(pcd_points_arr)
            [_, _, min_z] = pcd_points_arr.min(axis=0)
            [mean_x, mean_y, mean_z] = pcd_points_arr.mean(axis=0)
            self.min_z_per_pcd_list.append(min_z)
            self.obj_center_per_pcd_list.append([mean_x, mean_y, mean_z])
        self.min_z_per_pcd_list = sorted(self.min_z_per_pcd_list)
        # Make sure that the objects are in a plane ?
        # assert self.min_z_per_pcd_list[1] - self.min_z_per_pcd_list[0] <= 0.01
        self.robot_voxels_by_pcd_pair_dict = OrderedDict()

    def init_voxels_for_pcd_pair(self, anchor_idx, other_idx):
        """Create, initialize and cache a RobotVoxels for the pair
        (anchor_idx, other_idx); returns (status, robot_voxels)."""
        robot_voxels = RobotVoxels(self.pcd_list[anchor_idx],
                                   self.pcd_list[other_idx],
                                   min_xyz=self.min_xyz,
                                   max_xyz=self.max_xyz)
        status = robot_voxels.init_voxel_index()
        self.robot_voxels_by_pcd_pair_dict[(anchor_idx, other_idx)] = robot_voxels
        anchor_center = np.array(self.obj_center_per_pcd_list[anchor_idx])
        other_center = np.array(self.obj_center_per_pcd_list[other_idx])
        dist = np.linalg.norm(anchor_center - other_center)
        print(f"inter obj dist: {dist}")
        return status, robot_voxels

    def get_object_center_list(self):
        """Return the [x, y, z] centroid of each loaded point cloud."""
        return self.obj_center_per_pcd_list
class RobotVoxels(object):
    """Voxelizes an (anchor, other) point-cloud pair in a frame anchored at
    the anchor object's base (XY centroid, min Z), with Z flipped by a
    180-degree rotation about X."""
    def __init__(self,
                 anchor_pcd: o3d.geometry.PointCloud,
                 other_pcd: o3d.geometry.PointCloud,
                 min_xyz,
                 max_xyz,
                 has_object_in_between=False) -> None:
        self.anchor_pcd = o3d.geometry.PointCloud(anchor_pcd)
        self.other_pcd = o3d.geometry.PointCloud(other_pcd)
        self.voxel_size: float = 0.01
        self.has_object_in_between = has_object_in_between
        self.objects_are_far_apart = False
        # This should be similar to simulation or atleast the final data
        # size should be similar to simulation.
        # self.min_xyz = np.array([-0.65, -0.65, -0.5])
        # self.max_xyz = np.array([0.65, 0.65, 0.5])
        self.min_xyz = min_xyz
        self.max_xyz = max_xyz
        self.xyz_size = np.around(
            (self.max_xyz-self.min_xyz)/self.voxel_size, decimals=1)
        self.xyz_size = self.xyz_size.astype(np.int32)
        self.full_3d = None
        self.voxel_index_int = None
        self.save_full_3d = True
        # The original world coordinate system is X to right, Y ahead and Z down
        # We want to move the origin to the base of the anchor object with X ahead
        # Y to right and Z up.
        anchor_points = np.asarray(self.anchor_pcd.points)
        other_points = np.asarray(self.other_pcd.points)
        [center_x, center_y, _] = anchor_points.mean(axis=0)
        [_, _, min_z] = anchor_points.min(axis=0)
        new_world_origin = [-center_x, -center_y, -min_z]
        x_axis, rot_angle = [1, 0, 0], np.deg2rad(0)
        self.T = rotation_matrix(rot_angle, x_axis)
        self.T[:3, 3] = new_world_origin
        self.anchor_pcd.transform(self.T)
        self.other_pcd.transform(self.T)
        self.T2 = rotation_matrix(np.deg2rad(180), x_axis)
        self.anchor_pcd.transform(self.T2)
        self.other_pcd.transform(self.T2)
        self.transf_anchor_points = np.asarray(self.anchor_pcd.points)
        self.transf_other_points = np.asarray(self.other_pcd.points)

    def init_voxel_index(self) -> bool:
        """Quantize both objects' points into voxel indices.

        Always returns True; when the *other* object falls outside the voxel
        bounds it only sets ``objects_are_far_apart`` and skips grid building.
        """
        anchor_idx = (self.transf_anchor_points - self.min_xyz) / self.voxel_size
        self.anchor_index_int = np.around(anchor_idx, decimals=1).astype(np.int32)
        other_idx = (self.transf_other_points - self.min_xyz) / self.voxel_size
        self.other_index_int = np.around(other_idx, decimals=1).astype(np.int32)
        if np.any(self.other_index_int.max(axis=0) >= self.xyz_size):
            # The other object is too far.
            self.objects_are_far_apart = True
        if np.any(self.other_index_int.min(axis=0) < 0):
            # The other object is too far.
            self.objects_are_far_apart = True
        if self.objects_are_far_apart:
            return True
        if self.save_full_3d:
            self.status_3d, self.full_3d = self.parse()
            # Remove the other data to make up space
            self.anchor_index_int = None
            self.other_index_int = None
            self.other_obj_voxels_idx = None
        return True

    def convert_voxel_index_to_3d_index(self, xyz_arr, validate=True):
        """Convert metric coordinates to voxel indices within these bounds."""
        # Bug fix: `validate` was previously dropped instead of forwarded,
        # so callers could not disable validation.
        return convert_voxel_index_to_3d_index(
            xyz_arr, self.min_xyz, self.xyz_size, self.voxel_size,
            validate=validate)

    def create_position_grid(self):
        """Return a [3, X, Y, Z] meshgrid of voxel offsets relative to the
        voxel that contains the metric origin."""
        grid = np.meshgrid(np.arange(self.xyz_size[0]),
                           np.arange(self.xyz_size[1]),
                           np.arange(self.xyz_size[2]))
        voxel_0_idx = self.convert_voxel_index_to_3d_index(np.zeros(3))
        grid[0] = grid[0] - voxel_0_idx[0]
        grid[1] = grid[1] - voxel_0_idx[1]
        grid[2] = grid[2] - voxel_0_idx[2]
        return np.stack(grid)

    def get_all_zero_voxels(self):
        '''Returns canonical voxels which are all 0's.'''
        full_3d = np.zeros([3] + self.xyz_size.tolist())
        return full_3d

    def parse(self):
        """Return (status, dense grid) of shape [3, X, Y, Z]: channel 0 marks
        anchor voxels 1 and other voxels 2; channels 1 and 2 are per-object
        occupancy masks.  Returns (True, None) when the objects are too far
        apart to fit the grid."""
        if self.save_full_3d and self.objects_are_far_apart:
            return True, None
        if self.save_full_3d and self.full_3d is not None:
            return self.status_3d, self.full_3d
        if self.full_3d is None:
            full_3d = np.zeros([3] + self.xyz_size.tolist())
            full_3d[0,
                    self.anchor_index_int[:, 0],
                    self.anchor_index_int[:, 1],
                    self.anchor_index_int[:, 2]] = 1
            full_3d[0,
                    self.other_index_int[:, 0],
                    self.other_index_int[:, 1],
                    self.other_index_int[:, 2]] = 2
            full_3d[1,
                    self.anchor_index_int[:, 0],
                    self.anchor_index_int[:, 1],
                    self.anchor_index_int[:, 2]] = 1
            full_3d[2,
                    self.other_index_int[:, 0],
                    self.other_index_int[:, 1],
                    self.other_index_int[:, 2]] = 1
        return True, full_3d

    def convert_full3d_arr_to_open3d(self) -> dict:
        """Return {'anchor': ..., 'other': ...} point clouds of the occupied
        voxel indices, or {} when no dense grid is available."""
        status, full_3d = self.parse()
        if full_3d is None:
            return {}
        ax_x, ax_y, ax_z = np.where(full_3d[1, ...] != 0)
        ax = np.vstack([ax_x, ax_y, ax_z]).T
        anchor_pcd = make_pcd(ax, color=[1, 0, 0])
        ax_x, ax_y, ax_z = np.where(full_3d[2, ...] != 0)
        ax = np.vstack([ax_x, ax_y, ax_z]).T
        other_pcd = make_pcd(ax, color=[0, 0, 1])
        return {'anchor': anchor_pcd, 'other': other_pcd}

    def visualize_full3d(self) -> None:
        """Scatter-plot the occupied voxels with matplotlib (lazy import)."""
        status, voxels_arr = self.parse()
        if voxels_arr is None:
            return
        import matplotlib
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x,y,z = voxels_arr[0, ...].nonzero()
        ax.scatter(x, y, z)
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        plt.show()
def create_robot_voxels_from_anchor_pcd_path(
        anchor_pcd_path: str,
        other_pcd_path: str,
        has_object_in_between: bool) -> RobotVoxels:
    """Load the anchor and other point clouds from disk and voxelize them."""
    return RobotVoxels(
        read_point_cloud(anchor_pcd_path),
        read_point_cloud(other_pcd_path),
        has_object_in_between,
    )
|
# Minimum number of single-letter steps around a 26-letter circular dial,
# starting from 'a', needed to spell the word read from stdin.
word = input()
path = 'a' + word
total = 0
for prev, cur in zip(path, path[1:]):
    step = abs(ord(cur) - ord(prev))
    # Going the other way around the ring may be shorter.
    total += min(step, 26 - step)
print(total)
|
import sys
import time
import threading
import os
from mininet.net import Containernet
from mininet.link import TCLink
from mininet.node import RemoteController, Docker
from mininet.cli import CLI
from mininet.log import setLogLevel, info
PATH_TO_MN = os.path.abspath(__file__).split("mn")[0]
sys.path.append(PATH_TO_MN)
from mn.hosts.api.receive.ReceiveActions import LoggingReceiveAction
from mn.topo.Topos import EvalTetraTopo
from mn.cn_rest import start_rest
# Payload sizing originates from the TETRA speech codec specification:
# https://www.etsi.org/deliver/etsi_en/300300_300399/30039502/01.03.01_60/en_30039502v010301p.pdf
UDP_MESSAGE_SIZE_BYTES = 17  # bytes per datagram (TETRA codec — see spec above)
PACKETS_PER_SECOND = 33  # datagram rate of the simulated voice stream
PACKET_COUNT = PACKETS_PER_SECOND * 2  # send two seconds worth of traffic
GROUP_IP = "224.2.3.4"  # multicast group used throughout the evaluation
def printAndExecute(host, cmdString):
    """Echo the command prefixed with the host's name, then run it there."""
    print("{}: {}".format(host.name, cmdString))
    host.cmd(cmdString)
def startUDPClient(host, groupIP, msgsize, count, rate, sport=6666, dport=6666):
    """Launch the Java PeriodicUDP sender on `host` toward the multicast group.

    NOTE(review): `sport` and `dport` are accepted but never placed on the
    command line — confirm whether PeriodicUDP should receive them.
    """
    cmdString = 'java -classpath %smn/hosts/cli/UDP PeriodicUDP %s %s %s %s %s' % (PATH_TO_MN, host.IP(), groupIP, msgsize, count, rate)
    printAndExecute(host, cmdString)
def startUDPServer(host, listenIP, hostIP):
    """Start the logging UDP receiver script on `host` in the background."""
    script = '%smn/hosts/cli/UDP/UDP_server.py' % PATH_TO_MN
    cmdString = 'python %s \"%s\" \"%s\" \"%s\" -log &' % (script, listenIP, host.name, hostIP)
    printAndExecute(host, cmdString)
def startARP(host, srcIP, srcMAC, dstIP, iface):
    """Send an ARP from `host` via ARP_client.py so the controller learns it.

    NOTE(review): the parameter order is (srcIP, srcMAC, dstIP) but the
    command line passes (srcIP, dstIP, srcMAC) — verify against
    ARP_client.py's expected argv order.
    """
    cmdString = 'python %smn/hosts/cli/ARP/ARP_client.py \"%s\" %s %s %s' % (PATH_TO_MN, srcIP, dstIP, srcMAC, iface)
    printAndExecute(host, cmdString)
def startIP(host, srcIP, dstIP, ipProto, iface, srcMAC):
    """Send a raw IP packet from `host` using the IPClient.py helper."""
    client = '%smn/hosts/cli/RAW/IPClient.py' % PATH_TO_MN
    cmdString = 'python %s %s %s %s %s %s' % (client, srcIP, dstIP, ipProto, iface, srcMAC)
    printAndExecute(host, cmdString)
def main():
    """Build the TETRA evaluation topology and drive the multicast test.

    Interactive: drops into the Mininet CLI between stages so the operator
    can inspect state (and let receivers join) before continuing.
    """
    setLogLevel('info')
    topo = EvalTetraTopo()
    net = Containernet(controller=RemoteController, topo=topo, build=False, autoSetMacs=True, link=TCLink)
    net.start()
    print()
    print("**Wiping log dir.")
    # Clear stale receiver logs so this run's measurements start clean.
    for root, dirs, files in os.walk(LoggingReceiveAction.LOG_DIR):
        for file in files:
            os.remove(os.path.join(root, file))
    print("**Starting containernet REST Server.")
    thr = threading.Thread(target=start_rest, args=(net,))  # comma behind net is on purpose
    thr.daemon = True
    thr.start()
    # wait for connection with controller
    time.sleep(3)
    hosts = net.hosts
    # send arp from reqHost to every other host -> required by ONOS HostService to resolve hosts (i.e. map MAC<->IP)
    reqHost = hosts[0]
    for host in hosts:
        if(host is not reqHost):
            startARP(reqHost, reqHost.IP(), reqHost.MAC(), host.IP(), reqHost.intf())
    CLI(net)
    ## set up UDP servers to join group
    for host in hosts:
        if host.name in ['tbs10host', 'tbs11host', 'tbs4host', 'tbs21host']:
            startUDPServer(host, GROUP_IP, host.IP())
    CLI(net)
    ## send data
    startUDPClient(net.getNodeByName('tbs17host'), GROUP_IP, UDP_MESSAGE_SIZE_BYTES, count=PACKET_COUNT, rate=PACKETS_PER_SECOND)
    CLI(net)
    net.stop()
if __name__ == '__main__':
    main()
|
from django.db import models
# Create your models here.
class Evento(models.Model):
    """Bilingual (Spanish/English) event with a date and an active flag."""
    nombreES = models.CharField(max_length=150, verbose_name = 'Nombre del evento en español')
    nombreEN = models.CharField(max_length=150, verbose_name = 'Nombre del evento en inglés')
    descripcionES = models.TextField(max_length=200, verbose_name = 'Descripción en español')
    descripcionEN = models.TextField(max_length=200, verbose_name = 'Descripción en inglés')
    fecha = models.DateField(verbose_name="Fecha")
    activo = models.BooleanField(verbose_name = 'Activo', default=True)  # visibility flag

    def __str__(self):
        # Human-readable label in the admin and shell (was missing,
        # so instances displayed as "Evento object (N)").
        return self.nombreES
|
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.regression import RandomForestRegressor
from models.utils import create_feature_column
def build_random_forest_regressor_model(observation_df, feature_columns):
    """Train and evaluate a RandomForestRegressor predicting `duration_sec`.

    Args:
        observation_df: Spark DataFrame with the raw feature columns.
        feature_columns: column names to assemble into the 'features' vector.
    Returns:
        The fitted RandomForestRegressionModel.
    """
    # Local import so the fix is self-contained; the module-level import of
    # MulticlassClassificationEvaluator is no longer used by this function.
    from pyspark.ml.evaluation import RegressionEvaluator
    # Create new column with all of the features
    vector_observation_df = create_feature_column(
        observation_df, feature_columns, ['features', 'duration_sec'])
    train_df, test_df = vector_observation_df.randomSplit([0.7, 0.3])
    regressor = RandomForestRegressor(
        featuresCol='features', labelCol='duration_sec')
    rfr_model = regressor.fit(train_df)
    test_predictions = rfr_model.transform(test_df)
    test_predictions.select("prediction", "duration_sec", "features").show(5)
    # BUG FIX: the original used MulticlassClassificationEvaluator with
    # metricName="accuracy" on a continuous label and then printed the
    # result as "RMSE". Use the regression evaluator so the printed value
    # really is the root-mean-square error.
    evaluator = RegressionEvaluator(
        predictionCol='prediction', labelCol='duration_sec',
        metricName='rmse')
    print("RMSE on test data = %g" % evaluator.evaluate(test_predictions))
    return rfr_model
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
import sys
import os
import pymysql.cursors
DETAIL_URL = "http://cq.122.gov.cn/m/viopub/getVioPubDetail"
# Old IE user agent string; presumably needed to pass basic UA checks — confirm.
headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
# Shared request-params template; main() fills in the record id per request.
params = {"id":None}
# SECURITY(review): database credentials are hard-coded in source; move them
# to environment variables or a config file kept out of version control.
connection = pymysql.connect(host="localhost",user="root",password="yananshimeinv",db="driver_accident",charset="utf8mb4",cursorclass=pymysql.cursors.DictCursor)
def main():
    """Walk previously crawled listing files, fetch per-violation detail
    pages, save them to disk, and insert summary + detail rows into MySQL.

    Assumes text2.txt .. text789.txt exist in the CWD, each one JSON listing
    page with 20 entries — TODO confirm the page size is always 20.
    """
    for i in range(2,790):
        filename = "text{}.txt".format(i)
        d2 = json.load(open(filename))
        for j in range(20):
            ids = (d2["data"]["list"]["content"][j]["id"])
            gsjdsbh = d2["data"]["list"]["content"][j]["gsjdsbh"]
            gsajmc = d2["data"]["list"]["content"][j]["gsajmc"] # case name
            driver = d2["data"]["list"]["content"][j]["gsjsrxm"] # driver name
            gshpzl = d2["data"]["list"]["content"][j]["gshpzl"] # plate type
            gshphm = d2["data"]["list"]["content"][j]["gshphm"] # plate number
            gscfjg = d2["data"]["list"]["content"][j]["gscfjg"] # punishment result
            print("id:{}\n 决定书编号:{},\n 案件名称:{}\n 驾驶人姓名:{}\n 号牌种类:{}\n 车牌号码:{} \n 处罚结果:{}\n\n\n".format(ids,gsjdsbh,gsajmc,driver,gshpzl,gshphm,gscfjg))
            params["id"] = ids
            r = requests.post(DETAIL_URL,params=params,headers=headers)
            print(r.status_code)
            if r.status_code == requests.codes.ok:
                # Cache each detail page on disk before parsing it.
                Save_dir = "./page_detail{}".format(i)
                detail_filename = "text_{}detail.txt"
                if os.path.exists(Save_dir) is False:
                    os.makedirs(Save_dir)
                detail_filename = Save_dir + "/" + detail_filename.format(j)
                with open(detail_filename,"w") as f:
                    f.writelines(r.text)
                detail_dict = json.load(open(detail_filename))
                punish_truth = detail_dict["data"]["gscfss"] # punishment facts
                social_credit_code = detail_dict["data"]["gsshxydm"] # social credit code
                date_time = detail_dict["data"]["gscfrq"] # punishment date
                print("id:{}\n处罚事实:{}\n社会信用代码:{}\n时间:{}".format(ids,punish_truth,social_credit_code,date_time))
                # write into database
                try:
                    with connection.cursor() as cursor:
                        sql = "INSERT into `publicity`(`id`,`decide_number`,`case_name`,`driver_name`,`car_kind`,`car_number`,`punish_result`) values(%s, %s, %s, %s, %s, %s, %s)"
                        cursor.execute(sql,(ids,gsjdsbh,gsajmc.encode("utf-8"),driver.encode("utf-8"),gshpzl.encode("utf-8"),gshphm.encode("utf-8"),gscfjg.encode("utf-8")))
                        connection.commit()
                except Exception as e:
                    print(e)
                    print("error in insert publicity")
                try:
                    with connection.cursor() as cursor:
                        sql = "INSERT into `punish_detail`(`id`,`pulish_truth`,`social_credit_code`,`date_time`) values( %s, %s, %s, %s)"
                        cursor.execute(sql,(ids, punish_truth.encode("utf-8"),social_credit_code,date_time.encode("utf-8")))
                        connection.commit()
                except Exception as e:
                    print(e)
                    print("error insert pulish_detail ")
    # Close the module-level connection once all pages are processed.
    connection.close()
if __name__ == "__main__":
    main()
|
'''
File: group_grade_filler.py
Author: Adam Pah
Description:
Fills the group grades given the grade from one student to all group members
Input:
* Canvas grade assignment
* Group assignments
* The assignment integer
'''
#Standard path imports
from __future__ import division, print_function
import argparse
import glob
#Non-standard imports
import pandas as pd
import support
#Global directories and variables
def main(args):
    """Propagate each group's best recorded grade to every group member.

    Reads the Canvas grade export and the group roster, takes the max of the
    assignment column within each group (so the one graded submission wins),
    and writes that grade back for every member before re-exporting.
    """
    #Read in the gradesheet
    frow, gradedf = support.canvas_grade_sheet_reader(args.gradefile)
    ######
    #Read in the group assignments
    # NOTE(review): `sheetname` was renamed `sheet_name` in pandas 0.21+;
    # this call only works with an older pinned pandas — confirm.
    groupdf = pd.read_excel(args.groupfile, sheetname = 0)
    groups = groupdf.Group.unique().tolist()
    ######
    #Apply the grades from one student to all students
    # Seed with Canvas's "Test Student" row, which belongs to no group.
    assign_grades = {'Student, Test': ''}
    for group in groups:
        groupnames = groupdf[groupdf.Group == group].Name.tolist()
        #Get the group names, pull the assignment column grade max
        group_grade = gradedf[gradedf.Student.isin(groupnames)][args.assignment_name].max()
        #Record it
        for x in groupnames:
            assign_grades[x] = group_grade
    #Map all grades back
    gradedf[args.assignment_name] = gradedf.Student.apply(lambda name: assign_grades[name])
    #Sub back in the first row, write it out
    support.canvas_recombinator(args.gradefile, frow, gradedf)
if __name__ == '__main__':
    # CLI entry point: Canvas grade file, group roster, assignment column.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('gradefile', help = 'The Canvas grade file')
    parser.add_argument('groupfile', help = 'The Group assignments')
    parser.add_argument('assignment_name', action = 'store', type = str,
                        help = 'The column name of the assignment in the Canvas grade file')
    args = parser.parse_args()
    main(args)
|
# print("Enter your name>", end="")
name = input("enter your name>")
age = int(input("Your age"))
# print("Hello ", name, ".", sep="")
print(f"Hello, {name}.")
if age < 10:
print("Hi")
elif 10<=age<30:
print("Hello")
else:
print("Good day") |
import sys
from ev3dev2.motor import MediumMotor, OUTPUT_A, SpeedPercent
# Limit of the steering mechanism, in motor rotations from center
# (turn_to_angle clamps requests to +/- this value).
MAX_TURN_ROTATION = 6
# Percent of the medium motor's speed used for all steering moves.
STEERING_SPEED = 100
# Tracked steering position in rotations relative to center (0 = straight).
current_turn = 0
steering_motor = MediumMotor(OUTPUT_A)
def turn_right():
    """Turn one rotation rightward (negative speed direction).

    NOTE(review): does not update `current_turn`, so turn_to_angle()/center()
    lose track of the true position after calling this — confirm intended.
    """
    steering_motor.on_for_rotations(speed=-STEERING_SPEED, rotations=1)
def turn_left():
    """Turn one rotation leftward (positive speed direction).

    NOTE(review): does not update `current_turn` — see turn_right().
    """
    steering_motor.on_for_rotations(speed=STEERING_SPEED, rotations=1)
def turn_to_angle(turn_angle):
    """Move the steering to an absolute position, clamped to the valid range."""
    global current_turn
    # Clamp the request to the mechanical limits of the mechanism.
    clamped = min(MAX_TURN_ROTATION, max(-MAX_TURN_ROTATION, turn_angle))
    # Only drive the difference from where the steering currently sits.
    steering_motor.on_for_rotations(STEERING_SPEED, clamped - current_turn)
    current_turn = clamped
def center():
    """Return the steering to straight by undoing the tracked offset."""
    global current_turn
    # Positional True, True -> brake and block until the move completes.
    steering_motor.on_for_rotations(STEERING_SPEED, -current_turn, True, True)
    current_turn = 0
from django import forms
from abudget.money.models import TransactionCategory
class CreateTransactionCategoryForm(forms.ModelForm):
    """ModelForm for TransactionCategory that pins the owning budget.

    The budget is supplied by the caller at construction time and attached
    to the instance on save, so it never appears as a user-editable field.
    """
    class Meta:
        model = TransactionCategory
        fields = ('name', 'parent')

    def __init__(self, budget=None, *args, **kwargs):
        super(CreateTransactionCategoryForm, self).__init__(*args, **kwargs)
        self.budget = budget

    def save(self, *args, **kwargs):
        # Attach the budget captured in __init__ before the model is saved.
        self.instance.budget = self.budget
        return super(CreateTransactionCategoryForm, self).save(*args, **kwargs)
|
# Generated by Django 3.0.3 on 2020-02-24 18:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 3-letter `contract_currency` choice field to Contract
    (RUR/EUR/USD, defaulting to RUR)."""

    dependencies = [
        ('contracts', '0004_contract_contract_amount'),
    ]

    operations = [
        migrations.AddField(
            model_name='contract',
            name='contract_currency',
            field=models.CharField(choices=[('RUR', 'Russian Ruble'), ('EUR', 'Euro'), ('USD', 'USA Dollar')], default='RUR', max_length=3),
        ),
    ]
|
# coding=utf-8
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import os
# suitable for page jumps (navigation between documents)
class wait_for_page_load(object):
    """Context manager that blocks on exit until the page has navigated.

    Navigation is detected by comparing the identity of the <html> element:
    once the browser loads a new document, a fresh element replaces it.
    """
    def __init__(self, browser):
        self.browser = browser
    def __enter__(self):
        # Remember the <html> element of the page we are leaving.
        self.old_page = self.browser.find_element_by_tag_name('html')
    def page_has_loaded(self):
        new_page = self.browser.find_element_by_tag_name('html')
        return new_page.id != self.old_page.id
    def __exit__(self, *_):
        # BUG FIX: the original constructed a throwaway wait_for_page_load
        # instance here instead of waiting.  Poll until the old <html>
        # element has been replaced by the new document's.
        while not self.page_has_loaded():
            time.sleep(0.1)
# monitor whether the element is loaded; only suitable in debug mode
def elementloaded(page, element):
    """Busy-wait until the element with id `element` is present on `page`.

    Always returns True (it spins until the element appears).
    BUG FIX: the original recursed on every failed probe, so a slow page
    grew the call stack without bound (eventual RecursionError).  This
    version polls iteratively with the same externally visible behavior.
    """
    while True:
        if page.find_element_by_id(element):
            return True
def login():
    """Open the site in Chrome, read credentials from a user-supplied file,
    log in, and return the driver.

    Credential file format: lines containing 'User Name: <x>' and
    'Password: <x>'.
    NOTE(review): uses raw_input, so this module is Python 2 only.
    """
    s1Driver = webdriver.Chrome()
    s1Driver.get('https://sgassportsdevque8x3ffel.cn1.hana.ondemand.com')
    # read credentials from a username/password configuration file
    userName = ''
    password = ''
    userFile = raw_input('打开用户信息文件 -> ')
    if os.path.isfile(userFile):
        with open(userFile,'r') as user_info:
            lines = user_info.readlines()
            for line in lines:
                if line.find('User Name:') != -1:
                    line = line.replace('\n','')
                    userNameColon = line.find(':')
                    # value starts two characters after the colon (": x")
                    userName = line[(userNameColon + 2):]
                elif line.find('Password:') != -1:
                    passwordColon = line.find(':')
                    password = line[(passwordColon + 2):]
            # redundant: the `with` block already closes the file
            user_info.close()
    # post userName and password to the login page
    loginUser = s1Driver.find_element_by_id('xs_username-inner')
    loginUser.send_keys(userName)
    loginPassword = s1Driver.find_element_by_id('xs_password-inner')
    loginPassword.send_keys(password)
    with wait_for_page_load(s1Driver):
        s1Driver.find_element_by_id('logon_button').click()
    if s1Driver:
        return s1Driver
# explicit wait via WebDriverWait
def detectElement(driver, delay, element):
    """Wait up to `delay` seconds for the element with id `element`; return it."""
    return WebDriverWait(driver, delay).until(
        EC.presence_of_element_located((By.ID, element)))
class TestTraining(unittest.TestCase):
    """UI walk-through: Team -> Training -> Add Training.

    NOTE(review): login() runs from __init__ — i.e. once per constructed
    test — and spawns a real Chrome session; consider setUp/tearDown.
    """
    def __init__(self, methodName='runTest'):
        super(TestTraining, self).__init__(methodName)
        self.s1 = login()
    def test_gotoTraining(self):
        if self.s1:
            # from team to training, on the first page
            team = detectElement(self.s1, 100, '__item0-__xmlview0--menu-0')
            if (team):
                team.click()
                training = detectElement(self.s1, 10, '__item12-__xmlview5--sections-list-6-content')
                if (training):
                    training.click()
                    # click the add-training button
                    addTraining = detectElement(self.s1, 30, '__button23-__toolbar2-0-inner')
                    if (addTraining):
                        addTraining.click()
                        # leave the page on screen briefly for manual inspection
                        time.sleep(10)
    def exit(self):
        self.s1.quit()
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import csv
import sys, getopt
def main(argv):
    """Plot a learning curve from a one-row CSV of per-trial scores.

    Usage: learningPlot.py -i <inputfile>
    NOTE(review): Python 2 only (print statements).  The last CSV column is
    skipped (range stops at len-1), presumably to drop an empty field after
    a trailing comma — confirm against the data files.
    """
    values = []
    inputFile = ''
    try:
        opts, args = getopt.getopt(argv, "hi:", ["ifile="])
    except getopt.GetoptError:
        print 'learningPlot.py -i <inputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            # NOTE(review): exits without printing the usage line first.
            sys.exit()
        if opt in ("-i", "--ifile"):
            inputFile = arg
            print inputFile
    try:
        valuesCsv = csv.reader(open(inputFile,'rb'),delimiter=',')
        for row in valuesCsv:
            valuesString = row
            for i in range(0, len(valuesString)-1):
                # NOTE(review): strip() result is discarded; float() tolerates
                # surrounding whitespace anyway, so this line is a no-op.
                valuesString[i].strip()
                values.append(float(valuesString[i]))
    except IOError:
        print "Input value file not found."
    plt.plot(values)
    plt.ylabel('score')
    plt.xlabel('trial no.')
    plt.show()
if __name__ == "__main__":
    main(sys.argv[1:])
|
from qualia.nn.modules import Module, Linear
from qualia.nn.functions import relu
from qualia.optim import *
from qualia.util import ReplayMemory
import gym
class NeuralNet(Module):
    """Three-layer fully connected network with ReLU hidden activations."""
    def __init__(self, in_features, hidden, out_features):
        super().__init__()
        self.linear1 = Linear(in_features, hidden)
        self.linear2 = Linear(hidden, hidden)
        self.linear3 = Linear(hidden, out_features)

    def forward(self, x):
        hidden_act = relu(self.linear1(x))
        hidden_act = relu(self.linear2(hidden_act))
        # No activation on the final layer: raw outputs.
        return self.linear3(hidden_act)
class DDQN(object):
    """Double-DQN agent skeleton: online + target networks and replay memory."""
    def __init__(self, num_states, num_actions):
        self.num_actions = num_actions
        # BUG FIX: the original referenced an undefined `Net`; the network
        # class defined in this module is `NeuralNet`.
        self.main_q_network = NeuralNet(num_states, 32, num_actions)
        self.target_q_network = NeuralNet(num_states, 32, num_actions)
        self.memory = ReplayMemory(10000)
        self.optim = Adam(self.main_q_network.params)

    def __call__(self, state, episode):
        # BUG FIX: the original signature contained a stray comma
        # (`def __call__(self, , state, episode)`) — a SyntaxError.
        pass
|
# -*- coding: utf-8 -*-
from odoo import SUPERUSER_ID
from odoo.exceptions import AccessError
from odoo.tests import common, TransactionCase
class Feedback(TransactionCase):
    """Shared fixture: three fresh groups and a user belonging only to Group 2."""
    def setUp(self):
        super().setUp()
        self.group0 = self.env['res.groups'].create({'name': "Group 0"})
        self.group1 = self.env['res.groups'].create({'name': "Group 1"})
        self.group2 = self.env['res.groups'].create({'name': "Group 2"})
        self.user = self.env['res.users'].create({
            'login': 'bob',
            'name': "Bob Bobman",
            'groups_id': [(6, 0, self.group2.ids)],
        })
class TestSudo(Feedback):
    """ Test the behavior of method sudo(). """
    def test_sudo(self):
        """sudo() toggles superuser mode (env.su) without changing env.uid;
        with_user() changes uid and always clears superuser mode (except for
        SUPERUSER_ID itself)."""
        record = self.env['test_access_right.some_obj'].create({'val': 5})
        user1 = self.user
        partner_demo = self.env['res.partner'].create({
            'name': 'Marc Demo',
        })
        user2 = self.env['res.users'].create({
            'login': 'demo2',
            'password': 'demo2',
            'partner_id': partner_demo.id,
            'groups_id': [(6, 0, [self.env.ref('base.group_user').id, self.env.ref('base.group_partner_manager').id])],
        })
        # with_user(user) switches uid but never grants superuser mode
        record1 = record.with_user(user1)
        self.assertEqual(record1.env.uid, user1.id)
        self.assertFalse(record1.env.su)
        record2 = record1.with_user(user2)
        self.assertEqual(record2.env.uid, user2.id)
        self.assertFalse(record2.env.su)
        # the superuser is always in superuser mode
        record3 = record2.with_user(SUPERUSER_ID)
        self.assertEqual(record3.env.uid, SUPERUSER_ID)
        self.assertTrue(record3.env.su)
        # sudo() keeps the current uid and turns superuser mode on
        surecord1 = record1.sudo()
        self.assertEqual(surecord1.env.uid, user1.id)
        self.assertTrue(surecord1.env.su)
        surecord2 = record2.sudo()
        self.assertEqual(surecord2.env.uid, user2.id)
        self.assertTrue(surecord2.env.su)
        surecord3 = record3.sudo()
        self.assertEqual(surecord3.env.uid, SUPERUSER_ID)
        self.assertTrue(surecord3.env.su)
        # sudo().sudo() is idempotent
        surecord1 = surecord1.sudo()
        self.assertEqual(surecord1.env.uid, user1.id)
        self.assertTrue(surecord1.env.su)
        # sudo(False) drops superuser mode (but SUPERUSER_ID keeps it)
        record1 = surecord1.sudo(False)
        self.assertEqual(record1.env.uid, user1.id)
        self.assertFalse(record1.env.su)
        record2 = surecord2.sudo(False)
        self.assertEqual(record2.env.uid, user2.id)
        self.assertFalse(record2.env.su)
        record3 = surecord3.sudo(False)
        self.assertEqual(record3.env.uid, SUPERUSER_ID)
        self.assertTrue(record3.env.su)
        # sudo().with_user(user) clears superuser mode again
        record2 = surecord1.with_user(user2)
        self.assertEqual(record2.env.uid, user2.id)
        self.assertFalse(record2.env.su)
class TestACLFeedback(Feedback):
    """ Tests that proper feedback is returned on ir.model.access errors
    """
    def setUp(self):
        super().setUp()
        ACL = self.env['ir.model.access']
        m = self.env['ir.model'].search([('model', '=', 'test_access_right.some_obj')])
        # Replace the default ACLs: read for Group 1, create+read for Group 0.
        ACL.search([('model_id', '=', m.id)]).unlink()
        ACL.create({
            'name': "read",
            'model_id': m.id,
            'group_id': self.group1.id,
            'perm_read': True,
        })
        ACL.create({
            'name': "create-and-read",
            'model_id': m.id,
            'group_id': self.group0.id,
            'perm_read': True,
            'perm_create': True,
        })
        self.record = self.env['test_access_right.some_obj'].create({'val': 5})
        # values are in cache, clear them up for the test
        ACL.flush()
        ACL.invalidate_cache()
    def test_no_groups(self):
        """ Operation is never allowed
        """
        with self.assertRaises(AccessError) as ctx:
            self.record.with_user(self.user).write({'val': 10})
        self.assertEqual(
            ctx.exception.args[0],
            """You are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
No group currently allows this operation.
Contact your administrator to request access if necessary."""
        )
    def test_one_group(self):
        """Creation is only granted to Group 0, which the user lacks."""
        with self.assertRaises(AccessError) as ctx:
            self.env(user=self.user)['test_access_right.some_obj'].create({
                'val': 1
            })
        self.assertEqual(
            ctx.exception.args[0],
            """You are not allowed to create 'Object For Test Access Right' (test_access_right.some_obj) records.
This operation is allowed for the following groups:\n\t- Group 0
Contact your administrator to request access if necessary."""
        )
    def test_two_groups(self):
        """Both read paths (field access and explicit read) list all groups."""
        r = self.record.with_user(self.user)
        expected = """You are not allowed to access 'Object For Test Access Right' (test_access_right.some_obj) records.
This operation is allowed for the following groups:\n\t- Group 0\n\t- Group 1
Contact your administrator to request access if necessary."""
        with self.assertRaises(AccessError) as ctx:
            # noinspection PyStatementEffect
            r.val
        self.assertEqual(ctx.exception.args[0], expected)
        with self.assertRaises(AccessError) as ctx:
            r.read(['val'])
        self.assertEqual(ctx.exception.args[0], expected)
class TestIRRuleFeedback(Feedback):
    """ Tests that proper feedback is returned on ir.rule errors
    """
    def setUp(self):
        super().setUp()
        self.model = self.env['ir.model'].search([('model', '=', 'test_access_right.some_obj')])
        self.record = self.env['test_access_right.some_obj'].create({
            'val': 0,
        }).with_user(self.user)
    def _make_rule(self, name, domain, global_=False, attr='write'):
        """Create an ir.rule on the test model granting only `attr`;
        group-scoped to Group 2 (the user's group) unless `global_`."""
        res = self.env['ir.rule'].create({
            'name': name,
            'model_id': self.model.id,
            'groups': [] if global_ else [(4, self.group2.id)],
            'domain_force': domain,
            'perm_read': False,
            'perm_write': False,
            'perm_create': False,
            'perm_unlink': False,
            'perm_' + attr: True,
        })
        return res
    def test_local(self):
        """Outside debug mode the message names no records, users or rules."""
        self._make_rule('rule 0', '[("val", "=", 42)]')
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Contact your administrator to request access if necessary.""")
        # debug mode (group_no_one) adds records/user/rule details
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
        # access through a related record is reported as implicit
        p = self.env['test_access_right.parent'].create({'obj_id': self.record.id})
        with self.assertRaisesRegex(
            AccessError,
            r"Implicitly accessed through 'Object for testing related access rights' \(test_access_right.parent\)\.",
        ):
            p.with_user(self.user).write({'val': 1})
    def test_locals(self):
        """All failing group rules are listed."""
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        self._make_rule('rule 0', '[("val", "=", 42)]')
        self._make_rule('rule 1', '[("val", "=", 78)]')
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 1
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
    def test_globals_all(self):
        """When every global rule fails, all of them are listed."""
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        self._make_rule('rule 0', '[("val", "=", 42)]', global_=True)
        self._make_rule('rule 1', '[("val", "=", 78)]', global_=True)
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 1
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
    def test_globals_any(self):
        """ Global rules are AND-eded together, so when an access fails it
        might be just one of the rules, and we want an exact listing
        """
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        self._make_rule('rule 0', '[("val", "=", 42)]', global_=True)
        self._make_rule('rule 1', '[(1, "=", 1)]', global_=True)
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
    def test_combination(self):
        """Mixed global and group rules: only the actually failing ones show."""
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        self._make_rule('rule 0', '[("val", "=", 42)]', global_=True)
        self._make_rule('rule 1', '[(1, "=", 1)]', global_=True)
        self._make_rule('rule 2', '[(0, "=", 1)]')
        self._make_rule('rule 3', '[("val", "=", 55)]')
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 2
- rule 3
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
    def test_warn_company(self):
        """ If one of the failing rules mentions company_id, add a note that
        this might be a multi-company issue.
        """
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        self._make_rule('rule 0', "[('company_id', '=', user.company_id.id)]")
        self._make_rule('rule 1', '[("val", "=", 0)]', global_=True)
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'val': 1})
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
Note: this might be a multi-company issue.
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
    def test_read(self):
        """ because of prefetching, read() goes through a different codepath
        to apply rules
        """
        self.env.ref('base.group_no_one').write({'users': [(4, self.user.id)]})
        self.env.ref('base.group_user').write({'users': [(4, self.user.id)]})
        self._make_rule('rule 0', "[('company_id', '=', user.company_id.id)]", attr='read')
        self._make_rule('rule 1', '[("val", "=", 1)]', global_=True, attr='read')
        with self.assertRaises(AccessError) as ctx:
            _ = self.record.val
        self.assertEqual(
            ctx.exception.args[0],
            """Due to security restrictions, you are not allowed to access 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 1
Note: this might be a multi-company issue.
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
        )
        p = self.env['test_access_right.parent'].create({'obj_id': self.record.id})
        p.flush()
        p.invalidate_cache()
        with self.assertRaisesRegex(
            AccessError,
            r"Implicitly accessed through 'Object for testing related access rights' \(test_access_right.parent\)\.",
        ):
            p.with_user(self.user).val
class TestFieldGroupFeedback(Feedback):
    """Feedback on field-level `groups=` restrictions: the error must list
    the offending fields and which groups may / may not use them."""
    def setUp(self):
        super().setUp()
        self.record = self.env['test_access_right.some_obj'].create({
            'val': 0,
        }).with_user(self.user)
    def test_read(self):
        self.env.ref('base.group_no_one').write(
            {'users': [(4, self.user.id)]})
        with self.assertRaises(AccessError) as ctx:
            _ = self.record.forbidden
        self.assertEqual(
            ctx.exception.args[0],
            """The requested operation can not be completed due to security restrictions.
Document type: Object For Test Access Right (test_access_right.some_obj)
Operation: read
User: %s
Fields:
- forbidden (allowed for groups 'User types / Internal User', 'Test Group'; forbidden for groups 'Extra Rights / Technical Features', 'User types / Public')"""
            % self.user.id
        )
        with self.assertRaises(AccessError) as ctx:
            _ = self.record.forbidden3
        self.assertEqual(
            ctx.exception.args[0],
            """The requested operation can not be completed due to security restrictions.
Document type: Object For Test Access Right (test_access_right.some_obj)
Operation: read
User: %s
Fields:
- forbidden3 (always forbidden)""" % self.user.id
        )
    def test_write(self):
        self.env.ref('base.group_no_one').write(
            {'users': [(4, self.user.id)]})
        with self.assertRaises(AccessError) as ctx:
            self.record.write({'forbidden': 1, 'forbidden2': 2})
        self.assertEqual(
            ctx.exception.args[0],
            """The requested operation can not be completed due to security restrictions.
Document type: Object For Test Access Right (test_access_right.some_obj)
Operation: write
User: %s
Fields:
- forbidden (allowed for groups 'User types / Internal User', 'Test Group'; forbidden for groups 'Extra Rights / Technical Features', 'User types / Public')
- forbidden2 (allowed for groups 'Test Group')"""
            % self.user.id
        )
|
import unittest
import Solution
class SolveTestCase(unittest.TestCase):
    """Tests for the statistics helpers in the Solution module
    (convertInt / mean / findPosMed / median / mode)."""

    # Shared fixtures: a 10-value and a 20-value space-separated sample.
    SMALL = "64630 11735 14216 99233 14470 4978 73429 38120 51135 67060"
    LARGE = "6392 51608 71247 14271 48327 50618 67435 47029 61857 22987 64858 99745 75504 85464 60482 30320 11342 48808 66882 40522"

    def testConvertInt(self):
        expected = [64630, 11735, 14216, 99233,
                    14470, 4978, 73429, 38120, 51135, 67060]
        self.assertEqual(Solution.convertInt(self.SMALL), expected)

    def testMeanCaseI(self):
        data = Solution.convertInt(self.SMALL)
        self.assertEqual(Solution.mean(10, data), 43900.6)

    def testMeanCaseII(self):
        data = Solution.convertInt(self.LARGE)
        self.assertEqual(Solution.mean(20, data), 51284.9)

    def testFindPosCaseI(self):
        self.assertEqual(Solution.findPosMed(10), 5)

    def testFindPosCaseII(self):
        # Odd and even counts share the same median position here.
        self.assertEqual(Solution.findPosMed(9), 5)

    def testMedianCaseI(self):
        data = Solution.convertInt(self.SMALL)
        self.assertEqual(Solution.median(10, data), 44627.5)

    def testMedianCaseII(self):
        nine = "64630 11735 14216 99233 14470 4978 73429 38120 51135"
        data = Solution.convertInt(nine)
        self.assertEqual(Solution.median(9, data), 38120)

    def testMedianCaseIII(self):
        data = Solution.convertInt(self.LARGE)
        self.assertEqual(Solution.median(20, data), 51113.0)

    def testModeCaseI(self):
        data = Solution.convertInt(self.SMALL)
        self.assertEqual(Solution.mode(data), 4978)

    def testModeCaseII(self):
        data = Solution.convertInt(self.LARGE)
        self.assertEqual(Solution.mode(data), 6392)
|
from macropy.core.macros import *
from macropy.core import *
macros = Macros()
def u(tree):
    """IDE-visible stub for the ``u%expr`` unquote marker; the real
    rewrite happens in ``_unquote_search`` at macro-expansion time."""
def name(tree):
    """IDE-visible stub for the ``name%expr`` unquote marker; the real
    rewrite happens in ``_unquote_search`` at macro-expansion time."""
@Walker
def _unquote_search(tree, **kw):
    # Rewrites `<tag> % <expr>` unquote markers found inside a quoted tree.
    # Each supported form is replaced by a Literal wrapping the AST that
    # should be spliced into the quoted output:
    #   u%x        -> runtime call ast_repr(x) (lift x's value into an AST)
    #   name%x     -> a Name node whose id is the value of x
    #   ast%x      -> x itself, spliced in unchanged
    #   ast_list%x -> a List node whose elts are the value of x
    if isinstance(tree, BinOp) and type(tree.left) is Name and type(tree.op) is Mod:
        if 'u' == tree.left.id:
            return Literal(Call(Name(id="ast_repr"), [tree.right], [], None, None))
        elif 'name' == tree.left.id:
            return Literal(Call(Name(id="Name"), [], [keyword("id", tree.right)], None, None))
        elif 'ast' == tree.left.id:
            return Literal(tree.right)
        elif 'ast_list' == tree.left.id:
            return Literal(Call(Name(id="List"), [], [keyword("elts", tree.right)], None, None))
@macros.expr()
def q(tree, **kw):
    # Expression form of the quote macro: expand unquote markers, then
    # return the AST-of-the-AST so the expression evaluates to the tree.
    tree = _unquote_search.recurse(tree)
    return ast_repr(tree)
@macros.block()
def q(tree, target, **kw):
    # Block form: `with q as x:` binds the quoted (unquote-expanded) body's
    # AST to the name `x`.
    body = _unquote_search.recurse(tree)
    return [Assign([Name(id=target.id)], ast_repr(body))]
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2020 James
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import TYPE_CHECKING, List, Optional
from .abc import SteamID
from .channel import GroupChannel
from .role import Role
if TYPE_CHECKING:
from .protobufs.steammessages_chat import CChatRoomGetChatRoomGroupSummaryResponse as GroupProto
from .state import ConnectionState
from .user import User
__all__ = ("Group",)
class Group(SteamID):
    """Represents a Steam group.

    Attributes
    ----------
    name: Optional[:class:`str`]
        The name of the group, could be ``None``.
    owner: :class:`~steam.abc.BaseUser`
        The owner of the group.
    top_members: List[:class:`~steam.abc.BaseUser`]
        A list of the group's top members.
    active_member_count: :class:`int`
        The group's active member count.
    roles: List[:class:`~steam.Role`]
        A list of the group's roles.
    default_role: Optional[:class:`~steam.Role`]
        The group's default role, or ``None`` if it could not be resolved.
    default_channel: Optional[:class:`~steam.GroupChannel`]
        The group's default channel, or ``None`` if it could not be resolved.
    channels: List[:class:`~steam.GroupChannel`]
        A list of the group's channels.
    """

    __slots__ = (
        "owner",
        "top_members",
        "name",
        "active_member_count",
        "roles",
        "default_role",
        "default_channel",
        "channels",
        "_state",
    )

    def __init__(self, state: "ConnectionState", proto: "GroupProto"):
        super().__init__(proto.chat_group_id, type="Chat")
        self._state = state
        self._from_proto(proto)

    async def __ainit__(self):
        # Resolve the raw account ids stored by _from_proto into User objects.
        self.owner = await self._state.client.fetch_user(self.owner)
        self.top_members = await self._state.client.fetch_users(*self.top_members)

    def _from_proto(self, proto: "GroupProto"):
        # NOTE: owner/top_members hold raw account ids until __ainit__ runs.
        self.owner: "User" = proto.accountid_owner
        self.name: Optional[str] = proto.chat_group_name or None
        self.active_member_count = proto.active_member_count
        self.top_members: List["User"] = proto.top_members
        self.roles: List[Role] = [Role(self._state, self, role) for role in proto.role_actions]
        # Resolve the defaults by id. Fall back to None instead of raising:
        # previously a default_chat_id with no matching channel raised
        # IndexError, while default_role was already guarded.
        self.default_role: Optional[Role] = next(
            (r for r in self.roles if r.id == int(proto.default_role_id)), None
        )
        self.channels: List[GroupChannel] = [
            GroupChannel(state=self._state, group=self, channel=channel)
            for channel in proto.chat_rooms
        ]
        self.default_channel: Optional[GroupChannel] = next(
            (c for c in self.channels if c.id == int(proto.default_chat_id)), None
        )

    def __repr__(self):
        attrs = ("name", "id", "owner")
        resolved = [f"{attr}={getattr(self, attr)!r}" for attr in attrs]
        return f"<Group {' '.join(resolved)}>"

    def __str__(self):
        return self.name or ""

    async def leave(self) -> None:
        """|coro|
        Leaves the :class:`Group`.
        """
        await self._state.leave_chat(self.id)

    async def invite(self, user: "User"):
        """|coro|
        Invites a :class:`~steam.User` to the :class:`Group`.

        Parameters
        -----------
        user: :class:`~steam.User`
            The user to invite to the group.
        """
        await self._state.invite_user_to_group(user.id64, self.id)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayIserviceCognitiveClassificationObjectQueryModel(object):
    """Request model for the iservice cognitive-classification object query.

    Each wire-level field is exposed as a plain property backed by a
    ``_<name>`` slot; ``to_alipay_dict``/``from_alipay_dict`` convert
    between instances and the dict representation used on the wire.
    """

    # Wire-level field names, in serialization order.
    _FIELD_NAMES = (
        'biz_code',
        'city_code',
        'cognition_content',
        'cognition_type',
        'group_id',
        'latitude',
        'longitude',
        'service_code',
        'test_query',
        'user_id',
    )

    def __init__(self):
        # Every field starts unset; only truthy values are serialized.
        for field in self._FIELD_NAMES:
            setattr(self, '_' + field, None)

    @property
    def biz_code(self):
        return self._biz_code

    @biz_code.setter
    def biz_code(self, value):
        self._biz_code = value

    @property
    def city_code(self):
        return self._city_code

    @city_code.setter
    def city_code(self, value):
        self._city_code = value

    @property
    def cognition_content(self):
        return self._cognition_content

    @cognition_content.setter
    def cognition_content(self, value):
        self._cognition_content = value

    @property
    def cognition_type(self):
        return self._cognition_type

    @cognition_type.setter
    def cognition_type(self, value):
        self._cognition_type = value

    @property
    def group_id(self):
        return self._group_id

    @group_id.setter
    def group_id(self, value):
        self._group_id = value

    @property
    def latitude(self):
        return self._latitude

    @latitude.setter
    def latitude(self, value):
        self._latitude = value

    @property
    def longitude(self):
        return self._longitude

    @longitude.setter
    def longitude(self, value):
        self._longitude = value

    @property
    def service_code(self):
        return self._service_code

    @service_code.setter
    def service_code(self, value):
        self._service_code = value

    @property
    def test_query(self):
        return self._test_query

    @test_query.setter
    def test_query(self, value):
        self._test_query = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict.

        Nested objects implementing ``to_alipay_dict`` are serialized
        recursively; falsy fields are omitted, matching the SDK's
        generated-model convention.
        """
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayIserviceCognitiveClassificationObjectQueryModel()
        for field in AlipayIserviceCognitiveClassificationObjectQueryModel._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
|
# brain data
import os
from joblib import Parallel, delayed
import pandas as pd
import pickle
from time import time
import numpy as np
from smtr import STL, Dirty, MLL, utils, AdaSTL, MTW, ReMTW
from build_data import build_coefs, build_dataset
from smtr.model_selection import (best_score_dirty,
best_score_stl,
best_score_mll,
best_score_mtw)
# ---- experiment configuration -------------------------------------------
dataset = "ds117"
resolution = 4
gpu = True
n_splits = 2
cv_size_dirty = 12
mtgl_only = False
cv_size_lasso = 30
cv_size_mtw = 10
cv_size_mll = 30
compute_ot = True
tol_ot = 0.1
positive = False
spacing = "ico%s" % resolution
subject = 'fsaverage%d' % resolution
suffix = "_ffg"
savedir_name = "ico%d_%s%s" % (resolution, dataset, suffix)
# Running on the cluster is detected by the presence of /home/parietal/;
# cupy is only imported there and gpu is disabled if the import fails.
if os.path.exists("/home/parietal/"):
    if gpu:
        try:
            import cupy as cp
        except ImportError:
            gpu = False
    server = True
    results_path = "/home/parietal/hjanati/csvs/%s/" % dataset
    data_path = "/home/parietal/hjanati/data/"
    plot = False
else:
    server = False
    gpu = False
    data_path = "~/Dropbox/neuro_transport/code/mtw_experiments/meg/"
    data_path = os.path.expanduser(data_path)
    results_path = data_path + "results/%s/" % dataset
metric_fname = data_path + "%s/metrics/metric_fsaverage_%s_lh.npy" %\
    (dataset, spacing)
M = np.load(metric_fname)
M_emd = np.ascontiguousarray(M.copy() * 100)  # Metric M in cm
n_features = len(M)
seed = 42
n_samples = 204
epsilon = 10. / n_features
epsilon_met = 0.
gamma = 1.
# ---- model instances shared by all runs ---------------------------------
dirty = Dirty(positive=positive)
mll = MLL(positive=positive, tol=1e-3)
stl = STL(positive=positive)
adastl = AdaSTL(positive=positive)
sigma0 = 0.01
rw_steps = 100
rw_tol = 1e-2
mwe = MTW(M=M, epsilon=epsilon, gamma=gamma, sigma0=sigma0,
          stable=False, tol_ot=1e-4, maxiter_ot=30, tol=1e-4,
          maxiter=4000, positive=positive, cython=True, gpu=True,
          n_jobs=1
          )
mtw = MTW(M=M, epsilon=epsilon, gamma=gamma, sigma0=0.,
          stable=False, tol_ot=1e-4, maxiter_ot=30, tol=1e-4,
          maxiter=4000, positive=positive, cython=True, gpu=True,
          n_jobs=1
          )
remtw = MTW(M=M, epsilon=epsilon, gamma=gamma, sigma0=0.,
            stable=False, tol_ot=1e-4, maxiter_ot=30, tol=1e-4,
            maxiter=4000, positive=positive, cython=True, gpu=True,
            n_jobs=1, reweighting_steps=rw_steps, reweighting_tol=rw_tol)
remwe = MTW(M=M, epsilon=epsilon, gamma=gamma, sigma0=sigma0,
            stable=False, tol_ot=1e-4, maxiter_ot=30, tol=1e-4,
            maxiter=4000, positive=positive, cython=True, gpu=True,
            n_jobs=4, reweighting_steps=rw_steps, reweighting_tol=rw_tol,
            ws_size=100)
# (model, display name, CV keyword args, scoring function) tuples consumed
# by one_run(); all but Lasso are currently disabled.
models = [
    (stl, 'Lasso', dict(cv_size=cv_size_lasso, eps=2e-2, warmstart=True),
     best_score_stl),
    # (adastl, 'Re-Lasso', dict(cv_size=cv_size_lasso, eps=2e-2,
    #                           warmstart=False),
    #  best_score_stl),
    # (mll, 'MLL', dict(cv_size=cv_size_mll, eps=0.01, warmstart=False),
    #  best_score_mll),
    # (dirty, 'Dirty', dict(cv_size=cv_size_dirty, mtgl_only=mtgl_only,
    #                       eps=2e-2, do_mtgl=False, warmstart=True),
    #  best_score_dirty),
    # (dirty, 'GroupLasso', dict(cv_size=50, mtgl_only=True, eps=1e-2,
    #                            do_mtgl=True, warmstart=True),
    #  best_score_dirty),
    # (mtw, 'MTW', dict(cv_size=cv_size_mtw, eps=0.1, warmstart=True,
    #     alphas=np.array([0., 10., 15., 20., 30., 50.]),
    #     betas=[0.05, 0.1, 0.15, 0.2, 0.3, 0.4]), best_score_mtw),
    # (remtw, 'Re-MTW', dict(cv_size=cv_size_mtw, eps=0.1, warmstart=False,
    #     alphas=np.array([0., 5., 10., 15., 20., 30., 50., 70.]),
    #     betas=[.05, 0.075, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3]),
    # (mwe, 'MWE', dict(cv_size=cv_size_mtw, eps=0.1, warmstart=True,
    #     alphas=np.array([0., 5., 10., 15., 20., 30., 50., 70.]),
    #     betas=[.05, 0.075, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3]),
    #  best_score_mtw),
    # (remwe, 'Re-MWE', dict(cv_size=cv_size_mtw, eps=0.1, warmstart=False,
    #     alphas=np.array([0., 5., 10., 15., 20., 30.]),
    #     betas=[.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4]),
    #  best_score_mtw),
]
# Output directories (created up-front so workers can assert they exist).
savedir = results_path + "%s/" % savedir_name
coefsdir = results_path + "%s/coefs/" % savedir_name
cvpathdir = results_path + "%s/cvpath/" % savedir_name
if not os.path.exists(savedir):
    os.makedirs(savedir)
if not os.path.exists(coefsdir):
    os.makedirs(coefsdir)
if not os.path.exists(cvpathdir):
    os.makedirs(cvpathdir)
def one_run(seed, n_tasks, overlap, n_sources, same_design,
            power, gamma, labels_type, std, depth, device, dataset=dataset,
            ):
    """Run one simulated-source experiment and save per-model scores.

    Builds ground-truth coefficients and a synthetic dataset, cross-validates
    every entry of the module-level `models` list, and writes one CSV of
    AUC/OT/MSE scores (plus settings) into `savedir`. Returns 0.
    `device` selects the cupy GPU for MTW-type models.
    """
    assert os.path.exists(savedir)
    # Ground metric rescaled and negated for the entropic-OT kernel.
    M_ = M.copy() ** power
    M_ /= np.median(M_)
    M_ = - M_ / epsilon
    coefs = build_coefs(n_tasks=n_tasks, overlap=overlap,
                        n_sources=n_sources, seed=seed,
                        positive=positive, labels_type=labels_type,
                        dataset=dataset, spacing=spacing)
    assert abs(coefs).max(axis=0).all()
    ot_params = {"M": M_emd, "epsilon": epsilon_met, "compute_ot": compute_ot}
    X, y = build_dataset(coefs, std=std, same_design=same_design,
                         seed=seed, randomize_subjects=False, dataset=dataset,
                         spacing=spacing)
    n_samples = X.shape[1]
    Xs = X.reshape(n_tasks, n_samples, -1)
    Ys = y.reshape(n_tasks, n_samples)
    # Column-wise normalization; `depth` controls how aggressively.
    norms = np.linalg.norm(Xs, axis=1) ** depth
    scaling = norms.T * 1e4
    X_scaled = Xs / norms[:, None, :]
    auc, ot, mse = dict(), dict(), dict()
    aucabs, otabs = dict(), dict()
    coefs_dict = dict(truth=coefs, scaling=scaling)
    cvpath_dict = dict()
    t0 = time()
    for model, name, cv_params, best_score_model in models:
        print("Doing %s ..." % name)
        if isinstance(model, MTW):
            model.gamma = gamma
            model.M = M_
            # Best-effort GPU selection; cp may be undefined off-cluster.
            try:
                cp.cuda.Device(device).use()
            except:
                pass
        t = time()
        bscores, scores, bc, bp, _, ac = \
            best_score_model(model, X_scaled, Ys, coefs,
                             scaling_vector=scaling, **cv_params, **ot_params)
        print(bp)
        cvpath_dict[name.lower()] = ac
        coefs_dict[name.lower()] = bc
        coefs_pred = bc['auc']
        model.coefs_ = coefs_pred.copy()
        # OT / MSE scores are negated back to "lower is better" values.
        auc[name.lower()] = bscores['auc']
        ot[name.lower()] = - bscores['ot'] / n_sources
        mse[name.lower()] = - bscores['mse']
        aucabs[name.lower()] = bscores['aucabs']
        otabs[name.lower()] = - bscores['otabs'] / n_sources
        t = time() - t
        print("Time %s : %f, n_tasks = %d" % (name, t, n_tasks))
        if isinstance(model, MTW):
            print("Best for %s" % name, bp)
    x_auc, x_ot, x_mse, names = [], [], [], []
    x_aucabs, x_otabs = [], []
    for name, v in auc.items():
        names.append(name)
        x_auc.append(v)
        x_ot.append(ot[name])
        x_mse.append(mse[name])
        x_aucabs.append(aucabs[name])
        x_otabs.append(otabs[name])
    t0 = time() - t0
    data = pd.DataFrame(x_auc, columns=["auc"])
    data["ot"] = x_ot
    data["mse"] = x_mse
    data["aucabs"] = x_aucabs
    data["otabs"] = x_otabs
    data["model"] = names
    data["computation_time"] = t0
    # NOTE(review): `model`/`bp` here are whatever the LAST loop iteration
    # left behind — these columns only make sense with a single MTW model.
    if isinstance(model, MTW):
        data["t_ot"] = model.t_ot
        data["t_cd"] = model.t_cd
        data["alpha_auc"] = bp["auc"]["alpha"] * n_samples
        data["beta_auc"] = bp["auc"]["beta"] / model.betamax
        data["alpha_ot"] = bp["ot"]["alpha"] * n_samples
        data["beta_ot"] = bp["ot"]["beta"] / model.betamax
        data["conco"] = model.sigma0 > 0
        data["steps"] = rw_steps
    coefs_dict["scores"] = scores
    # Timestamp used to make output file names unique across workers.
    t = int(1e5 * time())
    coefs_fname = coefsdir + "coefs_%s_%s.pkl" % (name.lower(), t)
    cvpath_fname = cvpathdir + "cvpath_%s_%s.pkl" % (name.lower(), t)
    settings = [("subject", subject), ("n_tasks", n_tasks),
                ("overlap", overlap), ("std", std), ("seed", seed),
                ("epsilon", epsilon * n_features), ("gamma", gamma),
                ("cv_size_mtw", cv_size_mtw), ("cv_size_stl", cv_size_lasso),
                ("cv_size_dirty", cv_size_dirty), ("same_design", same_design),
                ("n_features", coefs.shape[0]), ("n_samples", n_samples),
                ("power", power), ("n_sources", n_sources),
                ("label_type", labels_type), ("coefspath", coefs_fname),
                ("save_time", t), ("cvpath", cvpath_fname), ("depth", depth),
                ]
    coefs_dict["settings"] = dict(settings)
    for var_name, var_value in settings:
        data[var_name] = var_value
    # Pickle dumps disabled; only the CSV of scores is kept.
    # with open(coefs_fname, "wb") as ff:
    #     pickle.dump(coefs_dict, ff)
    # with open(cvpath_fname, "wb") as ff:
    #     pickle.dump(cvpath_dict, ff)
    print("One worker out: \n", data)
    data_name = "results_%d" % t + ".csv"
    data.to_csv(savedir + data_name)
    return 0.
def wrapper(seed, n_tasks, overlap, n_sources, same_design,
            power, gamma, labels_type, std, depth, device):
    """Joblib entry point: run one experiment, then best-effort free GPU
    memory (cupy may be absent or unused, hence the blanket except)."""
    x = one_run(seed, n_tasks, overlap, n_sources, same_design,
                power, gamma, labels_type, std, depth, device)
    try:
        utils.free_gpu_memory(cp)
    except:
        pass
    return x
if __name__ == "__main__":
    def trial_in_dataset(s, k, d):
        # Count how many results for this (seed, n_tasks, same_design)
        # combination already exist, so finished trials are skipped.
        name = "results/data/%s.csv" % savedir_name
        if os.path.exists(name):
            df = pd.read_csv(name, index_col=0)
            query = (df.seed == s) & (df.n_tasks == k) & (df.same_design == d)
            query = query & (df.model == models[0][1].lower())
        else:
            return 0
        return len(df[query])
    t0 = time()
    seed = 42
    rnd = np.random.RandomState(seed)
    n_repeats = 10
    seeds = rnd.randint(100000000, size=n_repeats)
    start = 0
    end = 10
    seeds = seeds[start:end]
    # Grid of experimental conditions; the cartesian product below makes
    # one worker call per combination.
    overlaps = [50]
    n_tasks = [4]
    # n_tasks = [4]
    n_sources = [5]
    same_design = [False]
    powers = [1.]
    gammas = [1.]
    types = ["any"]
    noise = [0.25]
    depths = [0.5, 0.7, 0.8, 0.9, 0.95, 1.]
    seeds_points = [[s, k, o, n, d, p, ga, lt, std, dep]
                    for n in n_sources for d in same_design
                    for o in overlaps for lt in types
                    for p in powers for ga in gammas for s in seeds
                    for k in n_tasks for std in noise for dep in depths
                    if not trial_in_dataset(s, k, d)]
    # Round-robin the jobs over 4 GPUs.
    for i, sp in enumerate(seeds_points):
        device = i % 4
        sp.append(device)
    parallel = Parallel(n_jobs=30, backend="multiprocessing")
    # parallel = Parallel(n_jobs=1)
    iterator = (delayed(wrapper)(s, k, o, n, d, p, ga, lt, std, dep, dev)
                for s, k, o, n, d, p, ga, lt, std, dep, dev in seeds_points)
    out = parallel(iterator)
    print('================================' +
          'FULL TIME = %d' % (time() - t0))
|
from MAST.structopt.tools import get_best
from MAST.structopt.switches import selection_switch
from MAST.structopt.switches import lambdacommamu
from MAST.structopt.tools import remove_duplicates
import logging
import random
import pdb
def predator_switch(pop,Optimizer):
    """Function for removing individuals from the population.

    Dynamically imports the predator scheme named by Optimizer.predator and
    applies it; on any failure, falls back to a basic fitness-duplicate
    removal. Python 2 syntax (exec statement, `except X, e`).
    """
    #logger = initialize_logger(Optimizer.loggername)
    logger = logging.getLogger(Optimizer.loggername)
    scheme = Optimizer.predator
    logger.info('Applying predator to population with initial size = {0}'.format(len(pop)))
    STR = 'PREDATOR\n'
    try:
        # Import the scheme module by name, then call the function of the
        # same name via eval; both come from MAST.structopt.predator.
        exec "from MAST.structopt.predator.{0} import {0}".format(scheme)
        pop, STR = eval('{0}(pop, Optimizer)'.format(scheme))
        passflag = True
    except NameError, e:
        logger.warning('Specified predator not one of the available options. Please check documentation and spelling! Predator : {0}. {1}'.format(scheme,e), exc_info=True)
        passflag = False
        STR+='Specified predator not one of the available options. Please check documentation and spelling! Predator : '+repr(scheme)
        STR+=repr(e)+'\n'
    except Exception, e:
        logger.error('ERROR: Issue in Predator Scheme. Predator = {0}. {1}'.format(scheme,e), exc_info=True)
        print 'ERROR: Issue in Predator Scheme. Predator = '+repr(scheme)
        print e
        passflag = False
        STR+=''
    if not passflag:
        # Fallback: drop fitness-duplicates, then re-select survivors.
        logger.warning('Issue in predator. Attempting basic Fitpred')
        fitlist = [one.fitness for one in pop]
        nfitlist, nindices = remove_duplicates(fitlist, Optimizer.demin)
        STR += 'Issue in predator. Attempting basic Fitpred\n'
        newpop = []
        if len(nfitlist) != len(fitlist):
            STR+='Predator: Removed total of '+repr(len(fitlist)-len(nfitlist))+' from population\n'
            otherlist = []
            for i in range(len(pop)):
                if i not in nindices:
                    STR+='Predator: Removed '+repr(pop[i].history_index)+'\n'
                    otherlist.append(pop[i])
                else:
                    newpop.append(pop[i])
            # Refill the population with random removed duplicates until the
            # target size is reached.
            while len(newpop) < Optimizer.nindiv:
                STR+='Predator: Adding duplicates back\n'
                choice = random.choice(otherlist)
                if choice.index not in nindices:
                    newpop.append(choice)
                    nindices.append(choice.index)
            nindices.sort()
        if Optimizer.natural_selection_scheme=='fussf':
            # NOTE(review): get_fingerprint is not imported in this module;
            # presumably injected elsewhere — confirm before relying on it.
            for ind in newpop:
                if ind.fingerprint == 0:
                    ind.fingerprint = get_fingerprint(Optimizer,ind,Optimizer.fpbin,Optimizer.fpcutoff)
        if 'lambda,mu' in Optimizer.algorithm_type:
            try:
                mark = [ index for index,n in enumerate(nindices) if n > Optimizer.nindiv-1][0]
            except:
                mark = Optimizer.nindiv
            Optimizer.mark = mark
            pop,str = lambdacommamu.lambdacommamu(newpop, Optimizer)
            STR+=str
        else:
            pop = selection_switch(newpop, Optimizer.nindiv, Optimizer.natural_selection_scheme, Optimizer)
        pop = get_best(pop,len(pop))
    Optimizer.output.write(STR)
    return pop
|
# -*- coding: utf-8 -*-
def break_words(stuff):
    """Split *stuff* on single spaces and return the resulting words."""
    return stuff.split(' ')
def sort_words(words):
    """Return a new, lexicographically sorted copy of *words*."""
    return sorted(words)
def print_first_word(words):
    """Remove and return the first word (misnomer: nothing is printed)."""
    return words.pop(0)
def print_last_word(words):
    """Remove and return the last word (misnomer: nothing is printed)."""
    return words.pop()
def sort_sentence(sentence):
    """Return the words of *sentence* in sorted order."""
    return sort_words(break_words(sentence))
def print_first_and_last(sentence):
    """Pop and discard the first and last words of *sentence*."""
    parts = break_words(sentence)
    print_first_word(parts)
    print_last_word(parts)
def print_first_and_last_sorted(sentence):
    """Pop and discard the first and last words of the sorted sentence."""
    parts = sort_sentence(sentence)
    print_first_word(parts)
    print_last_word(parts)
def pfl_ret(sentence):
    """Return a (first, last) tuple of the words in *sentence*."""
    parts = break_words(sentence)
    return print_first_word(parts), print_last_word(parts)
def pfls_ret(sentence):
    """Return a (first, last) tuple of the sorted words of *sentence*."""
    parts = sort_sentence(sentence)
    return print_first_word(parts), print_last_word(parts)
|
# Reads t test cases; for each n the printed answer is n // 2 + 1.
try:
    t= int(input())
    while t>0:
        t -= 1
        n = int(input())
        # i = 1
        # while i<n:
        #     i += 1
        #     n -= 1
        print(n//2+1)
# Bare except is the deliberate competitive-programming idiom for stopping
# silently at EOF (it also hides any real error).
except:
    pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ItemPrizeInfo(object):
    """Prize-item payload for the Alipay点 (points) exchange API.

    Fields are exposed as property pairs backed by ``_<name>`` slots;
    ``to_alipay_dict``/``from_alipay_dict`` convert to and from the
    wire-level dict representation.
    """

    # Wire-level field names, in serialization order.
    _FIELD_NAMES = (
        'item_can_exchange',
        'item_code',
        'item_icon_url',
        'item_name',
        'item_price',
        'point_amount',
    )

    def __init__(self):
        # Every field starts unset; only truthy values are serialized.
        for field in self._FIELD_NAMES:
            setattr(self, '_' + field, None)

    @property
    def item_can_exchange(self):
        return self._item_can_exchange

    @item_can_exchange.setter
    def item_can_exchange(self, value):
        self._item_can_exchange = value

    @property
    def item_code(self):
        return self._item_code

    @item_code.setter
    def item_code(self, value):
        self._item_code = value

    @property
    def item_icon_url(self):
        return self._item_icon_url

    @item_icon_url.setter
    def item_icon_url(self, value):
        self._item_icon_url = value

    @property
    def item_name(self):
        return self._item_name

    @item_name.setter
    def item_name(self, value):
        self._item_name = value

    @property
    def item_price(self):
        return self._item_price

    @item_price.setter
    def item_price(self, value):
        self._item_price = value

    @property
    def point_amount(self):
        return self._point_amount

    @point_amount.setter
    def point_amount(self, value):
        self._point_amount = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict; nested objects
        implementing ``to_alipay_dict`` are serialized recursively."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an ItemPrizeInfo from a response dict; None for empty input."""
        if not d:
            return None
        o = ItemPrizeInfo()
        for field in ItemPrizeInfo._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
|
# -*- coding: utf-8 -*-
"""
__init__.py file for epaModules module.
Defines the __all__ modules.
"""
__all__ = ["dateconvert"] |
from scipy.io import loadmat
import torch
import numpy as np
def data_generator(dataset):
    """Load one of the polyphonic-music datasets from ./mdata.

    Parameters
    ----------
    dataset : str
        One of "JSB", "Muse", "Nott" or "Piano".

    Returns
    -------
    (X_train, X_valid, X_test)
        The train/valid/test cell arrays from the .mat file, with every
        sequence converted in place to a ``torch.Tensor``.

    Raises
    ------
    ValueError
        If *dataset* is not a recognised name. (Previously an unknown name
        fell through every branch and crashed with a NameError on ``data``.)
    """
    mat_files = {
        "JSB": './mdata/JSB_Chorales.mat',
        "Muse": './mdata/MuseData.mat',
        "Nott": './mdata/Nottingham.mat',
        "Piano": './mdata/Piano_midi.mat',
    }
    if dataset not in mat_files:
        raise ValueError("Unknown dataset %r; expected one of %s"
                         % (dataset, sorted(mat_files)))
    print('loading %s data...' % dataset)
    data = loadmat(mat_files[dataset])
    X_train = data['traindata'][0]
    X_valid = data['validdata'][0]
    X_test = data['testdata'][0]
    # Convert each piece to a float tensor, mutating the arrays in place.
    for split in (X_train, X_valid, X_test):
        for i in range(len(split)):
            split[i] = torch.Tensor(split[i].astype(np.float64))
    return X_train, X_valid, X_test
|
###############################################################################
# Copyright (c) 2019-2020 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# All data and information contained in or disclosed by this document are
# confidential and proprietary information of Qualcomm Technologies, Inc., and
# all rights therein are expressly reserved. By accepting this material, the
# recipient agrees that this material and the information contained therein
# are held in confidence and in trust and will not be used, copied, reproduced
# in whole or in part, nor its contents revealed in any manner to others
# without the express written permission of Qualcomm Technologies, Inc.
###############################################################################
import numpy as np
import sys
f1 = sys.argv[1]  # path to the first raw float32 dump
f2 = sys.argv[2]  # path to the second raw float32 dump
a1 = np.fromfile(f1, dtype=np.float32)
a2 = np.fromfile(f2, dtype=np.float32)
# Tolerant element-wise comparison (absolute 1e-5, relative 1e-3).
close = np.allclose(a1, a2, atol=1e-5, rtol=1e-3)
if close:
    print("[UC_IMPL] FILES %s and %s MATCH" % (f1, f2))
else:
    print("[UC_IMPL] ERROR. MISMATCH IN %s and %s" % (f1, f2))
    # Dump the first three values of each file to aid debugging.
    print(str(a1[0]) + " " + str(a2[0]))
    print(str(a1[1]) + " " + str(a2[1]))
    print(str(a1[2]) + " " + str(a2[2]))
|
import pytest
import io
from unittest import mock
from salvo.util import get_server_info, print_server_info, resolve
@mock.patch("salvo.util.request")
def test_print_server_info(request):
    """End-to-end get_server_info + print_server_info with a mocked request."""
    request.return_value = {"headers": {"server": "Super"}}
    out = io.StringIO()
    info = get_server_info("http://example.com", "GET", {"one": "two"})
    print_server_info(info, stream=out)
    assert "Server Software: Super" in out.getvalue()
def test_resolve():
    """resolve() handles dotted paths, bare names, and missing names."""
    assert resolve("salvo.util.print_server_info") is print_server_info
    assert resolve("resolve") is resolve
    with pytest.raises(ImportError):
        resolve("OoO")
|
"""
This unit test checks that openconn() waits for
socket cleanup.
"""
#pragma repy restrictions.default dylink.repy librepy.repy
# Get the IP address of intel.com
intel_IP = gethostbyname("intel.com")
intel_port = 80
localip = getmyip()
localport = libsocket.get_connports(localip)[0]
# Connect to intel
sock = openconn(intel_IP, intel_port, localip, localport)
sock.close()
# Re-use the sample tuple, set a long timeout
try:
sock2 = openconn(intel_IP, intel_port, localip, localport, timeout=300)
sock2.close()
except CleanupInProgressError:
print "Openconn should handle socket cleanup! We should have blocked!"
except TimeoutError:
print "Openconn timed out! We should have cleaned up by now."
|
from datetime import datetime
from pulsar.api import BadRequest, Http401, PermissionDenied, Http404
from sqlalchemy.exc import StatementError
from sqlalchemy.orm import joinedload
from lux.core import AuthenticationError, AuthBackend as AuthBackendBase
from lux.utils.crypt import create_uuid
from lux.utils.auth import normalise_email
from lux.utils.data import compact_dict
from .rest.user import CreateUserSchema
class AuthBackend(AuthBackendBase):
    """Mixin to implement authentication backend based on
    SQLAlchemy models
    """
    def on_request(self, request):
        # Populate request.cache.user from the Authorization header.
        # Supported schemes: "bearer <token-id>" and "jwt <encoded-jwt>".
        auth = request.get('HTTP_AUTHORIZATION')
        cache = request.cache
        cache.user = self.anonymous()
        if not auth:
            return
        app = request.app
        try:
            try:
                auth_type, key = auth.split(None, 1)
            except ValueError:
                raise BadRequest('Invalid Authorization header') from None
            auth_type = auth_type.lower()
            if auth_type == 'bearer':
                token = self.get_token(request, key)
                if not token:
                    raise BadRequest
                request.cache.token = token
                user = token.user
            elif auth_type == 'jwt':
                payload = self.decode_jwt(request, key)
                payload['token'] = key
                user = app.auth.service_user(payload)
            # NOTE(review): an unrecognised scheme leaves `user` unbound;
            # the resulting NameError is caught below and surfaces as a
            # generic BadRequest — confirm this is intended.
        except (Http401, BadRequest, PermissionDenied):
            raise
        except Exception:
            # Unexpected failure: log the traceback, hide details from client.
            request.app.logger.exception('Could not authorize')
            raise BadRequest from None
        else:
            if user:
                request.cache.user = user
    def get_user(self, session, id=None, token_id=None, username=None,
                 email=None, auth_key=None, **kw):
        """Securely fetch a user by id, username, email or auth key
        Returns user or nothing
        """
        models = session.models
        if token_id:
            try:
                return models['tokens'].get_one(session, id=token_id).user
            except Http404:
                return None
        if auth_key:
            # Registration keys are only valid until their expiry date.
            try:
                reg = models['registrations'].get_one(session, id=auth_key)
                return reg.user if reg.expiry > datetime.now() else None
            except Http404:
                return None
        try:
            # compact_dict drops the None-valued filters.
            return models['users'].get_one(session, **compact_dict(
                id=id, username=username, email=normalise_email(email)
            ))
        except Http404:
            return
    def authenticate(self, session, user=None, password=None, **kw):
        # Verify `password` against the (looked-up) user's stored hash;
        # raises AuthenticationError on unknown user or bad password.
        if not user:
            user = self.get_user(session, **kw)
        if user and self.crypt_verify(user.password, password):
            return user
        else:
            raise AuthenticationError('Invalid credentials')
    def create_user(self, session, **data):
        # New users are active by default unless the caller says otherwise.
        users = session.models['users']
        data.setdefault('active', True)
        return users.create_one(session, data, CreateUserSchema)
    def create_superuser(self, session, **params):
        params['superuser'] = True
        params['active'] = True
        return self.create_user(session, **params)
    def create_token(self, request, user, **kwargs):
        """Create the token
        """
        odm = request.app.odm()
        with odm.begin() as session:
            # Token primary key is a fresh UUID.
            kwargs['id'] = create_uuid()
            token = odm.token(user=user, **kwargs)
            session.add(token)
        return token
    def get_token(self, request, key):
        # Fetch a token by primary key, eager-loading its user; a malformed
        # key (StatementError) is reported as BadRequest.
        odm = request.app.odm()
        token = odm.token
        with odm.begin() as session:
            query = session.query(token).options(joinedload(token.user))
            try:
                token = query.get(key)
            except StatementError:
                raise BadRequest from None
        return token
|
import xml.etree.ElementTree as ET
from os.path import join, isfile
from os import listdir
# Directory containing the XML inventory dumps to parse.
xmlpath = 'c:/Xmls/'
xmlslist = [join(xmlpath, entry) for entry in listdir(xmlpath)]
print('Found xml files:', xmlslist)
def getdata(xml, classname, name, rawsearch=None):
    """Extract property values from a CIM-style XML dump.

    :param xml: path of the XML file to parse
    :param classname: CLASSNAME attribute an INSTANCE must carry
    :param name: property name to read; the special names
        'IBMSG_PCIRawData' and 'RawResults' select unique array values
        whose text starts with *rawsearch*, and 'Name' (with *rawsearch*)
        returns the SerialNumber of the FRU whose Name equals *rawsearch*
    :param rawsearch: prefix / match string used by the special modes
    :return: the single value when exactly one match is found, otherwise
        the (possibly empty) list of matches
    """
    with open(xml, 'r') as x:
        root = ET.fromstring(x.read())
    listval = []
    for inst in root.findall('DECLARATION/DECLGROUP/VALUE.OBJECT/INSTANCE'):
        if inst.attrib['CLASSNAME'] != classname:
            continue
        # The original chained independent `if`s, so the 'RawResults' mode
        # also fell through into the generic branch; the modes are meant
        # to be mutually exclusive, and the two raw branches were identical.
        if name in ('IBMSG_PCIRawData', 'RawResults'):
            # Raw-data mode: unique array values starting with rawsearch.
            for val in inst.findall('PROPERTY.ARRAY/VALUE.ARRAY/VALUE'):
                text = val.text
                # Guard against empty <VALUE/> elements (text is None).
                if text and text.startswith(rawsearch) and text not in listval:
                    listval.append(text)
        elif name == 'Name' and rawsearch:
            # FRU mode: locate the instance whose Name matches exactly,
            # then report its SerialNumber property.
            props = inst.findall('PROPERTY')
            for prop in props:
                if prop.attrib['NAME'] == name and prop.find('VALUE').text == rawsearch:
                    for serial_prop in props:
                        if serial_prop.attrib['NAME'] == 'SerialNumber':
                            listval.append(serial_prop.find('VALUE').text)
        else:
            # Plain mode: every matching scalar property value.
            for prop in inst.findall('PROPERTY'):
                if prop.attrib['NAME'] == name:
                    listval.append(prop.find('VALUE').text)
    return listval[0] if len(listval) == 1 else listval
# Parse every XML dump and print a hardware inventory report per file.
for xml in xmlslist:
    sysserial = getdata(xml, classname='IBMSG_ComputerSystem', name='SerialNumber')
    raidserial = getdata(xml, classname='LSIESG_PhysicalCard', name='SerialNumber')
    raidfw = getdata(xml, classname='LSIESG_FirmwarePackageIdentity', name='VersionString')
    drslot = getdata(xml, classname='LSIESG_PhysicalDrive', name='Slot_No')
    drpartnumber = getdata(xml, classname='LSIESG_PhysicalDrive', name='PartNumber')
    drserial = getdata(xml, classname='LSIESG_PhysicalDrive', name='SerialNumber')
    # Pair slot / part number / serial for each physical drive.
    disklist = zip(drslot, drpartnumber, drserial)
    ethname = getdata(xml, classname='IBMSG_BcmDeviceFirmwareElement', name='Name')
    ethfw = getdata(xml, classname='IBMSG_BcmDeviceFirmwareElement', name='Version')
    ethlist = zip(ethname,ethfw)
    # NOTE(review): name='RawResult' (no trailing 's') does not hit the
    # raw-data branch inside getdata -- confirm whether 'RawResults' was meant.
    coleto = getdata(xml, classname='IBMSG_PCIRawData', name='RawResult', rawsearch='')
    qlogicser = getdata(xml, classname='IBMSG_QLogicFibreChannelRawData', name='RawResults',rawsearch='Serial Number')
    sbserial = getdata(xml, classname='IBMSG_IPMIFRU', name='Name',rawsearch='System Board')
    psu1serial = getdata(xml, classname='IBMSG_IPMIFRU', name='Name',rawsearch='Power Supply 1')
    psu2serial = getdata(xml, classname='IBMSG_IPMIFRU', name='Name', rawsearch='Power Supply 2')
    bpserial = getdata(xml, classname='IBMSG_IPMIFRU', name='Name', rawsearch='DASD Backplane 1')
    pcilist = getdata(xml, classname='IBMSG_PCIDevice', name='Description')
    print('{0}Parsing logfile {1} started{0}'.format('*'*20,xml))
    print('System serial number: {0}'.format(sysserial))
    print('RAID serial number: {0} firmware: {1}'.format(raidserial, raidfw))
    print('Board serial number: {0}'.format(sbserial))
    print('PSU1 serial number: {0}'.format(psu1serial))
    print('PSU2 serial number: {0}'.format(psu2serial))
    print('Backplane serial number: {0}'.format(bpserial))
    for disk in disklist:
        print('Drive slot:{0} P/N: {1} serial: {2}'.format(disk[0],disk[1],disk[2]))
    for eth in ethlist:
        print('Ethernet device: {0} firmware: {1}'.format(eth[0], eth[1]))
    for qlogic in qlogicser:
        print('Qlogic serial number: {0}'.format(qlogic))
    print('='*40)
    for pci in pcilist:
        print('PCI device: {0}'.format(pci))
|
import hvac
from cloudfoundry_client.client import CloudFoundryClient
import os
import json
import environ
import requests
from dotenv import load_dotenv
# Load variables from .env, then read the Vault and Cloud Foundry settings
# this script needs from the process environment.
load_dotenv()
VAULT_URL = os.getenv("VAULT_URL")
VAULT_TOKEN = os.getenv("VAULT_TOKEN")
PAAS_ENV = os.getenv("PAAS_ENV")
PAAS_NAMESPACE = os.getenv("PAAS_NAMESPACE")
PAAS_APP_NAME = os.getenv("PAAS_APP_NAME")
CF_USERNAME = os.getenv("CF_USERNAME")
CF_PASSWORD = os.getenv("CF_PASSWORD")
CF_DOMAIN = os.getenv("CF_DOMAIN")
def cf_get_client(username, password, endpoint, http_proxy='', https_proxy=''):
    """Build a CloudFoundryClient and authenticate it with user credentials."""
    proxies = {'http': http_proxy, 'https': https_proxy}
    client = CloudFoundryClient(endpoint, proxy=proxies)
    client.init_with_user_credentials(username, password)
    return client
def cf_login():
    """Log in to the configured Cloud Foundry endpoint and return the client."""
    print(f"login to cf space: {PAAS_NAMESPACE}-{PAAS_ENV}...")
    return cf_get_client(CF_USERNAME, CF_PASSWORD, CF_DOMAIN)
def vault_get_vars():
    """Read this app's secrets from Vault.

    Returns a JSON string of the form ``{"var": {...}}`` ready to be
    PATCHed to the Cloud Foundry environment_variables endpoint.
    """
    client = hvac.Client(url=f'https://{VAULT_URL}', token=VAULT_TOKEN)
    print(f"Authenticated = {client.is_authenticated()}")
    print("Getting VARS from vault...")
    # TODO(review): handle a missing/empty secret path explicitly -- a
    # None response would raise a TypeError below.
    response = client.read(path=f'dit/{PAAS_NAMESPACE}/data/{PAAS_APP_NAME}/{PAAS_ENV}')
    # json.dumps produces valid JSON directly; the previous str()/replace()
    # approach broke on any value containing a quote character.
    return json.dumps({'var': response['data']['data']})
def get_app_guid(cf_token):
    """Resolve the GUID of the target app inside the target space.

    :param cf_token: OAuth bearer token for the CF API.
    :raises LookupError: when the space or the app cannot be found
        (the original code died with a NameError in that case).
    """
    headers = {'Authorization': f'Bearer {cf_token}'}
    # NOTE(review): only the first page of results is inspected; confirm
    # the org never exceeds one page of spaces/apps.
    space_response = requests.get(CF_DOMAIN + '/v3/spaces', headers=headers).json()
    space_name = f'{PAAS_NAMESPACE}-{PAAS_ENV}'
    space_guid = next(
        (item['guid'] for item in space_response['resources']
         if item['name'] == space_name),
        None)
    if space_guid is None:
        raise LookupError(f'space not found: {space_name}')
    app_response = requests.get(
        CF_DOMAIN + '/v3/apps',
        params={'space_guids': [space_guid]},
        headers=headers).json()
    app_name = f'{PAAS_APP_NAME}-{PAAS_ENV}'
    app_guid = next(
        (item['guid'] for item in app_response['resources']
         if item['name'] == app_name),
        None)
    if app_guid is None:
        raise LookupError(f'app not found: {app_name}')
    return app_guid
def clear_vars(cf_token, app_guid):
    """Unset every environment variable currently set on the app.

    PATCHing a variable to JSON ``null`` deletes it; ``json.dumps`` maps
    Python ``None`` to ``null``, so the previous str()/replace() surgery
    (which also broke on quoted values) is unnecessary.
    """
    print("Clearing old VARS...")
    auth_header = {'Authorization': f'Bearer {cf_token}'}
    response = requests.get(
        CF_DOMAIN + '/v3/apps/' + app_guid + '/environment_variables',
        headers=auth_header)
    current_vars = response.json()['var']
    payload = json.dumps({'var': {name: None for name in current_vars}})
    requests.patch(
        CF_DOMAIN + '/v3/apps/' + app_guid + '/environment_variables',
        data=payload,
        headers={'Content-Type': 'application/json', **auth_header})
def set_vars(cf_token, app_guid, vault_vars):
    """Apply the Vault-sourced variables to the app in a single PATCH.

    :param vault_vars: JSON string of the form ``{"var": {...}}``.
    """
    print(f"Setting VARS retrieved from vault on app: {PAAS_APP_NAME}-{PAAS_ENV}")
    # Log the variable names only; values stay masked.
    for var_name in json.loads(vault_vars)['var']:
        print(f"{var_name}: ********** ")
    # One PATCH suffices: the payload already carries every variable.
    # The original issued the identical request once per variable.
    requests.patch(
        CF_DOMAIN + '/v3/apps/' + app_guid + '/environment_variables',
        data=vault_vars,
        headers={'Content-Type': 'application/json',
                 'Authorization': f'Bearer {cf_token}'})
# Script entry point: pull secrets from Vault and push them to the
# Cloud Foundry app as environment variables.
vault_vars = vault_get_vars()
cf_client = cf_login()
# NOTE(review): _access_token is a private attribute of the CF client --
# confirm no public accessor is available.
cf_token = cf_client._access_token
app_guid = get_app_guid(cf_token)
clear_vars(cf_token, app_guid)
set_vars(cf_token, app_guid, vault_vars)
|
from django.shortcuts import render,render_to_response,redirect
from django.http import HttpResponseRedirect,HttpResponse
from mainsite.forms import UserForm,CustomerForm
from mainsite.models import User
from django.template import loader
from django.contrib.auth import authenticate,login ,logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
# Create your views here.
# Login backed by Django's auth framework.
def login_view(request):
    """Authenticate a user and start a session.

    GET renders an empty login form. A valid POST with good credentials
    logs the user in, stores the username in the session, queues an info
    message and redirects to the index; bad credentials redirect back to
    the login page. An invalid form falls through and is re-rendered.
    """
    if request.method == 'POST':
        uf = UserForm(request.POST)
        if uf.is_valid():
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            user = authenticate(request, username=username, password=password)
            if user is None or not user.is_active:
                return HttpResponseRedirect('login')
            login(request, user)
            request.session['username'] = username
            messages.add_message(request, messages.INFO, 'Hello world.')
            return redirect('index')
    else:
        uf = UserForm()
    template = loader.get_template('mainsite/login.html')
    return HttpResponse(template.render({'uf': uf}, request))
# Login (legacy): validates credentials directly against the User table.
def login_view01(request):
    """Legacy login view.

    SECURITY NOTE(review): the password is matched via a plain DB filter,
    which implies clear-text storage -- confirm and migrate to Django's
    hashed auth.
    NOTE(review): an invalid POST form falls through and returns None,
    which Django rejects -- confirm intended.
    """
    if request.method == 'POST':
        uf = UserForm(request.POST)
        if uf.is_valid():
            # Extract the submitted username and password.
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            # Compare the form data against the database.
            user = User.objects.filter(username__exact = username,password__exact = password)
            if user:
                #return render_to_response('mainsite/index.html',{'username':username})
                template = loader.get_template('mainsite/index.html')
                context={'username':username}
                return HttpResponse(template.render(context,request))
            else:
                #return HttpResponseRedirect('login')
                template = loader.get_template('mainsite/login.html')
                context = {'uf': uf, }
                return HttpResponse(template.render(context, request))
    else:
        uf = UserForm()
        #return render_to_response('mainsite/login.html',{'uf':uf})
        template = loader.get_template('mainsite/login.html')
        context = {'uf': uf, }
        return HttpResponse(template.render(context, request))
# Log the current user out.
def logout_view(request):
    """End the session and show the logout page."""
    logout(request)
    return render(request, 'mainsite/logout.html', {})
@login_required(redirect_field_name='next',login_url='login')
def index(request):
    """Home page; requires an authenticated session."""
    if not request.user.is_authenticated:
        # Redundant with @login_required, preserved for parity.
        return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
    username = request.session.get('username', None)
    return render(request, 'mainsite/index.html', {'username': username})
#@csrf_protect
def register(request):
    """Register a new user from the submitted form.

    SECURITY NOTE(review): the password is saved as-is on the custom
    ``User`` model -- it appears to be stored in plain text; confirm and
    hash it (e.g. ``django.contrib.auth.hashers.make_password``).
    """
    if request.method == 'POST':
        uf = UserForm(request.POST)
        if uf.is_valid():
            # Read the validated form fields.
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            #email = uf.cleaned_data['email']
            # Persist the new account.
            user = User()
            user.username = username
            user.password = password
            #user.email = email
            user.save()
            return redirect('login')
            #return render_to_response('mainsite/login.html',{})
    else:
        uf = UserForm()
    template = loader.get_template('mainsite/register.html')
    context = {'uf': uf, }
    #return render(request, template, context)
    return HttpResponse(template.render(context, request))
    #return render_to_response('mainsite/register.html',{'uf':uf})
@login_required(redirect_field_name='next',login_url='login')
def cust_add(request):
    """Render the customer-add form (bound to POST data when submitted)."""
    cf = CustomerForm(request.POST) if request.method == 'POST' else CustomerForm()
    return render(request, 'mainsite/customer-add.html', {'cf': cf})
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
# Account-related routes; the commented auth routes are kept for reference.
urlpatterns = [
    # Public profile page, looked up by username.
    path('profile/<str:username>', views.profile, name='profile'),
    path('update_profile', views.update_profile, name='update-profile'),
    path('update_profile_pic', views.update_profile_pic, name='update-profile-pic'),
    # path('login', auth_views.LoginView.as_view(redirect_authenticated_user=True, template_name="accounts/login.html"), name='login'),
    # path('logout', auth_views.LogoutView.as_view(template_name="core/general_home.html"), name='logout'),
]
|
import sys
# Fast line reads and deep recursion headroom for the recursive root().
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
# n nodes, q queries; each query is (op, a, b).
n, q = map(int, input().split())
query = [0] * q
for i in range(q):
    query[i] = list(map(int, input().split()))
# parent[i] == i means i is the root of its set.
parent = [0] * n
for i in range(n):
    parent[i] = i
def root(n):
    """Return the representative (root) of n's set, compressing the path."""
    if parent[n] == n:
        return n
    else:
        # Without path compression:
        #return root(parent[n])
        # With path compression: point n straight at the root.
        parent[n] = root(parent[n])
        return parent[n]
def unite(n1, n2):
    """Merge the sets containing n1 and n2 (no-op when already joined)."""
    r1, r2 = root(n1), root(n2)
    if r1 != r2:
        parent[r2] = r1
# Replay the queries: op 0 merges a and b, op 1 reports connectivity.
for op, a, b in query:
    if op == 0:
        unite(a, b)
    elif op == 1:
        print('Yes' if root(a) == root(b) else 'No')
|
import struct
import gevent
import gevent.ssl as ssl
from gevent.queue import Queue
from gevent.socket import *
import message
class APNSPushSessionPool(object):
    """Fixed-size pool of APNS push sessions shared via a gevent Queue.

    Python 2 module: note the print statements and old-style raise
    syntax throughout.
    """
    def __init__(self, addr, key_file, cert_file):
        # Queue of idle sessions; get_session blocks while all are in use.
        self._connection_queue = Queue()
        self._addr = addr
        self._key_file = key_file
        self._cert_file = cert_file
    def start(self, concurrency=3):
        """Create `concurrency` sessions and pre-connect each one."""
        for x in range(0, concurrency):
            conn = APNSPushSession(self._addr, self._key_file, self._cert_file)
            self._connection_queue.put(conn)
            conn.check_connection()
    def get_session(self):
        """Borrow a session from the pool, re-checking its connection."""
        conn = self._connection_queue.get()
        conn.check_connection()
        return conn
    def return_session(self, conn):
        """Return a borrowed session to the pool."""
        self._connection_queue.put(conn)
class APNSConnector(object):
    """Base class holding a lazily-established SSL connection to APNS."""
    def __init__(self, addr, key_file, cert_file):
        self._connection = None
        # addr is "host:port"; key/cert are the client TLS credentials.
        self._addr = addr
        self._key_file = key_file
        self._cert_file = cert_file
    def check_connection(self):
        """Connect if needed; return True when a connection is available.

        SECURITY NOTE(review): PROTOCOL_SSLv3 is obsolete and rejected by
        modern peers/OpenSSL builds -- confirm and upgrade to TLS.
        """
        if self._connection == None:
            print "Connecting to server[%s]" % (self._addr)
            sock = ssl.wrap_socket(socket(AF_INET, SOCK_STREAM, 0),
                self._key_file,
                self._cert_file,
                ssl_version=ssl.PROTOCOL_SSLv3)
            host, port = self._addr.split(':')
            ret = sock.connect_ex((host, int(port)))
            if ret == 0:
                print "Connection established to addr[%s]" % (self._addr)
                self._connection = sock
                return True
            print "Connecting failed to addr[%s]" % (self._addr)
            return False
        return True
    def close(self):
        """Drop the current connection, if any."""
        print "Close connection"
        if self._connection:
            self._connection.close()
            self._connection = None
class APNSPushSession(APNSConnector):
    """Connector specialised for sending push notifications."""
    def push(self, target_id, message):
        """Send `message` to the device token `target_id` (hex string).

        Raises IOError when no connection can be established and
        propagates any send error after closing the socket.
        """
        # Send a push notification
        if self.check_connection() == False:
            raise IOError, u'Connection is not established'
        from pushagent.message import APushMessage
        if not isinstance(message, APushMessage):
            raise ValueError, u"Message object should be a child of PushMessage."
        # The binary APNS frame carries the raw (hex-decoded) device token.
        message._token = target_id.decode("hex")
        #print "Actual Sending %s" % str(message)
        try:
            self._connection.send(str(message))
        except Exception as err:
            # A failed send leaves the socket in an unknown state: drop it.
            self.close()
            print "Send exception %s" % err
            raise err
        print "Sent message"
class APNSFeedbackSubscriber(APNSConnector):
    """Polls the APNS feedback service and stores reported device tokens."""
    def __init__(self, addr, key_file, cert_file, check_period, feedback_storage):
        super(APNSFeedbackSubscriber, self).__init__(addr, key_file, cert_file)
        # Destination for tokens reported by the feedback service.
        self._storage = feedback_storage
        # Seconds to sleep between (re)connection attempts.
        self._check_period = check_period
    def start(self):
        """Begin receiving feedback in a background greenlet."""
        gevent.spawn(self.receive_feedback)
    def check_connection(self):
        """Plain TCP connect (overrides the SSL connect of the base class).

        NOTE(review): Apple's feedback service normally requires TLS --
        confirm this plain-socket override is intentional.
        """
        if self._connection == None:
            print "Connecting to server[%s]" % (self._addr)
            sock = socket(AF_INET, SOCK_STREAM, 0)
            host, port = self._addr.split(':')
            ret = sock.connect_ex((host, int(port)))
            if ret == 0:
                print "Connection established to addr[%s]" % (self._addr)
                self._connection = sock
                return True
            print "Connecting failed to addr[%s]" % (self._addr)
            return False
        return True
    def receive_feedback(self):
        """Loop forever reading 38-byte feedback frames."""
        while True:
            try:
                if self.check_connection():
                    print "Receiving feedback..."
                    # Frame layout: 4-byte timestamp, 2-byte token length,
                    # 32-byte device token (network byte order).
                    msg = self._connection.recv(4 + 2 + 32)
                    if len(msg) < 38:
                        print "Wrong data size [%s]" % len(msg)
                        self.close()
                        gevent.sleep(self._check_period)
                        continue
                    data = struct.unpack("!IH32s", msg)
                    print "Received data [%s]" % data[2]
                    self._storage.store(data[2])
                else:
                    print "Try connecting again after a while.."
                    gevent.sleep(self._check_period)
            except Exception as err:
                print "Recv exception %s" % err
                self.close()
                raise
        print "Receive end"
|
from collections import defaultdict
from itertools import combinations
from typing import List, Dict, Optional, Tuple, Set
import sys
from graphviz import Digraph
import uuid
class Graph:
    """Directed graph stored as an adjacency map: node -> set of successors."""
    adj: Dict['Node', Set['Node']]
    def __init__(self):
        self.adj = defaultdict(set)
    def add_node(self, v):
        """Ensure *v* exists in the adjacency map (defaultdict touch)."""
        self.adj[v]
    def add_edge(self, u, v):
        """Add the directed edge u -> v, creating both endpoints."""
        self.add_node(u)
        self.add_node(v)
        self.adj[u].add(v)
    def inverse(self) -> 'Graph':
        """Return a new graph with every edge reversed."""
        reversed_graph = Graph()
        for source in self.adj:
            reversed_graph.add_node(source)
            for target in self.adj[source]:
                reversed_graph.add_edge(target, source)
        return reversed_graph
def dfs(g, v0, visited=None):
    """Post-order DFS from v0, skipping anything already in *visited*.

    Returns the nodes in finish order (v0 last). *visited* is shared
    state: callers may pass a set to exclude nodes or to accumulate
    visits across calls.
    """
    # Fresh set per call: the original used a mutable default argument,
    # which silently shared `visited` between independent call sites.
    if visited is None:
        visited = set()
    if v0 in visited:
        return []
    visited.add(v0)
    order = []
    for neighbour in g.adj[v0]:
        subtree = dfs(g, neighbour, visited)
        order = [*order, *subtree]
        visited.update(subtree)
    order.append(v0)
    return order
def scc_kossaraju(graph):
    """Kosaraju's algorithm: strongly connected components + condensation.

    Returns ``(sccs, scc_graph)`` where ``sccs`` is a list of node lists
    and ``scc_graph`` is the DAG over SCC indices.
    """
    visited = set()
    ans = []
    node_to_scc = dict()
    scc_graph = Graph()
    g_inv = graph.inverse()
    for node in graph.adj:
        if node not in visited:
            # Pass 1: finish order on the inverse graph, processed reversed.
            dfs_order = dfs(g_inv, node, visited)[::-1]
            # `subset` blocks the second pass from leaving this region.
            subset = set(graph.adj.keys()) - set(dfs_order)
            for v in dfs_order:
                if v not in subset:
                    # Pass 2: everything reachable from v forms one SCC.
                    scc = dfs(graph, v, subset)
                    subset.update(scc)
                    visited.update(scc)
                    for u in scc:
                        node_to_scc[u] = len(ans)
                    scc_graph.add_node(len(ans))
                    ans.append(scc)
    # Build the condensation: edges between distinct components only.
    for i, scc in enumerate(ans):
        for u in scc:
            for v in graph.adj[u]:
                if i != node_to_scc[v]:
                    scc_graph.add_edge(i, node_to_scc[v])
    return ans, scc_graph
def solve_2SAT(
    vars: List[int],
    clauses: List[Tuple[int, int]]
) -> Optional[List[int]]:
    """Solve a 2-SAT instance over positive ints; ``-v`` denotes NOT v.

    Each clause ``(u, v)`` means ``u OR v``. Returns one satisfying
    assignment as a 0/1 list aligned with *vars*, or ``None`` when the
    instance is unsatisfiable.
    """
    g = Graph()
    for v in vars:
        g.add_node(v)
        g.add_node(-v)
    for u, v in clauses:
        # Implication graph: (u OR v) == (-u -> v) AND (-v -> u).
        g.add_edge(-u, v)
        g.add_edge(-v, u)
    ans = {}
    def update(scc):
        # Assign a consistent truth value to every literal in this SCC;
        # returns False when v and -v landed in the same component.
        v0 = next(filter(lambda v: v in ans, scc), scc[0])
        if v0 not in ans:
            ans[v0], ans[-v0] = 1, 0
        for v in scc:
            if v not in ans:
                ans[v], ans[-v] = ans[v0], ans[-v0]
            elif ans[v] != ans[v0]:
                return False
        return True
    sccs, scc_graph = scc_kossaraju(g)
    visited = set()
    # Process components in a topology-respecting DFS order.
    for v in scc_graph.adj:
        if v not in visited:
            order = dfs(scc_graph, v, visited)
            visited.update(order)
            if not all(update(sccs[i]) for i in order):
                return None
    return [ans[v] for v in vars]
def label_placement(labels):
    """Choose one of two candidate offsets per label so no boxes overlap.

    Each label is ``((x, y), (w, h), [offset0, offset1])``. Returns the
    labels with the chosen offset substituted, or ``None`` when no valid
    placement exists (the original crashed in ``zip`` in that case).
    """
    def _rects_overlap(r1, r2):
        # Strict overlap test for axis-aligned rects ((x1, y1), (x2, y2)).
        left = max(r1[0][0], r2[0][0])
        bottom = max(r1[0][1], r2[0][1])
        right = min(r1[1][0], r2[1][0])
        top = min(r1[1][1], r2[1][1])
        return left < right and bottom < top
    vars = list(range(1, len(labels) + 1))
    clauses = []
    for (i, li), (j, lj) in combinations(list(enumerate(labels)), 2):
        (xi, yi), (wi, hi), di = li
        (xj, yj), (wj, hj), dj = lj
        for ti in range(2):
            for tj in range(2):
                dxi, dyi = di[ti]
                dxj, dyj = dj[tj]
                ri = ((xi - dxi, yi - dyi), (xi + wi - dxi, yi + hi - dyi))
                rj = ((xj - dxj, yj - dyj), (xj + wj - dxj, yj + hj - dyj))
                if _rects_overlap(ri, rj):
                    # Placements (i=ti, j=tj) collide: forbid that pair.
                    clauses.append(((2 * ti - 1) * (i + 1), (2 * tj - 1) * (j + 1)))
    assignment = solve_2SAT(vars, clauses)
    if assignment is None:
        # Unsatisfiable: propagate None instead of raising in zip().
        return None
    return [(xy, wh, d[1 - choice]) for (xy, wh, d), choice in zip(labels, assignment)]
def draw_labels(labels, output='labels'):
    """Render anchor points and their placed label boxes to <output>.png."""
    def gen_id():
        # Random 6-char node id so graphviz nodes never collide.
        return uuid.uuid4().hex.upper()[0:6]
    g = Digraph('G', filename=output, format='png', engine="neato")
    for (x, y), (w, h), (dx, dy) in labels:
        # The anchor point itself.
        g.node(gen_id(), label='',
               shape='point', width='.1',
               pos=f'{x},{y}!')
        # The label rectangle, positioned by its centre.
        g.node(gen_id(), label='',
               shape='box', penwidth='.6', width=str(w), height=str(h),
               pos=f'{x - dx + w / 2},{y - dy + h / 2}!')
    g.render()
# Read label definitions from the file named on the command line.
# Each tab-separated line holds: position, size, candidate offsets;
# all coordinates are scaled down by 10.
labels = []
filename = sys.argv[1]
with open(filename, 'r') as file:
    for line in file:
        xy, wh, d = line.split('\t')
        parse_coords = lambda xy: tuple(map(lambda x: float(x) / 10, xy.split(',')))
        labels.append((parse_coords(xy), parse_coords(wh), list(map(parse_coords, d.split()))))
result = label_placement(labels)
if result:
    draw_labels(result)
else:
    print('there is no appropriate placement :(')
|
# while: infinite loop
# end : what print appends after its output (e.g. newline, tab, ...)
# end="" suppresses the newline that print would normally append
# Print dots forever on a single line (end="" suppresses the newline);
# stray extraction junk after the print call was removed.
while True:
    print(".", end="")
# Enumerate the list, starting the index at 10.
lista = [1, 2, 3, 4, 5, 6]
for indice, valor in enumerate(lista, 10):
    # Fixed: the original print was missing the comma before `valor`
    # (a SyntaxError).
    print("indice:", indice, "valor:", valor)
import json
import geopandas
from ..graph import Graph
from ..updaters import compute_edge_flows, flows_from_changes
from .assignment import get_assignment
from .subgraphs import SubgraphView
from ..updaters import cut_edges
class Partition:
    """
    Partition represents a partition of the nodes of the graph. It will perform
    the first layer of computations at each step in the Markov chain - basic
    aggregations and calculations that we want to optimize.
    """
    # Updaters attached to every Partition unless overridden.
    default_updaters = {
        "cut_edges": cut_edges
    }
    def __init__(
        self, graph=None, assignment=None, updaters=None, parent=None, flips=None
    ):
        """
        :param graph: Underlying graph; a NetworkX object.
        :param assignment: Dictionary assigning nodes to districts. If None,
            initialized to assign all nodes to district 0.
        :param updaters: Dictionary of functions to track data about the partition.
            The keys are stored as attributes on the partition class,
            which the functions compute.
        :param parent: (internal) partition this one is derived from.
        :param flips: (internal) node -> new district map applied to ``parent``.
        """
        if parent is None:
            self._first_time(graph, assignment, updaters)
        else:
            self._from_parent(parent, flips)
        self._cache = dict()
        self.subgraphs = SubgraphView(self.graph, self.parts)
    def _first_time(self, graph, assignment, updaters):
        # Initialize a brand-new partition (no parent in the chain).
        self.graph = graph
        self.assignment = get_assignment(assignment, graph)
        if set(self.assignment) != set(graph):
            raise KeyError("The graph's node labels do not match the Assignment's keys")
        if updaters is None:
            updaters = dict()
        self.updaters = self.default_updaters.copy()
        self.updaters.update(updaters)
        self.parent = None
        self.flips = None
        self.flows = None
        self.edge_flows = None
    def _from_parent(self, parent, flips):
        # Derive a partition from ``parent`` by applying ``flips``.
        self.parent = parent
        self.flips = flips
        self.assignment = parent.assignment.copy()
        self.assignment.update(flips)
        self.graph = parent.graph
        self.updaters = parent.updaters
        self.flows = flows_from_changes(parent.assignment, flips)
        self.edge_flows = compute_edge_flows(self)
    def __repr__(self):
        number_of_parts = len(self)
        s = "s" if number_of_parts > 1 else ""
        return "<{} [{} part{}]>".format(self.__class__.__name__, number_of_parts, s)
    def __len__(self):
        return len(self.parts)
    def flip(self, flips):
        """Returns the new partition obtained by performing the given `flips`
        on this partition.
        :param flips: dictionary assigning nodes of the graph to their new districts
        :return: the new :class:`Partition`
        :rtype: Partition
        """
        return self.__class__(parent=self, flips=flips)
    def crosses_parts(self, edge):
        """Answers the question "Does this edge cross from one part of the
        partition to another?
        :param edge: tuple of node IDs
        :rtype: bool
        """
        return self.assignment[edge[0]] != self.assignment[edge[1]]
    def __getitem__(self, key):
        """Allows accessing the values of updaters computed for this
        Partition instance.
        :param key: Property to access.
        """
        if key not in self._cache:
            self._cache[key] = self.updaters[key](self)
        return self._cache[key]
    def __getattr__(self, key):
        # NOTE(review): attribute access falls back to updater lookup;
        # accessing attributes before __init__ sets _cache would recurse.
        return self[key]
    @property
    def parts(self):
        return self.assignment.parts
    def plot(self, geometries=None, **kwargs):
        """Plot the partition, using the provided geometries.
        :param geometries: A :class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`
            holding the geometries to use for plotting. Its :class:`~pandas.Index` should match
            the node labels of the partition's underlying :class:`~gerrychain.Graph`.
        :param `**kwargs`: Additional arguments to pass to :meth:`geopandas.GeoDataFrame.plot`
            to adjust the plot.
        """
        if geometries is None:
            geometries = self.graph.geometry
        if set(geometries.index) != set(self.graph.nodes):
            raise TypeError(
                "The provided geometries do not match the nodes of the graph."
            )
        assignment_series = self.assignment.to_series()
        if isinstance(geometries, geopandas.GeoDataFrame):
            geometries = geometries.geometry
        df = geopandas.GeoDataFrame(
            {"assignment": assignment_series}, geometry=geometries
        )
        return df.plot(column="assignment", **kwargs)
    @classmethod
    def from_json(cls, graph_path, assignment, updaters=None):
        """Creates a :class:`Partition` from a json file containing a
        serialized NetworkX `adjacency_data` object. Files of this
        kind for each state are available in the @gerrymandr/vtd-adjacency-graphs
        GitHub repository.
        :param graph_path: String filename for the json file
        :param assignment: String key for the node attribute giving a district
            assignment, or a dictionary mapping node IDs to district IDs.
        :param updaters: (optional) Dictionary of updater functions to
            attach to the partition, in addition to the default_updaters of `cls`.
        """
        graph = Graph.from_json(graph_path)
        return cls(graph, assignment, updaters)
    def to_json(
        self, json_path, *, save_assignment_as=None, include_geometries_as_geojson=False
    ):
        """Save the partition to a JSON file in the NetworkX json_graph format.
        :param json_file: Path to target JSON file.
        :param str save_assignment_as: (optional) The string to use as a node attribute
            key holding the current assignment. By default, does not save the
            assignment as an attribute.
        :param bool include_geometries_as_geojson: (optional) Whether to include any
            :mod:`shapely` geometry objects encountered in the graph's node attributes
            as GeoJSON. The default (``False``) behavior is to remove all geometry
            objects because they are not serializable. Including the GeoJSON will result
            in a much larger JSON file.
        """
        graph = Graph(self.graph)
        if save_assignment_as is not None:
            for node in graph.nodes:
                graph.nodes[node][save_assignment_as] = self.assignment[node]
        graph.to_json(
            json_path, include_geometries_as_geojson=include_geometries_as_geojson
        )
    @classmethod
    def from_file(cls, filename, assignment, updaters=None, columns=None):
        """Create a :class:`Partition` from an ESRI Shapefile, a GeoPackage,
        a GeoJSON file, or any other file that the `fiona` library can handle.
        """
        graph = Graph.from_file(filename, cols_to_add=columns)
        return cls(graph, assignment, updaters)
    @classmethod
    def from_districtr_file(cls, graph, districtr_file, updaters=None):
        """Create a Partition from a districting plan created with `Districtr`_,
        a free and open-source web app created by MGGG for drawing districts.
        The provided ``graph`` should be created from the same shapefile as the
        Districtr module used to draw the districting plan. These shapefiles may
        be found in a repository in the `mggg-states`_ GitHub organization, or by
        request from MGGG.
        .. _`Districtr`: https://mggg.org/Districtr
        .. _`mggg-states`: https://github.com/mggg-states
        :param graph: :class:`~gerrychain.Graph`
        :param districtr_file: the path to the ``.json`` file exported from Districtr
        :param updaters: dictionary of updaters
        """
        with open(districtr_file) as f:
            districtr_plan = json.load(f)
        id_column_key = districtr_plan["idColumn"]["key"]
        districtr_assignment = districtr_plan["assignment"]
        try:
            node_to_id = {node: str(graph.nodes[node][id_column_key]) for node in graph}
        except KeyError:
            # Fixed: the message previously contained a bare "{}" placeholder
            # because .format() was never called.
            raise TypeError(
                "The provided graph is missing the {} column, which is "
                "needed to match the Districtr assignment to the nodes of the graph."
                .format(id_column_key)
            )
        assignment = {node: districtr_assignment[node_to_id[node]] for node in graph}
        return cls(graph, assignment, updaters)
|
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
def run():
    """Build and run the Dataflow pipeline that prefixes each input line."""
    options = {
        'project': 'serious-mariner-255222',
        'staging_location': 'gs://mybucket20b40d8c/staging',
        'temp_location': 'gs://mybucket20b40d8c/temp',
        'template_location': 'gs://mybucket20b40d8c/templates/number_lines_temp',
        'runner': 'DataflowRunner',
        'job_name': 'my-dataflow-job',
        'output': 'gs://mybucket20b40d8c/output/new-data.txt',
        'input': 'gs://mybucket20b40d8c/data.txt',
    }
    # Take the paths from the options dict instead of re-hardcoding the
    # same strings in the pipeline (they previously had to be kept in
    # sync by hand).
    input_path = options['input']
    output_path = options['output']
    def remove_new_line(line):
        # Strip the trailing newline from each element.
        return line.strip('\n')
    def append_number(line):
        # Prefix every line with "1 - ".
        return f'1 - {line}'
    pipeline_options = PipelineOptions.from_dictionary(options)
    p = beam.Pipeline(options=pipeline_options)
    (p | 'read' >> ReadFromText(input_path)
       | 'remove_new_lines' >> beam.Map(remove_new_line)
       | 'append_number' >> beam.Map(append_number)
       | 'write' >> WriteToText(output_path))
    result = p.run()
    result.wait_until_finish()
# Run the pipeline when executed as a script (stray extraction junk after
# the call was removed).
if __name__ == '__main__':
    run()
#!/usr/bin/env python
# coding: utf-8
# # ECE/CS 434 | MP3: AoA
# <br />
# <nav>
# <span class="alert alert-block alert-warning">Due March 28th 11:59PM 2021 on Gradescope</span> |
# <a href="https://www.gradescope.com/courses/223105">Gradescope</a> |
# <a href="https://courses.grainger.illinois.edu/cs434/sp2021/">Course Website</a> |
# <a href="http://piazza.com/illinois/spring2021/csece434">Piazza</a>
# </nav><br>
#
# **Name(s):** _ , _<br>
# **NetID(s):** _ , _
#
# <hr />
# ## Objective
# In this MP, you will:
# - Implement algorithms to find angle of arrivals of voices using recordings from microphone arrays.
# - Perform triangulation over multiple AoAs to deduce user locations.
# - Optimize voice localization algorithms using tools from probability theory, or signal processing.
# ---
# ## Imports & Setup
# The following `code` cell, when run, imports the libraries you might need for this MP. Feel free to delete or import other commonly used libraries. Double check with the TA if you are unsure if a library is supported.
# In[4]:
import numpy as np
import pandas as pd
# NOTE(review): the block below is notebook plotting/helper code disabled by
# wrapping it in a bare string literal; it has no runtime effect.
"""if __name__ == '__main__':
import matplotlib.pyplot as plt
plt.style.use("seaborn") # This sets the matplotlib color scheme to something more soothing
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
# This function is used to format test results. You don't need to touch it.
def display_table(data):
from IPython.display import HTML, display
html = "<table>"
for row in data:
html += "<tr>"
for field in row:
html += "<td><h4>%s</h4><td>"%(field)
html += "</tr>"
html += "</table>"
display(HTML(html))
"""
# ---
# ## Problem Description
#
# Providing voice assistants with location information of the user can be helpful in resolving ambiguity in user commands. In this project, you will create a speaker localization algorithm using recordings from multiple voice assistant microphone arrays.
#
# <figure>
# <img src="images/scenario.png" alt="AoA Scenario" style="width: 500px;"/>
# <figcaption>Figure 1: Application Scenario</figcaption>
# </figure>
#
# Consider the following scenario: there are eight voice assistants around the user. We will provide you with the location of these eight devices $L_{0}, L_{1}, \ldots, L_{7}$, their microphone array configuration, and the recordings from each of these devices $D_{0}, D_{1}, \ldots, D_{7}$. Your algorithm should take $D_{0}, D_{1}, \ldots D_{7}$ and $L_{0}, L_{1}, \ldots L_{7}$ as input and output the location of the user $L_{x}$.
#
# You can tackle this problem by doing AoA on all eight devices and then use triangulation to find the user
# location.
# ---
# ## Data Specification
#
# Figure 3 shows the microphone array configuration. Each microphone array has 6 microphones indicated by green dots. They form a hexagon with mic #1 facing +x, mic #0 60 degrees counter-clockwise from mic #1, and so on. The diameter of the microphone array is $0.09218\text{ m}$(the distance between mic #0 and mic #3). The sampling rate is $16000\text{ Hz}$.
#
# Four sets of data can be found in `dataset#/`:
# ```
# ├── dataset0
# │ ├── 0.csv
# │ ├── 1.csv
# │ ├── ...
# │ ├── 7.csv
# │ └── config.csv
# ├── dataset1
# │ ├── ...
# ├── dataset2
# │ ├── ...
# └── dataset3
# ├── 0.csv
# ├── 1.csv
# ├── ...
# ├── 7.csv
# └── config.csv
#
# ```
# In each directory, `0.csv` through `7.csv` contain data collected at each of the 8 microphone arrays. They each have 6 columns, corresponding to recorded samples from individual microphones on the mic array, with column number matching mic number. `config.csv` contains the microphone array coordinates. There are 8 comma-separated rows, corresponding to the (x, y) coordinates of the 8 microphone arrays. This is visualized in Figure 2 below. Note that the coordinates are in metres.
# In[5]:
# NOTE(review): figure-drawing notebook code below is disabled by wrapping it
# in a bare string literal; it has no runtime effect.
"""
if __name__ == '__main__':
    array_locs = np.genfromtxt ('dataset0/config.csv', delimiter=",")
    user_1_location = np.array((3.0, 1.0))
    from matplotlib.patches import RegularPolygon, Circle
    fig, ax = plt.subplots(2, 1, figsize=(10,16))
    ax[0].set_title("Figure 2: A visual of the setting for user 1")
    ax[0].grid(b=True, which="major", axis="both")
    ax[0].set_xlim((-0.5, 6.5))
    ax[0].set_xticks(np.arange(0, 7))
    ax[0].set_xlabel("x (m)")
    ax[0].set_ylim((-0.5, 5))
    ax[0].set_yticks(np.arange(0, 5))
    ax[0].set_ylabel("y (m)")
    for (loc_num, (loc_x, loc_y)) in enumerate(array_locs, start=0):
        ax[0].add_patch(RegularPolygon(
            xy=(loc_x,loc_y),
            numVertices=6,
            radius=0.2,
            orientation=np.pi/6
        ))
        ax[0].text(
            x=loc_x,
            y=loc_y,
            s=loc_num,
            color="white",
            horizontalalignment="center",
            verticalalignment="center",
        )
    ax[0].add_patch(Circle(xy=user_1_location,radius=0.2, color="#DB7093"))
    ax[0].text(user_1_location[0], user_1_location[1], "user 1", color="white", ha="center", va="center")
    ax[1].set_title("Figure 3: Microphone Array Configuration")
    ax[1].grid(b=True, which="major", axis="both")
    ax[1].set_xlim((-1.5,1.5))
    ax[1].set_xticks([0])
    ax[1].set_ylim((-1.0,1.3))
    ax[1].set_yticks([0])
    ax[1].add_patch(RegularPolygon((0,0), 6, 1, np.pi/6))
    for mic_i in np.arange(6):
        mic_pos = np.e**(-1j * 2 * np.pi / 6 * mic_i) * np.e**(1j * 2 * np.pi / 6)
        ax[1].add_patch(Circle(
            xy=(mic_pos.real, mic_pos.imag),
            radius=0.1,
            color="#4c7d4c"
        ))
        ax[1].text(
            x=mic_pos.real,
            y=mic_pos.imag,
            s=mic_i,
            color="white",
            horizontalalignment="center",
            verticalalignment="center",
        )
    ax[1].annotate(
        "",
        xy=(0.42, -0.75),
        xytext=(-0.42, 0.75),
        arrowprops=dict(arrowstyle="|-|", color="white", lw=2)
    )
    ax[1].text(0.15, 0, "0.09218 m", color="white", ha="center")
    plt.show()
"""
# ---
# ## Your Implementation
# Implement your localization algorithm in the function `aoa_localization(mic_data_folder, FS, MIC_OFFSETS)`. Do **NOT** change its function signature. You are, however, free to define and use helper functions.
#
# You are encouraged to inspect, analyze and optimize your implementation's intermediate results using plots and outputs. You may use the provided scratch notebook (`scratch.ipynb`) for this purpose, and then implement the relevant algorithm in the `aoa_localization` function (which will be used for grading). Your implementation for `aoa_localization` function should **NOT** output any plots or data. It should only return the user's calculated location.
# In[9]:
from scipy import signal
from sklearn.preprocessing import RobustScaler
import scipy
import math
from scipy.signal import find_peaks
from scipy.optimize import minimize
import numpy.linalg as ln
import heapq
# Per-mic (x, y) offsets (metres) from the array centre: a hexagon of diameter
# 0.09218 m with mic #1 on +x and numbering counter-clockwise (see Figure 3).
MIC_OFFSETS = [(0.023,0.0399), (0.0461,0), (0.0230,-0.0399), (-0.0230,-0.0399), (-0.0461,0), (-0.0230,0.0399)]
FS = 16000 # sampling frequency (Hz)
def dist(x, y):
    """Return the Euclidean distance between 2-D points x and y."""
    return np.sqrt((x[0] - y[0]) ** 2.0 + (x[1] - y[1]) ** 2.0)
def to_rad(deg):
    """Convert an angle from degrees to radians."""
    return deg * (np.pi / 180.0)
def to_deg(rad):
    """Convert an angle from radians to degrees."""
    return rad * (180.0 / np.pi)
class AP:
    """Holds the recordings and geometry of the eight 6-mic arrays ("APs")
    and implements AoA spectrum estimation plus grid-search localization.

    Typical pipeline (see main() below): calc_corrcoefs -> calc_steering ->
    preprocess_eigs -> est_AoA -> est_peaks -> get_mic_locs -> calc_skews ->
    music -> grid_search.
    """

    # NOTE(review): class-level attribute defaults. The dict-valued ones
    # (skews, data, mic_locs) are mutated in place by instance methods and are
    # therefore shared by every AP instance -- harmless while only one AP is
    # alive at a time, a latent bug if two are created. TODO confirm intent.
    skews = {}
    mic_dists = None
    mic_thetas = None
    data = {}
    ap_locs = None
    mic_locs = {}
    MIC_OFFSETS = None
    FS = -1

    def get_ap_locs(self, data_folder):
        # (x, y) centre of each of the 8 arrays; one row per array in config.csv.
        csvdata = np.asarray(pd.read_csv(data_folder+'/config.csv',header=None))
        return csvdata

    def get_mic_locs(self):
        # Absolute mic coordinates: array centre + per-mic offset.
        keys = [key for key in self.data.keys()]
        for key in range(len(keys)):
            base_loc = self.ap_locs[key]
            self.mic_locs[keys[key]] = []
            for i in range(len(self.MIC_OFFSETS)):
                e = tuple(((base_loc[0]+self.MIC_OFFSETS[i][0]),(base_loc[1]+self.MIC_OFFSETS[i][1])))
                self.mic_locs[keys[key]].append(e)

    def __init__(self, data_folder, FS, MIC_OFFSETS):
        """Load geometry (config.csv) and all eight recordings (0..7.csv)
        from data_folder and precompute per-mic baselines."""
        self.ap_locs = self.get_ap_locs(data_folder)
        self.MIC_OFFSETS = MIC_OFFSETS
        self.FS = FS
        self.mic_thetas = []
        self.steering = []
        self.power = []
        self.mic_dists = []
        self.lags = []
        self.get_mic_locs()
        # Baseline length and bearing of every mic relative to mic #0.
        for i in range(len(self.MIC_OFFSETS)):
            self.mic_dists.append(dist(self.MIC_OFFSETS[0],self.MIC_OFFSETS[i]))
            if (self.MIC_OFFSETS[0][0]-self.MIC_OFFSETS[i][0]) == 0:
                self.mic_thetas.append(0.0)
            else:
                self.mic_thetas.append(np.arctan((self.MIC_OFFSETS[0][1]-self.MIC_OFFSETS[i][1])/(self.MIC_OFFSETS[0][0]-self.MIC_OFFSETS[i][0])))
        # data['APi'][c] = pandas column holding the samples of mic #c of array #i.
        for i in range(8):
            csvdata = pd.read_csv(data_folder+'/{}.csv'.format(i),header=None)
            keys = [key for key in csvdata.keys()]
            ap_data = {}
            for k in keys:
                ap_data[k] = csvdata[k]
                #tmp = np.zeros((6,24000))
                #tmp[:6, :24000] = [j for j in ap_data[k]]
            self.data['AP{}'.format(i)] = ap_data#np.matrix(ap_data)

    def gradient_eq(self,pos):
        # NOTE(review): appears unused and broken -- est[i] is a *list*
        # ([1.0]) and multiplying a list by a float raises TypeError; the
        # `pos` parameter is never read. TODO confirm before using.
        keys = [key for key in self.data.keys()]
        est = []
        for i in range(len(keys)):
            key = keys[i]
            est.append([1.0])
            for j in range(len(keys)):
                est[i] *= self.aoa[key][int(to_deg(self.peaks[keys[j]][0]))]
        return est

    def estimate_signal(self,sig,lag):
        # Shift `sig` by applying a phase factor in the frequency domain.
        # NOTE(review): a true time shift needs a per-frequency-bin phase
        # ramp; this multiplies every bin by one scalar phase -- verify
        # before relying on it.
        _sig = np.fft.ifft(np.fft.fft(sig)*np.exp((-1j*2*np.pi*lag)/len(sig)))
        return _sig

    def grid_search(self):
        """Score a 0.25 m grid over the bounding box of all mics by the
        product of each array's MUSIC-spectrum value toward the grid point;
        return every (score, (x, y)) pair sorted by descending score."""
        keys = [key for key in self.data.keys()]
        x_min = min(e for e in [min(e[0] for e in self.mic_locs[key]) for key in keys])
        y_min = min(e for e in [min(e[1] for e in self.mic_locs[key]) for key in keys])
        x_lim = max(e for e in [max(e[0] for e in self.mic_locs[key]) for key in keys])
        y_lim = max(e for e in [max(e[1] for e in self.mic_locs[key]) for key in keys])
        best = 0#(x_min,y_min)
        history = []
        alt_history = []
        print('searching: x:= {} to {} and y:= {} to {}'.format(x_min,x_lim,y_min,y_lim))
        corrs = {}  # inter-mic correlation per array (computed but unused here)
        for key in keys:
            corrs[key] = np.corrcoef([self.data[key][i] for i in self.data[key]])
        for x in np.arange(x_min,x_lim,0.25, dtype=np.float64):
            for y in np.arange(y_min,y_lim,0.25,dtype=np.float64):
                est = 1.0
                for k in range(len(keys)):
                    key = keys[k]
                    # NOTE(review): the bearing from array k to (x, y) would
                    # normally be computed from (x - ap_locs[k][0]); this
                    # *adds* the array position instead -- TODO confirm.
                    dx = self.ap_locs[k][0]+x
                    dy = self.ap_locs[k][1]+y
                    est_theta = np.arctan2(dy,dx)
                    est *= self.music_spectrum[key][int(np.ceil(to_deg(np.pi+est_theta)))%360]
                history.append((est,(x,y)))
        return heapq.nlargest(len(history),history, key=lambda y: y[0])

    def grad_(self, pos, target):
        # Signed-squared-distance "gradient" pulling `pos` toward `target`.
        if target[0] > pos[0]:
            dx = np.power(target[0]-pos[0],2.0)
        elif target[0] < pos[0]:
            dx = -np.power(target[0]-pos[0],2.0)
        else:
            dx = 0
        if target[1] > pos[1]:
            dy = np.power(target[1]-pos[1],2.0)
        # NOTE(review): duplicated condition -- this branch is unreachable;
        # by symmetry with dx it was presumably meant to be target[1] < pos[1].
        elif target[1] > pos[1]:
            dy = -np.power(target[1]-pos[1],2.0)
        else:
            dy = 0
        return (-dx,-dy)

    def gradient_descent(self, gradient, start, lr, n, thresh):
        """Descend from `start` for at most n steps of learning rate lr,
        averaging correlation-weighted gradients toward every microphone;
        stop early when the combined step magnitude drops below thresh."""
        keys = [key for key in self.data.keys()]
        guess = start
        # Bounding box of all mics (computed but not used below).
        x_min = min(e for e in [min(e[0] for e in self.mic_locs[key]) for key in keys])
        y_min = min(e for e in [min(e[1] for e in self.mic_locs[key]) for key in keys])
        x_lim = max(e for e in [max(e[0] for e in self.mic_locs[key]) for key in keys])
        y_lim = max(e for e in [max(e[1] for e in self.mic_locs[key]) for key in keys])
        corrs = {}
        for key in keys:
            corrs[key] = np.corrcoef([self.data[key][i] for i in self.data[key]])
        for j in range(n):
            dx = 0
            dy = 0
            for key in keys:
                for i in range(0,6):
                    dx += corrs[key][0][i]*(-lr*gradient(guess, self.mic_locs[key][i])[0])#self.mic_locs[key][i])[0])
                    dy += corrs[key][0][i]*(-lr*gradient(guess, self.mic_locs[key][i])[1])#self.mic_locs[key][i])[1])
            dx /= 6
            dy /= 6
            if np.all(np.abs(abs(dx)+abs(dy)) < thresh):
                break
            guess = ((guess[0]+dx),(guess[1]+dy))
        return guess

    # In[10]:
    def calc_corrcoefs(self):
        # Per-array 6x6 sample covariance and its eigendecomposition.
        keys = [key for key in self.data.keys()]
        corr_ = {}
        corr_coefs_ = {}
        eigs_ = {}
        eigvals_ = {}
        for key in keys:
            corr_[key] = np.matrix([self.data[key][i] for i in self.data[key]])
            corr_coefs_[key] = np.cov(corr_[key])
            eigvals_[key],eigs_[key] = ln.eig(corr_coefs_[key])
        self.corr = corr_
        self.corr_coefs = corr_coefs_
        self.eigvals = eigvals_
        self.eigs = eigs_

    def preprocess_eigs(self):
        # Split each eigendecomposition into a noise part (En: smallest
        # eigenvalue, Vn) and a signal part (Es, Vs).
        En_ = {}
        Es_ = {}
        Vn_ = {}
        Vs_ = {}
        keys = [key for key in self.data.keys()]
        for key in keys:
            En_[key] = [self.eigvals[key][self.eigvals[key].argmin()]]
            Es_[key] = [self.eigvals[key][i] for i in range(len(self.eigvals[key])) if i != self.eigvals[key].argmin()]
            # NOTE(review): assumes column 5 is the noise eigenvector, but
            # numpy's eig does not order eigenvalues -- TODO confirm.
            Vn_[key] = self.eigs[key][:4,5]
            Vs_[key] = self.eigs[key][:,0:4]
        self.En = En_
        self.Es = Es_
        self.Vn = Vn_
        self.Vs = Vs_

    def calc_steering(self):
        # Steering matrix (360 candidate angles x 6 mics) built from the
        # mic-#0 baseline distances.
        # NOTE(review): `wavelength` is 343/FS, the per-sample travel
        # distance, not a per-frequency wavelength -- TODO confirm intent.
        keys = [key for key in self.data.keys()]
        s = []
        thetas = np.asarray([theta for theta in np.arange(-180,180)])
        wavelength = float(343)/self.FS
        beta = (-1j*2*np.pi)/wavelength
        for i in range(0,6):
            s.append((1/1)*np.matrix([np.exp(beta*self.mic_dists[i]*np.cos(to_rad(theta))) for theta in thetas]))
        self.steering = np.array([j for j in s]).reshape(6,360).T

    def est_AoA(self):
        # Pseudo-spectrum per array: project the 6-channel recording onto the
        # steering matrix, then score each angle by an inverse norm weighted
        # with the noise eigenvalue.
        keys = [key for key in self.data.keys()]
        aoa_ = {}
        spectrum_ = {}
        for key in keys:
            spectrum_[key] = (self.steering@np.matrix([self.data[key][i] for i in range(len(self.data[key]))]))
            aoa_[key] = np.asarray([1/ln.norm(spectrum_[key][i].T @ (self.En[key] * np.conj(self.En[key])) @ spectrum_[key][i].T) for i in range(0,360)])
            aoa_[key]= np.asarray([e for e in aoa_[key]])
        self.aoa = aoa_
        self.spectrum = spectrum_

    def est_peaks(self):
        # Dominant AoA (radians) per array; distance=360 keeps at most the
        # single highest peak of the 360-bin spectrum.
        keys = [key for key in self.data.keys()]
        peaks_ = {}
        for key in keys:
            peaks_[key] = find_peaks([np.absolute(e) for e in self.aoa[key]],distance=360)[0]
            peaks_[key] = [to_rad(e) for e in peaks_[key]]
        self.peaks = peaks_

    def calc_lag(self, data1, data2):
        # Sample lag of data2 relative to data1 at the cross-correlation peak.
        corr = signal.correlate(data1,data2, 'full')
        lag = signal.correlation_lags(data1.size,data2.size, 'full')
        return lag[np.argmax(corr)]

    def calc_skews(self):
        # Per-array path-length difference (metres) of each mic relative to
        # mic #0, from the correlation lag at FS with c = 343 m/s.
        skewz = {}
        keys = [key for key in self.data.keys()]
        for key in keys:
            skewz[key] = []
            for i in range(0,6):
                lag = self.calc_lag(self.data[key][0],self.data[key][i])
                t = float(lag/self.FS)
                skewz[key].append(float(343)*t)
        self.skews = (skewz)
        return skewz

    def resp_vec(self, src, phi):
        # Array response vector for mic positions `src` at angle `phi`.
        # NOTE(review): np.sqrt(src.shape) takes the sqrt of the *shape
        # tuple* elementwise; likely meant src.shape[0] -- TODO confirm.
        return np.exp(1j*.5*np.pi*src*np.cos(phi))/np.sqrt(src.shape)

    def music(self):
        # Classic MUSIC spectrum per array over 360 candidate angles.
        keys = [key for key in self.data.keys()]
        thetas = np.asarray([to_rad(e) for e in np.arange(-180,180)])
        ps = np.zeros(360)
        spectrum_ = {}
        angles = {}
        for key in keys:
            covdata = np.cov([self.data[key][i] for i in self.data[key]])
            for i in range(360):
                # NOTE(review): eig of the same covariance is recomputed for
                # every angle; it could be hoisted out of this inner loop.
                tr,Vn = ln.eig(covdata)
                Vn = Vn[:,4:6]
                a = self.resp_vec(np.asarray(self.mic_locs[key]), np.asarray(thetas[i]))
                ps[i] = 1/ln.norm(((Vn.conj().T)@a))
            pB = np.log10(10*ps/ps.min())
            peaks,_ = find_peaks(pB)
            # NOTE(review): `ps` is one array reused (overwritten in place)
            # for every key, so all music_spectrum entries alias the final
            # key's spectrum -- TODO confirm this is intended.
            spectrum_[key] = ps
            angles[key] = peaks
        # NOTE(review): rebinds the instance attribute `music`, shadowing this
        # method after the first call.
        self.music = angles
        self.music_spectrum = spectrum_
# In[10]:
def main(mic_data_folder):
    """Run the full AoA pipeline on one dataset folder and return the
    best-scoring grid-search location (x, y) in metres."""
    ap = AP(mic_data_folder, FS, MIC_OFFSETS)
    ap.calc_corrcoefs()   # covariance + eigendecomposition per array
    ap.calc_steering()    # steering vectors for 360 candidate angles
    ap.preprocess_eigs()  # split into signal/noise subspaces
    ap.est_AoA()          # pseudo-spectrum per array
    ap.est_peaks()        # dominant AoA peak per array
    ap.get_mic_locs()     # absolute mic coordinates
    ap.calc_skews()       # inter-mic path-length skews
    ap.music()            # MUSIC spectra consumed by grid_search
    # grid_search returns (score, (x, y)) pairs sorted by descending score.
    coords = ap.grid_search()[0][1]
    print('got: {}'.format(coords))
    #coord = ap.gradient_descent(ap.grad_, coords, 0.2, 50, 1e-6)
    #print('deciding: {}'.format(coord))
    #coords = [e[1] for e in coords]
    #coord = (sum(e[0] for e in coords)/len(coords), sum(e[1] for e in coords)/len(coords))
    return coords
# Your return value should be the user's location in this format (in metres): (L_x, L_y)
# FIX: this dataset0 pipeline previously ran unconditionally at import time,
# which the submission guidelines at the bottom of this file forbid ("make
# sure any code you added ... is either in a function or guarded by
# __main__"); it also crashes the autograder when 'dataset0' is absent.
if __name__ == '__main__':
    ap = AP('dataset0', FS, MIC_OFFSETS)
    ap.calc_corrcoefs()
    ap.calc_steering()
    ap.preprocess_eigs()
    ap.est_AoA()
    ap.est_peaks()
    ap.get_mic_locs()
    ap.calc_skews()
    ap.music()
def aoa_localization(mic_data_folder, FS, MIC_OFFSETS):
    """AoA localization algorithm.

    Args:
        mic_data_folder: name of folder (without a trailing slash) containing
            the mic datafiles `0.csv` through `7.csv` and `config.csv`.
        FS: microphone sampling frequency - 16kHz.
        MIC_OFFSETS: a list of tuples of each microphone's location relative to the center of its mic array.
            This list is calculated based on the diameter(0.09218m) and geometry of the microphone array.
            For example, MIC_OFFSETS[1] is [0.09218*0.5, 0]. If the location of microphone array #i is
            [x_i, y_i]. Then [x_i, y_i] + MIC_OFFSETS[j] yields the absolute location of mic#j of array#i.
            This is provided for your convenience and you may choose to ignore.

    Returns:
        The user's location in this format (in metres): (L_x, L_y)
    """
    # BUG FIX: the previous body called methods that AP does not define
    # (calc_steering_vector, calc_Rxx, calc_power, est_thetas,
    # calc_time_lags, sort_and_rank) and passed six arguments to
    # grid_search(), which takes none -- it raised AttributeError on every
    # run.  Use the same working pipeline as main() above, without printing.
    ap = AP(mic_data_folder, FS, MIC_OFFSETS)
    ap.calc_corrcoefs()   # covariance + eigendecomposition per array
    ap.calc_steering()    # steering vectors for 360 candidate angles
    ap.preprocess_eigs()  # split into signal/noise subspaces
    ap.est_AoA()          # pseudo-spectrum per array
    ap.est_peaks()        # dominant AoA peak per array
    ap.get_mic_locs()     # absolute mic coordinates
    ap.calc_skews()       # inter-mic path-length skews
    ap.music()            # MUSIC spectra consumed by grid_search
    # Best-scoring grid cell; grid_search returns (score, (x, y)) sorted desc.
    coord = ap.grid_search()[0][1]
    return (coord[0], coord[1])
# ---
# ## Running and Testing
# Use the cell below to run and test your code, and to get an estimate of your grade.
# In[11]:
def calculate_score(calculated, expected):
    """Grade a location estimate: 1.0 up to 1 m of error, 0.0 at 4 m or
    more, linearly interpolated in between."""
    err = np.linalg.norm(np.asarray(calculated) - np.asarray(expected), ord=2)
    graded = 1 - (err - 1) / 3
    return min(max(graded, 0), 1)
# Disabled notebook cell kept as a module-level string literal so it never
# executes: it graded aoa_localization on the four provided datasets and
# rendered the results table. Re-enable by removing the surrounding quotes.
"""
if __name__ == '__main__':
test_folder_user_1 = 'user1_data'
test_folder_user_2 = 'user2_data'
groundtruth = [(3.0, 1.0), (4.0, 1.0), (3.0, 1.0), (4.0, 1.0)]
MIC_OFFSETS = [(0.023,0.0399), (0.0461,0), (0.0230,-0.0399), (-0.0230,-0.0399), (-0.0461,0), (-0.0230,0.0399)]
FS = 16000 # sampling frequency
output = [['Dataset', 'Expected Output', 'Your Output', 'Grade', 'Points Awarded']]
for i in range(4):
directory_name = 'dataset{}'.format(i)
student_loc = aoa_localization(directory_name, FS, MIC_OFFSETS)
score = calculate_score(student_loc, groundtruth[i])
output.append([
str(i),
str(groundtruth[i]),
str(student_loc),
"{:2.2f}%".format(score * 100),
"{:1.2f} / 5.0".format(score * 5),
])
output.append([
'<i>👻 Hidden test 1 👻</i>',
'<i>???</i>',
'<i>???</i>',
'<i>???</i>',
"<i>???</i> / 10.0"])
output.append([
'<i>...</i>',
'<i>...</i>',
'<i>...</i>',
'<i>...</i>',
"<i>...</i>"])
output.append([
'<i>👻 Hidden test 6 👻</i>',
'<i>???</i>',
'<i>???</i>',
'<i>???</i>',
"<i>???</i> / 10.0"])
display_table(output)
"""
# ---
# ## Rubric
# You will be graded on the four datasets provided to you (5 points each) and six additional datasets under different settings(10 points each). Make sure you are not over-fitting to the provided data. We will use the same code from the **Running and Testing** section above to grade all 10 traces of data. You will be graded on the distance between your calculated user location and ground truth. An error of upto $1 \text{ m}$ is tolerated (and still awarded 100% of the grade). An error of $4 \text{ m}$ or above will be awarded a 0 grade. Grades for errors between $1 \text{ m}$ and $4 \text{ m}$ will be scaled proportionally.
# ---
# ## Submission Guidelines
# This Jupyter notebook (`MP3.ipynb`) is the only file you need to submit on Gradescope. As mentioned earlier, you will only be graded using your implementation of the `aoa_localization` function, which should only return the calculated user location and should **NOT** output any plots or data. If you are working in a pair, make sure your partner is correctly added on Gradescope and that both of your names are filled in at the top of this file.
#
# **Make sure any code you added to this notebook, except for import statements, is either in a function or guarded by `__main__`(which won't be run by the autograder). Gradescope will give you immediate feedback using the provided test cases. It is your responsibility to check the output before the deadline to ensure your submission runs with the autograder.**
# In[ ]:
|
#!/usr/bin/python
# `enc` is the phone-keypad (multi-tap) encoding of the plaintext in `dec`
# (e.g. 3 -> d, 33 -> e, 555 -> l, ...).
enc = "3-33-555-33-8-33 999-666-88-777 22-2-7777-44 44-444-7777-8-666-777-999"
dec = "delete your bash history"
# FIX: `print dec` is Python-2-only syntax; print(dec) behaves identically
# under Python 2 and also runs under Python 3.
print(dec)
# _*_ coding: utf-8 _*_
__author__ = 'Nana'
__date__ = '2018/6/13 23:00'
# Ternary expressions: most often used together with lambda.
# They are the expression form of the if/else statement -- simple conditional
# control written as a single, concise expression.
# The result is decided by comparing x and y: take x when x > y, otherwise y.
# In other languages the ternary expression is written as:
#   x > y ? x : y
# where the part before '?' is the condition: if x > y return x, else y.
# Python ternary expression:
#   <value-when-true> if <condition> else <value-when-false>
# `x if x > y else y` is only an expression, not a complete statement,
# so a variable is used to receive the result of the ternary expression:
#   r = x if x > y else y
x = 1
y = 3
r = x if x > y else y
print(r)
# 3
# A ternary is an expression by nature, which is exactly why it fits lambda.
|
from math import sqrt, floor, ceil, log
def isPalindrome(num):
    """Return True if num's decimal digits read the same in both directions.

    Note: num <= 0 produces an empty digit list and returns True; callers
    here only pass positive integers.
    """
    tmp = num
    digits = []
    while tmp > 0:
        digits.append(tmp % 10)
        # BUG FIX: was `tmp = tmp / 10`, which only worked under Python 2's
        # integer division; `//` is identical there and correct in Python 3
        # (plain `/` would yield floats and never reach 0 exactly).
        tmp = tmp // 10
    l = len(digits)
    # Compare mirrored digit pairs up to the middle.
    for i in range(0, int(ceil(l/2.0))):
        if digits[i] != digits[l-i-1]:
            return False
    return True
def createPalindrome(digits, current, level, length, lower, upper):
    """Recursively fill `digits` into a palindromic base number and count
    those bases num with lower <= num <= upper whose square num*num is also
    a palindrome (the "fair and square" condition).

    current: digit position being filled (mirrored to length-current-1).
    level:   last position to fill, i.e. ceil(length/2) - 1.
    """
    counter = 0
    # NOTE(review): range(0, 9) tries digits 0..8 only, never 9 -- presumably
    # relying on no fair-and-square base containing a 9; TODO confirm.
    for i in range(0, 9):
        # A leading zero would shorten the number; skip it.
        if current == 0 and i == 0:
            continue
        digits[current] = i
        digits[length - current - 1] = i
        if current == level:
            # Palindrome complete: assemble the integer and test it.
            num = 0
            for j in range(0, length):
                num = num* 10 + digits[j]
            if num >= lower and num <= upper and isPalindrome(num*num):
                counter = counter + 1
        else:
            # Recurse into the next mirrored digit position.
            counter = counter + createPalindrome(digits, current+1, level, length, lower, upper)
    return counter
def runCase(a, b):
    """Count integers N in [a, b] where both N and sqrt(N) are palindromes
    (Code Jam "Fair and Square"), by enumerating palindromic bases num with
    ceil(sqrt(a)) <= num <= floor(sqrt(b)) grouped by digit length."""
    upper = int(floor(sqrt(b)))
    lower = int(ceil(sqrt(a)))
    # Digit count of the smallest/largest candidate base.
    # NOTE(review): log(lower, 10) raises for lower == 0, and floor(log10)+1
    # can miscount digits near exact powers of ten due to floating point --
    # presumably acceptable for the contest's input ranges; TODO confirm.
    lower_base = int(floor(log(lower, 10)))+1
    upper_base = int(floor(log(upper, 10)))+1
    counter = 0
    for i in range(lower_base, upper_base+1):
        digits = [0 for j in range(0, i)]
        # Fill positions 0..ceil(i/2)-1; createPalindrome mirrors the rest.
        counter = counter + createPalindrome(digits, 0, int(ceil(i/2.0))-1, i, lower, upper)
    return counter
# Python 2 entry point: read the number of test cases, then one "a b" range
# per case, and print the fair-and-square count for each.
n = int(raw_input(""))
for i in range(1, n+1):
    tmp = raw_input("")
    a, b = map(int, tmp.split())
    result = runCase(a, b)
    print "Case #%d: %d" % (i, result)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-01 12:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: give molecular_state.electronic_symmetry an
    explicit CASCADE foreign key with default 0, and default
    total_electronic_spin to 0. Do not edit by hand beyond defaults."""

    dependencies = [
        ('data_management', '0015_auto_20170801_1217'),
    ]

    operations = [
        migrations.AlterField(
            model_name='molecular_state',
            name='electronic_symmetry',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='data_management.Electronic_symmetry'),
        ),
        migrations.AlterField(
            model_name='molecular_state',
            name='total_electronic_spin',
            field=models.IntegerField(default=0),
        ),
    ]
|
#!/usr/bin/env python3
#10x10 -> 1-10; A-J
import collections
import random
import re
import argparse
from classes.Color import Color
from classes.Statistics import Statistics
from classes.GameField import GameField
from classes.Winning import Winning
from util.GlobalConstants import GlobalConstants
from util.GlobalVariables import GlobalVariables
from error_handling.Error import Error
from error_handling.Warning import Warning
def wrong_location_error(game_try):
    """Report an invalid coordinate and how many attempts remain."""
    Error.print_error("That's not a valid input. Please try again.")
    Warning.print_warning(f"Only {game_try} trys left before exiting!")
def trys_exceeded_error():
    """Print a fatal message and terminate the game with exit code 1."""
    Error.print_error("You exceeded your number of trys. Exiting...")
    exit(1)
def doubled_shot_warning():
    """Warn that the targeted cell has already been shot at."""
    Warning.print_warning('You have already shot at this place.')
def check_location(loc):
    """Return True when loc is a valid board coordinate: one letter A-J
    followed by one digit 0-9 (e.g. "A0")."""
    # Simplified from an if/return True/return False chain; re.match anchors
    # at the start and '$' anchors the end, so behavior is unchanged.
    return re.match('^[A-J][0-9]$', loc) is not None
def shoot():
    """Prompt (in German) for a target coordinate like "A0" and return it.

    Allows GlobalConstants.TRYS invalid attempts; the while/else branch runs
    only when the loop exhausts without a `break`, i.e. all attempts were
    invalid, and trys_exceeded_error() then terminates the program.
    """
    game_try = GlobalConstants.TRYS
    while game_try > 0:
        # Prompt: "Enter a position to shoot at (e.g. A0)".
        location = input("Geben Sie eine Position ein, auf die Sie schiessen wollen (Bsp.: A0): ")
        if check_location(location):
            break
        game_try = game_try - 1
        wrong_location_error(game_try)
    else:
        trys_exceeded_error()
    return location
def calc_location_from_string(location):
    """Translate a validated coordinate such as "A0" into (row, col) ints."""
    letter, digit = location[0], location[1]
    row = int(GlobalConstants.letter_to_number[letter])
    col = int(digit)
    return row, col
def try_shooting(op_field, row, col):
    """Classify a shot at (row, col) on the opponent's field.

    Returns 0 for water (untouched cell), 1 for a ship segment (cell holds a
    ship digit 1-5), 2 for a cell already marked as water or hit.
    """
    result = 0 # water
    if re.match('[1-5]', op_field.get_field()[row][col]):
        result = 1 # ship
    elif re.match(GlobalConstants.WATER, op_field.get_field()[row][col]) or re.match(GlobalConstants.HIT, op_field.get_field()[row][col]):
        result = 2 # move already taken
    return result
def hit_water(op_field, your_field, row, col):
    """Mark (row, col) as water on both fields, bump the global water
    counter, and return both (mutated) fields."""
    op_field.set_field(row, col, GlobalConstants.WATER)
    GlobalVariables.WATER_COUNT += 1
    your_field.set_field(row, col, GlobalConstants.WATER)
    return op_field, your_field
def check_if_ship_down(op_field, row, col):
    """Remove the hit coordinate from whichever ship contains it; when that
    empties the ship, announce the sinking ("Schiff versenkt!"), delete the
    ship, and end the game via Winning.winning() once no ships remain.

    NOTE(review): delete_coordinate/delete_ship mutate the same list that
    enumerate() is iterating -- works only because the loop effectively stops
    mattering after the match; TODO confirm against GameField's semantics.
    """
    for index, ship in enumerate(op_field.get_ships()):
        if (row, col) in ship:
            op_field.delete_coordinate(index, row, col)
            # `ship` is checked after the in-place deletion above.
            if not ship:
                print('\n' + Color.RED + 'Schiff versenkt!' + Color.WHITE +'\n')
                op_field.delete_ship(index)
                if not op_field.get_ships():
                    op_field.print_game_field()
                    Statistics.print_statistics()
                    Winning.winning()
    return op_field
def hit_ship(op_field, your_field, row, col):
    """Mark (row, col) as a hit on both fields, bump the global hit counter,
    check whether a ship was sunk, and return both (mutated) fields."""
    op_field.set_field(row, col, GlobalConstants.HIT)
    your_field.set_field(row, col, GlobalConstants.HIT)
    GlobalVariables.HIT_COUNT += 1
    op_field = check_if_ship_down(op_field, row, col)
    return op_field, your_field
def print_game_fields(your_field, op_field):
    """Print the player's field; with the -d/--debug flag also reveal the
    opponent's field.

    NOTE(review): DEBUG is a module-level name assigned only inside the
    __main__ block below, so importing this module and calling this function
    directly would raise NameError -- fine for script usage, worth confirming.
    """
    if DEBUG:
        op_field.print_game_field()
    your_field.print_game_field()
def play(op_field, your_field):
    """Main game loop: show the board(s) and statistics, read a shot, and
    apply its outcome. Runs until Winning.winning() (reached via a sunk last
    ship in hit_ship -> check_if_ship_down) ends the program."""
    while True:
        print_game_fields(your_field, op_field)
        Statistics.print_statistics()
        location = shoot()
        row, col = calc_location_from_string(location)
        result = try_shooting(op_field, row, col)
        if result == 0: # water
            GlobalVariables.STONES_COUNT += 1
            op_field, your_field = hit_water(op_field, your_field, row, col)
        elif result == 1:# ship
            GlobalVariables.STONES_COUNT += 1
            op_field, your_field = hit_ship(op_field, your_field, row, col)
        else:
            # Repeated shot at an already-marked cell: no stone consumed.
            doubled_shot_warning()
def define_argument_parser():
    """Parse command-line options; the returned namespace carries a boolean
    `debug` attribute set by -d/--debug."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--debug", action="store_true", help="debug output")
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Parse CLI flags, build a field with hidden ships for the opponent and
    # an empty tracking field for the player, then start the game loop.
    args = define_argument_parser()
    DEBUG = args.debug  # read by print_game_fields()
    op_field = GameField()
    op_field.hide_ships()
    your_field = GameField()
    play(op_field, your_field)
|
from flask.ext.restless import APIManager
from flask.ext.restless import ProcessingException
# JWT imports
from datetime import timedelta
from flask_jwt import JWT, jwt_required, current_user
from boilerplate_app import app
from .models import db, user_datastore, User, Protected
def is_authorized(user, instance):
    """Return True when `user` owns the resource, i.e. user.id equals the
    given instance id (both coerced to int before comparing)."""
    # Simplified from an if/return True/return False chain; the comparison
    # already yields the boolean.
    return int(user.id) == int(instance)
# Flask-Restless API ==========================================================
# Make sure that the current user can only see their own stuff
@jwt_required()
def auth_user_func(instance_id=None, **kw):
    """Flask-Restless preprocessor: require a valid JWT and reject the
    request with 401 unless the authenticated user owns instance_id."""
    if not is_authorized(current_user, instance_id):
        raise ProcessingException(description='Not Authorized',
                                  code=401)
@jwt_required()
def auth_admin_func(instance_id=None, **kw):
    """Flask-Restless preprocessor for admin-only views.

    NOTE(review): this raises unconditionally, so *every* JWT-authenticated
    user is rejected -- an actual admin check appears unimplemented (TODO).
    """
    raise ProcessingException(description='Only admins can access this view',
                              code=401)
@jwt_required()
def auth_func(instance_id=None, **kw):
    """No-op preprocessor: the @jwt_required decorator alone enforces that a
    valid JWT accompanies the request; no per-instance check is applied."""
    # FIX: dropped the stray trailing semicolon after `pass`.
    pass
# Build the REST API endpoints with Flask-Restless.
apimanager = APIManager(app, flask_sqlalchemy_db=db)

# /api/v1/protected_data: full CRUD, readable by any JWT-authenticated user
# (auth_func only checks the token).
protected_blueprint = apimanager.create_api(Protected,
    methods=['GET', 'POST', 'DELETE', 'PUT'],
    url_prefix='/api/v1',
    preprocessors=dict(GET_SINGLE=[auth_func], GET_MANY=[auth_func]),
    collection_name='protected_data',
    include_columns=['id','name', 'description'])

# /api/v1/user: single-record GET restricted to the owning user; listing all
# users goes through auth_admin_func (currently rejects everyone).
user_blueprint = apimanager.create_api(User,
    methods=['GET', 'PUT'],
    url_prefix='/api/v1',
    preprocessors=dict(GET_SINGLE=[auth_user_func], GET_MANY=[auth_admin_func]),
    collection_name='user',
    include_columns=['id', 'username', 'data2', 'user_id'])
|
# File I/O demo: write two lines, read them back as text, then as bytes.
# FIX: the original opened the binary handle with open(...).read() and never
# closed it (resource leak); `with` closes every handle even on error.
with open('data.txt', 'w') as f:
    f.write('Hello\n')
    f.write('World\n')

with open('data.txt') as f1:
    text = f1.read()
print(text)
print(text.split())

# Binary mode returns bytes; slicing bytes yields bytes.
with open('data.txt', 'rb') as f2:
    data = f2.read()
print(data)
print(data[4:8])
|
#!/usr/bin/python
# David Newell
# sebastian/savedata/remove.py
# Handle and deleted selected data
# Import Useful Modules
import sys, os
sys.path.append(os.path.abspath('../'))
import GeoUtils
BASE_URL = GeoUtils.constants.BASE_URL
DBhandle = GeoUtils.RDB()
DBhandle.connect('uws_ge')
# Handle data
# db - dictionary of database information (Format: {'database' : 'value' , 'query' : 'value'}
# fields - data dictionary with field information
# qv - query string values (from form)
# type - item type
# ge_key - user identification key
def handleData(db,fields,qv,type,ge_key=""):
    """Delete the item identified by qv['ID'] from db['table'] and return an
    HTML success/error fragment.

    db     -- dict with 'database' and 'table' keys
    fields -- form-field dictionary (unused beyond fieldRetr setup here)
    qv     -- cgi.FieldStorage with the posted values
    type   -- item type; any type containing 'Poly' goes through
              deletePortPoly() so the geometry is archived first
    ge_key -- user identification key

    SECURITY NOTE(review): the DELETE statement is built by string
    interpolation of qv['ID'] -- SQL injection risk; should use a
    parameterized query.
    """
    # Dictionary of functions to generate form field
    fieldRetr = {
        'text' : GeoUtils.Interface.uniForm.textRetr,
        'textarea' : GeoUtils.Interface.uniForm.textareaRetr,
        'radio' : GeoUtils.Interface.uniForm.radioRetr,
        'select' : GeoUtils.Interface.uniForm.selectRetr,
        'hidden' : GeoUtils.Interface.uniForm.hiddenRetr
    }
    # Get user name associated with given key
    DBhandle.setConnUserKey(ge_key)
    user = DBhandle.ConnUserName()
    # Get item ID (0 when the form posted none)
    if 'ID' in qv:
        id = qv['ID'].value
    else:
        id = 0
    # Deleted entry
    deleted = False
    # Polygon types must be archived before removal; plain rows are deleted
    # directly.
    if 'Poly' in type:
        deleted = deletePortPoly(db,id,user,type)
    else:
        # Start delete database query
        delq = 'DELETE FROM %s WHERE ID="%s"' % (db['table'],id)
        # Delete entry from database
        deldata,delrc = DBhandle.query(delq)
        deleted = True
    if deleted:
        # Build ok message
        msg = '<h3>Success:</h3>\n<p>Deletion complete! Thanks for your entry.</p>\n'
        # Output ok message
        output = GeoUtils.Interface.uniForm.fullOkMsgGen(msg)
        # Return output
        return output
    else:
        # Build error message
        msg = '<h3>Error:</h3>\n<p>There was an error while deleting the item. Please try again.</p>\n'
        # Output error message
        output = GeoUtils.Interface.uniForm.fullErrorMsgGen(msg)
        # Return output
        return output
# Update Port Polygon
def deletePortPoly(db,id,user,type):
    """Archive one polygon feature into historical_features, then delete it
    from db['table']. Returns False when the ID does not match exactly one
    row, True on success.

    SECURITY NOTE(review): queries are built by string interpolation of id
    and row values -- SQL injection risk; should use parameterized queries.
    """
    # Select old entry query
    selq = 'SELECT portID,timestamp,attribution,feature_type,feature_area,feature_perimeter,AsText(feature_geometry) FROM %s WHERE ID="%s"' % (db['table'],id)
    # Select old entry
    seld,selrc = DBhandle.query(selq)
    # If not only one entry, return incomplete deletion
    if selrc == 0 or selrc > 1:
        return False
    # First (only) row of database response
    d = seld[0]
    # Start delete database query
    delq = 'DELETE FROM %s WHERE ID="%s"' % (db['table'],id)
    # Build the archival INSERT from the selected row.
    # NOTE(review): the %(AsText(feature_geometry))s placeholder relies on
    # the result dict literally using "AsText(feature_geometry)" as a column
    # key -- TODO confirm against RDB.query()'s row format.
    histq = 'INSERT INTO historical_features (portID,created,attribution,feature_type,feature_geometry,feature_area,feature_perimeter) '
    histq += 'VALUES ("%(portID)s","%(timestamp)s","%(attribution)s","%(feature_type)s",GeomFromText("%(AsText(feature_geometry))s"),"%(feature_area)s","%(feature_perimeter)s")' % d
    # Delete old entry
    deld,delrc = DBhandle.query(delq)
    # Insert historical feature into history
    hd,hrc = DBhandle.query(histq)
    # Return success
    return True
# If file called directly, output html
# CGI entry point (Python 2): read the POST form, map the item type to its
# table and form definition, emit the HTML response, and close the database.
if __name__ == "__main__":
    # Retrieve user information
    qv = []
    # Retrieve post data; fall back to an 'error' type on any failure so the
    # page still renders.
    try:
        import cgi
        qv = cgi.FieldStorage()
        try:
            type = str(qv["itemType"].value)
            ge_key = str(qv["GE_KEY"].value)
            error = 'None'
        except KeyError:
            type = 'error'
            ge_key = ''
            error = KeyError
    except:
        type = 'error'
        ge_key = ''
        error = 'Error'
    # Item type -> backing table.
    dbTables = {
        "PortChar" : 'portdata',
        "PortInfraPoly" : 'current_features',
        "BasinPoly" : 'current_features',
        "AvoidPoly" : 'current_features',
        "BermAvoidPoly" : 'current_features',
        "StartEndPoly" : 'current_features',
        "PortPoly" : 'current_features',
        "error" : ''
    }
    # Item type -> form-field definitions (all deletions share one form).
    formFields = {
        "PortChar" : GeoUtils.data.FormDicts.DeleteForm,
        "PortInfraPoly" : GeoUtils.data.FormDicts.DeleteForm,
        "BasinPoly" : GeoUtils.data.FormDicts.DeleteForm,
        "AvoidPoly" : GeoUtils.data.FormDicts.DeleteForm,
        "BermAvoidPoly" : GeoUtils.data.FormDicts.DeleteForm,
        "StartEndPoly" : GeoUtils.data.FormDicts.DeleteForm,
        "PortPoly" : GeoUtils.data.FormDicts.DeleteForm,
        "error" : ''
    }
    db = {
        'database' : 'uws_ge',
        'table' : str(dbTables.get(type))
    }
    # Print content-type header
    print GeoUtils.Interface.ContentType("html")
    print
    print GeoUtils.Interface.StdHTMLHeader(GeoUtils.Interface.uniForm.HTMLHeaderInfo())
    # Only delete when the user explicitly confirmed.
    if qv["AreYouSure"].value == 'Yes':
        try:
            print str(handleData(db=db,fields=formFields.get(type),type=type,qv=qv,ge_key=ge_key))
        except:
            # Render the traceback into the page rather than failing silently.
            import sys,traceback
            print '<h3>Unexpected error:</h3>\n<br/><br/>\n<pre>\n'
            print traceback.format_exc()
            print '\n</pre>\n'
    else:
        msg = '<h3>Error:</h3>\n'
        msg += '<p>If you really wish to delete this item, please try again and say so!</p>\n'
        print GeoUtils.Interface.uniForm.fullErrorMsgGen(msg)
    print GeoUtils.Interface.StdHTMLFooter()
    # Close database
    DBhandle.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.