blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
559e7cf427eed5557dbf2584f0a0122c5073701d | adf84e1c5d2f717e51af53ee4e1141afbe47e53e | /metadata_extractor.py | beb8f97212d73a33b51f9596e7c63a7774525c2c | [] | no_license | Hallec/SAWTools | c955a7383d34fea615add9753dbd0cac1a036c52 | 88e96406b35192e1977d801dfdd722589bebf04d | refs/heads/master | 2021-08-23T02:41:47.027613 | 2017-12-02T16:33:58 | 2017-12-02T16:33:58 | 112,791,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,631 | py | #!/usr/bin/env python3
import os
import re
import time
import json
import secrets
import requests
import datetime
import argparse
import googlemaps
import subprocess
import configparser
from PIL import Image
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class MetadataExtractor:
#Terminal colors
colors = {
"blue" : '\033[94m',
"green" : '\033[92m',
"yellow" : '\033[93m',
"red": '\033[91m',
"end": '\033[0m',
}
#Constructor
def __init__(self):
self.metadata = {}
self.gps_tags = {}
self.config = configparser.ConfigParser()
self.config.read("/home/saw/Scripts/conf/api_keys.cfg")
self.config.sections()
#Enable key/value, gps and latitude/longitude detection
self.__create_key_value_regex()
self.__detect_location_tags()
self.__extract_lat_long_values()
#Function for selecting a random color for terminal messages
def __random_color(self):
choice = secrets.choice(range(len(self.colors)-2))
for i,color in enumerate(self.colors):
if i == choice: return self.colors[color]
#Function for getting current date time
def __get__time(self):
return datetime.datetime.now().strftime("%A - %d-%m-%Y at %H:%M:%S").capitalize()
#Function for checking Google Maps API key
def __checking_api_key(self):
try:
self.api_key = self.config["KEYS"]["GoogleMaps"].strip()
if self.api_key == "":
print("[ERROR] The Google Maps API Key is empty. Please, introduce your key in order to use this service, (conf/api_keys.cfg)")
except:
exit()
#Function for introducing the script
def introduction(self):
print(self.__random_color(),"""
███╗ ███╗███████╗████████╗ █████╗ ██████╗ █████╗ ████████╗ █████╗ ███████╗██╗ ██╗████████╗██████╗ █████╗ ██████╗████████╗ ██████╗ ██████╗
████╗ ████║██╔════╝╚══██╔══╝██╔══██╗██╔══██╗██╔══██╗╚══██╔══╝██╔══██╗ ██╔════╝╚██╗██╔╝╚══██╔══╝██╔══██╗██╔══██╗██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗
██╔████╔██║█████╗ ██║ ███████║██║ ██║███████║ ██║ ███████║ █████╗ ╚███╔╝ ██║ ██████╔╝███████║██║ ██║ ██║ ██║██████╔╝
██║╚██╔╝██║██╔══╝ ██║ ██╔══██║██║ ██║██╔══██║ ██║ ██╔══██║ ██╔══╝ ██╔██╗ ██║ ██╔══██╗██╔══██║██║ ██║ ██║ ██║██╔══██╗
██║ ╚═╝ ██║███████╗ ██║ ██║ ██║██████╔╝██║ ██║ ██║ ██║ ██║ ███████╗██╔╝ ██╗ ██║ ██║ ██║██║ ██║╚██████╗ ██║ ╚██████╔╝██║ ██║
╚═╝ ╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝
""",self.colors["end"])
def check_input_dir(self):
self.files = os.listdir("/home/saw/Scripts/input/metadata")
if not self.files:
print("[ERROR] There is no files in the input/metadata folder. Please drop some files in order to be analyzed.")
exit()
else:
self.list_input_dir()
#Function for listing the content of the input folder
def list_input_dir(self):
valid = False
files_size = len(self.files)
while not valid:
print("\n"*3)
print("What file do you want to analyze?")
print("---------------------------------")
for index,file in enumerate(self.files):
print("{}). {}".format(index+1,file))
try:
self.option = int(input("Select an option: "))
except:
self.option = 0
if self.option >= 1 and self.option <= files_size:
valid = True
else:
if files_size == 1:
print("[ERROR] The option must be a numeric argument of 1")
else:
print("[ERROR] The option must be a numeric argument from: 1 to {}".format(files_size))
self.file_selected = [self.files[self.option-1]]
#Function for maping the file selected to a folder format
def __map_file_folder(self):
self.file_selected = list(map(lambda x: "/home/saw/Scripts/input/metadata/{}".format(x),self.file_selected))
#Function for calling the exiftool command
def call(self):
command = ["exiftool"]
self.__map_file_folder()
command.extend(self.file_selected)
self.output = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.DEVNULL).communicate()
self.__parse()
self.prettify(self.file_selected[0])
#Function for calling the exiftool command with arguments
def call_with_args(self,args,output=True,overwrite=False):
command = ["exiftool"]
#self.__map_file_folder()
command.extend([args])
#To overwrite original file
if overwrite: command.extend(["-overwrite_original"])
command.extend(self.file_selected)
lines = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.DEVNULL).communicate()[0].decode("utf-8").splitlines()
if not output: lines = []
response_command = {}
for line in lines:
#Cleaning useless characters from output
response = self.key_value_regex.search(line).groupdict()
key,value = response["key"].strip(),response["value"].strip()
response_command[key] = value
return response_command
#Function for parsing the output of exiftool command
def __parse(self):
lines = list(map(lambda x: x.decode("iso-8859-1"),self.output[0].splitlines()))
for line in lines:
#Cleaning useless characters from output
response = self.key_value_regex.search(line).groupdict()
key,value = response["key"].strip(),response["value"].strip()
self.metadata[key] = value
#Detecting GPS Tags
if self.location_regex.search(key):
self.gps_tags[key] = value
#Function for creating a regex which split key/value from output
def __create_key_value_regex(self):
self.key_value_regex = re.compile(r"(?P<key>[A-Za-z0-9 ]*):(?P<value>.*)")
#Function for detecting Location tags in the output information
def __detect_location_tags(self):
self.location_regex = re.compile(r"\.*GPS\.*")
#Function for extracting default latitude/longitude format
def __extract_lat_long_values(self):
self.lat_long_regex = re.compile(r"(?P<degree>[0-9\.]*) deg (?P<minutes>[0-9\.]*)' (?P<seconds>[0-9\.]*)\" (?P<orientation>[NSWE])")
#Function for prettifying the output
def prettify(self,file_name):
print()
print("--- OUTPUT from: {} ---".format(file_name))
print()
for key in self.metadata:
print("* [{}]: {}".format(key,self.metadata[key]))
#Function for converting latitude/longitude values into decimal number
def lat_long_to_decimal(self,input_values):
decimal_value = (1 if input_values["orientation"] in ["N","E"] else -1)*(float(input_values["degree"]) + float(input_values["minutes"])/60 + float(input_values["seconds"])/3600)
return decimal_value
#Function for parsing the reverse geocode output
def __parse_place(self,place_tags):
place_information = {}
for tag in place_tags:
if "street_number" in tag["types"]:
place_information["street_number"] = tag["long_name"]
elif "route" in tag["types"]:
place_information["street_name"] = tag["long_name"]
elif "locality" in tag["types"]:
place_information["city_name"] = tag["long_name"]
elif "administrative_area_level_2" in tag["types"]:
place_information["province"] = tag["long_name"]
elif "administrative_area_level_1" in tag["types"]:
place_information["autonomous_community"] = tag["long_name"]
elif "country" in tag["types"]:
place_information["country"] = tag["long_name"]
elif "postal_code" in tag["types"]:
place_information["postal_code"] = tag["long_name"]
return place_information
#Function for collecting private information
def collect_private_information(self):
print("\n"*3)
self.private_information = {}
print("[{}] Collecting private information...".format(self.__get__time()))
author = self.call_with_args("-Author")["Author"] if self.call_with_args("-Author") else ""
if author != "": self.private_information["author"] = author
camera_model_name = self.call_with_args("-Model")["Camera Model Name"] if self.call_with_args("-Model") else ""
if camera_model_name != "": self.private_information["camera_model"] = camera_model_name
software_version = self.call_with_args("-Software")["Software"] if self.call_with_args("-Software") else ""
if software_version != "": self.private_information["software_version"] = software_version
create_date = self.call_with_args("-CreateDate")["Create Date"] if self.call_with_args("-CreateDate") else ""
if create_date != "": self.private_information["create_date"] = create_date
maker = self.call_with_args("-Make")["Make"] if self.call_with_args("-Make") else ""
if maker != "": self.private_information["maker"] = maker
gps_altitude = self.call_with_args("-gpsaltitude")["GPS Altitude"] if self.call_with_args("-gpsaltitude") else ""
if gps_altitude != "": self.private_information["gps_altitude"] = gps_altitude
compression = self.call_with_args("-Compression")["Compression"] if self.call_with_args("-Compression") else ""
if compression != "": self.private_information["compression"] = compression
if self.gps_tags:
self.private_information["gps_latitude"] = self.gps_tags["GPS Latitude"]
self.private_information["gps_longitude"] = self.gps_tags["GPS Longitude"]
if author != "" or camera_model_name != "" or software_version != "" or create_date != "" or maker != "" or gps_altitude != "" or compression != "":
print("--- PRIVATE INFORMATION ---")
for key in self.private_information:
if key == "author": print("* Author: {}".format(self.private_information["author"]))
elif key == "camera_model": print("* Camera Model: {}".format(self.private_information["camera_model"]))
elif key == "software_version": print("* Software Version: {}".format(self.private_information["software_version"]))
elif key == "create_date": print("* Create Date: {}".format(self.private_information["create_date"]))
elif key == "maker": print("* Maker: {}".format(self.private_information["maker"]))
elif key == "gps_altitude": print("* GPS Altitude: {}".format(self.private_information["gps_altitude"]))
elif key == "gps_latitude": print("* GPS Latitude: {}".format(self.private_information["gps_latitude"]))
elif key == "gps_longitude": print("* GPS Longitude: {}".format(self.private_information["gps_longitude"]))
elif key == "compression": print("* Compression: {}".format(self.private_information["compression"]))
else:
print("[INFO] There is no additional private information to display.")
#Function for displaying place informatino
def __display_place(self,place_information):
print("\n"*3)
print("--- PLACE REVIEW ----")
print("* Address: {}".format(place_information["street_name"]))
print("* Address Number: {}".format(place_information["street_number"]))
print("* Postal Code: {}".format(place_information["postal_code"]))
print("* City: {}".format(place_information["city_name"]))
print("* Province: {}".format(place_information["province"]))
print("* Autonomous Community: {}".format(place_information["autonomous_community"]))
print("* Country: {}".format(place_information["country"]))
print("--- Coordinates ---")
print("\t- Latitude: {}".format(place_information["coordinates"][0]))
print("\t- Longitude: {}".format(place_information["coordinates"][1]))
#Function for analyzing location tags
def analyze_location_tags(self):
print("\n"*7)
valid = False
while not valid:
try:
option = input("Do you want to analyze GPS location tags? [y/n]: ").lower()
if option not in ["y","n"]:
print("[ERROR] The option must be: y or n [yes/no]")
else:
valid = True
except:
print("[ERROR] Incorrect option format. The selection must be: y or n [yes/no]")
#Analize if option is enabled
if option == "y":
#Checking wheter or not the API is written in the proper configuration file
self.__checking_api_key()
gmaps = googlemaps.Client(key=self.api_key)
latitude = 0
Longitude = 0
#Detecting latitude/longitude values
for tag in self.gps_tags:
if tag == "GPS Latitude":
latitude = self.lat_long_regex.search(self.gps_tags[tag]).groupdict()
latitude = self.lat_long_to_decimal(latitude)
elif tag == "GPS Longitude":
longitude = self.lat_long_regex.search(self.gps_tags[tag]).groupdict()
longitude = self.lat_long_to_decimal(longitude)
if latitude != 0 and longitude != 0:
results = gmaps.reverse_geocode((latitude,longitude))
for result in results:
if "street_address" in result["types"]:
place = result["address_components"]
break
place_information = self.__parse_place(place)
place_information["coordinates"] = (latitude,longitude)
self.__display_place(place_information)
#Ask the user for saving a screenshot
self.__save_screen(place_information)
else:
print("[INFO] There is no GPS Location information available.")
#Function for detecting the screen size of certain computer
def __screen_size(self):
response = subprocess.Popen(["xrandr"],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
screen_size = subprocess.Popen(['grep',"*"],stdin=response.stdout,stdout=subprocess.PIPE).communicate()
screen_size = screen_size[0].decode("utf-8")
screen_size = re.search(r"\s*([0-9x]*)\s*",screen_size).group(1).split("x")
return screen_size
#Function for saving an screenshot of the location map
def __save_screen(self,place_information):
print("\n"*3)
valid = False
while not valid:
try:
option = input("Do you want to save an screenshot of the map? [y/n]: ").lower()
valid = True
except:
print("[ERROR] The option must be y or n [yes/no]")
if option == "y":
print("[{}] Saving screen in output/maps. Please wait a few seconds...".format(self.__get__time()))
"""
CHROME_PATH = '/usr/bin/chromium-browser'
CHROMEDRIVER_PATH = '/usr/bin/google-chrome'
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size={}".format(WINDOW_SIZE))
chrome_options.binary_location = CHROME_PATH
"""
"""
web_browser = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH,chrome_options=chrome_options)
web_browser.get("https://www.google.es/maps/place/{},{}".format(place_information["coordinates"][0],place_information["coordinates"][1]))
map_canvas = web_browser.find_element_by_class_name("widget-scene")
location = map_canvas.location
size = map_canvas.size
"""
#The same name of the file being analyzed, but with png extension
file_name = self.file_selected[0].rsplit("/",1)[1].split(".")[0]
#Customized Window Size
screen_size = self.__screen_size()
WINDOW_SIZE = "{},{}".format(screen_size[0],screen_size[1])
command = ["google-chrome",
"--headless",
"--disable-gpu",
"--window-size={}".format(WINDOW_SIZE),
"--screenshot=/home/saw/Scripts/output/maps/{}.png".format(file_name)
,"https://www.google.es/maps/place/{},{}".format(place_information["coordinates"][0],place_information["coordinates"][1])]
subprocess.Popen(command,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL).communicate()
"""
web_browser.save_screenshot('output/maps/{}.png'.format(file_name))
web_browser.quit()
"""
im = Image.open('/home/saw/Scripts/output/maps/{}.png'.format(file_name))
left = 430
top = 0
right = 430 + (int(screen_size[0]) - 430)
bottom = top + int(screen_size[1])
im = im.crop((left, top, right, bottom))
im.save('/home/saw/Scripts/output/maps/{}.png'.format(file_name))
print("[{}] Screenshot saved successfully.".format(self.__get__time()))
im.show()
#Dumping private information to a json file
self.__dump_json()
#Function for dumping all private information to a json file
def __dump_json(self):
print("\n"*3)
print("[{}] Dumping private information to a json file (output/json)...".format(self.__get__time()))
file_name = self.file_selected[0].split("/")[-1].split(".")[0]
with open("/home/saw/Scripts/output/json/{}.json".format(file_name),"w+") as f:
f.write(json.dumps(self.private_information))
#Function for anonymizing a certain file
def anonymize(self):
print("[{}] Anonymizing: {}...".format(self.__get__time(),self.file_selected[0]))
self.call_with_args("-all=",output=False,overwrite=True)
print("[{}] Process complete successfully.".format(self.__get__time()))
#Function for faking the metadata of certain file
def fake(self):
print("[{}] Starting faking procedure of file: {}...".format(self.__get__time(),self.file_selected[0]))
fake_config = configparser.ConfigParser()
fake_config.read("/home/saw/Scripts/conf/fake_options.cfg")
fake_config.sections()
#Manufacturer Name
manufacturer_name = secrets.choice(["Apple","Google"])
#Remove private metadata
self.call_with_args("-all=",output=False,overwrite=True)
#Change Author Name
self.call_with_args("-Author='{}'".format(secrets.choice(json.loads(
fake_config["OPTIONS"]["Author"]))),output=False,overwrite=True
)
#Change Camera Model
self.call_with_args("-Model='{}'".format(secrets.choice(json.loads(
fake_config["OPTIONS"]["CameraModelApple"] if manufacturer_name == "Apple" else fake_config["OPTIONS"]["CameraModelAndroid"]))),output=False,overwrite=True
)
#Change Software Version
self.call_with_args("-Software='{}'".format(secrets.choice(json.loads(
fake_config["OPTIONS"]["SoftwareApple"] if manufacturer_name == "Apple" else fake_config["OPTIONS"]["SoftwareAndroid"]))),output=False,overwrite=True
)
#Change Software Version
self.call_with_args("-CreateDate='{}'".format((datetime.datetime.now() - datetime.timedelta(days=secrets.choice(range(1,100)))).strftime("%Y:%m:%d %H:%M:%S+01:00")),output=False,overwrite=True)
#Change Maker
self.call_with_args("-Make='{}'".format(secrets.choice(json.loads(
fake_config["OPTIONS"]["MakerApple"] if manufacturer_name == "Apple" else fake_config["OPTIONS"]["MakerAndroid"]))),output=False,overwrite=True
)
#Compression
self.call_with_args("-Compression='{}'".format(secrets.choice(json.loads(fake_config["OPTIONS"]["Compression"]))),output=False,overwrite=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a','--anonymize', action='store_true',help='Anonymize certain file')
parser.add_argument('-f','--fake',action='store_true',help='Fake private information available')
parser.add_argument('-s','--show',action='store_true',help='Show metadata associated to a certain file')
parser.add_argument('-v','--version', action='version',version='%(prog)s 1.0',help='Show current version of the program')
results = parser.parse_args()
if results.anonymize: #Anonymize File
metadata_extractor = MetadataExtractor()
metadata_extractor.introduction()
metadata_extractor.check_input_dir()
metadata_extractor.call()
metadata_extractor.anonymize()
elif results.fake: #Fake File
metadata_extractor = MetadataExtractor()
metadata_extractor.introduction()
metadata_extractor.check_input_dir()
metadata_extractor.call()
metadata_extractor.fake()
elif results.show: #Show File
metadata_extractor = MetadataExtractor()
metadata_extractor.introduction()
metadata_extractor.check_input_dir()
metadata_extractor.call()
else: #No parameter execution
metadata_extractor = MetadataExtractor()
metadata_extractor.introduction()
metadata_extractor.check_input_dir()
metadata_extractor.call()
metadata_extractor.collect_private_information()
metadata_extractor.analyze_location_tags()
| [
"saw@saw.com"
] | saw@saw.com |
ba4522ad55dbece12e1e1aee2aebbb878afd032c | 4f6b1013e77c9dd909038f0c790d436985bd2a75 | /cifar10_example/wrn_28_10_baseline.py | a14b11c99b72ebf27d1196b4aaeed461efe46ef8 | [] | no_license | joonsang-yu/Network-Recasting | 820fb8cac9a8efbd82cf6dad36a58afda5a77123 | 886a8ac5e7d1f7c884928d97deea34adfd258e75 | refs/heads/master | 2020-04-28T14:51:23.270742 | 2019-06-21T01:19:38 | 2019-06-21T01:19:38 | 175,351,504 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,767 | py | # Import torch and model
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
sys.path.append("../common")
from model_generator import ModelGenerator
from net import Net
# Set hyper params
batch_size = 128
num_epoch = 200
lr = 0.1
gamma = 0.2 # learning rate decay
weight_decay = 0.0005
## for SGD
opt_momentum = 0.9
opt_nesterov = True
dropout_on = False
batchnorm_on = True
scheduler_step_size = [60, 120, 160]
pretrained_model = './cifar10_wrn_28_10_pretrained.pth'
# Load dataset
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))])
transform_train = transforms.Compose(
[transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# WideResNet-28-10 model
model_gen = ModelGenerator(dropout = dropout_on, batchnorm = batchnorm_on)
model_gen.CifarResnetConfig(k = 10, num_layers = 28, cifar = 10)
model = model_gen.GetCifarWrn()
net = Net(model)
net.Gpu()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.GetTotalParams(), lr=lr, weight_decay=weight_decay, momentum=opt_momentum, nesterov=opt_nesterov )
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones = scheduler_step_size, gamma = gamma)
for epoch in range(num_epoch): # loop over the dataset multiple times
running_loss = 0.0
scheduler.step()
net.TrainMode()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
# zero the parameter gradients
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss = (running_loss * i + loss.cpu().data.numpy()) / (i+1)
correct = 0
total = 0
net.TestMode()
for data in testloader:
images, labels = data
outputs = net(Variable(images.cuda()))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
print('%d epoch end, loss: %3.6f, Test Acc: %4.2f %%' %(epoch + 1, running_loss, 100 * correct / total))
print('\nTraining is finished!')
correct = 0
total = 0
net.TestMode()
for data in testloader:
images, labels = data
outputs = net(Variable(images.cuda()))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
print('Accuracy of the network on the 10000 test images: %4.2f %%' % (100 * correct / total))
torch.save(net.GetStateDict(), pretrained_model)
| [
"shorm21@dal.snu.ac.kr"
] | shorm21@dal.snu.ac.kr |
a819f0bbd31b39542a7f259702d30ca5f646f260 | 391f170a8c61e49fe6fcaea1e5942a02341398de | /appointments_scheduler/app/models.py | 88ab3c404d3b681c75619a95dd2a760b27f1e506 | [] | no_license | bhargavaganti/appointments_scheduler | 5755639ba4d23fd8c3b4a162e1726214bd19d992 | b57ec02827041b504ca2eb38680f953f5350297d | refs/heads/master | 2020-04-10T06:56:22.657184 | 2018-03-05T20:52:32 | 2018-03-05T20:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | # -*- coding: utf-8 -*-
from django.db import models
class Patient(models.Model):
""" Representative class for the patient table in database. """
SEX_CHOICES = (
('F', 'Female'),
('M', 'Male')
)
name = models.CharField(max_length=250, verbose_name='Nome')
birthdate = models.DateField(max_length=3, verbose_name='Data de nascimento')
sex = models.CharField(max_length=1, choices=SEX_CHOICES, verbose_name='Sexo')
phone = models.CharField(max_length=20, verbose_name='Telefone')
email = models.EmailField(verbose_name='E-mail')
def __str__(self):
return self.name
class Meta:
verbose_name = 'Paciente'
class Procedure(models.Model):
""" Representative class for the procedure table in database """
description = models.CharField(max_length=100, verbose_name='Procedimento')
cost = models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Valor')
def __str__(self):
return self.description
class Meta:
verbose_name = 'Procedimento'
class Appointment(models.Model):
""" Representative class for the appointment table in database """
patient = models.ForeignKey("Patient", related_name="patient")
procedure = models.ForeignKey("Procedure", related_name="procedure")
date = models.DateField()
start_at = models.TimeField(null=False)
end_at = models.TimeField()
def __str__(self):
return "{} - {}, dia {} às {} horas".format(self.patient.name, self.procedure.description, self.date, self.start_at)
class Meta:
verbose_name = 'Agendamento' | [
"viniciuschan@hotmail.com"
] | viniciuschan@hotmail.com |
a927158be32566f3165b2f91adcba401db0009e2 | 8439bea3d530d29d83a215f75c495c8720a37acd | /Project/Project/pageslist/reducer.py | 2533d346f0f9ba84c93d8d2c5ef123d8ac5a0c18 | [] | no_license | gfalcone/LogAnalysis | 8599d3b6e5922155ea1b337084a8bae1f87f962c | e6a8258633201dab2a54844432158cd02848cca2 | refs/heads/master | 2018-12-28T07:29:35.108472 | 2015-03-03T12:06:46 | 2015-03-03T12:06:46 | 31,596,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #!/usr/bin/python
import sys
totalNumber = 0
oldKey = None
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
# Something has gone wrong. Skip this line.
continue
thisKey, thisNumber = data_mapped
if oldKey and oldKey != thisKey:
print oldKey, "\t", totalNumber
oldKey = thisKey
totalNumber = 0
oldKey = thisKey
totalNumber += float(thisNumber)
if oldKey != None:
print oldKey, "\t", totalNumber
| [
"paologenissel@gmail.com"
] | paologenissel@gmail.com |
bafa8d6d5390a2c94bf8f17ec808289408d16115 | f70bf7dc7f3eae1bd1f55b2d8400a002762deeed | /main.py | f08f68bf13cceba4e60e62562608489249f646a9 | [] | no_license | ShedrackGodson/spaceInvader | dfb503132a962b61b2d4ba949560ff6bf9f5f334 | 998ac7cc2b44966133512148e3d221a388dc341d | refs/heads/main | 2023-06-10T03:06:31.002709 | 2021-06-22T10:07:42 | 2021-06-22T10:07:42 | 379,758,551 | 1 | 0 | null | 2021-06-24T00:08:52 | 2021-06-24T00:08:52 | null | UTF-8 | Python | false | false | 4,570 | py | import pygame
import random
import math
from pygame import mixer
#initiate pygame
pygame.init()
#create the screen
screen = pygame.display.set_mode((800,600))
#background image
background = pygame.image.load('images/background.jpg')
# backgroound music
mixer.music.load('sounds/background.wav')
mixer.music.play(-1)
#title and icon
pygame.display.set_caption("space Invader")
icon =pygame.image.load('images/alien.png') # how to upload an icon on a window header
pygame.display.set_icon(icon)
#player
playerImg = pygame.image.load('images/astronomy.png')
playerX = 370
playerY = 480
playerX_change = 0
def player(x,y):
screen.blit(playerImg, (x,y))
#Enemy
enemyImg = []
enemyX = []
enemyY = []
enemyX_change = []
enemyY_change = []
num_of_enemies = 6
for i in range(num_of_enemies):
enemyImg.append(pygame.image.load('images/enemy.png'))
enemyX.append(random.randint(0, 800))
enemyY.append(random.randint(50, 150))
enemyX_change.append(0.3)
enemyY_change.append(40)
def enemy(x,y,i):
screen.blit(enemyImg[i], (x,y))
#Bullet (ready state means that you can not see the bullet)
bulletImg = pygame.image.load('images/bullet.png')
bulletX = 0
bulletY = 480
buletX_change = 0
bulletY_change = 1
bullet_state = "ready"
# score
score_value = 0
font = pygame.font.Font('freesansbold.ttf', 32)
#coordinates on where the score to appear
textX = 10
textY = 10
#Game over font
over_font=pygame.font.Font('freesansbold.ttf', 64)
def show_score(x,y):
score = font.render("Score :" + str(score_value),True, (255, 255, 255))
screen.blit(score, (x,y))
def game_over_text():
over_text= over_font.render("Game Over", True, (197, 0, 0))
screen.blit(over_text, (200, 250))
def fire_bullet(x,y):
global bullet_state
bullet_state = "fire"
screen.blit(bulletImg, (x + 16, y + 10))
def is_collision(enemyX,enemyY,bulletX,bulletY):
distance = math.sqrt(math.pow(enemyX-bulletX,2) + math.pow(enemyY-bulletY,2))
if distance <27:
return True
else:
return False
#Game loop
running = True
while running:
screen.fill((0, 0, 0)) # red green blue
# background Image
screen.blit(background, (0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running =False
# if a keystroke is placed check if it is left or right.
if event.type == pygame.KEYDOWN: # KEYDOWN is when we place a keyboard
if event.key == pygame.K_LEFT:
playerX_change = -0.5
if event.key == pygame.K_RIGHT:
playerX_change = 0.5
if event.key == pygame.K_SPACE:
if bullet_state == "ready":
bullet_sound= mixer.Sound('sounds/laser.wav')
bullet_sound.play()
bulletX = playerX
fire_bullet(bulletX,bulletY)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
playerX_change = 0
# making sure a player does not get out of screen
playerX += playerX_change
if playerX <= 0:
playerX = 0
elif playerX>= 736:
playerX = 736
#manaing enemy movements
for i in range(num_of_enemies):
#Game Over
if enemyY[i] > 400:
for j in range(num_of_enemies):
enemyY[j] = 2000
game_over_text()
break
enemyX[i] += enemyX_change[i]
if enemyX[i] <= 0:
enemyX_change[i] = 0.3
enemyY[i] += enemyY_change[i]
elif enemyX[i] >= 736:
enemyX_change[i] = -0.3
enemyY[i] += enemyY_change[i]
collision = is_collision(enemyX[i],enemyY[i],bulletX,bulletY)
if collision:
collision_sound = mixer.Sound('sounds/explosion.wav')
collision_sound.play()
# collision_image= pygame.image.load('images/explosion.png')
# screen.blit(collision_image, (enemyX,enemyY))
bulletY = 480
bullet_state = "ready"
score_value += 1
enemyX[i] = random.randint(0, 800)
enemyY[i] = random.randint(50, 150)
enemy(enemyX[i],enemyY[i], i)
#bullet movement
if bulletY <=0:
bulletY = 480
bullet_state = "ready"
if bullet_state == "fire":
fire_bullet(bulletX,bulletY,)
bulletY -= bulletY_change
#collision
player(playerX,playerY)
show_score(textX, textY)
pygame.display.update() | [
"mgenih914@gmail.com"
] | mgenih914@gmail.com |
9282f0f2d60b06e9b9fc014933fb9d202252282c | ee78f995af759dd5a4358d0c3b25c2b822bd0a9c | /mybapp/views/first_rigs/form.py | f790b51467c0c0816262393ee68e7ade5c900ceb | [] | no_license | Garcia-Julz/mine-your-business | 5d615078b298865e85bfca84a6e1bb049e41b684 | bc7618b164067c48dcbeb188d79c4188618107b2 | refs/heads/master | 2021-09-26T16:47:13.432539 | 2020-08-11T14:35:04 | 2020-08-11T14:35:04 | 246,666,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | import sqlite3
from django.shortcuts import render, redirect
from django.urls import reverse
from mybapp.models import Ticket, Rig
# from mybapp.models import model_factory
from ..connection import Connection
# from .ticket_details import get_ticket
from django.contrib.auth.decorators import login_required
# @login_required
def get_locations(request):
with sqlite3.connect(Connection.db_path) as conn:
current_user = request.user
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
db_cursor.execute("""
select
l.id,
l.city,
u.id
FROM mybapp_location l
JOIN auth_user u
ON u.id = l.user_id
WHERE l.user_id = ?
""", (current_user.id,))
return db_cursor.fetchall()
# @login_required
def rig_form_on(request):
if request.method == 'GET':
location = get_locations(request)
template = 'first_rigs/form.html'
context = {
'all_locations': location
}
return render(request, template, context)
# def ticket_edit_form(request, ticket_id):
# if request.method == 'GET':
# ticket = get_ticket(ticket_id)
# rig = get_rigs()
# template = 'tickets/ticket_form.html'
# context = {
# 'ticket': ticket,
# 'all_rigs': rig
# }
# return render(request, template, context) | [
"garcia.j.julian@gmail.com"
] | garcia.j.julian@gmail.com |
a2320094d3de4d7a17062ce221f96b8313e02f33 | 520634ca00b12d12cc0952bab04bf3ee55622118 | /python/paddle/v2/fluid/tests/test_sequence_expand.py | 0f22612d3dbe483e4d5a8638636e44e172160156 | [
"Apache-2.0"
] | permissive | LeungGeorge/Paddle | 6d18866cda5e6ce01acdb8061707f42c5852fd54 | b2ade90ec65d0de1c23d4982f6b29485ff68de63 | refs/heads/develop | 2021-09-10T16:25:18.319966 | 2018-03-29T06:30:07 | 2018-03-29T06:30:07 | 115,273,399 | 0 | 0 | null | 2018-03-29T06:34:05 | 2017-12-24T15:57:41 | C++ | UTF-8 | Python | false | false | 2,099 | py | import unittest
import numpy as np
from op_test import OpTest
class TestSequenceExpand(OpTest):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32')
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
y_lod = [[0, 1, 4, 8]]
self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
def compute(self):
x = self.inputs['X']
x_data, x_lod = x if type(x) == tuple else (x, None)
n = 1 + x_data.shape[0] if not x_lod else len(x_lod[0])
y_data, y_lod = self.inputs['Y']
repeats = [((y_lod[-1][i + 1] - y_lod[-1][i]))
for i in range(len(y_lod[-1]) - 1)]
out = x_data.repeat(repeats, axis=0)
self.outputs = {'Out': out}
def setUp(self):
self.op_type = 'sequence_expand'
self.set_data()
self.compute()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSequenceExpandCase1(TestSequenceExpand):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
x_lod = [[0, 2, 5]]
y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32')
y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]]
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
class TestSequenceExpandCase2(TestSequenceExpand):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float32')
x_lod = [[0, 1]]
y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float32')
y_lod = [[0, 2]]
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
class TestSequenceExpandCase3(TestSequenceExpand):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32')
x_lod = [[0, 1, 2, 3, 4]]
y_data = np.random.uniform(0.1, 1, [6, 1]).astype('float32')
y_lod = [[0, 2, 4, 4, 6]]
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
if __name__ == '__main__':
unittest.main()
| [
"wanghaoshuang@baidu.com"
] | wanghaoshuang@baidu.com |
162669ddc7a2cfe7fffd20b2993e22b246593cf8 | cc3e8c5b7912a8289eb1dcbfad50b98ecfea0ccf | /PatrickDandPeterF/FeatureClassTools2.pyt | 7270460b73b22f28dbd45af0744b9b908b9d56c2 | [] | no_license | acgis-fair0089/gis4207-day03 | 6c6a9aeb15c2a1c9e507394f4000538b01c6bd42 | ccac6edca322006600f3ae0f4f200365c62c40ab | refs/heads/master | 2021-05-09T15:23:56.178137 | 2018-01-30T15:04:28 | 2018-01-30T15:04:28 | 119,091,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,435 | pyt | import arcpy
class Toolbox(object):
def __init__(self):
"""Define the toolbox (the name of the toolbox is the name of the
.pyt file)."""
self.label = "Toolbox"
self.alias = "fctoolsPYT"
# List of tool classes associated with this toolbox
self.tools = [BasicDescribe, FeatureClassLister]
class BasicDescribe(object):
def BasicDescribe(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Basic Describe associated with Describe03.py"
self.description = " "
self.canRunInBackground = False
def getParameterInfo(self):
"""Define parameter definitions"""
params = []
param1 = arcpy.Parameter(
displayName = "Feature Class",
name = "Feature Class",
datatype = "DEFeatureClass",
parameterType = "Required",
direction = "Input")
params = [param1]
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
"""The source code of the tool."""
from arcpy import GetParameterAsText
fc = parameters[0].valueAsText
Dfc = arcpy.Describe(fc)
arcpy.AddMessage("{:13}: {}".format("BaseName", Dfc.BaseName))
arcpy.AddMessage("{:13}: {}".format("CatalogPath", Dfc.CatalogPath))
arcpy.AddMessage("{:13}: {}".format("DataType", Dfc.DataType))
return
class FeatureClassLister(object):
def FeatureClassLister(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Feature Class Lister associated with List02.py"
self.description = ""
self.canRunInBackground = False
def getParameterInfo(self):
"""Define parameter definitions"""
params = []
Folparam = arcpy.Parameter(
displayName = "Folder",
name = "Folder",
datatype = "DEFolder",
parameterType = "Required",
direction = "Input")
params = [Folparam]
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
"""The source code of the tool."""
qwe = arcpy.env.workspace = parameters[0].valueAsText
if arcpy.Exists(qwe):
f = arcpy.ListFeatureClasses()
for i in f:
arcpy.AddMessage(i)
| [
"fair0089@algonquinlive.com"
] | fair0089@algonquinlive.com |
b8b05646cbff68391707c965423c95ffb1cde81f | 27e74d73ed008532a6827033acac5ff937e846f5 | /test-subject.py | eedf7a63c4d6298bf97d8013c406dc6651964832 | [] | no_license | amitmittal117/bs4-with-multithreading | 0a182774904827da826671fe989db45f02a8dbc0 | dc3c9d4fde4a20176b97c9a30690e5f24fdcdf1c | refs/heads/master | 2020-03-20T00:57:27.172449 | 2018-06-12T20:20:24 | 2018-06-12T20:20:24 | 137,062,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # from BSFOUR import *
import threading
def retriever_soup(st):
# print (st)
return (st)
# arr =["http://www.facebook.com","http://www.google.com"]
arr =["http://www.google.com"]
arr_of_functions = [retriever_soup,retriever_soup]
def m_thread(arr_of_function):
threads = []
for i in range(len(arr_of_function)):
threads.append(i)
threads[i] = threading.Thread(target = arr_of_function[0],args=(arr[0],))
threads[i].start()
for i in range(len(arr_of_function)):
threads[i].join()
return (threads)
save_soup = m_thread(arr_of_functions)
print(save_soup) #not returning the "st" from the above function | [
"amitarmittal@gmail.com"
] | amitarmittal@gmail.com |
ef35b89dd5410350e58399fe98455e588eb8b924 | 4a95637e59696f783424eb673e60c0cfa82f96ba | /even-fibonacci.py | d7bf4110651932044bd44643f2a030fac292c5b7 | [] | no_license | katiebug2001/project-euler | eca6ee2aaecf914b10c464e79d24a283f289ab9a | c00fe5c9e3c76715f3792549f1c130cc637baa01 | refs/heads/master | 2021-01-20T02:28:40.066880 | 2017-08-15T18:09:56 | 2017-08-15T18:09:56 | 89,411,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | import unittest
def even_fibonacci(up_to):
"""Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms."""
even_fibs = []
last_fib = 0
this_fib = 1
while this_fib <= up_to:
#print(this_fib)
#print(even_fibs)
if (this_fib % 2) == 0:
#sum_even_fibs += this_fib
even_fibs.append(this_fib)
last_fib, this_fib = this_fib, (last_fib + this_fib)
print(even_fibs)
return sum(even_fibs)
print(even_fibonacci(4000000))
class test_even_fibonacci(unittest.TestCase):
"""
tests even_fibonacci()
"""
def test_different_up_to(self):
"""
tests the sum of even fibonaccis when taken up to different numbers
"""
test_up_to = [1, 2, 10, 50, 55, 700, 350000]
test_even_fib = []
for n in test_up_to:
test_even_fib.append(even_fibonacci(n))
self.assertEqual([0, 2, 10, 44, 44, 798, 257114], test_even_fib)
if __name__ == '__main__':
unittest.main() | [
"katie@honsingerfamily.com"
] | katie@honsingerfamily.com |
c1285be45d4c6b2c73314456eaebe6a127ec96ab | c7f32bfb03559f012daff57991cb977c4a334182 | /Homework/hw2 n-queens/code/submission.py | 12b31f3fa3833cd24d41a5e3cfe626da8d8ebf58 | [] | no_license | reeddotaer/AI-PAs | c26ea0e4d0339ce04f6cc4eabc34249bd2e47735 | 0c367e483bd13cb6cd73a5cadaa9f7e1f2a11572 | refs/heads/master | 2020-09-23T00:36:01.094724 | 2017-06-28T15:14:32 | 2017-06-28T15:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,198 | py | import collections, util, copy
############################################################
# Problem 1
def create_nqueens_csp(n = 8):
"""
Return an N-Queen problem on the board of size |n| * |n|.
You should call csp.add_variable() and csp.add_binary_factor().
@param n: number of queens, or the size of one dimension of the board.
@return csp: A CSP problem with correctly configured factor tables
such that it can be solved by a weighted CSP solver.
"""
csp = util.CSP()
domain = range(n)
for i in range(n):
csp.add_variable(i, domain)
for i in range(n):
for j in range(n):
if i != j :
csp.add_binary_factor(i, j, lambda x, y: x != y and abs(x-y) != abs(i-j))
return csp
# A backtracking algorithm that solves weighted CSP.
# Usage:
# search = BacktrackingSearch()
# search.solve(csp)
class BacktrackingSearch():
def reset_results(self):
"""
This function resets the statistics of the different aspects of the
CSP solver. We will be using the values here for grading, so please
do not make any modification to these variables.
"""
# Keep track of the best assignment and weight found.
self.optimalAssignment = {}
self.optimalWeight = 0
# Keep track of the number of optimal assignments and assignments. These
# two values should be identical when the CSP is unweighted or only has binary
# weights.
self.numOptimalAssignments = 0
self.numAssignments = 0
# Keep track of the number of times backtrack() gets called.
self.numOperations = 0
# Keep track of the number of operations to get to the very first successful
# assignment (doesn't have to be optimal).
self.firstAssignmentNumOperations = 0
# List of all solutions found.
self.allAssignments = []
def print_stats(self):
"""
Prints a message summarizing the outcome of the solver.
"""
if self.optimalAssignment:
print "Found %d optimal assignments with weight %f in %d operations" % \
(self.numOptimalAssignments, self.optimalWeight, self.numOperations)
print "First assignment took %d operations" % self.firstAssignmentNumOperations
else:
print "No solution was found."
def get_delta_weight(self, assignment, var, val):
"""
Given a CSP, a partial assignment, and a proposed new value for a variable,
return the change of weights after assigning the variable with the proposed
value.
@param assignment: A dictionary of current assignment. Unassigned variables
do not have entries, while an assigned variable has the assigned value
as value in dictionary. e.g. if the domain of the variable A is [5,6],
and 6 was assigned to it, then assignment[A] == 6.
@param var: name of an unassigned variable.
@param val: the proposed value.
@return w: Change in weights as a result of the proposed assignment. This
will be used as a multiplier on the current weight.
"""
assert var not in assignment
w = 1.0
if self.csp.unaryFactors[var]:
w *= self.csp.unaryFactors[var][val]
if w == 0: return w
for var2, factor in self.csp.binaryFactors[var].iteritems():
if var2 not in assignment: continue # Not assigned yet
w *= factor[val][assignment[var2]]
if w == 0: return w
return w
def solve(self, csp, mcv = False, ac3 = False):
"""
Solves the given weighted CSP using heuristics as specified in the
parameter. Note that unlike a typical unweighted CSP where the search
terminates when one solution is found, we want this function to find
all possible assignments. The results are stored in the variables
described in reset_result().
@param csp: A weighted CSP.
@param mcv: When enabled, Most Constrained Variable heuristics is used.
@param ac3: When enabled, AC-3 will be used after each assignment of an
variable is made.
"""
# CSP to be solved.
self.csp = csp
# Set the search heuristics requested asked.
self.mcv = mcv
self.ac3 = ac3
# Reset solutions from previous search.
self.reset_results()
# The dictionary of domains of every variable in the CSP.
self.domains = {var: list(self.csp.values[var]) for var in self.csp.variables}
# Perform backtracking search.
self.backtrack({}, 0, 1)
# Print summary of solutions.
self.print_stats()
def backtrack(self, assignment, numAssigned, weight):
"""
Perform the back-tracking algorithms to find all possible solutions to
the CSP.
@param assignment: A dictionary of current assignment. Unassigned variables
do not have entries, while an assigned variable has the assigned value
as value in dictionary. e.g. if the domain of the variable A is [5,6],
and 6 was assigned to it, then assignment[A] == 6.
@param numAssigned: Number of currently assigned variables
@param weight: The weight of the current partial assignment.
"""
self.numOperations += 1
assert weight > 0
if numAssigned == self.csp.numVars:
# A satisfiable solution have been found. Update the statistics.
self.numAssignments += 1
newAssignment = {}
for var in self.csp.variables:
newAssignment[var] = assignment[var]
self.allAssignments.append(newAssignment)
if len(self.optimalAssignment) == 0 or weight >= self.optimalWeight:
if weight == self.optimalWeight:
self.numOptimalAssignments += 1
else:
self.numOptimalAssignments = 1
self.optimalWeight = weight
self.optimalAssignment = newAssignment
if self.firstAssignmentNumOperations == 0:
self.firstAssignmentNumOperations = self.numOperations
return
# Select the next variable to be assigned.
var = self.get_unassigned_variable(assignment)
# Get an ordering of the values.
ordered_values = self.domains[var]
# Continue the backtracking recursion using |var| and |ordered_values|.
if not self.ac3:
# When arc consistency check is not enabled.
for val in ordered_values:
deltaWeight = self.get_delta_weight(assignment, var, val)
if deltaWeight > 0:
assignment[var] = val
self.backtrack(assignment, numAssigned + 1, weight * deltaWeight)
del assignment[var]
else:
# Arc consistency check is enabled.
# Problem 1c: skeleton code for AC-3
# You need to implement arc_consistency_check().
for val in ordered_values:
deltaWeight = self.get_delta_weight(assignment, var, val)
if deltaWeight > 0:
assignment[var] = val
# create a deep copy of domains as we are going to look
# ahead and change domain values
localCopy = copy.deepcopy(self.domains)
# fix value for the selected variable so that hopefully we
# can eliminate values for other variables
self.domains[var] = [val]
# enforce arc consistency
self.arc_consistency_check(var)
self.backtrack(assignment, numAssigned + 1, weight * deltaWeight)
# restore the previous domains
self.domains = localCopy
del assignment[var]
def get_unassigned_variable(self, assignment):
"""
Given a partial assignment, return a currently unassigned variable.
@param assignment: A dictionary of current assignment. This is the same as
what you've seen so far.
@return var: a currently unassigned variable.
"""
if not self.mcv:
# Select a variable without any heuristics.
for var in self.csp.variables:
if var not in assignment: return var
else:
# Problem 1b
# Heuristic: most constrained variable (MCV)
# Select a variable with the least number of remaining domain values.
# Hint: given var, self.domains[var] gives you all the possible values
# Hint: get_delta_weight gives the change in weights given a partial
# assignment, a variable, and a proposed value to this variable
# Hint: for ties, choose the variable with lowest index in self.csp.variables
flag, mcv_var = 99999999, None
for var in self.csp.variables:
if var not in assignment:
count = 0
for value in self.domains[var]:
if self.get_delta_weight(assignment, var, value) != 0:
count += 1
if count < flag:
flag, mcv_var = count, var
return mcv_var
def arc_consistency_check(self, var):
"""
Perform the AC-3 algorithm. The goal is to reduce the size of the
domain values for the unassigned variables based on arc consistency.
@param var: The variable whose value has just been set.
"""
# Problem 1c
# Hint: How to get variables neighboring variable |var|?
# => for var2 in self.csp.get_neighbor_vars(var):
# # use var2
#
# Hint: How to check if a value or two values are inconsistent?
# - For unary factors
# => self.csp.unaryFactors[var1][val1] == 0
#
# - For binary factors
# => self.csp.binaryFactors[var1][var2][val1][val2] == 0
# (self.csp.binaryFactors[var1][var2] returns a nested dict of all assignments)
def revise(x, y):
revised = False
to_remove = []
for value_x in self.domains[x]:
allow = False
for value_y in self.domains[y]:
if self.csp.binaryFactors[x][y][value_x][value_y]:
allow = True
break
if not allow:
to_remove.append(value_x)
revised = True
for i in to_remove:
self.domains[x].remove(i)
return revised
queue = [(neighbor, var) for neighbor in self.csp.get_neighbor_vars(var)]
while len(queue) != 0:
x, y = queue.pop(0)
if revise(x, y):
for z in self.csp.get_neighbor_vars(x):
if z != y :
queue.append((z, x))
| [
"hutianxiao_fdu@126.com"
] | hutianxiao_fdu@126.com |
1eced88ff0f03519808423767eaf750e207e3445 | 3f7caea7eefe154c0a1e41a16f6d2f7679f3083e | /testcase.py | 5bfee1fe7d32bdec03257b7f8eaa782d8138fbde | [
"MIT"
] | permissive | fiber-space/t3 | 42cbb71aa15ace2cd0f77855dfdcd739af8ac39b | 47e38cc956b3da55dde1c22fd71c12477e6a2aed | refs/heads/master | 2021-01-10T11:27:59.937639 | 2016-03-07T03:53:17 | 2016-03-07T03:53:17 | 53,105,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | # ======================================================================
#
# Copyright (C) 2016 Kay Schluehr (kay@fiber-space.de)
#
# t3testcase.py, v B.0 2016/03/03
#
# ======================================================================
__all__ = ["T3TestCase", "ExpectFailure"]
import abc
import sys
import os
import traceback
import time
from t3.pattern import MatchingFailure
class ExpectFailure(Exception): pass
class T3TestCase(object):
def __enter__(self):
settings["testcnt"]+=1
return self
def __exit__(self, typ, value, tb):
if typ:
if typ in (AssertionError, MatchingFailure, ExpectFailure):
settings["failcnt"]+=1
self._status = "FAIL"
sys.stderr.write("\n<< TEST FAILED >>\n")
else:
settings["errcnt"]+=1
self._status = "ERROR"
traceback.print_exc()
return True
| [
"kay@fiber-space.de"
] | kay@fiber-space.de |
4575374be2e7976e2f336e05ff19aeeef1452441 | f3eeb766d2ef73b696a23cbf44aa27c59d7e758f | /train/train_single_l16.py | f09f4db9d355ff89f409eeb0c00427ffc5143938 | [] | no_license | lazycal/learned-index-tuning | dce3777bf164963fa28edd83b8911fd3a0597448 | 64d814ba5ba802b86f48e759181d57d814e19dae | refs/heads/main | 2023-02-07T09:09:20.325680 | 2020-12-15T03:26:46 | 2020-12-15T03:26:46 | 318,956,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,313 | py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from operator import itemgetter
import tqdm
import random
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import argparse
import timer
import struct
import math
from sys import byteorder
assert byteorder=='little', byteorder
def get_query_data(path_file):
    """Load (key, position) training pairs from a binary query file.

    File layout is little-endian uint64: one leading count word, then
    alternating key / index-position values.

    Returns a tuple (keys, positions) of two uint64 numpy arrays.
    """
    raw = np.fromfile(path_file, dtype=np.uint64)
    payload = raw[1:]          # drop the leading count word
    keys = payload[::2]        # even slots hold the key values
    positions = payload[1::2]  # odd slots hold the index positions
    return keys, positions
def get_index_data(path_file):
    """Load the full sorted key array from a binary index file.

    Same layout as the query file: little-endian uint64 with a leading
    count word, which is dropped before returning.
    """
    print('reading index')
    raw = np.fromfile(path_file, dtype=np.uint64)
    print('read done')
    return raw[1:]
def convert(l1_model, l2_models, l2_error, out_path):
    """Serialize the trained two-layer index to a binary file.

    Args:
        l1_model: list containing exactly one Cubic model (layer 1).
        l2_models: list of Linear models (layer 2).
        l2_error: per-model max errors, same length as l2_models.
        out_path: destination file path.

    Layout (all little-endian):
        6 x uint64 header: VERSION_NUM (0), num_layers (2), 2, 0,
            number of layer-1 models, number of layer-2 models
        4 x float64: cubic coefficients a, b, c, d of the layer-1 model
        per layer-2 model: float64 intercept b, float64 slope a,
            uint64 ceil(max error)
    """
    assert len(l1_model) == 1, 'layer 1 should only have one Cubic model'
    with open(out_path, 'wb') as fout:
        # Header words, in the exact order the C++ reader expects.
        for word in (0, 2, 2, 0, len(l1_model), len(l2_models)):
            fout.write(struct.pack('<Q', word))
        cubic = l1_model[0]
        fout.write(np.array([cubic.a, cubic.b, cubic.c, cubic.d],
                            dtype='<f8').tobytes())
        for model, err in zip(l2_models, l2_error):
            # Intercept first, then slope, then the rounded-up error bound.
            fout.write(struct.pack('<d', model.b.item()))
            fout.write(struct.pack('<d', model.a.item()))
            fout.write(struct.pack('<Q', int(math.ceil(err))))
class Linear(nn.Module):
    """Affine model f(x) = a * x + b with scalar float64 parameters."""

    def __init__(self):
        super(Linear, self).__init__()
        self.a = nn.Parameter(torch.tensor(0, dtype=torch.float64, requires_grad=True))
        self.b = nn.Parameter(torch.tensor(0, dtype=torch.float64, requires_grad=True))
        # Initialize in the same order (a then b) so RNG draws match under a fixed seed.
        torch.nn.init.uniform_(self.a, -1, 1)
        torch.nn.init.uniform_(self.b, -1, 1)

    def forward(self, x):
        # f(x) = a*x + b
        return self.a * x + self.b

    def transform_back(self, x1, x2, y1, y2):
        """Undo input/output standardization in place.

        The model was fit on x' = (x - x1) / x2 and y' = (y - y1) / y2.
        Rewrites (a, b) so the model maps raw x directly to raw y:
            a_raw = a / x2 * y2
            b_raw = (-a / x2 * x1 + b) * y2 + y1
        """
        slope, intercept = self.a.item(), self.b.item()
        self.a.data.fill_(slope / x2 * y2)
        self.b.data.fill_((-slope / x2 * x1 + intercept) * y2 + y1)
class Cubic(nn.Module):
    """Cubic model f(x) = a*x^3 + b*x^2 + c*x + d with scalar float64 parameters."""

    def __init__(self):
        super(Cubic, self).__init__()
        # Create then initialize the coefficients in a..d order so RNG
        # draws match the previous implementation under a fixed seed.
        for name in ('a', 'b', 'c', 'd'):
            setattr(self, name,
                    nn.Parameter(torch.tensor(0, dtype=torch.float64, requires_grad=True)))
        for name in ('a', 'b', 'c', 'd'):
            torch.nn.init.uniform_(getattr(self, name), -1, 1)

    def forward(self, x):
        # f(x) = a*x^3 + b*x^2 + c*x + d
        return x**3 * self.a + x**2 * self.b + x * self.c + self.d

    def transform_back(self, x1, x2, y1, y2):
        """Undo input/output standardization in place.

        The model was fit on x' = (x - x1) / x2 and y' = (y - y1) / y2.
        The coefficients below are the expansion of
        y2 * f((x - x1) / x2) + y1 in powers of x.
        """
        a, b, c, d = self.a.item(), self.b.item(), self.c.item(), self.d.item()
        a_new = y2 * a / x2**3
        b_new = -3 * x1 * y2 * a / x2**3 + y2 * b / x2**2
        c_new = 3 * x1**2 * y2 * a / x2**3 - 2 * x1 * y2 * b / x2**2 + y2 * c / x2
        d_new = -x1**3 * y2 * a / x2**3 + x1**2 * y2 * b / x2**2 - x1 * y2 * c / x2 + d * y2 + y1
        self.a.data.fill_(a_new)
        self.b.data.fill_(b_new)
        self.c.data.fill_(c_new)
        self.d.data.fill_(d_new)
def L2_loss(output, target):
    """Mean squared error between prediction and target."""
    return torch.mean((output - target) ** 2)
def L1_loss(output, target):
    """Mean 16th-power error.

    NOTE: despite the name this is NOT an L1 loss — it raises the residual
    to the 16th power (this file is the "l16" training variant). The name
    is kept unchanged so existing callers and defaults keep working.
    """
    return torch.mean((output - target) ** 16)
def MaxLoss(output, target):
    """Largest absolute residual (L-infinity error)."""
    return (output - target).abs().max()
def transform(a):
    """Standardize *a* into the range [-1, 1].

    Returns (scaled, shift, half_range) such that the original values can
    be recovered as scaled * half_range + shift. A constant input (zero
    range) uses a range of 1 to avoid division by zero.
    """
    arr = np.array(a)
    lo = min(arr)
    span = max(arr) - lo
    if span == 0:
        span = 1
    scaled = 2 * ((arr - lo) / span) - 1
    return scaled, lo + span * 0.5, span * 0.5
def eval_model(model: nn.Module, x_val, y_val, criterion):
    """Evaluate criterion(model(x_val), y_val) and return it as a float.

    Accepts lists, numpy arrays or tensors for x_val / y_val; arrays are
    converted to tensors and moved to GPU when one is available.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def _to_tensor(v):
        # list -> ndarray -> tensor-on-device; tensors pass through untouched.
        if isinstance(v, list):
            v = np.array(v)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v).to(device)
        return v

    return criterion(model(_to_tensor(x_val)), _to_tensor(y_val)).item()
def do_lr_decay(opt, epoch, lr_decay):
    """Apply a step learning-rate schedule to an optimizer.

    lr_decay is a sequence of (start_epoch, lr) pairs in ascending epoch
    order; the last pair whose start_epoch <= epoch wins. The chosen rate
    is written into every parameter group of *opt*.
    """
    chosen = None
    for start_epoch, rate in lr_decay:
        if epoch >= start_epoch:
            chosen = rate
    assert chosen is not None, chosen
    # print('update lr to', chosen)
    for group in opt.param_groups:
        group['lr'] = chosen
def train_model(model: nn.Module, x, y,
                max_epoch=100,
                criterion=L2_loss,
                batch_size=None,
                wd=0,
                lr_decay=((0, 1),),  # (start_epoch, lr) pairs, e.g. ((0,1e-18), (40,1e-19))
                log_freq=10,
                ):
    """Fit *model* to (x, y) with full-batch Adam on standardized data.

    Both x and y are shifted/scaled into [-1, 1] before optimization (see
    transform()); afterwards the fitted parameters are mapped back to raw
    coordinates via model.transform_back().

    Args:
        model: module exposing forward() and transform_back(x1, x2, y1, y2).
        x, y: training keys and positions (array-like).
        max_epoch: number of full-batch optimization steps.
        criterion: training loss, called as criterion(pred, target).
        batch_size: must be None — only full-batch training is supported.
        wd: weight decay passed to Adam.
        lr_decay: step schedule consumed by do_lr_decay().
        log_freq: log every log_freq epochs; <= 0 disables periodic logs.

    Returns:
        (train_loss, y_scale): train_loss is a list of
        (epoch, criterion_loss, max_abs_loss) tuples measured in the
        standardized space; y_scale is the y half-range used for
        standardization (useful for interpreting the loss values).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x_ori, y_ori = x, y
    x, x_shift, x_scale = transform(x)
    y, y_shift, y_scale = transform(y)
    x_gpu, y_gpu = torch.tensor(x).to(device), torch.tensor(y).to(device)
    opt = optim.Adam(model.parameters(), lr=0, weight_decay=wd)
    num_data = len(x)
    assert batch_size is None  # mini-batch SGD is not supported
    if batch_size is None:
        batch_size = num_data  # default to full batch SGD
    # Single "batch" covering the whole data set.
    train_loader = [(x_gpu, y_gpu)]
    train_loss = []
    for j in range(max_epoch):
        if log_freq > 0 and j % log_freq == 0:
            # BUG FIX: the original line had an unbalanced extra ')' which
            # made the whole file a SyntaxError.
            train_loss.append((j,
                               eval_model(model, x_gpu, y_gpu, criterion),
                               eval_model(model, x_gpu, y_gpu, MaxLoss)))
            print('Epoch', j, ': mean loss on training set is', train_loss[-1][-1])
        do_lr_decay(opt, j, lr_decay)
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            opt.zero_grad()
            loss = criterion(model(data), target)
            loss.backward()
            opt.step()
    err = eval_model(model, x_gpu, y_gpu, criterion)
    err_max = eval_model(model, x_gpu, y_gpu, MaxLoss)
    train_loss.append((max_epoch, err, err_max))
    print('Final mean loss on training set is', err)
    # Map the model fitted on standardized data back to raw coordinates:
    # find model' s.t. (model'(x_ori) - y_shift) / y_scale == model(x).
    model.transform_back(x_shift, x_scale, y_shift, y_scale)
    print(y_scale)
    err_ori = eval_model(model, x_ori, y_ori, criterion)
    print('Final mean original loss on training set is', err_ori)
    return train_loss, y_scale
def seed_all(seed, deterministic_but_slow):
    """Seed the python, numpy and torch RNGs for reproducibility.

    When deterministic_but_slow is True, cuDNN autotuning is disabled and
    deterministic kernels are requested (reproducible but slower);
    otherwise benchmarking is enabled for speed.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.benchmark = not deterministic_but_slow
    torch.backends.cudnn.deterministic = bool(deterministic_but_slow)
def set_empty_const(empty_num, linear_list, data2_y, num_module2):
    """Give layer-2 models that received no training data a constant prediction.

    Args:
        empty_num: sorted indices of the layer-2 partitions that ended up
            with no training points.
        linear_list: the layer-2 Linear models, indexed by partition.
        data2_y: per-partition lists of target positions; data2_y[i] is
            empty exactly when i is in empty_num.
        num_module2: total number of layer-2 partitions.

    For each empty partition a constant is interpolated from the nearest
    non-empty neighbours: the largest y of the closest non-empty partition
    to the left, the smallest y of the closest non-empty partition to the
    right, or the midpoint of the two when both exist. A model is made
    constant by setting slope a = 0 and intercept b to that value.

    signal_left / signal_right use -2 as a sentinel meaning "no non-empty
    neighbour found on that side"; 0 means one was found.
    """
    # Empty set
    right_index = []
    left_index = []
    const = []
    if len(empty_num) == 1:
        # Exactly one empty partition: take the boundary value of whichever
        # non-empty neighbour exists (or the midpoint when both do).
        # NOTE(review): in this branch the computed const is never written
        # into linear_list (no fill_ calls here) — confirm whether the
        # caller handles this case separately.
        if empty_num[0] == 0:
            right_index = empty_num[0] + 1
            right_val = sorted(data2_y[right_index])[0]
            const.append(right_val)
        elif empty_num[0] == num_module2 - 1:
            left_index = empty_num[0] - 1
            left_val = sorted(data2_y[left_index])[-1]
            const.append(left_val)
        else:
            right_index = empty_num[0] + 1
            left_index = empty_num[0] - 1
            right_val = sorted(data2_y[right_index])[0]
            left_val = sorted(data2_y[left_index])[-1]
            const.append(0.5 * (right_val + left_val))
    else:
        for i in range(len(empty_num)):
            signal_left = -2
            signal_right = -2
            # Special Case: the first element
            if i == 0:
                if empty_num[i] == 0:
                    left_index.append(-1)
                else:
                    left_index.append(empty_num[i]-1)
                    signal_left = 0
                # Scan forward past the consecutive run of empty partitions to
                # find the first non-empty partition on the RIGHT of empty_num[i]
                for k in range(i+1,len(empty_num)):
                    if k == len(empty_num)-1:
                        if empty_num[k] != num_module2 - 1:
                            right_index.append(empty_num[k] + 1)
                            signal_right = 0
                        break
                    if empty_num[k] != empty_num[k-1] + 1:
                        # Gap in the run of empties: the partition just after
                        # the run is non-empty.
                        right_index.append(empty_num[k-1]+ 1)
                        signal_right = 0
                        break
                if signal_right == -2:
                    right_index.append(-1)
            # Special Case: the last element
            elif i == len(empty_num)-1:
                if empty_num[i] == num_module2 - 1:
                    right_index.append(-1)
                else:
                    right_index.append(empty_num[i]+1)
                    signal_right = 0
                # Scan backward to find the first non-empty partition on the LEFT
                for l in range(i):
                    if l == i-1:
                        if empty_num[i-1-l] != 0:
                            left_index.append(empty_num[i-1-l] - 1)
                            signal_left = 0
                        break
                    if empty_num[i-1-l] != empty_num[i-l] - 1:
                        left_index.append(empty_num[i-l] - 1)
                        signal_left = 0
                        break
                if signal_left == -2:
                    left_index.append(-1)
            # Usual Case
            else:
                # Find the first non-empty partition on the RIGHT of empty_num[i]
                for k in range(i+1,len(empty_num)):
                    if k == len(empty_num)-1:
                        if empty_num[k] != num_module2 - 1:
                            right_index.append(empty_num[k] + 1)
                            signal_right = 0
                        break
                    if empty_num[k] != empty_num[k-1] + 1:
                        right_index.append(empty_num[k-1]+ 1)
                        signal_right = 0
                        break
                # Find the first non-empty partition on the LEFT of empty_num[i]
                for l in range(i):
                    if l == i-1:
                        if empty_num[i-1-l] != 0:
                            left_index.append(empty_num[i-1-l] - 1)
                            signal_left = 0
                        break
                    if (empty_num[i-1-l] != empty_num[i-l] - 1):
                        left_index.append(empty_num[i-l] - 1)
                        signal_left = 0
                        break
                if signal_right == -2:
                    right_index.append(-1)
                if signal_left == -2:
                    left_index.append(-1)
            # Pick the constant from whichever side(s) produced a neighbour.
            if signal_right == -2:
                left_val = sorted(data2_y[left_index[i]])[-1]
                const.append(left_val)
            elif signal_left == -2:
                right_val = sorted(data2_y[right_index[i]])[0]
                const.append(right_val)
            else:
                right_val = sorted(data2_y[right_index[i]])[0]
                left_val = sorted(data2_y[left_index[i]])[-1]
                const.append(0.5 * (right_val.item() + left_val.item()))
        # NOTE(review): these two lines sit OUTSIDE the loop above (same
        # indent as the `for`), so only the last empty partition
        # (empty_num[-1], via the leftover loop variable i) is actually
        # updated — verify whether they were meant to be inside the loop.
        linear_list[empty_num[i]].a.data.fill_(0)
        linear_list[empty_num[i]].b.data.fill_(const[i])
def train_L2(top_model, x, y, num_module2, log_freq=-1, max_epoch2=100,
        criterion_train=L1_loss):
    """Partition (x, y) by the layer-1 model's prediction and train layer-2.

    Each key is routed to partition clamp(int(top_model(key)), 0,
    num_module2 - 1); a Linear model is then fit to each partition's data.

    NOTE(review): consistent with this file's name ("train_single"), the
    loop below skips partition 0 entirely and `break`s right after the
    first partition with index > 0 that has data is trained — so at most
    ONE layer-2 model is trained and returned. Confirm this is intended.

    Args:
        top_model: trained layer-1 model mapping keys to [0, num_module2).
        x, y: keys and target positions (y pre-scaled by the caller).
        num_module2: number of layer-2 partitions.
        log_freq, max_epoch2, criterion_train: forwarded to train_model().

    Returns:
        (linear_list, data2_x, data2_y, errs, train_loss, y_scale):
        the trained layer-2 models, the per-partition data, per-partition
        max errors, and per-partition training curves / y scales.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    linear_list = []
    errs = np.zeros(num_module2) # store max error per partition
    # Distribute the data into the 2nd layer partitions (no grad needed).
    with torch.no_grad():
        model_index = top_model(torch.tensor(x).to(device)).detach().cpu().numpy()
    print('model_index.shape=',model_index.shape)
    data2_x = [[] for _ in range(num_module2)]
    data2_y = [[] for _ in range(num_module2)]
    for i in range(len(model_index)):
        # Clamp the layer-1 prediction into a valid partition index.
        mi = max(0, min(num_module2 - 1, int(model_index[i])))
        data2_x[mi].append(x[i])
        data2_y[mi].append(y[i])
    del x,y # just for checking correctness
    # train 2nd layer
    linear_list = []
    empty_num = []
    print('num_data for each layer-2 model', list(map(len, data2_x)))
    train_loss, y_scale = [[] for _ in range(num_module2)], [[] for _ in range(num_module2)]
    # signal flips to 1 once a partition with data (and index > 0) is seen;
    # it triggers the break at the bottom of the loop after that one is trained.
    signal = 0
    for i in tqdm.tqdm(range(num_module2)):
        print(f'traing #{i}')
        linear_model = Linear().to(device)
        if i == 0:
            # Partition 0 is always skipped (no model appended for it).
            continue
        if len(data2_x[i]) != 0 and i > 0:
            signal = 1
        if len(data2_x[i]) == 0:
            continue
            # NOTE(review): the three lines below are unreachable dead code
            # (they follow an unconditional `continue`); empty partitions
            # are therefore never recorded in empty_num / linear_list here.
            empty_num.append(i)
            linear_list.append(linear_model)
            continue # skip
        train_loss[i], y_scale[i] = train_model(linear_model, data2_x[i], data2_y[i], max_epoch2,
            log_freq=log_freq, criterion=criterion_train)
        linear_list.append(linear_model)
        max_err = eval_model(linear_model, data2_x[i], data2_y[i], MaxLoss)
        errs[i] = max_err
        print("max error={}".format(max_err))
        if signal == 1:
            break
    return linear_list, data2_x, data2_y, errs, train_loss, y_scale
def do_stretch(x, y):
    """Apply the "stretching" augmentation to positions of a sorted key array.

    Given a key at position p with access frequency f, its target position
    becomes the first index of that key run plus (f - 1) / 2, which is
    equivalent to shifting every later key by f - 1 as described in the paper.
    Returns a new array; ``y`` is not modified.
    """
    assert np.all(x[:-1]<=x[1:]), 'data not sorted'
    stretched = np.copy(y)
    # replace each position with the index of the first occurrence of its key
    run_start = 0
    for idx in range(len(x)):
        if idx > 0 and x[idx - 1] == x[idx]:
            pass  # still inside the same run of equal keys
        else:
            run_start = idx
        stretched[idx] = run_start
    # access frequency of every key
    freq = {}
    for key in x:
        freq[key] = freq.get(key, 0) + 1
    # shift each key to the middle of its frequency span
    for idx in range(len(x)):
        stretched[idx] += float(freq[x[idx]] - 1) / 2
    return stretched
def sort_data(x, y):
    """Return ``x`` and ``y`` reordered so that ``x`` is ascending (pairing kept)."""
    order = np.argsort(x)
    return x[order], y[order]
def work(x, y, index_array, out_path, max_epoch1, max_epoch2,
         num_module2, log_freq=-1, seed=7, deterministic_but_slow=True, stretch=False, args={}):
    """End-to-end training pipeline: scale labels, fit the layer-1 Cubic model,
    fit the layer-2 Linear models, and persist parameters plus a profile file.

    NOTE(review): ``args={}`` is a mutable default argument; it is only read
    (``vars(args)``) here, but callers should pass an argparse namespace.
    """
    ti = timer.Timer()
    # x, y = get_query_data(path_query)
    # index_array = get_index_data(path_index)
    num_data1 = len(x)
    x, y = sort_data(x, y)
    x, y = x.astype(np.float64), y.astype(np.float64)
    cubic_list = []
    datax = x#x # 1st layer data
    datay = y#y # 1st layer label
    if stretch:
        print('doing stretch')
        y1 = do_stretch(x, y)
        # f = frequency of the maximal key; used only to sanity-check the bound
        f = np.sum(y1 == max(y1))
        nxt_lower_bound = len(y1) # can be regarded as a rigorous alternative to max(datay)+1
        assert np.allclose(-(f-1.) / 2 + f + max(y1), nxt_lower_bound), nxt_lower_bound
        datay = y1
        datay = (datay - min(datay)) * 1. / (nxt_lower_bound - min(datay)) * num_module2 #scale
    else:
        nxt_lower_bound = np.sum(index_array <= max(x)) # can be regarded as a rigorous alternative to max(datay)+1
        datay = (datay - min(datay)) * 1. / (nxt_lower_bound - min(datay)) * num_module2 #scale
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    del index_array
    seed_all(seed, deterministic_but_slow)
    print('init done')
    # train 1st layer
    cubic_list = []
    print('num_data for each layer-1 model', len(datax))
    ti('init')
    print(f'traing #{0}')
    cubic_model = Cubic().to(device)
    l1_train_loss, l1_y_scale = train_model(cubic_model, datax, datay, max_epoch1, log_freq=1)
    ti('train_l1')
    cubic_list.append(cubic_model)
    err1 = eval_model(cubic_model, datax, datay, L2_loss)
    ti('other')
    # layer 2 is trained on the *unscaled* labels; the trained layer-1 model routes keys
    linear_list, data2_x, data2_y, errs, l2_train_loss, l2_y_scale = train_L2(cubic_model, x.astype(np.float64), y.astype(np.float64), num_module2,
                                                                             log_freq=log_freq, max_epoch2=max_epoch2)
    ti('train_l2')
    # weight per-bucket max errors by the number of keys landing in each bucket
    wts = np.array(list(map(len, data2_x)))
    mean_max_err = np.sum(np.array(errs) * wts) / wts.sum()
    mean_log2_max_err = np.sum(np.log2(np.maximum(1., np.array(errs))) * wts) / wts.sum()
    print("mean of max error of each layer 2 model=", mean_max_err)
    print("mean of log2(max error of each layer 2 model)=", mean_log2_max_err)
    convert(cubic_list, linear_list, errs, out_path)
    ti('other')
    np.savez(out_path+"_train_profile.npz", mean_max_err=mean_max_err, mean_log2_max_err=mean_log2_max_err,
             wts=wts, L2_err_layer1=err1, max_errs_layer2=errs,
             linear_list=linear_list, cubic_list=cubic_list, loss={
                 'l1_train_loss': l1_train_loss, 'l1_y_scale': l1_y_scale,
                 'l2_train_loss': l2_train_loss, 'l2_y_scale': l2_y_scale},
             ti=ti, args=vars(args))
    print(ti)
def main():
    """Parse CLI arguments, load query/index data, and run the training pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--path-query', default="./data/wiki_ts_200M_uint64_queries_10M_in_train")
    parser.add_argument('--path-index', default="./data/wiki_ts_200M_uint64")
    parser.add_argument('--out-path', default="./rmi_data/V1_3_PARAMETERS")
    parser.add_argument('--max-epoch1', type=int, default=10000)
    parser.add_argument('--max-epoch2', type=int, default=1000)
    parser.add_argument('--num-module2', type=int, default=1000)
    parser.add_argument('--log-freq', type=int, default=100)
    parser.add_argument('--seed', type=int, default=7)
    parser.add_argument('--stretch', action='store_true')
    args = parser.parse_args()
    # load the query workload and hand everything to the pipeline directly
    queries_x, queries_y = get_query_data(args.path_query)
    work(queries_x, queries_y, get_index_data(args.path_index), args.out_path,
         args.max_epoch1, args.max_epoch2, args.num_module2,
         log_freq=args.log_freq, seed=args.seed, stretch=args.stretch, args=args)
# Allow running this trainer as a standalone script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
639f16425d3065089d8b72c9284afc0205529c27 | e74148971dad06f0e2541a81bb8581f5caf47002 | /apps/courses/migrations/0002_auto_20180307_1633.py | 4edb24b38e9742ff230aee998762343b783215fa | [] | no_license | hanchaobiao/mxonline2 | f07d643ad57f73b622eb2af5992cf741088307a9 | 62887b2c492d8cb5d2100aa80e879a268580878f | refs/heads/master | 2020-04-07T20:59:44.893558 | 2018-03-07T12:25:27 | 2018-03-07T12:25:27 | 124,231,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-03-07 08:33
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
# Auto-generated Django migration: alters the default of CourseResource.add_time.
class Migration(migrations.Migration):
    """Auto-generated migration for the 'courses' app.

    NOTE(review): the default is a fixed datetime captured at generation time
    (2018-03-07 16:33), a normal artifact of Django's auto-generation.
    """

    dependencies = [
        ('courses', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='courseresource',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 3, 7, 16, 33, 34, 144000), verbose_name='\u6dfb\u52a0\u65f6\u95f4'),
        ),
    ]
| [
"1017421922@qq.com"
] | 1017421922@qq.com |
67d39e23e66dfb2ff728bb624916f24b36df5d90 | f64708d12edac4df2899f6de7159d58e79c8127e | /AT_new_version/utils.py | 038d444d30f791ccc5252ba6aba0168d9c354d13 | [] | no_license | pyzeon/Algorithmic-trading | 80312a504666fd00851cecc38e8963b4e3217be4 | b4f2b2a8a8fbccd3591a949e1e1930e259118565 | refs/heads/master | 2023-01-05T18:29:04.946528 | 2020-11-03T20:03:37 | 2020-11-03T20:03:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import numpy as np
def adjustsignal(signal):
    """Clamp sell signals so the cumulative held quantity never goes negative.

    Buys (positive values) are always kept and added to the held quantity.
    A sell (negative value) is kept only if enough quantity is held to cover
    it; otherwise it is replaced by 0. Zero signals pass through as 0.

    Bug fix vs. the previous version: the held-quantity counter used to be
    incremented by 1 per buy regardless of size, and *increased* on a sell
    (``qtite += -i`` with negative ``i``), so the cover check drifted wrong.
    It now tracks the actual net position.

    Args:
        signal: iterable of traded quantities (positive = buy, negative = sell).

    Returns:
        list: adjusted signal, same length as the input.
    """
    sig = []
    qtite = 0  # net quantity currently held
    for i in signal:
        if i > 0:
            sig.append(i)
            qtite += i
        elif i < 0:
            if qtite >= abs(i):
                sig.append(i)
                qtite += i  # reduce the position by the sold amount
            else:
                sig.append(0)  # cannot sell more than is held
        else:
            sig.append(0)
    return sig
def dietz_court(signal, close):
    """Modified Dietz rate of return for a trade ``signal`` over a price series.

    ``signal`` holds traded quantities (NaN treated as 0) and ``close`` the
    matching close prices; both are pandas Series sharing one index. The
    first cash flow is treated as the initial investment, and later flows are
    time-weighted by the fraction of the period remaining after each one.
    """
    dates = close.index.values
    prices = np.array(close)
    quantities = np.array(signal.fillna(0))
    position = quantities.cumsum()
    cash_flows = quantities * prices
    end_value = position[-1] * prices[-1]  # market value of the final position
    start_value = cash_flows[0]            # initial investment
    span = dates[-1] - dates[0]
    weights = np.array([(dates[-1] - d) / span for d in dates])
    gain = end_value - start_value - cash_flows[1:].sum()
    weighted_capital = start_value + (weights[1:] * cash_flows[1:]).sum()
    return gain / weighted_capital
| [
"noreply@github.com"
] | noreply@github.com |
4477b9fb1695e3b060ea98d5438539eb5f051705 | d8430588f0272f21032646f950e733cb1eee2c88 | /tests/controllertest.py | c0851909643fa22251b4e3a1d8bcc90388c74f93 | [
"MIT"
] | permissive | Global-Biofoundries-Alliance/DNA-scanner | d45be63a96f342deb5f1da12cb5427cb424a1bed | e5be9685b6f3d34057282a59716541f69ad74a87 | refs/heads/master | 2023-03-08T17:02:22.695234 | 2022-02-28T11:04:46 | 2022-02-28T11:04:46 | 218,476,001 | 15 | 6 | MIT | 2023-03-02T07:47:40 | 2019-10-30T08:15:42 | Python | UTF-8 | Python | false | false | 36,529 | py | import unittest
from itertools import combinations
from sys import maxsize
from Controller.app import app
from Controller.configurator import YmlConfigurator as Configurator
from Controller.session import InMemorySessionManager
from Pinger.Entities import SequenceInformation, SequenceVendorOffers, Currency
from Pinger.Pinger import CompositePinger
from flask import json
from random import random
import random as rand
class TestController(unittest.TestCase):
name = "TestController"
iterations = 100 # How many iterations to perform on iterated tests
    def setUp(self) -> None:
        """Create the Flask test client and collect the configured vendor keys."""
        app.config['TESTING'] = True
        # SBOL example file used as the upload fixture by most tests
        self.sequence_path = 'examples/ComponentDefinitionOutput_gl.xml'
        self.config = Configurator("config.yml")
        with app.test_client() as client:
            self.client = client
        self.vendors = []
        for vendor in self.config.vendors:
            self.vendors.append(vendor.key)
def tearDown(self) -> None:
pass
def test_api_prefix(self):
print("\nTesting /api/ subdomain routing")
resp = self.client.get('/ping')
self.assertTrue(b'The page requested does not exist' in resp.data)
resp = self.client.get('/api/ping')
self.assertTrue(b'The page requested does not exist' not in resp.data)
resp = self.client.get('/upload')
self.assertTrue(b'The page requested does not exist' in resp.data)
resp = self.client.get('/api/upload')
self.assertTrue(b'The page requested does not exist' not in resp.data)
resp = self.client.get('/nonexistent')
self.assertTrue(b'The page requested does not exist' in resp.data)
resp = self.client.get('/api/nonexistent')
self.assertTrue(b'The page requested does not exist' not in resp.data)
def test_upload_endpoint(self) -> None:
print("\nTesting /upload endoint")
for i in range(self.iterations):
handle = open(self.sequence_path, 'rb')
response = self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
self.assertIn(b"upload successful", response.data)
    def test_filter_endpoint(self) -> None:
        """Exercise /filter: malformed payloads, vendor subsets, price and delivery limits.

        NOTE(review): the ``open`` handle below is never closed — leaks one
        file descriptor per iteration.
        """
        print("\nTesting /filter endpoint")
        for i in range(self.iterations):
            # prepare session
            handle = open(self.sequence_path, 'rb')
            self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
            # a well-formed but semantically wrong body must yield an error
            response = self.client.post('/api/filter', data='{"banana": "neigh"}')
            self.assertIn("error", response.get_json())
            # non-JSON filter payload must be rejected with the dedicated message
            filter = '{filter: {}}'
            response = self.client.post('/api/filter', data={"filter": filter})
            self.assertIn(b'Invalid filter request: Data must be in JSON format', response.data)
            # test filtering vendors
            for r in range(1, len(self.vendors)):
                for vendorTuple in combinations(self.vendors, r):
                    vendors = list(vendorTuple)
                    filter = {
                        "filter": {"vendors": vendors, "price": [0, 100], "deliveryDays": 50, "preselectByPrice": True, \
                                   "preselectByDeliveryDays": False}}
                    response = self.client.post('/api/filter', content_type='application/json', data=json.dumps(filter))
                    self.assertIn(b"filter submission successful", response.data)
                    response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                                     data={'size': 1000, 'offset': 0}).get_json()
                    for res in response_json["result"]:
                        for vendor in res["vendors"]:
                            # Data present implies relevant vendor
                            self.assertTrue(not vendor["offers"] or vendor["key"] in vendors)
                    # test filtering vendors with redundant and invalid ones
                    tainted_vendors = vendors + vendors + [-872150987209, 666, -1]
                    filter = {"filter": {"vendors": tainted_vendors, "price": [0, 100], "deliveryDays": 50,
                                         "preselectByPrice": True, \
                                         "preselectByDeliveryDays": False}}
                    response = self.client.post('/api/filter', content_type='application/json', data=json.dumps(filter))
                    self.assertIn(b"filter submission successful", response.data)
                    response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                                     data={'size': 1000, 'offset': 0}).get_json()
                    for res in response_json["result"]:
                        for vendor in res["vendors"]:
                            # Data present implies relevant vendor
                            self.assertTrue(not vendor["offers"] or vendor["key"] in vendors,
                                            "Vendor " + str(vendor["key"]) + " not in " + str(vendors))
            # test filtering by price
            filter = {
                "filter": {"vendors": [0, 1, 2], "price": [20, 50], "deliveryDays": 50, "preselectByPrice": True, \
                           "preselectByDeliveryDays": False}}
            response = self.client.post('/api/filter', content_type='application/json', data=json.dumps(filter))
            self.assertIn(b"filter submission successful", response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            for res in response_json["result"]:
                for vendor in res["vendors"]:
                    for offer in vendor["offers"]:
                        if (offer["price"] >= 0.0): # negative values are placeholders and must stay in
                            self.assertLessEqual(offer["price"], 50)
                            self.assertGreaterEqual(offer["price"], 20)
            # test filtering by delivery days
            filter = {"filter": {"vendors": [0, 1, 2], "price": [0, 100], "deliveryDays": 5, "preselectByPrice": True, \
                                 "preselectByDeliveryDays": False}}
            response = self.client.post('/api/filter', content_type='application/json', data=json.dumps(filter))
            self.assertIn(b"filter submission successful", response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            for res in response_json["result"]:
                for vendor in res["vendors"]:
                    for offer in vendor["offers"]:
                        self.assertLessEqual(offer["turnoverTime"], 5)
    def test_results_endpoint(self) -> None:
        """Validate the full shape of a /results response and name/ID uniqueness."""
        print("\nTesting /results endpoint")
        # Sequence names and IDs that have already occured; Used to ensure unique names IDs
        sequenceNames = []
        sequenceIDs = []
        for i in range(self.iterations):
            handle = open(self.sequence_path, 'rb')
            self.client.post('/api/upload', content_type='multipart/form-data',
                             data={'seqfile': handle, 'prefix': "Zucchini" + str(i)})
            filter = '{"filter": {"vendors": [1],"price": [0, 100],"deliveryDays": 5,"preselectByPrice": True,"preselectByDeliveryDays": False}}'
            self.client.post('/api/filter', data=filter)
            searchResult = self.client.post('/api/results', content_type='multipart/form-data',
                                            data={'size': 1000, 'offset': 0}).get_json()
            self.assertNotIn("error", searchResult, "Results endpoint returned error: " + str(searchResult))
            expectedCount = 0
            self.assertIn("size", searchResult.keys())
            self.assertIn("count", searchResult.keys())
            self.assertIn("offset", searchResult.keys())
            self.assertIn("result", searchResult.keys())
            self.assertIn("globalMessage", searchResult.keys())
            # AdvancedMockPingers are used for testing so there should be warning messages present.
            self.assertTrue(searchResult["globalMessage"])
            self.assertIn("vendorMessage", searchResult.keys())
            messageVendors = []  # tracks the vendors for which messages have already been encountered
            for vendor in searchResult["vendorMessage"]:
                self.assertIn("vendorKey", vendor.keys())
                self.assertIn("messages", vendor.keys())
                # each vendor may contribute at most one message entry
                self.assertFalse(vendor["vendorKey"] in messageVendors)
                messageVendors.append(vendor["vendorKey"])
            for result in searchResult["result"]:
                expectedCount = expectedCount + 1
                self.assertIn("sequenceInformation", result.keys())
                self.assertIn("id", result["sequenceInformation"].keys())
                self.assertIn("name", result["sequenceInformation"].keys())
                self.assertIn("sequence", result["sequenceInformation"].keys())
                self.assertIn("length", result["sequenceInformation"].keys())
                # Test uniqueness of names and IDs
                self.assertNotIn(result["sequenceInformation"]["name"], sequenceNames)
                self.assertNotIn(result["sequenceInformation"]["id"], sequenceIDs)
                # NOTE(review): the name is appended twice here — the second
                # append looks like a copy-paste slip (harmless for the test).
                sequenceNames.append(result["sequenceInformation"]["name"])
                sequenceNames.append(result["sequenceInformation"]["name"])
                sequenceIDs.append(result["sequenceInformation"]["id"])
                self.assertTrue(result["sequenceInformation"]["name"].startswith(
                    str(result["sequenceInformation"]["id"]) + "_Zucchini" + str(i) + "_"))
                self.assertIn("vendors", result.keys())
                for vendor in result["vendors"]:
                    self.assertIn("key", vendor.keys())
                    self.assertIn("offers", vendor.keys())
                    for offer in vendor["offers"]:
                        self.assertIn("price", offer.keys())
                        self.assertIn("currency", offer.keys())
                        self.assertIn(offer["currency"], [currency.symbol() for currency in Currency])
                        self.assertIn("turnoverTime", offer.keys())
                        self.assertIn("offerMessage", offer.keys())
                        for message in offer["offerMessage"]:
                            self.assertIn("text", message)
                            self.assertIn("messageType", message)
            self.assertEqual(expectedCount, searchResult["count"],
                             "Mismatch between declared and actual sequence count!")
def test_vendor_endpoint(self) -> None:
print("\nTesting /vendors endpoint")
for i in range(self.iterations):
resp = self.client.get("/api/vendors")
vendors = eval(resp.data)
expectedKey = 0
for vendor in vendors:
self.assertIn("name", vendor.keys())
self.assertIn("shortName", vendor.keys())
self.assertIn("key", vendor.keys())
self.assertEqual(vendor["key"], expectedKey)
expectedKey = expectedKey + 1
    # Tests if search results are consistent between queries;
    # especially in regards of changing filter settings in between.
    def test_result_consistency(self) -> None:
        """Results must be byte-identical across repeated queries, identical
        filter resubmissions, and after intermediate (odd) filter changes.

        NOTE(review): the combinations loops below always submit
        ``"vendors": [1, 2]`` instead of the current ``vendors`` tuple —
        likely a bug; ``responseDB`` is keyed by tuples that never reach
        the server. Confirm intent.
        """
        print("\nTesting result consistency")
        for i in range(self.iterations):
            # upload file
            handle = open(self.sequence_path, 'rb')
            response = self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
            self.assertIn(b"upload successful", response.data)
            filter = {"filter": {"vendors": [1, 2], "price": [0, 100], "deliveryDays": 100, "preselectByPrice": True,
                                 "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json', data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            # Test consistency between subsequent query results without filter change
            response = self.client.post('/api/results', content_type='multipart/form-data',
                                        data={'size': 1000, 'offset': 0})
            response2 = self.client.post('/api/results', content_type='multipart/form-data',
                                         data={'size': 1000, 'offset': 0})
            self.assertEqual(response.data, response2.data,
                             "\n\nresponse = " + str(response.data) + "\n\n\nresponse2 = " + str(response2.data))
            # ...and after identical filter submission
            filter = {"filter": {"vendors": [1, 2], "price": [0, 100], "deliveryDays": 100, "preselectByPrice": True,
                                 "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json', data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response2 = self.client.post('/api/results', content_type='multipart/form-data',
                                         data={'size': 1000, 'offset': 0})
            self.assertEqual(response.data, response2.data,
                             "\n\nresponse = " + str(response.data) + "\n\n\nresponse2 = " + str(response2.data))
            responseDB = {}
            for r in range(1, len(self.vendors)):
                for vendors in combinations(self.vendors, r):
                    filter = {
                        "filter": {"vendors": [1, 2], "price": [0, 100], "deliveryDays": 100, "preselectByPrice": True,
                                   "preselectByDeliveryDays": False}}
                    filter_response = self.client.post('/api/filter', content_type='application/json',
                                                       data=json.dumps(filter))
                    self.assertIn(b"filter submission successful", filter_response.data)
                    # Test consistency between subsequent query results without filter change
                    response = self.client.post('/api/results', content_type='multipart/form-data',
                                                data={'size': 1000, 'offset': 0})
                    responseDB[vendors] = response.data
            # Try to confuse the server with empty, full and invalid vendor lists
            filter = {"filter": {"vendors": [], "price": [0, 100], "deliveryDays": 100, "preselectByPrice": True,
                                 "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response = self.client.post('/api/results', content_type='multipart/form-data',
                                        data={'size': 1000, 'offset': 0})
            filter = {"filter": {"vendors": [0, 1, 2], "price": [0, 100], "deliveryDays": 100,
                                 "preselectByPrice": True,
                                 "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response = self.client.post('/api/results', content_type='multipart/form-data',
                                        data={'size': 1000, 'offset': 0})
            filter = {"filter": {"vendors": [666, -42, 0, 0, 0, 0], "price": [0, 100], "deliveryDays": 100,
                                 "preselectByPrice": True, "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response = self.client.post('/api/results', content_type='multipart/form-data',
                                        data={'size': 1000, 'offset': 0})
            # Test if the response is still consistent to what it was before
            for r in range(1, len(self.vendors)):
                for vendors in combinations(self.vendors, r):
                    filter = {
                        "filter": {"vendors": [1, 2], "price": [0, 100], "deliveryDays": 100, "preselectByPrice": True,
                                   "preselectByDeliveryDays": False}}
                    filter_response = self.client.post('/api/filter', content_type='application/json',
                                                       data=json.dumps(filter))
                    self.assertIn(b"filter submission successful", filter_response.data)
                    # Test consistency between subsequent query results without filter change
                    response = self.client.post('/api/results', content_type='multipart/form-data',
                                                data={'size': 1000, 'offset': 0})
                    self.assertEqual(responseDB[vendors], response.data,
                                     "\n\nresponse:\n" + str(response.data) + "\n\n\nresponse from before:\n" + str(
                                         responseDB[vendors]))
    def test_sorting(self) -> None:
        """Offers must be sorted by the active criterion, then the secondary one.

        Offers carrying messages sort last; the ``% maxsize`` trick maps
        negative placeholder values to the high end of the ordering.
        """
        print("\nTesting offer sorting")
        for i in range(self.iterations):
            # upload file
            handle = open(self.sequence_path, 'rb')
            response = self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
            self.assertIn(b"upload successful", response.data)
            # test sorting by price
            filter = {
                "filter": {"vendors": [0, 1, 2], "price": [0, 100], "deliveryDays": 100,
                           "preselectByPrice": True,
                           "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            # selection criteria by precedence
            selector = ("price", "turnoverTime")
            for seqoffer in response_json["result"]:
                # First create a starting condition that will cause a fail and will be overwritten in any sane scenario
                for vendoffers in seqoffer["vendors"]:
                    prev_offer = (0, 0)
                    for offer in vendoffers["offers"]:
                        offer_criteria = (offer[selector[0]] % maxsize, offer[selector[1]] % maxsize)
                        if offer["offerMessage"]:
                            offer_criteria = (maxsize, maxsize)
                        self.assertLessEqual(prev_offer, offer_criteria,
                                             "\n\nSorting failed for: \n" + str(vendoffers["offers"]))
                        prev_offer = offer_criteria
            # Test sorting by delivery days
            filter = {
                "filter": {"vendors": [0, 1, 2], "price": [0, 100], "deliveryDays": 100,
                           "preselectByPrice": False,
                           "preselectByDeliveryDays": True}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            # selection criteria by precedence
            selector = ("turnoverTime", "price")
            for seqoffer in response_json["result"]:
                # First create a starting condition that will cause a fail and will be overwritten in any sane scenario
                for vendoffers in seqoffer["vendors"]:
                    prev_offer = (0, 0)
                    for offer in vendoffers["offers"]:
                        offer_criteria = (offer[selector[0]] % maxsize, offer[selector[1]] % maxsize)
                        if offer["offerMessage"]:
                            offer_criteria = (maxsize, maxsize)
                        self.assertLessEqual(prev_offer, offer_criteria,
                                             "\n\nSorting failed for: \n" + str(vendoffers["offers"]))
                        prev_offer = offer_criteria
    def test_preselection(self) -> None:
        """Exactly the single best offer (by the active criterion) is preselected.

        ``maxsize - 1`` / ``maxsize - 2`` act as "nothing seen yet" sentinels;
        ``% maxsize`` pushes negative placeholder values to the high end so
        they never win.
        """
        print("\nTesting preselection")
        for i in range(self.iterations):
            # upload file
            handle = open(self.sequence_path, 'rb')
            response = self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
            self.assertIn(b"upload successful", response.data)
            # Test preselection by price
            filter = {
                "filter": {"vendors": self.vendors, "price": [0, 100], "deliveryDays": 100, "preselectByPrice": True,
                           "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            for seqoffer in response_json["result"]:
                # First create a starting condition that will cause a fail and will be overwritten in any sane scenario
                best = maxsize - 2
                best_secondary = maxsize - 2
                selected = maxsize - 1
                selected_secondary = maxsize - 1
                first_time = True
                offersPresent = False  # Since these are fuzzing tests there is no guarantee that there will be offers to preselect
                for vendoffers in seqoffer["vendors"]:
                    for offer in vendoffers["offers"]:
                        offersPresent = True
                        if not offer["offerMessage"] and (offer["price"] % maxsize <= best % maxsize or first_time):
                            if offer["price"] % maxsize < best % maxsize or \
                                    offer["turnoverTime"] % maxsize < best_secondary % maxsize or first_time:
                                first_time = False
                                best = offer["price"]
                                best_secondary = offer["turnoverTime"]
                        if offer["selected"]:
                            self.assertEqual(selected,
                                             maxsize - 1)  # If this fails there was probably more than one offer selected
                            selected = offer["price"]
                            selected_secondary = offer["turnoverTime"]
                if offersPresent and selected != maxsize - 1:  # It is possible that nothing is selected due to everything being negative
                    self.assertEqual(selected, best, "Preselection failed for:" + str(seqoffer["vendors"]))
                    self.assertGreaterEqual(selected, 0)
                    self.assertEqual(selected_secondary, best_secondary,
                                     "Preselection failed for:" + str(seqoffer["vendors"]))
            # Test preselection by delivery days
            filter = {
                "filter": {"vendors": [0, 1, 2], "price": [0, 100], "deliveryDays": 100,
                           "preselectByPrice": False,
                           "preselectByDeliveryDays": True}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            for seqoffer in response_json["result"]:
                # First create a starting condition that will cause a fail and will be overwritten in any sane scenario
                best = maxsize - 2
                best_secondary = maxsize - 2
                selected = maxsize - 1
                selected_secondary = maxsize - 1
                first_time = True
                offersPresent = False  # Since these are fuzzing tests there is no guarantee that there will be offers to preselect
                for vendoffers in seqoffer["vendors"]:
                    for offer in vendoffers["offers"]:
                        offersPresent = True
                        if not offer["offerMessage"] and (
                                offer["turnoverTime"] % maxsize <= best % maxsize or first_time):
                            if offer["turnoverTime"] % maxsize < best % maxsize or \
                                    offer["price"] % maxsize < best_secondary % maxsize or first_time:
                                best = offer["turnoverTime"]
                                best_secondary = offer["price"]
                        if offer["selected"]:
                            self.assertEqual(selected,
                                             maxsize - 1)  # If this fails there was probably more than one offer selected
                            selected = offer["turnoverTime"]
                            selected_secondary = offer["price"]
                        first_time = False
                if offersPresent and selected != maxsize - 1:  # It is possible that nothing is selected due to everything being negative
                    self.assertEqual(selected, best, "Preselection failed for:" + str(seqoffer["vendors"]))
                    self.assertGreaterEqual(selected, 0)
                    self.assertEqual(selected_secondary, best_secondary,
                                     "Preselection failed for:" + str(seqoffer["vendors"]))
    def test_in_memory_session(self) -> None:
        """InMemorySessionManager must isolate sessions and round-trip all stores.

        Each session i stores a filter parameterized by i and a result list
        that encodes i in binary (one entry per bit), so cross-session leakage
        would be detected by the equality checks below.
        """
        print("\nTesting in-memory session management")
        binary_sequences = [SequenceVendorOffers(SequenceInformation("0", "0", "0"), []),
                            SequenceVendorOffers(SequenceInformation("1", "1", "1"), [])]
        # Set up a few sessions and store copies for later reference
        n_sessions = 32
        sessions = []
        for i in range(0, n_sessions):
            session = InMemorySessionManager(i)
            session.storePinger(CompositePinger())
            session.storeSequences([SequenceInformation(str(i), str(i), str(i))])
            session.storeFilter({"vendors": [i], "price": [0, i], "deliveryDays": i,
                                 "preselectByPrice": i % 2 == 0,
                                 "preselectByDeliveryDays": i % 2 == 1})
            seqoffers = []
            shifter = i
            while shifter:
                seqoffers.append(binary_sequences[shifter & 1])
                shifter = shifter >> 1
            session.storeResults(seqoffers)
            session.addSearchedVendors([i])
            session.addSearchedVendors([i - 1, i + 1])
            sessions.append(session)
        # Check if the session manager can correctly tell whether an ID is already taken
        for i in range(0, n_sessions):
            session = InMemorySessionManager(0)
            self.assertTrue(session.hasSession(i))
            self.assertFalse(session.hasSession(n_sessions + i + 1))
        # Check if the values stored in the sessions are still the same as in the reference sessions
        for i in range(0, n_sessions):
            ref_session = sessions[i]
            session = InMemorySessionManager(i)
            self.assertTrue(session.loadPinger())
            self.assertEqual(ref_session.loadPinger(), session.loadPinger())
            self.assertTrue(session.loadSequences())
            self.assertEqual(ref_session.loadSequences(), session.loadSequences())
            self.assertTrue(session.loadFilter())
            self.assertEqual(ref_session.loadFilter(), session.loadFilter())
            self.assertEqual(ref_session.loadResults(), session.loadResults())
            self.assertTrue(session.loadSearchedVendors())
            self.assertEqual(ref_session.loadSearchedVendors(), session.loadSearchedVendors())
        # Check if the values stored in the sessions are actually the ones intended
        for i in range(0, n_sessions):
            session = InMemorySessionManager(i)
            sequence = session.loadSequences()[0]
            self.assertEqual(sequence.key, str(i))
            self.assertEqual(sequence.name, str(i))
            self.assertEqual(sequence.sequence, str(i))
            filter = session.loadFilter()
            self.assertEqual(filter, {"vendors": [i], "price": [0, i], "deliveryDays": i,
                                      "preselectByPrice": i % 2 == 0,
                                      "preselectByDeliveryDays": i % 2 == 1})
            # The offers are a bit more elaborate since it was i encoded in binary
            seqoffers = []
            shifter = i
            while shifter:
                seqoffers.append(binary_sequences[shifter & 1])
                shifter = shifter >> 1
            self.assertEqual(session.loadResults(), seqoffers)
            searchedVendors = session.loadSearchedVendors()
            self.assertIn(i - 1, searchedVendors)
            self.assertIn(i, searchedVendors)
            self.assertIn(i + 1, searchedVendors)
        # Finally, test session freeing
        session = InMemorySessionManager(0)
        session.free()
        for i in range(0, n_sessions):
            self.assertFalse(session.hasSession(i))
    def testSelectionEndpoint(self) -> None:
        """An offer must be selected after /select iff its key was submitted.

        NOTE(review): method name breaks the file's snake_case convention but
        must stay as-is — unittest discovers it by this name.
        """
        print("\nTesting /select endpoint")
        for iteration in range(self.iterations):
            # upload file
            handle = open(self.sequence_path, 'rb')
            response = self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
            self.assertIn(b"upload successful", response.data)
            # This shouldn't select anything
            filter = {
                "filter": {"vendors": self.vendors, "price": [0, 100], "deliveryDays": 100, "preselectByPrice": False,
                           "preselectByDeliveryDays": False}}
            filter_response = self.client.post('/api/filter', content_type='application/json',
                                               data=json.dumps(filter))
            self.assertIn(b"filter submission successful", filter_response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            selection = []
            # Verify that nothing is selected and choose what shall be selected next time at random
            for sequence in response_json["result"]:
                for vendor in sequence["vendors"]:
                    for offer in vendor["offers"]:
                        # Check that the offer was not selected by the dry run
                        self.assertFalse(offer["selected"])
                        # Random selection
                        if random() <= 0.4:
                            selection.append(offer["key"])
            response = self.client.post("/api/select", content_type='application/json',
                                        data=json.dumps({"selection": selection}))
            self.assertIn(b"selection set", response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            for sequence in response_json["result"]:
                for vendor in sequence["vendors"]:
                    for offer in vendor["offers"]:
                        # An offer should be selected if and only if it was in the selection list
                        self.assertEqual(offer["selected"], offer["key"] in selection)
def test_available_hosts_endpoint(self):
print("\nTesting /available_hosts endpoint")
response_json = self.client.get("/api/available_hosts").get_json()
self.assertGreater(len(response_json), 0)
    # NOTE: This must not be made into an iterated test as it accesses the BOOST service
    # which we don't want to overload with requests.
    def test_codon_optimization(self):
        """Set each optimization strategy, upload a protein file, expect lengths > 0.

        NOTE(review): the ``open`` handle below is never closed.
        """
        print("\nTesting codon optimization")
        host_list = self.client.get("/api/available_hosts").get_json()
        strategies = ["Random", "Balanced", "MostlyUsed"]
        for strategy in strategies:
            response = self.client.post('/api/codon_optimization', content_type='application/json',
                                        data=json.dumps({'host': rand.choice(host_list), 'strategy': strategy}))
            self.assertIn(b"codon optimization options set", response.data)
            # upload protein sequence file
            handle = open("examples/low_temp_yeast.gb", 'rb')
            response = self.client.post('/api/upload', content_type='multipart/form-data', data={'seqfile': handle})
            self.assertIn(b"upload successful", response.data)
            response_json = self.client.post('/api/results', content_type='multipart/form-data',
                                             data={'size': 1000, 'offset': 0}).get_json()
            for sequence in response_json["result"]:
                self.assertGreater(sequence["sequenceInformation"]["length"], 0)
def test_order_endpoint(self) -> None:
print("\nTesting /order endpoint")
for i in range(self.iterations):
handle = open(self.sequence_path, 'rb')
self.client.post('/api/upload', content_type='multipart/form-data',
data={'seqfile': handle, 'prefix': "Zucchini" + str(i)})
filter = '{"filter": {"vendors": [1],"price": [0, 100],"deliveryDays": 5,"preselectByPrice": True,"preselectByDeliveryDays": False}}'
self.client.post('/api/filter', data=filter)
searchResult = self.client.post('/api/results', content_type='multipart/form-data',
data={'size': 1000, 'offset': 0}).get_json()
self.assertNotIn("error", searchResult, "Results endpoint returned error: " + str(searchResult))
offerkeys = []
for result in searchResult["result"]:
for vendor in result["vendors"]:
for offer in vendor["offers"]:
offerkeys.append(offer["key"])
orderkeys = rand.sample(offerkeys, rand.randint(0, len(offerkeys) - 1))
response = self.client.post("/api/order", content_type="application/json",
data=json.dumps({"offers": orderkeys})).get_json()
for order in response:
self.assertIn("vendor", order.keys())
self.assertIn("type", order.keys())
if(order["type"] == "URL_REDIRECT"):
self.assertIn("url", order.keys())
# Standard unittest entry point when this module is run directly.
if __name__ == '__main__':
    unittest.main()
| [
"eggileierkopf@gmail.com"
] | eggileierkopf@gmail.com |
fa2b2a1158c3ab146b8c8ab6e8951d485ca786ec | d8292f284a5ce2b21eab72f843a760a90f7071d9 | /utils/dataset.py | 3f995a447fe43b780d6217d302c797ac29c79378 | [] | no_license | Dmitrsl/bengali | c017602e63ce7049a47d34d74700dd82fb8d18c4 | 78d70e20bb3c1285c7c560185c03442eee5ad156 | refs/heads/master | 2021-03-24T08:38:01.379960 | 2020-03-23T20:21:04 | 2020-03-23T20:21:04 | 247,534,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | from torch.utils.data import Dataset, DataLoader
class DatasetMixin(Dataset):
    """Dataset base class supporting tensor/slice/fancy indexing plus an
    optional transform applied to ``example['features']``.

    Subclasses must implement ``__len__`` and ``get_example``.
    """
    def __init__(self, transform=None):
        # transform is called as transform(image=...)['image']
        # (albumentations-style API) — see get_example_wrapper below.
        self.transform = transform
    def __getitem__(self, index):
        """Returns an example or a sequence of examples."""
        # Torch tensor indices are converted to plain Python values first.
        if torch.is_tensor(index):
            index = index.tolist()
        if isinstance(index, slice):
            # Slice: resolve bounds against len(self), return a list of examples.
            current, stop, step = index.indices(len(self))
            return [self.get_example_wrapper(i) for i in
                    six.moves.range(current, stop, step)]
        elif isinstance(index, list) or isinstance(index, np.ndarray):
            # Fancy indexing: a list/array of indices returns a list of examples.
            return [self.get_example_wrapper(i) for i in index]
        else:
            # Single integer index.
            return self.get_example_wrapper(index)
    def __len__(self):
        """Returns the number of data points."""
        raise NotImplementedError
    def get_example_wrapper(self, i):
        """Wrapper of `get_example`, to apply `transform` if necessary"""
        example = self.get_example(i)
        if self.transform:
            # Transform follows the albumentations calling convention:
            # takes image=... and returns the result under key 'image'.
            example['features'] = self.transform(image=example['features'])['image']
        return example
    def get_example(self, i):
        """Returns the i-th example.
        Implementations should override it. It should raise :class:`IndexError`
        if the index is invalid.
        Args:
            i (int): The index of the example.
        Returns:
            The i-th example.
        """
        raise NotImplementedError
class BengaliAIDataset(DatasetMixin):
    """Bengali.AI grapheme dataset wrapping pre-loaded image arrays.

    Args:
        images: array-like of uint8 images.
        labels: optional array-like of targets; when given, each example also
            carries a 'targets' entry (training mode).
        transform: optional transform forwarded to DatasetMixin.
        indices: optional subset of indices to expose; defaults to all images.
        is_font: when True, pixels are scaled to [0, 1] directly; when False,
            images are inverted (255 - x) before scaling.
    """
    def __init__(self, images, labels=None, transform=None, indices=None, is_font=True):
        super().__init__(transform=transform)
        self.images = images
        self.labels = labels
        if indices is None:
            indices = np.arange(len(images))
        self.indices = indices
        # Training mode iff labels were supplied.
        self.train = labels is not None
        self.is_font = is_font
    def __len__(self):
        """Return the number of exposed examples (length of the index subset)."""
        return len(self.indices)
    def get_example(self, i):
        """Return the i-th example: {'features': image[, 'targets': label]}."""
        i = self.indices[i]
        x = self.images[i]
        # Normalize to [0, 1]; non-font images are inverted first.
        # (Fixed non-idiomatic `== True` comparison.)
        if self.is_font:
            x = x.astype(np.float32) / 255.
        else:
            x = (255 - x).astype(np.float32) / 255.
        if self.train:
            y = self.labels[i]
            return {'features': x, 'targets': y}
        else:
            return {'features': x}
"daslabuhin@gmail.com"
] | daslabuhin@gmail.com |
85ae3e7423d8300e64ba4d6da39efbbeb00dff83 | 6be0663a30b61f90d1bdfcc19f95360cc5025a8f | /dataset.py | cb73588e6b7106133a438fd12ecafcdcc85c6a62 | [] | no_license | paulo-eusebio/CNN-RD | e191dd97b48ee7c2510c4e2776a7f7f9d7b36013 | d342d584b38814137e8fdd2949f6932284cfbdf5 | refs/heads/master | 2020-09-25T10:29:12.682694 | 2019-12-15T15:56:45 | 2019-12-15T15:56:45 | 225,986,597 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py |
import torchvision.transforms as transforms
import torch
from PIL import Image
import os
#Run directory tree and load images
def get_images_from_path(root):
    """Recursively load every file under `root` as a grayscale ('L') PIL image.

    Prints the number of images loaded and returns them as a list.
    """
    images = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for img_name in filenames:
            # BUG FIX: join with the directory currently being walked, not the
            # top-level root — the old code built wrong paths for any file that
            # lives in a subdirectory.
            path = os.path.join(dirpath, img_name)
            sample = Image.open(path)
            sample = sample.convert(mode='L')
            images.append(sample)
    print('{} Images loaded'.format(len(images)))
    return images
#Custom dataset: loads all images under `root`, converts PIL -> tensor on access.
class CustomDataset(torch.utils.data.Dataset):
    def __init__(self, root, transform=None):
        # Optional PIL-level transform applied before tensor conversion.
        self.transform = transform
        # Eagerly loads every image under `root` into memory.
        self.data = get_images_from_path(root)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        target = self.data[index]
        if self.transform:
            target = self.transform(target)
        # NOTE(review): sample and target are the same image — presumably this
        # dataset is used for reconstruction/autoencoder-style training; confirm
        # against the training loop.
        sample = target
        sample, target = transforms.ToTensor()(sample), transforms.ToTensor()(target)
        return sample, target
# the output of torchivision datasets are PIL images of range [0,1]
# gonna transform them to tensors [-1,1]
def get_dataset(batch_size, dataset, shuffle):
    """Build a DataLoader over the images stored in ../datasets/<dataset>."""
    dataset_root = '../datasets/' + dataset
    # Images are loaded from PNG files with pixel values 0-255; no PIL transform.
    image_set = CustomDataset(root=dataset_root, transform=None)
    return torch.utils.data.DataLoader(
        image_set, batch_size=batch_size, shuffle=shuffle, num_workers=0)
| [
"noreply@github.com"
] | noreply@github.com |
a7a7cb295983b8413312ecd07fd113cf47096232 | fb69b8dfa221b21b42ca0e456cfb7d73b10fdf46 | /shadowcraft_ui/models/ArmoryDocument.py | ea7eeda3376aae804a8e55862a55b0e783921dfa | [] | no_license | ShadowCraft/shadowcraft-ui-react | 8a9107d8bb3f828a421f0353b7604720fe694350 | d738618664002f1ad2dac674166554c1eebf4508 | refs/heads/master | 2021-09-20T12:34:01.937359 | 2018-08-09T21:19:05 | 2018-08-09T21:19:05 | 77,808,815 | 6 | 0 | null | 2018-06-09T06:03:55 | 2017-01-02T03:33:50 | JavaScript | UTF-8 | Python | false | false | 2,008 | py | # -*- coding: utf-8 -*-
import re
import os
import requests
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'
class ArmoryError(Exception):
    """Raised when the armory API returns an HTTP error or an empty payload."""
    # HTTP-like status of the failure; callers overwrite this before raising.
    status_code = 200
    pass
def get(region, path, params=None):
    """Fetch a JSON document from the battle.net API for the given region.

    Retries up to 3 times on transport errors (requests.RequestException).
    Raises ArmoryError on HTTP >= 400 or an empty JSON payload, and re-raises
    the transport error if all retries fail.
    """
    if region in ['us', 'eu', 'kr', 'tw', 'sea']:
        host = '%s.api.battle.net' % region
    elif region == 'cn':
        host = 'www.api.battlenet.com.cn'
    else:
        host = 'us.api.battle.net'
    if params is None:
        params = {}
    # TODO: move this out of the environment into a config file of some sort
    params['apikey'] = os.environ['BLIZZARD_API_KEY']
    url = 'https://%s%s' % (host, path)
    headers = {'user-agent': USER_AGENT}
    for attempt in range(3):
        try:
            resp = requests.get(url, params=params, timeout=7, headers=headers)
            if resp.status_code >= 400:
                error = ArmoryError('Armory returned %d requesting %s' % (resp.status_code, url))
                error.status_code = resp.status_code
                raise error  # ArmoryError is not a RequestException, so it propagates
            # Renamed from `json` to avoid shadowing the json module name.
            data = resp.json()
            if len(data) == 0:
                error = ArmoryError('Armory returned empty data')
                error.status_code = 500
                raise error
            return data
        except requests.RequestException:
            # BUG FIX: the old while-loop incremented past its own guard and
            # fell out after 3 failures, silently returning None. Re-raise the
            # transport error on the final attempt instead.
            if attempt == 2:
                raise
def normalize_realm(realm):
    """Convert a realm display name into its armory URL slug.

    Lowercases, strips apostrophes, replaces spaces with dashes, and folds a
    handful of accented characters to their ASCII equivalents.
    """
    slug = realm.lower()
    for apostrophe in "'’":
        slug = slug.replace(apostrophe, '')
    slug = slug.replace(' ', '-')
    for accented in 'àáâãäå':
        slug = slug.replace(accented, 'a')
    slug = slug.replace('ö', 'o')
    return slug
def normalize_character(character):
    """Normalize a character name for armory URLs (lower-case only)."""
    lowered = character.lower()
    return lowered
def test_document():
    """Ad-hoc smoke test: fetches a live character document (requires network
    access and the BLIZZARD_API_KEY environment variable)."""
    params = {'fields': 'items'}
    print(normalize_realm("Aerie Peak"))
    print(get('us', '/wow/character/aerie-peak/tamen', params))
# Run the smoke test when executed directly.
if __name__ == '__main__':
    test_document()
| [
"timwoj@ieee.org"
] | timwoj@ieee.org |
74bc86c8f16604ca3cd74876f70d09cfaef95070 | a568e4dc461f71f0ae053fe51e3ddd0fe23bf858 | /development/index_site.py | 1789373a554d1f41d08b10458f9e08a08425dac8 | [
"MIT"
] | permissive | vatlab/sos-docs | 413e344a7581e4e2cef5da3d24345a73f3669c43 | 2b42c280dae0feaeea51161041827c362abe6db0 | refs/heads/master | 2023-06-26T04:30:59.078944 | 2023-06-16T20:26:39 | 2023-06-16T20:26:39 | 105,951,462 | 3 | 15 | MIT | 2023-06-16T20:18:39 | 2017-10-05T23:46:39 | Jupyter Notebook | UTF-8 | Python | false | false | 4,206 | py | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
#
import os
import glob
import re
import argparse
from bs4 import BeautifulSoup
'''
A simple script to create tipue content by searching for documentation
files under the top docs directory of the SoS website.
'''
def parse_html(url, html):
    """Parse one HTML file and return a list of tipuesearch JSON entry strings,
    one per h1-h6 header found in the document.

    Args:
        url: public URL of the page (used as mainUrl/url in the entries).
        html: local path of the HTML file to parse.
    """
    print('Parsing {}'.format(html))
    with open(html, 'rb') as content:
        soup = BeautifulSoup(content, "html.parser", from_encoding='utf-8')
    #
    # try to get the title of the page from h1, h2, or title, and
    # uses filename if none of them exists.
    #
    title = soup.find('h1')
    if title is None:
        title = soup.find('h2')
    if title is None:
        title = soup.find('title')
    if title is None:
        title = os.path.basename(html).rsplit('.')[0]
    else:
        title = title.get_text()
    maintitle = soup.find('h1')
    if maintitle is None:
        maintitle = soup.find('h2')
    if maintitle is None:
        maintitle = soup.find('title')
    if maintitle is None:
        maintitle = os.path.basename(html).rsplit('.')[0]
    else:
        maintitle = maintitle.get_text()
    # remove special characters which might mess up js file
    # NOTE(review): this character class looks inverted — `[¶^a-zA-Z...]` strips
    # the *allowed* characters; `[^¶a-zA-Z...]` was probably intended. `title`
    # is never used after this line, so the bug is currently harmless — confirm
    # before relying on `title`.
    title = re.sub(r'[¶^a-zA-Z0-9_\.\-]', ' ', title)
    #
    # sear
    all_text = []
    for header in soup.find_all(re.compile('^h[1-6]$')):
        # remove special character
        part = re.sub(r'[^a-zA-Z0-9_\-=\'".,\\]', ' ',
                      header.get_text()).replace('"', "'").strip() + "\n"
        part = re.sub(r'\s+', ' ', part)
        # Prefer an explicit anchor id; fall back to an anchor-link href.
        ids = [x for x in header.findAll('a') if x.get('id')]
        if ids:
            tag = '#' + ids[0].get('id')
        else:
            hrefs = header.findAll('a', {'class': 'anchor-link'})
            if hrefs:
                tag = hrefs[0].get('href')
            else:
                tag = ''
        # Doubled braces produce literal { } in the emitted JSON object.
        part = '{{"mainTitle": "{}", "title": "{}", "text": "{}", "tags": "", "mainUrl": "{}", "url": "{}"}}'.format(
            maintitle.replace('¶', '').strip(),
            header.get_text().replace('¶', '').replace('"', r'\"').strip(),
            part, url, url + tag.replace('"', r'\"'))
        all_text.append(part)
    return all_text
def generate_tipue_content(docs_dir):
    """Index the SoS docs site and write tipuesearch/tipuesearch_content.js.

    Parses the four top-level pages plus every HTML file under
    doc/user_guide/, then flattens all per-page entries into one JS file.
    """
    # get a list of html files and their url
    documentations = glob.glob(
        os.path.join(docs_dir, 'doc', 'user_guide', '*.html'))
    # One list of entry strings per (url, local path) pair.
    text = [
        parse_html(url, html)
        for (url, html) in [('https://vatlab.github.io/sos-docs/',
                             os.path.join(docs_dir, 'index.html')),
                            ('https://vatlab.github.io/sos-docs/running.html',
                             os.path.join(docs_dir, 'running.html')),
                            ('https://vatlab.github.io/sos-docs/notebook.html',
                             os.path.join(docs_dir, 'notebook.html')),
                            ('https://vatlab.github.io/sos-docs/workflow.html',
                             os.path.join(docs_dir, 'workflow.html'))] +
        [('https://vatlab.github.io/sos-docs/doc/user_guide/{}'.format(
            os.path.basename(x)), x) for x in documentations]
    ]
    # write the output to file.
    # sum(text, []) flattens the list of lists into one list of entries.
    with open(
            os.path.join(docs_dir, 'tipuesearch', 'tipuesearch_content.js'),
            'w') as out:
        out.write('''\
var tipuesearch = {{"pages": [
{}
]}};
'''.format(',\n'.join(sum(text, []))))
if __name__ == '__main__':
    # Command-line entry point: index the docs directory given on argv.
    parser = argparse.ArgumentParser(description='Index SoS website')
    parser.add_argument(
        'docs_dir',
        metavar='DIR',
        # BUG FIX: the help text claimed the output file was
        # "$DOC_DIR/tipuesearch_content.hs", but generate_tipue_content writes
        # $DOC_DIR/tipuesearch/tipuesearch_content.js.
        help='''Path of the top SoS docs directory. This script will parse content of
HTML files under $DOC_DIR (e.g. Overview.html, /doc/documentation/*.html), get
the headers of the files, and write the results in $DOC_DIR/tipuesearch/tipuesearch_content.js
''')
    args = parser.parse_args()
    generate_tipue_content(args.docs_dir)
| [
"ben.bog@gmail.com"
] | ben.bog@gmail.com |
3fe9d4678fac3c54f94f97299fb2bdb7ba5d3da8 | 7ae2283953f9a5b9673f7517c1368a0a041ca814 | /Tests/abc.py | 3e3eee2d505c46d45273f72fb229cf5b9f790486 | [] | no_license | pipinyusuf/CS361 | cb871c8bfe7db02dcf677cd3a8aa229a50018af1 | 2ddd9deecd992305baca17c21fba989b5603ac9a | refs/heads/master | 2021-01-24T21:42:29.691594 | 2015-10-20T07:09:30 | 2015-10-20T07:09:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | __author__ = 'PipinYusmar'
# NOTE(review): Python 2 print statement — this snippet only runs under Python 2.
print "CS361"
"pipinyusuf@std.sehir.edu.tr"
] | pipinyusuf@std.sehir.edu.tr |
067493b7e934150857bb49406f36824818f12fbe | 137daf138aeceeec51b520a3bbc4010047060486 | /user/migrations/0002_auto_20210307_1837.py | 4b66b286cc32aa284ce756492adf7b13cf06940c | [] | no_license | Eif-Hui/dicrunner | fbed92adc9c1c72c579b1f1525c8fff358e9a96b | 9030663e480a3e43739b0abf3778e964a8e1d0b1 | refs/heads/master | 2023-04-12T01:07:02.099697 | 2021-05-05T08:50:53 | 2021-05-05T08:50:53 | 349,745,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # Generated by Django 2.0.2 on 2021-03-07 10:37
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the `user` app; performs no operations."""
    dependencies = [
        ('user', '0001_initial'),
    ]
    # Empty: this migration changes no database schema.
    operations = [
    ]
| [
"a1048946288@qq.com"
] | a1048946288@qq.com |
8cb2e3178a46236934f06608ccab0a3968851124 | e38c7cd23c3971a40a40c8bba0ca5aca97baebbd | /manage.py | 6e47ab5d065a706feaa9dc48aa517260242db7d1 | [] | no_license | trungbom1997/ok | c59bd5e1c37fb8dee86cb05364a57ce638d985f4 | 262f1922320b3f2b2a4b1af6bd9db32a1817568c | refs/heads/master | 2023-03-31T20:48:03.883455 | 2021-04-14T04:03:50 | 2021-04-14T04:03:50 | 357,766,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before importing anything that
    # depends on them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoonline.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
# Standard entry point for `python manage.py <command>`.
if __name__ == '__main__':
    main()
"trungbom1997@gmail.com"
] | trungbom1997@gmail.com |
09e27f917b5d099dfb19c7fc2e98bf5b871335c1 | 5d5aec6b9790cfebd1ab6175a9c37807e7aba943 | /Sipy_Test/wifiTest.py | ca403fd8faa835a6d9bc041e102bf51c07a5134e | [] | no_license | gjsanchez26/Lantern | 99f2e1a8907d98bb9445bdf3f5e8a90ac95f19f8 | bb82c9490a85a34f7047cfaea2d206991c2c1c00 | refs/heads/master | 2020-03-19T07:11:15.252041 | 2018-06-14T22:56:47 | 2018-06-14T22:56:47 | 136,094,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | import network
import time
import socket
import http_client
# setup as a station
def connectWifi(SSID = 'LANTERN', PASS ='electric'):
    """Join the given WPA2 network, block until connected, then print the IP config.

    NOTE(review): credentials are hard-coded defaults, and this loops forever
    if the access point never appears.
    """
    wlan = network.WLAN(mode=network.WLAN.STA)
    wlan.connect(SSID, auth=(network.WLAN.WPA2, PASS))
    while not wlan.isconnected():
        time.sleep_ms(50)
    print(wlan.ifconfig())
def do_connect(SSID = 'LANTERN', PASS ='electric'):
    """Scan for SSID and connect to it if found; returns silently when the
    network is not visible.

    NOTE(review): `machine` is never imported in this file, so machine.idle()
    below would raise NameError unless the firmware pre-loads it — confirm on
    the target device.
    """
    wlan = network.WLAN(mode=network.WLAN.STA)
    nets = wlan.scan()
    for net in nets:
        if net.ssid == SSID:
            print('Network found!')
            wlan.connect(net.ssid, auth=(net.sec,PASS ), timeout=5000)
            while not wlan.isconnected():
                machine.idle() # save power while waiting
            print('WLAN connection succeeded!')
            break
# Script body: connect to Wi-Fi, then push one telemetry reading.
#connectWifi()
do_connect()
#http_get('http://micropython.org/ks/test.html')
#r = http_client.post('https://api-test.hunt-r.tech/thingworx/availability/shifts', json={"time_zone": "America/CostaRica","start_date": 0,"end_date": 2})
#r = http_client.post('https://iot.lantern.tech/api/v1/sg1Ul5mQZxBCSndtqVuY/telemetry',json={"Device": "4D5490","Temperature": 25,"Humidity": 50})
# SECURITY NOTE(review): the device access token is embedded in the URL below;
# consider loading it from configuration rather than source control.
r2 = http_client.post('https://iot.lantern.tech/api/v1/sg1Ul5mQZxBCSndtqVuY/telemetry',json={"Device": "4D5490","Temperature": 30,"Humidity": 75})
#print(r.json())
'''
r = http_client.get('http://micropython.org/ks/test.html')
r.raise_for_status()
print(r.status_code)
print(r.text) # r.content for raw bytes
'''
| [
"erick.cobo@lantern.tech"
] | erick.cobo@lantern.tech |
493d6259c636e88410b4cd0efa0fe9d4c20a1543 | d6918b8b3fb70e67028fd198f88dc48d84f9281b | /timer/migrations/0011_auto_20160825_0935.py | 55f465341651e00fb759fff8847983f60c739982 | [] | no_license | Joepriesto/Dashboard | aa129b7cba30c5d7cba34491591742f1d2a68c9c | f0a337de7408bfb3ea253c03b0165f8525664fa6 | refs/heads/master | 2020-09-18T15:43:03.008895 | 2016-09-02T16:50:22 | 2016-09-02T16:50:22 | 66,042,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-25 08:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timer', '0010_auto_20160823_1523'),
]
operations = [
migrations.AlterField(
model_name='batch',
name='start_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='date started'),
),
migrations.AlterField(
model_name='batch',
name='start_time',
field=models.DateTimeField(verbose_name='start time'),
),
]
| [
"joe.priestman@googlemail.com"
] | joe.priestman@googlemail.com |
ea2793abe07a25467fb61292b764ddc1f7d4ac4c | 68263c011d12b19d6ff17f0f2420fe497ef28fc2 | /api/tacticalrmm/core/views.py | 93d53c22c421640a769e0772dfab93f3222aa002 | [
"MIT"
] | permissive | bradhawkins85/tacticalrmm | 79ec6f003b559c96d15a5bd0621a2e968d2ea53d | 4371f270569a6eb094dda834f2d1b14ed62af5e4 | refs/heads/develop | 2023-05-21T13:19:47.187899 | 2020-09-02T18:52:40 | 2020-09-02T18:52:40 | 292,421,792 | 0 | 0 | MIT | 2021-05-05T05:55:52 | 2020-09-03T00:06:11 | null | UTF-8 | Python | false | false | 1,723 | py | import os
from django.conf import settings
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.exceptions import ParseError
from rest_framework.parsers import FileUploadParser
from rest_framework.views import APIView
from .models import CoreSettings
from .serializers import CoreSettingsSerializer
from tacticalrmm.utils import notify_error
class UploadMeshAgent(APIView):
    """Accept an uploaded Mesh agent executable and save it to settings.EXE_DIR."""
    # NOTE(review): DRF reads the plural attribute `parser_classes`; this
    # singular `parser_class` is ignored, so the view actually runs with the
    # default parsers. Confirm intent before renaming — switching to
    # FileUploadParser alone would change the accepted request format.
    parser_class = (FileUploadParser,)
    def put(self, request, format=None):
        # The client must send the binary under the "meshagent" form field.
        if "meshagent" not in request.data:
            raise ParseError("Empty content")
        f = request.data["meshagent"]
        mesh_exe = os.path.join(settings.EXE_DIR, "meshagent.exe")
        # Stream the upload to disk in chunks to avoid holding it in memory.
        with open(mesh_exe, "wb+") as j:
            for chunk in f.chunks():
                j.write(chunk)
        return Response(status=status.HTTP_201_CREATED)
@api_view()
def get_core_settings(request):
    """Return the serialized CoreSettings singleton row."""
    # Renamed from `settings`: the old local shadowed the module-level
    # `django.conf.settings` import, an easy source of future bugs.
    core_settings = CoreSettings.objects.first()
    return Response(CoreSettingsSerializer(core_settings).data)
@api_view(["PATCH"])
def edit_settings(request):
    """Validate and persist a partial update to the CoreSettings singleton."""
    # Renamed from `settings` to avoid shadowing django.conf.settings.
    core_settings = CoreSettings.objects.first()
    serializer = CoreSettingsSerializer(instance=core_settings, data=request.data)
    serializer.is_valid(raise_exception=True)
    serializer.save()
    return Response("ok")
@api_view()
def version(request):
    """Return the application version string (settings.APP_VER)."""
    return Response(settings.APP_VER)
@api_view()
def email_test(request):
    """Send a test email via CoreSettings.send_mail and report the outcome.

    send_mail returns an error string on failure; any string result is
    forwarded to the client as an error response.
    """
    core = CoreSettings.objects.first()
    r = core.send_mail(
        subject="Test from Tactical RMM", body="This is a test message", test=True
    )
    # Simplified from `not isinstance(r, bool) and isinstance(r, str)`:
    # a bool can never be a str, so the extra check was redundant.
    if isinstance(r, str):
        return notify_error(r)
    return Response("Email Test OK!")
| [
"dcparsi@gmail.com"
] | dcparsi@gmail.com |
7a0c7d1bbc17334988f6acac5e540b63d9db0355 | 68422192efbc69368fed7e4d92ce46bb85f3356e | /drops.py | c1a14c9250ee0351641ff67f21a8153ec0adf960 | [] | no_license | sodooj777/isepuniversidad | 3f544c8aafae13720775a101e92f197015ac1378 | 236812d8d7584a2a7fb38995165d3d3626373c8b | refs/heads/master | 2023-03-01T17:09:23.735161 | 2021-02-11T06:34:08 | 2021-02-11T06:34:08 | 337,925,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # lista y se convierte el numero en una cadena dentro de la lista con la funcion string
# List of numbers converted to strings with str().
variable = [(str(39)),str(45),str(546),str(11)]
# Dictionary mapping droplet indices to their sound names.
p1={1:'Plic',2:'Plac',3:'Ploc'}
# Print each list entry concatenated with the matching dictionary value(s).
print(variable[0],"tiene 3 como factor pero no 7,5 el resultado seria :"+ p1[1] + "\n")
print(variable[1],"tiene 3,5 como factor pero no 7 el resultado seria :"+ p1[2],p1[1] + "\n")
print(variable[2],"tiene 3,7 como factor pero no 5 el resultado seria :"+ p1[2],p1[1] + "\n")
print(variable[3],"no tiene 3,5 y 7 como factor ")
"carlosed1995@gmail.com"
] | carlosed1995@gmail.com |
c6cf25eb1bcbfe72a8329aee900d3f0232f5d326 | 29b431b04f44cc2e8c3aec80642d1acc537f38d9 | /paddle/fluid/operators/generator/generate_op.py | dad5df7430d477de862c1184468579a0bfc5c9cc | [
"Apache-2.0"
] | permissive | phlrain/Paddle | 0a2fd2275a2484f8f97bf3a5a212c2981821e65b | 55745273a914f5d303fc09e42caee132c693670c | refs/heads/develop | 2023-08-08T01:28:49.413870 | 2023-06-05T03:29:56 | 2023-06-05T03:29:56 | 153,714,070 | 0 | 2 | Apache-2.0 | 2022-11-24T02:11:22 | 2018-10-19T02:15:54 | Python | UTF-8 | Python | false | false | 28,795 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import os
from pathlib import Path
import yaml
from filters import (
assert_dense_or_sr,
cartesian_prod_mapping,
delete_last_underline,
find_optinal_inputs_name,
get_infer_var_type_func,
to_composite_grad_opmaker_name,
to_input_name,
to_int_array_tensor_name,
to_int_array_tensors_name,
to_op_attr_type,
to_opmaker_name,
to_opmaker_name_cstr,
to_pascal_case,
to_scalar_tensor_name,
to_variable_names,
)
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from parse_utils import to_named_dict
from tests_utils import (
is_base_op,
is_composite_op,
is_initializer_list,
is_only_composite_op,
is_scalar,
is_vec,
supports_inplace,
supports_no_need_buffer,
)
# Jinja2 environment: templates live next to this script in ./templates.
file_loader = FileSystemLoader(Path(__file__).parent / "templates")
env = Environment(
    loader=file_loader,
    keep_trailing_newline=True,
    trim_blocks=True,
    lstrip_blocks=True,
    undefined=StrictUndefined,
    extensions=['jinja2.ext.do'],
)
# Filters available inside the op-generation templates.
env.filters["to_op_attr_type"] = to_op_attr_type
env.filters["to_opmaker_name"] = to_opmaker_name
env.filters["to_pascal_case"] = to_pascal_case
env.filters["to_scalar_tensor_name"] = to_scalar_tensor_name
env.filters["to_int_array_tensor_name"] = to_int_array_tensor_name
env.filters["to_int_array_tensors_name"] = to_int_array_tensors_name
env.filters["to_input_name"] = to_input_name
env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
env.filters["cartesian_prod_mapping"] = cartesian_prod_mapping
env.filters["to_composite_grad_opmaker_name"] = to_composite_grad_opmaker_name
env.filters["to_variable_names"] = to_variable_names
env.filters["get_infer_var_type_func"] = get_infer_var_type_func
env.filters["assert_dense_or_sr"] = assert_dense_or_sr
env.filters["find_optinal_inputs_name"] = find_optinal_inputs_name
# Predicate tests usable as `... is base_op` etc. in template expressions.
env.tests["base_op"] = is_base_op
env.tests["composite_op"] = is_composite_op
env.tests["only_composite_op"] = is_only_composite_op
env.tests["vec"] = is_vec
env.tests["scalar"] = is_scalar
env.tests["initializer_list"] = is_initializer_list
env.tests["supports_inplace"] = supports_inplace
env.tests["supports_no_need_buffer"] = supports_no_need_buffer
def restruct_io(op):
    """Attach name-keyed dict views of the op's inputs, attrs and outputs."""
    # "inputs" -> "input_dict", "attrs" -> "attr_dict", "outputs" -> "output_dict"
    for plural in ("inputs", "attrs", "outputs"):
        op[plural[:-1] + "_dict"] = to_named_dict(op[plural])
    return op
def process_scalar(op_item, scalar_configs):
    """Annotate Scalar-typed attributes of `op_item` with compat-yaml metadata.

    For every attr whose name appears in `scalar_configs`, sets:
      - is_support_tensor: whether a Tensor may carry the value at runtime,
      - data_type: C++ data type (explicit config or derived from the typename),
      - tensor_name: the companion tensor input (only when not support_tensor).
    """
    scalar_type_to_cpp = {
        'Scalar': 'float',
        'Scalar(float)': 'float',
        'Scalar(int)': 'int',
        'Scalar(int64_t)': 'int64_t',
    }
    if scalar_configs is None:
        return
    for attr_item in op_item['attrs']:
        attr_name = attr_item['name']
        if attr_name not in scalar_configs:
            continue
        attr_type = attr_item['typename']
        assert attr_type in scalar_type_to_cpp, (
            f"{op_item['name']}'s scalar in op_compat.yaml is error, the data_type of {attr_name} is expected to be one of Scalar, Scalar(float), Scalar(int) or Scalar(int64_t), but now is {attr_type}."
        )
        config = scalar_configs[attr_name]
        attr_item['is_support_tensor'] = bool(config.get('support_tensor', False))
        attr_item['data_type'] = config.get(
            'data_type', scalar_type_to_cpp[attr_type]
        )
        if not attr_item['is_support_tensor']:
            attr_item['tensor_name'] = config['tensor_name']
def process_int_array(op_item, int_array_configs):
    """Annotate IntArray-typed attributes of `op_item` with compat-yaml metadata.

    For every attr whose name appears in `int_array_configs`, sets
    is_support_tensor, data_type (std::vector<...>, defaulting to int64_t),
    and — when tensors are not supported — manual_flag plus the optional
    tensor_name / tensors_name companions.
    """
    element_to_vector = {
        'int': 'std::vector<int>',
        'int64_t': 'std::vector<int64_t>',
    }
    if int_array_configs is None:
        return
    for attr_item in op_item['attrs']:
        attr_name = attr_item['name']
        if attr_name not in int_array_configs:
            continue
        attr_type = attr_item['typename']
        assert attr_item['typename'] == "IntArray", (
            f"{op_item['name']}'s int_array in op_compat.yaml is error, the data_type of {attr_name} is expected to be one of IntArray, but now is {attr_type}."
        )
        config = int_array_configs[attr_name]
        attr_item['is_support_tensor'] = bool(config.get('support_tensor', False))
        if 'data_type' in config:
            attr_item['data_type'] = element_to_vector[config['data_type']]
        else:
            attr_item['data_type'] = 'std::vector<int64_t>'
        if not attr_item['is_support_tensor']:
            attr_item['manual_flag'] = True
            if 'tensor_name' in config:
                attr_item['tensor_name'] = config['tensor_name']
            if 'tensors_name' in config:
                attr_item['tensors_name'] = config['tensors_name']
def add_composite_info(ops, backward_ops, backward_op_dict):
    """Tag every forward and backward op with composite-backward metadata.

    Sets two keys on each op:
      - "backward_composite": the backward op's name when that backward op
        defines a "composite" rule, otherwise None.
      - "only_backward_composite": True when the composite backward has
        neither an "invoke" nor a "kernel" entry (composite-only).
    """
    for op in ops + backward_ops:
        backward_name = op["backward"]
        backward_cfg = backward_op_dict.get(backward_name)
        has_composite = backward_cfg is not None and "composite" in backward_cfg
        op["backward_composite"] = backward_name if has_composite else None
        op["only_backward_composite"] = bool(
            has_composite
            and "invoke" not in backward_cfg
            and "kernel" not in backward_cfg
        )
# Seed "fluid_name" for op/param entries (identity mapping by default).
def add_fluid_name(dict_list):
    """Copy each entry's "name" into a "fluid_name" key."""
    for entry in dict_list:
        entry["fluid_name"] = entry["name"]
# add fluid name of op and params for OpMaker
def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
def get_phi_and_fluid_op_name(op_item):
names = op_item.split('(')
if len(names) == 1:
return names[0].strip(), names[0].strip()
else:
return names[0].strip(), names[1].split(')')[0].strip()
def add_op_param_name(op_args, args_alias_map):
for item in op_args:
if item['name'] in args_alias_map:
item['fluid_name'] = args_alias_map[item['name']]
else:
item['fluid_name'] = item['name']
def add_grad_args_name(op_args, args_alias_map):
for item in op_args:
if (
item['name'].endswith('_grad')
and item['name'][:-5] in args_alias_map
):
args_alias_map[item['name']] = (
args_alias_map[item['name'][:-5]] + '_grad'
)
item['fluid_name'] = args_alias_map[item['name'][:-5]] + '_grad'
elif (
item['name'].endswith('_grad')
and item['name'][:-5] not in args_alias_map
):
item['fluid_name'] = item['name']
def get_param_list_alias(param_list, args_map):
return [
args_map[param] if param in args_map else param
for param in param_list
]
def update_common_params_name(
op_item, args_name_map, scalar_configs, int_array_configs
):
if 'inplace' in op_item and op_item['inplace']:
inplace_map = {}
for key, val in op_item['inplace'].items():
if key in args_map:
key = args_map[key]
if val in args_map:
val = args_map[val]
inplace_map[key] = val
op_item['inplace'] = inplace_map
if 'no_need_buffer' in op_item and op_item['no_need_buffer']:
op_item['no_need_buffer'] = get_param_list_alias(
op_item['no_need_buffer'], args_map
)
if 'data_transform' in op_item and op_item['data_transform']:
data_trans_item = op_item['data_transform']
if 'skip_transform' in data_trans_item:
data_trans_item['skip_transform'] = get_param_list_alias(
data_trans_item['skip_transform'], args_map
)
if 'support_trans_dtype' in data_trans_item:
data_trans_item['support_trans_dtype'] = get_param_list_alias(
data_trans_item['support_trans_dtype'], args_map
)
process_scalar(op_item, scalar_configs)
process_int_array(op_item, int_array_configs)
if 'invoke' in op_item:
op_item['invoke']['args'] = [
args_map[param.strip()]
if param.strip() in args_map
else param.strip()
for param in op_item['invoke']['args'].split(',')
]
return
elif 'composite' in op_item and 'kernel' not in op_item:
return
op_item['infer_meta']['param'] = get_param_list_alias(
op_item['infer_meta']['param'], args_name_map
)
op_item['kernel']['param'] = get_param_list_alias(
op_item['kernel']['param'], args_name_map
)
if op_item['kernel']['data_type']:
op_item['kernel']['data_type']['candidates'] = get_param_list_alias(
op_item['kernel']['data_type']['candidates'], args_name_map
)
if op_item['kernel']['backend']:
op_item['kernel']['backend']['candidates'] = get_param_list_alias(
op_item['kernel']['backend']['candidates'], args_name_map
)
if op_item['kernel']['layout']:
op_item['kernel']['layout']['candidates'] = get_param_list_alias(
op_item['kernel']['layout']['candidates'], args_name_map
)
def add_grad_op_compat_name(grad_op_item, args_name_map):
    # Attach fluid-compatible names to every argument section of a grad op
    # and of the forward op it was derived from.
    for section in ('inputs', 'outputs', 'attrs'):
        add_op_param_name(grad_op_item[section], args_name_map)
        add_op_param_name(grad_op_item['forward'][section], args_name_map)
    # Grad inputs/outputs additionally get alias names derived through the
    # enclosing scope's args_map.
    add_grad_args_name(grad_op_item['inputs'], args_map)
    add_grad_args_name(grad_op_item['outputs'], args_map)
# For every entry of op_compat.yaml: attach the fluid op name, translate all
# argument names to their fluid aliases, and propagate the renaming through
# the whole grad chain (grad, double grad, triple grad).
for op_args in op_fluid_map_list:
    new_op_name, op_name = get_phi_and_fluid_op_name(op_args['op'])
    # Entries for ops that were not parsed from ops.yaml are ignored.
    if new_op_name not in forward_op_dict:
        continue
    forward_op_item = forward_op_dict[new_op_name]
    has_backward = True if forward_op_item['backward'] else False
    if has_backward:
        backward_op_item = backward_op_dict[forward_op_item['backward']]
    if new_op_name != op_name:
        forward_op_item['op_name'] = op_name

    # add complex promote information
    if "complex_promote" in op_args:
        forward_op_item["complex_promote"] = op_args["complex_promote"]
        if has_backward:
            backward_op_item["complex_promote"] = op_args["complex_promote"]
    scalar_configs = None
    int_array_configs = None
    if 'scalar' in op_args:
        scalar_configs = op_args['scalar']
    if 'int_array' in op_args:
        int_array_configs = op_args['int_array']
    # Outputs listed under extra are flagged so codegen can treat them as
    # optional/extra outputs.
    if 'extra' in op_args and 'outputs' in op_args['extra']:
        for out_item in forward_op_item['outputs']:
            if out_item['name'] in op_args['extra']['outputs']:
                out_item['is_extra'] = True

    # Build the phi-name -> fluid-name mapping and tag every forward
    # argument with its fluid alias; scalar/int_array configs are re-keyed
    # under the fluid name at the same time.
    key_set = ['inputs', 'attrs', 'outputs']
    args_map = {}
    for key in key_set:
        if key in op_args:
            args_map.update(op_args[key])
            for args_item in forward_op_item[key]:
                if args_item['name'] in op_args[key]:
                    if (
                        scalar_configs
                        and args_item['name'] in scalar_configs
                    ):
                        scalar_configs[
                            op_args[key][args_item['name']]
                        ] = scalar_configs[args_item['name']]
                    if (
                        int_array_configs
                        and args_item['name'] in int_array_configs
                    ):
                        int_array_configs[
                            op_args[key][args_item['name']]
                        ] = int_array_configs[args_item['name']]
                    args_item['fluid_name'] = op_args[key][
                        args_item['name']
                    ]
    update_common_params_name(
        forward_op_item, args_map, scalar_configs, int_array_configs
    )

    if has_backward:
        # update fluid info in backward
        add_grad_op_compat_name(backward_op_item, args_map)
        update_common_params_name(
            backward_op_item, args_map, scalar_configs, int_array_configs
        )

        if 'backward' not in op_args:
            continue

        # The 'backward' field lists the grad chain, e.g.
        # "op_grad(fluid_grad), op_double_grad, op_triple_grad".
        backward_op_list = op_args['backward'].split(',')
        phi_bw_op_name, bw_op_name = get_phi_and_fluid_op_name(
            backward_op_list[0]
        )
        if (
            forward_op_item["backward_composite"] is not None
            and phi_bw_op_name != bw_op_name
        ):
            forward_op_item["backward_composite"] = bw_op_name
        forward_op_item['backward'] = bw_op_name
        backward_op_item['op_name'] = bw_op_name

        # for double grad
        if len(backward_op_list) > 1:
            (
                phi_double_grad_op_name,
                double_grad_op_name,
            ) = get_phi_and_fluid_op_name(backward_op_list[1])
            double_grad_item = backward_op_dict[phi_double_grad_op_name]
            if (
                backward_op_item["backward_composite"] is not None
                and phi_double_grad_op_name != double_grad_op_name
            ):
                backward_op_item["backward_composite"] = double_grad_op_name
            backward_op_item['backward'] = double_grad_op_name
            double_grad_item['op_name'] = double_grad_op_name
            add_grad_op_compat_name(double_grad_item, args_map)
            update_common_params_name(
                double_grad_item,
                args_map,
                scalar_configs,
                int_array_configs,
            )

            # for triple grad
            if len(backward_op_list) > 2:
                (
                    phi_triple_grad_op_name,
                    triple_grad_op_name,
                ) = get_phi_and_fluid_op_name(backward_op_list[2])
                triple_grad_item = backward_op_dict[phi_triple_grad_op_name]
                if (
                    double_grad_item["backward_composite"] is not None
                    and phi_triple_grad_op_name != triple_grad_op_name
                ):
                    double_grad_item[
                        "backward_composite"
                    ] = triple_grad_op_name
                double_grad_item['backward'] = triple_grad_op_name
                triple_grad_item['op_name'] = triple_grad_op_name
                add_grad_op_compat_name(triple_grad_item, args_map)
                update_common_params_name(
                    triple_grad_item,
                    args_map,
                    scalar_configs,
                    int_array_configs,
                )
def process_invoke_op(forward_op_dict, backward_op_dict):
    """Expand every backward op's 'invoke' entry into explicit inputs, attrs
    and outputs copied from the forward op it reuses.

    The invoke args list is positional: it fills the reused op's inputs
    first, then as many attrs as remain. Backward ops that invoke another
    *backward* op are left unexpanded (only `invoke_op in forward_op_dict`
    is handled here).
    """
    for bw_op in backward_op_dict.values():
        if 'invoke' in bw_op:
            invoke_op = bw_op['invoke']['func']
            args_list = bw_op['invoke']['args']
            args_index = 0

            # backward invoke forward
            if invoke_op in forward_op_dict:
                reuse_op = forward_op_dict[invoke_op]
                bw_op['invoke']['func'] = reuse_op['op_name']
                bw_op['invoke']['inputs'] = []
                bw_op['invoke']['attrs'] = []
                bw_op['invoke']['outputs'] = []
                # Consume one positional arg per forward input.
                for input_item in reuse_op['inputs']:
                    bw_op['invoke']['inputs'].append(
                        {
                            'fluid_name': input_item['fluid_name'],
                            'name': input_item['name'],
                            'value': args_list[args_index],
                        }
                    )
                    args_index = args_index + 1
                bw_fluid_attrs_set = [
                    item['fluid_name'] for item in bw_op['attrs']
                ]
                # Remaining args map onto the forward op's attrs. If an arg
                # names one of this op's own attrs, emit a GetAttr lookup;
                # otherwise pass the literal value through.
                for attr in reuse_op['attrs']:
                    if args_index < len(args_list):
                        attr_value = (
                            f"this->GetAttr(\"{args_list[args_index]}\")"
                            if args_list[args_index] in bw_fluid_attrs_set
                            else args_list[args_index]
                        )
                        bw_op['invoke']['attrs'].append(
                            {
                                'name': attr['name'],
                                'fluid_name': attr['fluid_name'],
                                'value': attr_value,
                            }
                        )
                        args_index = args_index + 1
                    else:
                        break
                # Outputs are matched by position with this op's own outputs.
                for idx, output_item in enumerate(reuse_op['outputs']):
                    bw_op['invoke']['outputs'].append(
                        {
                            'name': output_item['name'],
                            'fluid_name': output_item['fluid_name'],
                            'value': bw_op['outputs'][idx]['fluid_name'],
                        }
                    )
def parse_drop_empty_grad(op_fluid_list: list, bw_op_dict: dict):
    """Mark the grad outputs named under 'drop_empty_grad' in op_compat.yaml
    so that no empty grad tensor is generated for them."""
    for op_comp_map in op_fluid_list:
        if 'drop_empty_grad' not in op_comp_map:
            continue
        bw_names = [
            bw_name.split('(')[0].strip()
            for bw_name in op_comp_map['backward'].split(',')
        ]
        for bw_name in bw_names:
            # static_ops.yaml and ops.yaml share the common op_compat.yaml,
            # so an entry may not exist in this particular backward dict.
            if bw_name not in bw_op_dict:
                continue
            output_dict = bw_op_dict[bw_name]['output_dict']
            for out_grad in op_comp_map['drop_empty_grad']:
                assert (
                    out_grad in output_dict
                ), f'''
                        {bw_name} with {out_grad} is not existed in output_dict '''
                output_dict[out_grad]['drop_empty_grad'] = False
def parse_get_expected_kerneltype(
    op_fluid_list: list, fw_op_dict: dict, bw_op_dict: dict
):
    """Copy custom GetExpectedKernelType function names from op_compat.yaml
    onto the matching forward and backward op entries."""
    for op_comp_map in op_fluid_list:
        if 'get_expected_kernel_type' in op_comp_map:
            fw_name = op_comp_map['op'].split('(')[0].strip()
            # Strip the trailing underline from every function-map key so it
            # matches the normalized op names used as dict keys.
            new_get_expected_kernel_type_func_map = {}
            for (key, value) in op_comp_map['get_expected_kernel_type'].items():
                new_get_expected_kernel_type_func_map[
                    delete_last_underline(key)
                ] = value
            op_comp_map[
                'get_expected_kernel_type'
            ] = new_get_expected_kernel_type_func_map
            if fw_name in op_comp_map['get_expected_kernel_type']:
                # static_ops.yaml and ops.yaml use the common op_compat.yaml
                if fw_name in fw_op_dict:
                    fw_op_dict[fw_name][
                        "get_expected_kernel_type"
                    ] = op_comp_map['get_expected_kernel_type'][fw_name]
            if "backward" in op_comp_map:
                bw_names = [
                    bw_name.split('(')[0].strip()
                    for bw_name in op_comp_map['backward'].split(',')
                ]
                for bw_name in bw_names:
                    # static_ops.yaml and ops.yaml use the common op_compat.yaml
                    if (
                        bw_name in bw_op_dict
                        and bw_name in op_comp_map['get_expected_kernel_type']
                    ):
                        bw_op_dict[bw_name][
                            "get_expected_kernel_type"
                        ] = op_comp_map['get_expected_kernel_type'][bw_name]
def parse_keep_signature(
    op_fluid_list: list, fw_op_dict: dict, bw_op_dict: dict
):
    """Flag ops listed under 'manual_signature' so code generation keeps
    their hand-written argument-mapping function instead of emitting one."""
    for op_comp_map in op_fluid_list:
        for op_name in op_comp_map.get('manual_signature', []):
            normalized = delete_last_underline(op_name)
            if normalized in fw_op_dict:
                fw_op_dict[normalized]["manual_signature"] = True
            elif normalized in bw_op_dict:
                bw_op_dict[normalized]["manual_signature"] = True
def split_ops_list(ops, backward_op_dict, split_num):
    """Partition `ops` into at most `split_num` shards of roughly equal size.

    For every forward op, its whole grad chain (grad, double grad, ...) is
    walked through `backward_op_dict` and placed in the matching backward
    shard, so each generated file stays self-contained.
    """
    shard_size = math.ceil(len(ops) / split_num)
    op_shards, bw_shards = [], []
    shard, bw_shard = [], []
    total = len(ops)
    for position, op in enumerate(ops, start=1):
        shard.append(op)
        # Follow the backward chain as far as it resolves.
        node = op
        while 'backward' in node and node['backward'] in backward_op_dict:
            node = backward_op_dict[node['backward']]
            bw_shard.append(node)
        if position % shard_size == 0 or position == total:
            op_shards.append(shard)
            bw_shards.append(bw_shard)
            shard, bw_shard = [], []
    return op_shards, bw_shards
def to_phi_and_fluid_op_name_without_underline(op_item):
    '''
    Strip the trailing '_' from both names in an op spec: 'sgd_' becomes
    'sgd', and 'sgd_(sgd__)' becomes 'sgd(sgd_)'.
    '''
    phi_part, sep, fluid_part = op_item.partition('(')
    phi_name = delete_last_underline(phi_part.strip())
    if not sep:
        # No fluid alias present: only the phi/kernel name to normalize.
        return phi_name
    fluid_name = delete_last_underline(fluid_part.split(')')[0].strip())
    return phi_name + '(' + fluid_name + ')'
def main(
    ops_yaml_path,
    backward_yaml_path,
    op_compat_yaml_path,
    op_version_yaml_path,
    output_op_path,
    output_arg_map_path,
):
    """Generate fluid operator definition files and argument-mapping
    functions from the parsed op YAML files.

    output_op_path is a *list* of file paths; the ops are sharded across
    them by split_ops_list. output_arg_map_path is a single file.
    """
    with open(ops_yaml_path, "rt") as f:
        ops = yaml.safe_load(f)
        ops = [restruct_io(op) for op in ops]
    forward_op_dict = to_named_dict(ops, True)
    with open(backward_yaml_path, "rt") as f:
        backward_ops = yaml.safe_load(f)
        backward_ops = [restruct_io(op) for op in backward_ops]
    backward_op_dict = to_named_dict(backward_ops, True)
    with open(op_version_yaml_path, "rt") as f:
        op_versions = yaml.safe_load(f)
    # add op version info into op
    for op_version in op_versions:
        if op_version['op'] in forward_op_dict:
            forward_op_dict[op_version['op']]['version'] = op_version['version']

    with open(op_compat_yaml_path, "rt") as f:
        op_fluid_map_list = yaml.safe_load(f)
        # Normalize away trailing underscores in op names up front.
        for op_args in op_fluid_map_list:
            op_args["op"] = to_phi_and_fluid_op_name_without_underline(
                op_args["op"]
            )

    # Seed every op with its own name and default fluid argument names;
    # add_compat_name will overwrite them where op_compat.yaml says so.
    for op in ops:
        op['op_name'] = op['name']
        add_fluid_name(op['inputs'])
        add_fluid_name(op['attrs'])
        add_fluid_name(op['outputs'])
    for bw_op in backward_ops:
        bw_op['op_name'] = bw_op['name']
        add_fluid_name(bw_op['inputs'])
        add_fluid_name(bw_op['attrs'])
        add_fluid_name(bw_op['outputs'])
        add_fluid_name(bw_op['forward']['inputs'])
        add_fluid_name(bw_op['forward']['attrs'])
        add_fluid_name(bw_op['forward']['outputs'])
        # Default: every grad output materializes even when empty.
        for bw_output in bw_op['outputs']:
            bw_output['drop_empty_grad'] = True
    # deal the drop_empty_grad of bw_op by op_compat.yaml
    parse_drop_empty_grad(op_fluid_map_list, backward_op_dict)
    parse_get_expected_kerneltype(
        op_fluid_map_list, forward_op_dict, backward_op_dict
    )
    parse_keep_signature(op_fluid_map_list, forward_op_dict, backward_op_dict)
    add_composite_info(ops, backward_ops, backward_op_dict)
    add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict)
    # prepare for invoke case
    process_invoke_op(forward_op_dict, backward_op_dict)

    # fill backward field for an op if another op claims it as forward
    for name, backward_op in backward_op_dict.items():
        forward_name = backward_op["forward"]["name"]
        if forward_name in backward_op_dict:
            forward_op = backward_op_dict[forward_name]
            if forward_op["backward"] is None:
                forward_op["backward"] = name

    op_dict = {}
    op_dict.update(forward_op_dict)
    op_dict.update(backward_op_dict)

    # Nothing to generate: remove stale outputs so the build stays clean.
    if len(ops) == 0 and len(backward_ops) == 0:
        if os.path.isfile(output_op_path):
            os.remove(output_op_path)
        if os.path.isfile(output_arg_map_path):
            os.remove(output_arg_map_path)
        return

    op_template = env.get_template('op.c.j2')

    backward_fluid_op_dict = {}
    for bw_op in backward_ops:
        backward_fluid_op_dict[bw_op['op_name']] = bw_op
    # One shard of ops (plus their grad chains) per requested output file.
    output_op_files_num = len(output_op_path)
    new_ops_list, new_bw_ops_list = split_ops_list(
        ops, backward_fluid_op_dict, output_op_files_num
    )
    for idx, output_op_file in enumerate(output_op_path):
        with open(output_op_file, "wt") as f:
            msg = op_template.render(
                ops=new_ops_list[idx],
                backward_ops=new_bw_ops_list[idx],
                op_dict=op_dict,
            )
            f.write(msg)

    ks_template = env.get_template('ks.c.j2')
    with open(output_arg_map_path, 'wt') as f:
        msg = ks_template.render(ops=ops, backward_ops=backward_ops)
        f.write(msg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate operator file from op yaml."
)
parser.add_argument(
'--ops_yaml_path', type=str, help="parsed ops yaml file."
)
parser.add_argument(
'--backward_yaml_path', type=str, help="parsed backward ops yaml file."
)
parser.add_argument(
'--op_compat_yaml_path', type=str, help="ops args compat yaml file."
)
parser.add_argument(
'--op_version_yaml_path', type=str, help="ops version yaml file."
)
parser.add_argument(
"--output_op_path",
type=str,
nargs='+',
help="path to save generated operators.",
)
parser.add_argument(
"--output_arg_map_path",
type=str,
help="path to save generated argument mapping functions.",
)
args = parser.parse_args()
main(
args.ops_yaml_path,
args.backward_yaml_path,
args.op_compat_yaml_path,
args.op_version_yaml_path,
args.output_op_path,
args.output_arg_map_path,
)
| [
"noreply@github.com"
] | noreply@github.com |
de8074fe4170e2bd14801b70bceb614046f97b3e | 4b68243d9db908945ee500174a8a12be27d150f9 | /pogoprotos/settings/trading_global_settings_pb2.py | d2d9db44611b0f5eebc2b3e22c3a68f670146ab3 | [] | no_license | ykram/pogoprotos-py | 7285c86498f57dcbbec8e6c947597e82b2518d80 | a045b0140740625d9a19ded53ece385a16c4ad4a | refs/heads/master | 2020-04-20T10:19:51.628964 | 2019-02-02T02:58:03 | 2019-02-02T02:58:03 | 168,787,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 2,630 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/trading_global_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/trading_global_settings.proto',
package='pogoprotos.settings',
syntax='proto3',
serialized_pb=_b('\n1pogoprotos/settings/trading_global_settings.proto\x12\x13pogoprotos.settings\"I\n\x15TradingGlobalSettings\x12\x16\n\x0e\x65nable_trading\x18\x01 \x01(\x08\x12\x18\n\x10min_player_level\x18\x02 \x01(\rb\x06proto3')
)
_TRADINGGLOBALSETTINGS = _descriptor.Descriptor(
name='TradingGlobalSettings',
full_name='pogoprotos.settings.TradingGlobalSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_trading', full_name='pogoprotos.settings.TradingGlobalSettings.enable_trading', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_player_level', full_name='pogoprotos.settings.TradingGlobalSettings.min_player_level', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=147,
)
DESCRIPTOR.message_types_by_name['TradingGlobalSettings'] = _TRADINGGLOBALSETTINGS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TradingGlobalSettings = _reflection.GeneratedProtocolMessageType('TradingGlobalSettings', (_message.Message,), dict(
DESCRIPTOR = _TRADINGGLOBALSETTINGS,
__module__ = 'pogoprotos.settings.trading_global_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.TradingGlobalSettings)
))
_sym_db.RegisterMessage(TradingGlobalSettings)
# @@protoc_insertion_point(module_scope)
| [
"mark@noffle.net"
] | mark@noffle.net |
bca085e398edd382371aeb211b3f719cc7861cb3 | 3f3701e14260f56df52423508975cafe5685ad56 | /systems/admin.py | 44e45a3caa3c5ecd8c8e71932347399ac35277c4 | [
"MIT"
] | permissive | alexhong121/ai_cupboard | 199c09dbe570361de18495d6d667a4ee4b350409 | 50baa791c969b951de5b47d980e19c0df3c04e7f | refs/heads/main | 2023-04-05T22:37:18.568888 | 2021-04-25T09:03:18 | 2021-04-25T09:03:18 | 341,413,625 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from django.contrib import admin
from systems.models import Configuration,Information
# Register your models here.
# Expose the systems app's Configuration and Information models in the
# Django admin site so their rows can be inspected and edited there.
admin.site.register(Configuration)
admin.site.register(Information)
"atom12342@gmail.cpm"
] | atom12342@gmail.cpm |
6429a80d8b4822fab1dc829284b02d46bbf89cd2 | d7e0e7cfd2ddb9bed1dcacf0eb80f84bf8911863 | /task.py | fe2874f353715dfd7a677c0022d905a0c7630081 | [] | no_license | k1995/github-trending | 468b5d1881bc25b5e567b8aa2e06bc1022efda99 | f03981d80630b06ac32a475bbc48d00177d9094e | refs/heads/master | 2023-04-14T16:44:52.697707 | 2023-04-01T08:10:31 | 2023-04-01T08:10:31 | 221,254,111 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | import subprocess
import time
import os
from datetime import datetime
from git import Repo
def push2github():
    """Pull the repo this script lives in, stage every new or modified file
    under archive/, and commit + push if anything was staged.

    NOTE(review): paths are handed to repo.index.add() as bare strings, same
    as the original — confirm GitPython accepts that form here.
    """
    repo_dir = os.path.split(os.path.realpath(__file__))[0]
    repo = Repo(repo_dir)
    origin = repo.remote()
    origin.pull()

    staged = 0
    # New files under archive/.
    for path in repo.untracked_files:
        if path.startswith("archive/"):
            repo.index.add(path)
            staged += 1
    # Tracked files under archive/ with unstaged modifications.
    for diff_entry in repo.index.diff(None):
        if diff_entry.a_path.startswith("archive/"):
            repo.index.add(diff_entry.a_path)
            staged += 1

    if staged:
        repo.index.commit("crawler auto commit")
        origin.push()
# Scheduler main loop: run the crawler at the top of every hour; on every
# third hour also push the newly archived data to GitHub.
while True:
    now = datetime.now()
    if now.minute == 0:
        subprocess.call(["scrapy", "crawl", "trending"])
        # Commit every 3h
        if now.hour % 3 == 0:
            try:
                push2github()
            except Exception as exc:
                # Best-effort push: a git/network failure must not kill the
                # scheduler, but leave a trace instead of swallowing it.
                # (The old bare `except: pass` also hid KeyboardInterrupt,
                # making the loop impossible to stop cleanly with Ctrl-C.)
                print('push2github failed:', exc)
        # Sleep 50 minutes so the crawl cannot trigger twice in one hour.
        time.sleep(60 * 50)
    else:
        time.sleep(1)
| [
"k1995328@gmail.com"
] | k1995328@gmail.com |
0e9564a5a20175f0da9657953bf2153785a0027c | adf6efc6323fa2e046813fd383c022262691b8e5 | /generate_galaxy_catalog.py | 167a6c2174f8bc771127de5ba6ce0eb485b6ca62 | [
"MIT"
] | permissive | wdpozzo/bbh_cosmology | 3e30495d7d06921010d36d325f6e6cfcdbafa3e1 | 8c497204829caae08b2542d6396409d7f87e84a2 | refs/heads/master | 2021-01-19T23:20:32.388770 | 2017-04-21T09:27:43 | 2017-04-21T09:27:43 | 88,965,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,889 | py | import numpy as np
import emcee
import readdata
import cosmology as cs
from scipy.misc import logsumexp
from scipy.interpolate import interp1d
import sys
import os
from optparse import OptionParser
def lnprior(theta, DistanceFunction, VolumeFunction):
    """Log prior for (ra, dec, z, M): uniform in comoving volume on the sky,
    Schechter-weighted in absolute magnitude, zero outside the box below."""
    ra, dec, z, M = theta
    inside = (
        0.0 < z < 2.0
        and 0.0 < ra < 2.0*np.pi
        and -np.pi/2. < dec < np.pi/2.
        and -30. < M < -10.
    )
    if not inside:
        return -np.inf
    # Kept for parity with the original code: the distance is evaluated but
    # its value is not used in the prior.
    DistanceFunction(z)
    volume_term = np.log(VolumeFunction(z))
    sky_term = np.log(np.cos(dec))
    luminosity_term = np.log(SchecterFunction(M, Mstar, alpha, phistar))
    return volume_term + sky_term + luminosity_term
def lnlike(theta, pdf, DistanceFunction):
    """Log likelihood: marginalise the DPGMM mixture (weights in pdf[0],
    components in pdf[1]) at the Cartesian position implied by theta."""
    ra, dec, z, mu = theta
    dl = DistanceFunction(z)
    log_terms = []
    for ind, prob in enumerate(pdf[1]):
        log_terms.append(
            prob.logL(cs.SphericalToCartesian(dl, ra, dec))
            + np.log(pdf[0][ind])
        )
    return logsumexp(log_terms)
def lnprob(theta, pdf, DistanceFunction, VolumeFunction):
    """Log posterior = log prior + log likelihood; -inf outside the prior
    support (so emcee rejects the proposal without evaluating lnlike)."""
    log_prior = lnprior(theta, DistanceFunction, VolumeFunction)
    if np.isfinite(log_prior):
        return log_prior + lnlike(theta, pdf, DistanceFunction)
    return -np.inf
def SchecterFunction(m, Mstar, alpha, phistar):
    """Schechter luminosity function phi(M) dM in magnitude form:
    0.4 ln(10) phistar x^(alpha+1) exp(-x), with x = 10^(-0.4 (M - Mstar))."""
    luminosity_ratio = pow(10.0, -0.4 * (m - Mstar))
    return (
        0.4 * np.log(10.0) * phistar
        * luminosity_ratio ** (alpha + 1.0)
        * np.exp(-luminosity_ratio)
    )
def absolute_magnitude(m, d):
    """Absolute magnitude from apparent magnitude m and luminosity distance
    d in Mpc (distance modulus with d expressed in units of 10 pc)."""
    distance_modulus = 5.0 * np.log10(1e5 * d)
    return m - distance_modulus
def apparent_magnitude(M, d):
    """Apparent magnitude from absolute magnitude M and luminosity distance
    d in Mpc — the exact inverse of absolute_magnitude above."""
    distance_modulus = 5.0 * np.log10(1e5 * d)
    return M + distance_modulus
"""
typical values for the r band (http://arxiv.org/abs/0806.4930)
"""
Mstar = -20.73 + 5.*np.log10(0.7)
alpha = -1.23
phistar = 0.009 * (0.7*0.7*0.7) #Mpc^3
def sample_dpgmm(pdf, id, output = None, threshold = 20, debug = False):
    """MCMC-sample a synthetic galaxy catalogue (ra, dec, z, magnitude) whose
    posterior weight follows the event's DPGMM sky/distance distribution,
    then cut it at an apparent-magnitude threshold. Python 2 syntax.

    NOTE(review): `output` defaults to None but is passed to os.path.join
    on the very first line — presumably callers always supply it; confirm.
    """
    # check if the full catalog exists already
    ndim = 4
    if os.path.isfile(os.path.join(output,'galaxy_catalog_%04d.txt'%id)):
        print "full catalog exists, not resampling"
        samples = np.loadtxt(os.path.join(output,'galaxy_catalog_%04d.txt'%id))
        # Re-apply only the brightness cut to the existing catalogue.
        (idy,) = np.where(samples[:,3] < threshold)
        x = np.array([samples[i,:] for i in idy])
        np.savetxt(os.path.join(output,'galaxy_catalog_threshold_%d_%04d.txt'%(threshold,id)),x)
    else:
        ndim, nwalkers = 4, 64
        CL_s = np.genfromtxt('confidence_levels/CL_%d.txt'%id)
        # Target galaxy count: Schechter normalisation times the credible
        # volume read from CL_s[6,1] — row/column meaning assumed; confirm.
        N = np.maximum(1,phistar*CL_s[6,1]) # we take 1 sigma volume
        width = 100
        nsteps = np.maximum(2*np.int(N/nwalkers),100)
        print "sampling %d galaxies. sampler initialised to do %d steps"%(N,nsteps)
        # Walkers start uniformly over the prior support of lnprior.
        p0 = [[np.random.uniform(0.0,2.0*np.pi),
               np.random.uniform(-np.pi/2.,np.pi/2.),
               np.random.uniform(0.0,2.0),
               np.random.uniform(-30.0,-10.0)]
              for i in range(nwalkers)]
        O = cs.CosmologicalParameters(0.7,0.3,0.7)
        # make some interpolants for speed
        z = np.linspace(0.0,3.0,1000)
        dV = [O.ComovingVolumeElement(zi) for zi in z]
        VolumeInterpolant = interp1d(z,dV)
        Dl = [O.LuminosityDistance(zi) for zi in z]
        DistanceInterpolant = interp1d(z,Dl)
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[pdf,DistanceInterpolant,VolumeInterpolant])
        # Text progress bar over the emcee iterations.
        for i, result in enumerate(sampler.sample(p0, iterations=nsteps)):
            n = int((width+1) * float(i) / nsteps)
            sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
        sys.stdout.write("\n")
        # Discard the first half of the chain as burn-in, then drop exact
        # duplicate samples via the set of row tuples.
        samples = sampler.chain[:, nsteps/2:, :].reshape((-1, ndim))
        samples = np.vstack({tuple(row) for row in samples})
        # Column 3 was sampled as absolute magnitude; convert to apparent.
        d = DistanceInterpolant(samples[:,2])
        samples[:,3] = apparent_magnitude(samples[:,3],d)
        (idy,) = np.where(samples[:,3] < threshold)
        # Draw N observable galaxies; fall back to all of them when fewer
        # than N pass the threshold (np.random.choice raises in that case).
        try:
            idx = np.random.choice(idy,replace=False,size=N)
        except:
            idx = idy
        x = np.array([samples[i,:] for i in idx])
        if output is None:
            os.system('mkdir -p galaxy_catalogs')
        else:
            os.system('mkdir -p %s'%output)
        np.savetxt(os.path.join(output,'galaxy_catalog_threshold_%d_%04d.txt'%(threshold,id)),x)
        np.savetxt(os.path.join(output,'galaxy_catalog_%04d.txt'%id),samples)
    if debug:
        # Diagnostic plots: marginals per dimension, redshift histograms,
        # and the threshold cut in magnitude and sky position.
        import matplotlib.pyplot as pl
        for i in range(ndim):
            pl.figure()
            pl.hist(samples[::10,i], 100, color="k", histtype="step")
            pl.title("Dimension {0:d}".format(i))
        pl.figure()
        y = np.genfromtxt('Galaxies/galaxies_flat-%04d.txt'%id)
        pl.hist(samples[:,2],label='all',alpha=0.5,normed=True)
        pl.hist(y[:,2],label='old',alpha=0.5,normed=True)
        pl.hist(x[:,2],label='new',alpha=0.5,normed=True)
        pl.xlabel('redshift')
        pl.legend()
        pl.figure()
        pl.plot(samples[:,2],samples[:,3],'.r',alpha=0.5)
        pl.plot(samples[idy,2],samples[idy,3],'.b',alpha=0.5)
        pl.xlabel('redshift')
        pl.ylabel('absolute magnitude')
        pl.figure()
        pl.plot(samples[:,0],samples[:,1],'.r',alpha=0.5)
        pl.plot(samples[idy,0],samples[idy,1],'.b',alpha=0.5)
        pl.xlabel('right ascension')
        pl.ylabel('declination')
        pl.show()
    return samples
if __name__ == "__main__":
np.seterr(divide='ignore', invalid='ignore')
parser=OptionParser()
parser.add_option('-o','--out-dir',default=None,type='string',metavar='DIR',help='Directory for output')
parser.add_option('-d','--data',default=None,type='string',metavar='data',help='DPGMM data location')
parser.add_option('-e','--event',default=1,type='int',metavar='event',help='event id')
parser.add_option('-t','--threshold',default=20,type='float',metavar='threshold',help='telescope detection threshold')
(opts,args)=parser.parse_args()
pdfs,ids = readdata.find_events(opts.data)
k = ids.index(opts.event)
id = ids[k]
p = pdfs[k]
print "processing %s"%id
pdf = readdata.load_dpgmm_data([os.path.join(opts.data,'%s'%p)])[0]
sample_dpgmm(pdf, id, output = opts.out_dir, threshold = opts.threshold, debug = True)
| [
"noreply@github.com"
] | noreply@github.com |
f74f7e447d0e2893e7a6107cd1c1c90ba43d92aa | 15760fec2e8ccdc9c654ee685bdae7ca8126935f | /info/modules/wx/views.py | 76eff5aa14e994d187196048f316c8390c2db667 | [] | no_license | Python-Full-Stack-Team1/Multi_dockers_GPU_Visualized_Management | e3e76ac9266e558e6b19db17bf57672d62f2ed36 | a4522be836425d3cccdf2c108f56ba6cbc10e26d | refs/heads/master | 2022-03-31T07:37:50.987119 | 2020-01-14T09:04:08 | 2020-01-14T09:04:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | import os
from flask import render_template, jsonify
from info.modules.wx import wx_blu
@wx_blu.route('/source_data')
def source_data():
    """Return per-container resource usage as JSON.

    Runs `docker stats` once (--no-stream) with a Go template that makes
    every output line a JSON object, parses each line, and wraps the
    records in a {code, count, data} envelope.
    """
    import json  # local import: stdlib parser for docker's JSON lines

    # One-shot docker stats; the format string emits one JSON object per line.
    terminal = 'docker stats --no-stream --format "{\\"container\\":\\"{{ .Container }}\\",\\"name\\":\\"{{ .Name }}\\",\\"memory\\":{\\"raw\\":\\"{{ .MemUsage }}\\",\\"percent\\":\\"{{ .MemPerc }}\\"},\\"cpu\\":\\"{{ .CPUPerc }}\\",\\"pid\\":\\"{{ .PIDs }}\\",\\"diskio\\":\\"{{ .BlockIO }}\\"}"'
    result = os.popen(terminal).read().strip()

    source_list = []
    for line in result.split('\n'):
        if not line:
            # No running containers (or a stray blank line): nothing to parse.
            continue
        # BUGFIX/security: this used to be eval(line) — never evaluate
        # subprocess output as Python code; it is plain JSON.
        info = json.loads(line)
        source_list.append({
            'container': info.get('container'),
            'name': info.get('name'),
            'memory_use': info.get('memory')['raw'],
            'memory_per': info.get('memory')['percent'],
            'cpu': info.get('cpu'),
            'pid': info.get('pid'),
            'diskio': info.get('diskio'),
        })

    data = {
        'code': 0,
        'count': len(source_list),
        "data": source_list,
    }
    return jsonify(data)
@wx_blu.route('/source_ctrl')
def source_ctrl():
    # Serve the resource-control dashboard page (presumably the page whose
    # table is populated via /source_data — confirm in the template).
    return render_template('source_ctrl.html')
| [
"zhaoxingrong@outlook.com"
] | zhaoxingrong@outlook.com |
3dcc56e34644f42ea06d92fb7188107801b668d2 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/YZYQ/yzyq_144.py | 83432d061fadacfc51640039e13e534672cfe407 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class yzyq_144(xtp_test_case):
    """Order-flow test case yzyq_144.

    Per the Chinese `title` string below: a Shanghai STAR-market (688000)
    buy order priced within the daily limit, using "counterparty best,
    converted to limit" pricing at the limit-up price, expected to fill
    completely ('全成') after an OMS restart.
    """
    # yzyq_144
    def test_yzyq_144(self):
        title = '默认3:订单报价未超过涨跌幅限制-沪A对手方最优转限价买入=涨停价 重启oms'
        # Expected results for this case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, cancel
        # pending, cancelled, rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters --------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails outright.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            # Initialise expectations, persist the case parameters, then
            # run the order through the service and check the outcome.
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
| [
"418033945@qq.com"
] | 418033945@qq.com |
b4f24ffaaea2e92ee984aa16bfcbf7e2cb5c2f75 | 1d1d08f15976b1e961f08f89c9f5200cff540972 | /python/lcd/lcd.py | c129cf9775875c5a637e24d441ace9c6bf995b6d | [] | no_license | yamayuu-ics/raspi | a3bcc95948eb63ee73915687eff8f1fdcf0ff89d | 077543764008a70d7abc450af4b5fd46719bfab9 | refs/heads/master | 2021-04-03T10:06:25.718346 | 2019-07-07T04:13:49 | 2019-07-07T04:13:49 | 125,212,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | #!/usr/bin/python3
# config:utf-8
import smbus
import time
LCD_ADDR = 0x3e
LCD_SET_DDRAM_ADDR = 0x80
class i2cLcd:
    """Minimal driver for a 16x2 I2C character LCD.

    NOTE(review): the 0x3e address and the init byte sequence suggest an
    ST7032-class controller (AQM-style module) — confirm against the
    module's datasheet before changing any of the magic values.
    """
    # Class-level defaults; real values are assigned per instance in
    # __init__ (the string placeholders are just "not initialised yet").
    i2c = ""             # smbus.SMBus handle
    addr = ""            # I2C slave address
    reg_setting = 0x00   # control byte for command (instruction) writes
    reg_display = 0x40   # control byte for display-data (character) writes
    col_num = 16         # characters per row
    row_num = 2          # number of rows
    def __init__(self,bus_num,addr):
        """Open I2C bus `bus_num` and send the controller init sequence."""
        self.addr = addr
        self.i2c = smbus.SMBus(bus_num)
        # LCD init: two command bursts (function set / display on / clear);
        # byte values taken verbatim from the module's init recipe.
        self.i2c.write_i2c_block_data( self.addr, self.reg_setting, [0x38, 0x39, 0x14, 0x70, 0x56, 0x6c] )
        self.i2c.write_i2c_block_data( self.addr, self.reg_setting, [0x38, 0x0c, 0x01] )
    def __del__(self):
        # Blank the display when the driver object is garbage-collected.
        self.clear_display()
    def setCursor(self,col,row):
        """Move the write cursor to (col, row); row 0 or 1."""
        # DDRAM addresses: row 0 starts at 0x00, row 1 at 0x40.
        row_offset = [0x00,0x40]
        self.i2c.write_byte_data( self.addr, self.reg_setting, LCD_SET_DDRAM_ADDR | (col + row_offset[row]) )
    def write_scroll(self,str):
        """Show `str` as a marquee: hold the first window for 1 s, then
        shift left one character every 0.4 s until the text has run out.
        NOTE(review): the parameter shadows the builtin `str`."""
        #str_copy = str
        length = len(str)
        counter = 0
        #self.setCursor(0,0)
        self.write(str[counter:self.col_num+counter])
        time.sleep(1)
        counter += 1
        while length >= counter:
            self.clear_display()
            self.setCursor(0,0)
            self.write(str[counter:self.col_num+counter])
            time.sleep(0.4)
            counter += 1;
        #print("End")
    def write_smart(self,str):
        """Write `str` from the top-left, scrolling only when it does not
        fit on the display (col_num * row_num characters)."""
        self.setCursor(0,0)
        length = len(str)
        if length > self.col_num*self.row_num:
            self.write_scroll(str)
        else:
            self.write(str)
    def write(self, str):
        """Write characters at the current cursor, wrapping to the next row
        after col_num characters and stopping once the display is full."""
        counter = 0
        row = 1
        for c in list(str):
            self.i2c.write_byte_data( self.addr, self.reg_display, ord(c) )
            counter += 1
            if counter >= (self.col_num*self.row_num):
                break
            elif counter >= (self.col_num*row):
                self.setCursor(0,row)
                row += 1
                if row > self.row_num:
                    break
    def clear_display(self):
        # Command 0x01 clears the display (also used in the init sequence).
        self.i2c.write_byte_data(self.addr,self.reg_setting,0x01)
| [
"noreply@github.com"
] | noreply@github.com |
52e94929af409e629b58e4f74e3792dea52e1c6e | 6a56d2a22722613ad9f691426195a091c58ce3f2 | /networks/lr_schedule.py | 84ebe67b86483d603d3060bd03da5c8d7c8e9418 | [] | no_license | xungeer29/iMet-Collection2019-FGVC6-Baseline | 673e4f48e8294111ccc4261316dfe9e425e64d9b | 3d250522962c4f313c619d69d81223369d066ca2 | refs/heads/master | 2020-05-15T11:23:04.209876 | 2019-04-19T09:42:11 | 2019-04-19T09:42:11 | 182,225,104 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py |
def half_lr(init_lr, ep):
lr = init_lr / 2**ep
return lr
def step_lr(ep):
if ep < 20:
lr = 0.01
elif ep < 50:
lr = 0.001
elif ep < 100:
lr = 0.0001
elif ep < 150:
lr = 0.00001
elif ep < 200:
lr = 0.000001
else:
ep = 0.0000001
return lr
| [
"fuxungao@163.com"
] | fuxungao@163.com |
82e3753314c788b6d6036ebc089b277b3b47233f | d1488b7a6db143cbd131bb2daa9246394828191d | /kodovi/genalg.py | eed937d1d693638b9d072865678e92d7cce40fde | [] | no_license | nynazdravkovic/Evolution-of-population-which-plays-cooperative-games | db4f1eff9c08f6ef9866edc4e6621c14edf91e4a | b01c1f7a195c392903d581ce30e78e6f14e3e339 | refs/heads/master | 2022-07-31T17:46:00.669436 | 2019-07-20T10:47:14 | 2019-07-20T10:47:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,769 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 10 23:41:01 2018
@author: nina
"""
import random
import numpy
from copy import copy, deepcopy
import matplotlib.pyplot as plt
from math import sqrt
import xlsxwriter
# --- GA configuration (Serbian identifiers kept; English glosses below) ---
brojJedinki=100            # population size ("number of individuals")
brojCiklusa=100            # rounds per pairwise match ("number of cycles")
koeficijentKrosovera=0.05  # crossover coefficient (not used in this chunk)
brojGeneracija=1000        # number of generations
# Round payoffs; in svakaSaSvakom() a move of 1 vs 1 earns `cc` for both,
# so 1 appears to denote "cooperate" and 0 "defect" (PD-style payoffs).
cc=3   # my payoff when both play 1
cd=2   # my payoff when I play 1 and the opponent plays 0
dc=5   # my payoff when I play 0 and the opponent plays 1
dd=1   # my payoff when both play 0
razliciteStrategije=64     # number of distinct 6-bit strategies
# Per-generation strategy tally; presumably filled later in the file.
matrica= numpy.zeros([brojGeneracija,64], dtype=int)
# NOTE(review): range(0,63) yields only strategies 0..62 (63 of them), yet
# razliciteStrategije is 64 and the payoff matrices are 64x64 — strategy 63
# can never enter the population since random.choice only duplicates
# existing entries. Verify whether range(0,64) was intended.
strategije=list(range(0,63))
def bit(broj, m):
    """Return bit `m` (0 = least significant) of integer `broj` as 0 or 1."""
    return (broj >> m) & 1
def birajClan(niz, peti, sesti):
    """Return strategy `niz`'s next move given the previous round: `peti` is
    its own last move and `sesti` the opponent's (see the caller). Each of
    the four possible histories selects one of the strategy's bits 5..2."""
    if peti == 0:
        position = 5 if sesti == 0 else 4
    else:
        position = 3 if sesti == 0 else 2
    return bit(niz, position)
def svakaSaSvakom():# build the 64x64 payoff table by playing every strategy against every other
    """Play brojCiklusa rounds for every pair of the 64 six-bit strategies
    and return the accumulated 64x64 score matrix (row = scorer, col = opponent)."""
    populacija1=[]
    # Local score matrix; later bound to the module global matricaPoena at file bottom.
    matricaPoena=numpy.zeros([64,64], dtype=int)
    for i in range (64): # enumerate all 64 strategies
        populacija1.append(i)
    # istorijaSukoba[a][b] = last move strategy a played against strategy b.
    istorijaSukoba = numpy.zeros([64,64], dtype=int)
    for j1 in range(64):
        for j2 in range(j1, 64):
            for x in range(brojCiklusa):
                if x==0:
                    # First round: seed "history" from the strategy's own low bits.
                    petij1=bit(populacija1[j1],1)
                    sestij1=bit(populacija1[j1],0)
                    petij2=bit(populacija1[j2],1)
                    sestij2=bit(populacija1[j2],0)
                else:
                    # Later rounds: each side sees its own and the opponent's last move.
                    petij1=istorijaSukoba[j1][j2]
                    petij2=istorijaSukoba[j2][j1]
                    sestij1=petij2
                    sestij2=petij1
                clan1=birajClan(populacija1[j1], petij1, sestij1)
                clan2=birajClan(populacija1[j2], petij2, sestij2)
                istorijaSukoba[j1][j2]=clan1
                istorijaSukoba[j2][j1]=clan2
                # Score the round; the j1==j2 self-play diagonal is added only once.
                if (clan1==1):
                    if (clan2==1):
                        if j1!=j2:
                            matricaPoena[j1][j2]=matricaPoena[j1][j2]+cc
                            matricaPoena[j2][j1]=matricaPoena[j2][j1]+cc
                        else:
                            matricaPoena[j2][j1]=matricaPoena[j2][j1]+cc
                    else:
                        matricaPoena[j1][j2]=matricaPoena[j1][j2]+cd
                        matricaPoena[j2][j1]=matricaPoena[j2][j1]+dc
                else:
                    if (clan2==1):
                        matricaPoena[j1][j2]=matricaPoena[j1][j2]+dc
                        matricaPoena[j2][j1]=matricaPoena[j2][j1]+cd
                    else:
                        if j1!=j2:
                            matricaPoena[j1][j2]=matricaPoena[j1][j2]+dd
                            matricaPoena[j2][j1]=matricaPoena[j2][j1]+dd
                        else:
                            matricaPoena[j2][j1]=matricaPoena[j2][j1]+dd
    return (matricaPoena)
def kreirajPopulaciju():
    """Pad the module-global strategy pool to brojJedinki (100) individuals.

    The global list starts with 63 distinct strategies; 37 duplicates are
    appended by sampling uniformly from the pool as it grows.
    """
    for _ in range(37):
        strategije.append(random.choice(strategije))
    return strategije
def dodavanjePoena(pop):
    """Score every individual against every other using the precomputed
    64x64 payoff table matricaPoena (module global).

    Note: the diagonal pairing (an individual against itself) is included,
    matching the original accumulation.
    """
    poeni = numpy.zeros(brojJedinki)
    for prvi in range(brojJedinki):
        for drugi in range(prvi, brojJedinki):
            sa, sb = pop[prvi], pop[drugi]
            poeni[prvi] += matricaPoena[sa][sb]
            poeni[drugi] += matricaPoena[sb][sa]
    return poeni
def razmnozavanje(poeni,pop):
    """Selection step: duplicate the 5 best-scoring individuals and drop the
    5 worst, keeping the population size constant."""
    #lpoeni=list(poeni)
    populacija2=deepcopy(pop)
    # Sort individuals by score, then reverse so the best come first.
    populacija2=[x for _, x in sorted(zip(poeni,populacija2))]
    populacija2=populacija2[::-1]
    poeni=sorted(poeni)
    lpoeni=poeni[::-1]
    for n in range (5):
        # Append a copy of the n-th best individual.
        populacija2.append(populacija2[n])
        # NOTE(review): for n==0 this appends poeni[-0] == poeni[0], the
        # *lowest* score, while n=1..4 append the 2nd..5th highest.
        # Presumably poeni[-(n+1)] was intended — confirm.
        lpoeni.append(poeni[-n])
    # Re-sort by score and cut the 5 lowest-ranked entries.
    populacija2=[x for _, x in sorted(zip(lpoeni,populacija2))]
    populacija2=populacija2[5:]
    pop=deepcopy(populacija2)
    return (pop)
def mutacije(pop, koef):
    """Mutation step: with probability *koef*, flip one random bit
    (position 0..5) of each individual's 6-bit strategy."""
    for jedinka in range(brojJedinki):
        if random.uniform(0, 1) <= koef:
            mesto = random.randint(0, 5)  # random bit position to flip
            pop[jedinka] = pop[jedinka] ^ (1 << mesto)
    return pop
def krosover(pop):
    """Crossover step: with probability koeficijentKrosovera, pick two random
    individuals and swap their low-order bits below a random cut point."""
    for i in range (brojJedinki):
        g=random.uniform(0,1)
        if g<=koeficijentKrosovera:
            a=random.randint(0,brojJedinki-1)
            b=random.randint(0,brojJedinki-1)
            c=random.randint(0,6) # pick the bit position at which the strategies are cut
            # NOTE(review): c==6 makes the mask cover 7 bits although
            # strategies are 6-bit (then the full strategies are swapped);
            # confirm whether randint(0,5) was intended.
            mask=(1<<(c+1))-1
            donji1=pop[a] & mask
            donji2=pop[b] & mask
            gornji1=pop[a]-donji1
            gornji2=pop[b]-donji2
            pop[a]=gornji1+donji2
            pop[b]=gornji2+donji1
    return(pop)
def column(matrix, k):
    """Return column *k* of a 2-D nested sequence as a list."""
    return [red[k] for red in matrix]
def genetskiAlgoritam(koef):
    """Run one GA experiment with mutation coefficient *koef* and return the
    mean normalized score per generation (length brojGeneracija)."""
    vreme=list(range(0,brojGeneracija))  # NOTE(review): unused local
    populacija=kreirajPopulaciju()
    nizSrednjihPoena=[]
    for t in range (brojGeneracija):
        poeni=dodavanjePoena(populacija)
        populacija=razmnozavanje(poeni,populacija)
        populacija=mutacije(populacija,koef)
        populacija=krosover(populacija)
        # Normalize by 99 opponents * brojCiklusa rounds.
        nizSrednjihPoena.append (numpy.mean(poeni)/(99*brojCiklusa))
        ## optional per-generation score histogram (disabled):
#        plt.hist(poeni/ (99*brojCiklusa))
#        plt.ylabel('Broj jedinki')
#        plt.xlabel('Poeni')
#        plt.title('Grafik zastupljenosti poena u generaciji')
#        plt.show()
    #k=numpy.rot90(matrica)
    return nizSrednjihPoena
def sve(koeficijentMutacije):
    """Run the GA 10 times, plot the mean score per generation with error
    bars, and return (means, std-errors, mean stabilization generation,
    its error, stds).

    The 2.9 threshold below marks the generation where the mean normalized
    score is considered "stabilized" — presumably chosen because cc=3 is the
    mutual-cooperation payoff; confirm.
    """
    srednja=[]
    vreme=list(range(brojGeneracija))
    c=[]
    for x in range (10):
        # NOTE(review): s/s1/s2 are re-initialized on every outer iteration
        # but only used after the loop — only the last reset matters.
        s=[]
        s1=[]
        s2=[]
        srednjaVrednost=genetskiAlgoritam(koeficijentMutacije)
        srednja.append(srednjaVrednost)
        for i in range (brojGeneracija):
            if srednjaVrednost[i]>2.9:
                c.append(i)
                break
    d=numpy.mean(c)
    print (d)
    f=numpy.std(c)/len(c)
    numpy.rot90(srednja,3)  # NOTE(review): result discarded; rot90 is not in-place
    for x in range (brojGeneracija):
        n=column(srednja,x)
        m=numpy.mean(n)
        m1=numpy.std(n)/sqrt(10)
        m2=numpy.std(n)
        s.append(m)
        s1.append(m1)
        s2.append(m2)
    plt.plot(vreme,s)
    plt.errorbar(vreme, s, s1)
    axes = plt.gca()
    axes.set_ylim([0,5])
    plt.ylabel('Srednji poeni')
    plt.xlabel('Generacija')
    plt.title('Grafik srednjih poena po generaciji')
    plt.show()
    return s, s1, d, f, s2
def svesve():
    """Top-level experiment driver: run sve() once at a fixed mutation
    coefficient, export the per-generation means and their errors to two
    .xlsx files, and plot the post-stabilization statistics.

    BUG FIX: the second Workbook never called add_worksheet(); the loop kept
    writing into the worksheet of the already-closed first workbook, so
    "greske_poenaC.xlsx" was written empty.
    """
    c=[]
    d=[]
    j=[]
    j1=[]
    koeficijentMutacije=0.07
    print (koeficijentMutacije)
    stab=sve(koeficijentMutacije)
    a=numpy.asarray(stab[0])
    a=numpy.reshape(a, (1000,1))
    b=numpy.asarray(stab[1])
    b=numpy.reshape(b,(1000,1))
    workbook = xlsxwriter.Workbook("poeniC"+".xlsx")
    worksheet = workbook.add_worksheet()
    row = 0
    for col, data in enumerate(a):
        worksheet.write_column(row, col, data)
    workbook.close()
    workbook = xlsxwriter.Workbook("greske_poenaC"+".xlsx")
    worksheet = workbook.add_worksheet()  # fix: fresh sheet in the new workbook
    row = 0
    for col, data in enumerate(b):
        worksheet.write_column(row, col, data)
    workbook.close()
    stabilizacija=stab[2]
    greskestab=stab[3]
    j.append(stabilizacija)
    j1.append(greskestab)
    # Find the first generation where the relative error drops below 1%
    # and average the means from there on.
    for i in range (brojGeneracija):
        if b[i]/a[i]<0.01:
            p=numpy.mean(a[i:])
            g=numpy.std(a[i:])/sqrt(5)
            c.append(p)
            d.append(g)
            break
    plt.plot(koeficijentMutacije, c)
    plt.errorbar(koeficijentMutacije, c, d)
    plt.ylabel('Srednji broj poena posle stabilizacije')
    plt.xlabel('Koeficijent mutacije')
    plt.show()
    plt.plot(koeficijentMutacije, j)
    plt.errorbar(koeficijentMutacije, j, j1)
    plt.ylabel('Generacija u kojoj se populacija stabilizovala')
    plt.xlabel('Koeficijent mutacije')
    plt.title('Grafik stabilacije poena u zavisnosti od koeficijenta mutacije')
matricaPoena=svakaSaSvakom()  # precompute the 64x64 strategy-vs-strategy payoff table
svesve()                      # run the full experiment suite
"nyna.zdravkovic@gmail.com"
] | nyna.zdravkovic@gmail.com |
553a4c0b4ef5b63e713ccd759d4de0d80786768e | d4172bf3becd7d0935b4634a20fa0e075ba96516 | /lunar | 50955e9e0c7c2cb01fdd56b1d57ea98ef3ae7652 | [] | no_license | zer4tul/utils | a0b0afb247aa23a9e69b17e086eac9b88d7562e1 | 0f821286bc2fa48e2a58f78fb439a82ecf928298 | refs/heads/master | 2020-05-17T23:50:34.018366 | 2012-12-15T19:13:07 | 2012-12-15T19:13:07 | 5,438,826 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,749 | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
'''
Usage: ccal Month [4-Digit-Year]
or: ccal 4-Digit-Year Month
This Python script is to show Solar and Lunar calender at the
same time. You need to have Python (2.0 or above) installed.
Acceptable date range: 1900/2 -- 2049/12
Output contains Chinese characters (mainland GB2312 encoding),
must be viewed in a Chinese-enabled system or "cxterm" etc.
programms under UNIX X-Windows.
The major reference for me to compose this program is:
lunar-2.1.tgz (1992), composed by
Fung F. Lee <[email]lee@umunhum.stanford.edu[/email]> and
Ricky Yeung <[email]Ricky.Yeung@Eng.Sun.Com[/email]> .
And Lee and Yeung refered to:
1. "Zhong1guo2 yin1yang2 ri4yue4 dui4zhao4 wan4nian2li4"
by Lin2 Qi3yuan2. 《中国阴阳日月对照万年历》.林
2. "Ming4li3 ge2xin1 zi3ping2 cui4yan2" by Xu2 Le4wu2.
《命理革新子平粹言》.徐
3. Da1zhong4 wan4nian2li4. 《大众万年历》
License:
GNU General Public License (GPL, see [url]http://www.gnu.org[/url]).
In short, users are free to use and distribute this program
in whole. If users make revisions and distribute the revised
one, they are required to keep the revised source accessible
to the public.
Version:
0.2.0, Jan/6/2002, ShengXiao(生肖), lunar leap month(闰月)
added.
0.1.0, Jan/4/2002
--- Changsen Xu <[email]xucs007@yahoo.com[/email]>
'''
#remember, in this program:
# month=0 means Januaray, month=1 means February ...;
# day=0 means the first day of a month, day=1 means the second day,
# so as to easy to manipulate Python lists.
# year=0 is 1900, until the last step to output
daysInSolarMonth= [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
lunarMonthDays = [29,30] # a short (long) lunar month has 29 (30) days */
shengXiaoEn = ["Mouse", "Ox", "Tiger", "Rabbit", "Dragon", "Snake",
"Horse", "Goat", "Monkey", "Rooster", "Dog", "Pig"]
shengXiaoGB = ["鼠", "牛", "虎", "兔", "龙", "蛇", "马", "羊", "猴", "鸡",
"狗", "猪"]
zhiGB = ["子", "丑", "寅", "卯", "辰", "巳", "午", "未", "申", "酉",
"戌", "亥"]
ganGB = ["甲", "乙", "丙", "丁", "戊", "己", "庚", "辛", "壬", "癸"]
monthEn = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
weekdayEn = ["Monday", "Tuesday", "Wednesday", "Thursday",
"Friday", "Saturday", "Sunday"]
weekdayGB = ["一", "二", "三", "四", "五", "六", "日"]
numGB = ['〇', "一", "二", "三", "四", "五", "六", "七", "八", "九",
"十"]
lunarHoliday = {'0_0':'春节', '4_4':'端午', '7_14':'中秋', '8_8':'重阳',
'0_14':'元宵'}
# encoding:
# b bbbbbbbbbbbb bbbb
# bit# 1 111111000000 0000
# 6 543210987654 3210
# . ............ ....
# month# 000000000111
# M 123456789012 L
#
# b_j = 1 for long month, b_j = 0 for short month
# L is the leap month of the year if 1<=L<=12; NO leap month if L = 0.
# The leap month (if exists) is long one if M = 1.
yearCode = [
0x04bd8, # 1900
0x04ae0, 0x0a570, 0x054d5, 0x0d260, 0x0d950, # 1905
0x16554, 0x056a0, 0x09ad0, 0x055d2, 0x04ae0, # 1910
0x0a5b6, 0x0a4d0, 0x0d250, 0x1d255, 0x0b540, # 1915
0x0d6a0, 0x0ada2, 0x095b0, 0x14977, 0x04970, # 1920
0x0a4b0, 0x0b4b5, 0x06a50, 0x06d40, 0x1ab54, # 1925
0x02b60, 0x09570, 0x052f2, 0x04970, 0x06566, # 1930
0x0d4a0, 0x0ea50, 0x06e95, 0x05ad0, 0x02b60, # 1935
0x186e3, 0x092e0, 0x1c8d7, 0x0c950, 0x0d4a0, # 1940
0x1d8a6, 0x0b550, 0x056a0, 0x1a5b4, 0x025d0, # 1945
0x092d0, 0x0d2b2, 0x0a950, 0x0b557, 0x06ca0, # 1950
0x0b550, 0x15355, 0x04da0, 0x0a5d0, 0x14573, # 1955
0x052d0, 0x0a9a8, 0x0e950, 0x06aa0, 0x0aea6, # 1960
0x0ab50, 0x04b60, 0x0aae4, 0x0a570, 0x05260, # 1965
0x0f263, 0x0d950, 0x05b57, 0x056a0, 0x096d0, # 1970
0x04dd5, 0x04ad0, 0x0a4d0, 0x0d4d4, 0x0d250, # 1975
0x0d558, 0x0b540, 0x0b5a0, 0x195a6, 0x095b0, # 1980
0x049b0, 0x0a974, 0x0a4b0, 0x0b27a, 0x06a50, # 1985
0x06d40, 0x0af46, 0x0ab60, 0x09570, 0x04af5, # 1990
0x04970, 0x064b0, 0x074a3, 0x0ea50, 0x06b58, # 1995
0x055c0, 0x0ab60, 0x096d5, 0x092e0, 0x0c960, # 2000
0x0d954, 0x0d4a0, 0x0da50, 0x07552, 0x056a0, # 2005
0x0abb7, 0x025d0, 0x092d0, 0x0cab5, 0x0a950, # 2010
0x0b4a0, 0x0baa4, 0x0ad50, 0x055d9, 0x04ba0, # 2015
0x0a5b0, 0x15176, 0x052b0, 0x0a930, 0x07954, # 2020
0x06aa0, 0x0ad50, 0x05b52, 0x04b60, 0x0a6e6, # 2025
0x0a4e0, 0x0d260, 0x0ea65, 0x0d530, 0x05aa0, # 2030
0x076a3, 0x096d0, 0x04bd7, 0x04ad0, 0x0a4d0, # 2035
0x1d0b6, 0x0d250, 0x0d520, 0x0dd45, 0x0b5a0, # 2040
0x056d0, 0x055b2, 0x049b0, 0x0a577, 0x0a4b0, # 2045
0x0aa50, 0x1b255, 0x06d20, 0x0ada0 # 2049
]
yearsCoded = len(yearCode)
from sys import argv, exit, stdout
from time import time, localtime
ow=stdout.write
class LunarYearInfo:
    """Per-lunar-year data: total days, per-month day counts, leap-month index."""
    def __init__(self):
        self.yearDays = 0            # total days in this lunar year
        self.monthDays = [0]*13      # days per month; 13 slots to allow a leap month
        self.leapMonth = -1 # -1 means no lunar leap month
yearInfo = [0]*yearsCoded #global variable
# Replace each placeholder with an empty per-year record; the month tables
# are decoded later by calcAllLunarYearsInfo().
for i in range(yearsCoded):
    yearInfo[i] = LunarYearInfo()
class Date:
    """Mutable date record used for both solar and lunar dates.

    year=0 means 1900, month=0 means the first month, day=0 the first day.
    gan/zhi are the heavenly-stem / earthly-branch indices (lunar years only).
    """
    def __init__(self, year, month, day, weekday=-1, gan=-1, zhi=-1):
        self.year =year
        self.month =month
        self.day =day
        self.weekday=weekday
        self.gan =gan
        self.zhi =zhi
solar1st = Date(0, 0, 30, weekday=2) #Wednesday, January 31, 1900
lunar1st = Date(0, 0, 0, weekday=2, gan=6, zhi=0)
#Wednesday, First day, First month, 1900, 庚子年
def error(msg):
    # Print the message and abort the script (Python 2 print statement).
    print 'Error:', msg; exit(0)
def isSolarLeapYear (year):
    """True if Gregorian year (*year* + 1900) is a leap year."""
    gregorian = year + 1900
    if gregorian % 400 == 0:
        return True
    return gregorian % 4 == 0 and gregorian % 100 != 0
baseYear=1201 - 1900
# in fact, real baseYear=1201. In order to ease calculation of
# leap years. real baseYear must conform to:
# realBaseYear%4==1 and realBaseYear%400==1.
# Assert realBaseYear < solar1st.year .
# Compute the number of days from the Solar First Date
# month=0 means January, ...
def solarDaysFromBaseYear(d): #d is a Date class
    """Count days from the internal base year to solar date *d*.

    NOTE: relies on Python 2 integer division (delta/4 etc.); under
    Python 3 this would produce floats.
    """
    delta = d.year - baseYear
    # Whole years: 365 days each plus the Gregorian leap-day corrections.
    offset = delta*365 + delta/4 - delta/100 + delta/400
    for i in range(d.month):
        offset += daysInSolarMonth[i];
    if d.month>1 and isSolarLeapYear(d.year):
        offset += 1
    offset += d.day
##    print '___', year, month, day, 'offset=', offset ########
    return offset
# Compute the number of days from the Solar First Date
# month=0 means January, ..., year=0 means 1900, ...
def solarDaysFromFirstDate (d): #d is a Date class
    # Days elapsed between the calendar epoch (solar1st, 1900-01-31) and d.
    return solarDaysFromBaseYear (d) - solarDaysFromBaseYear (solar1st)
def calcLunarDaysPerMonth(iYear):
    """Decode yearCode[iYear] into yearInfo[iYear].monthDays / .leapMonth.

    Low 4 bits hold the leap-month number (0 = none); the next 12 bits give
    each month's length flag, and one extra bit sizes the leap month itself
    (see the encoding comment above yearCode).
    """
    code = yearCode[iYear]
    leapMonth = code&0xf #leapMonth==0 means no lunar leap month
    code >>= 4
    # Months are encoded most-significant-first, so fill from the end.
    for iMonth in range(12):
        yearInfo[iYear].monthDays[11-iMonth] = lunarMonthDays [code&0x1]
        code >>= 1
    if leapMonth>0:
        yearInfo[iYear].leapMonth = leapMonth-1
        yearInfo[iYear].monthDays.insert (leapMonth,
            lunarMonthDays [code & 0x1])
def calcAllLunarYearsInfo():
    """Decode the month-length tables for every year covered by yearCode."""
    for godina in range(yearsCoded):
        calcLunarDaysPerMonth(godina)
#input dateSolar, return (dateLunar, isLunarMonthOrNot)
def solar2Lunar(d): #d is a Date class
dLunar = Date(-1, -1, -1) #unknown lunar Date class
offset = solarDaysFromFirstDate(d)
dLunar.weekday = (offset + solar1st.weekday)%7
for iYear in range(yearsCoded):
if offset < yearInfo[iYear].yearDays:
dLunar.year = iYear; break
offset -= yearInfo[iYear].yearDays
if dLunar.year == -1: error ("Date out of range.")
dLunar.gan = (dLunar.year + lunar1st.gan) % 10
dLunar.zhi = (dLunar.year + lunar1st.zhi) % 12
for iMonth in range(13):
if offset< yearInfo[dLunar.year].monthDays[iMonth]:
dLunar.month = iMonth; break
offset -= yearInfo[dLunar.year].monthDays[iMonth]
dLunar.day = offset
isLeapMonth=0
if yearInfo[dLunar.year].leapMonth >=0:
if dLunar.month == yearInfo[iYear].leapMonth + 1:
isLeapMonth=1
if dLunar.month > yearInfo[dLunar.year].leapMonth:
dLunar.month -= 1
return (dLunar, isLeapMonth)
def getSolarDaysInMonth (year, month):
    """Days in solar month *month* (0-based); leap-year February has 29."""
    if month == 1 and isSolarLeapYear(year):
        return 29
    return daysInSolarMonth[month]
def num2GB (num):
    """Render *num* as Chinese numerals (GB-encoded).

    10..19 use the '十X' form, 20..99 the 'X十Y' form, and anything
    >= 100 (the 4-digit years) is rendered digit by digit.

    BUG FIX: 20..99 previously fell through to the digit-by-digit loop,
    so e.g. 21 came out as '二一' instead of '二十一' (affected lunar
    days 20 and later).
    """
    if num==10:
        return '十'
    elif num>10 and num<20:
        return '十' + numGB[num-10]
    elif num>=20 and num<100:
        tmp = numGB[num/10] + '十'
        if num%10 > 0:
            tmp = tmp + numGB[num%10]
        return tmp
    # Digit-by-digit rendering (single digits and 4-digit years).
    tmp=''
    while num>10:
        tmp = numGB[num%10] + tmp
        num = int(num/10)
    tmp = numGB[num] + tmp
    return tmp
def lunarDate2GB (dLunar, isLeapMonth):
    """Render a lunar date cell: holiday name, month header on day 0,
    or the day number in Chinese numerals.

    FIX: dict.has_key() is Python-2-only; the `in` operator below behaves
    identically and also works on Python 3.
    """
    tmp = str(dLunar.month)+'_'+str(dLunar.day)
    if tmp in lunarHoliday:
        # Holiday label, padded to a fixed cell width.
        return '%s '% lunarHoliday[tmp] + ' '*(6-len(lunarHoliday[tmp]))
    elif dLunar.day==0:
        # First day of a lunar month: print the (possibly leap) month name.
        tmp2 = '闰'*isLeapMonth + num2GB(dLunar.month+1) +'月'
        return '%s' % tmp2 + ' '*(8-len(tmp2))
    elif dLunar.day<10:
        return '初' + num2GB(dLunar.day+1)
    else:
        return num2GB(dLunar.day+1)
def outputCalendar(year, month):
    """Print one solar month with the lunar date under each day (GB output)."""
    dLunar = Date(-1,-1,-1)
    ow ('\n 阳历%d年%d月 ' % (year+1900, month+1) )
    for iDay in range( getSolarDaysInMonth(year, month) ):
        dSolar = Date(year, month, iDay)
        dLunar, isLeapMonth = solar2Lunar (dSolar)
        if iDay==0:
            # Header: lunar year/month, stem-branch year name and zodiac animal.
            ow ('始于 阴历%s年%s%s月 (%s%s年, 生肖属%s)\n' %
            ( num2GB(dLunar.year+1900), '闰'*isLeapMonth,
            num2GB(dLunar.month+1),
            ganGB [dLunar.gan], zhiGB[dLunar.zhi], shengXiaoGB[dLunar.zhi]
            ))
            ow ('='*74 + '\n')
            for i in range(7):
                ow ("%3s %2s " % (weekdayEn[i][:3], weekdayGB[i]) )
            ow('\n\n')
            # Indent the first week to the correct weekday column.
            for i in range(dLunar.weekday): ow(' '*11)
        elif dLunar.weekday==0: ow('\n')
        ow ( "%2d %-8s" %(iDay+1, lunarDate2GB(dLunar, isLeapMonth) ) )
    ow('\n\n')
def checkArgv (argv):
    """Parse command-line args and return (year-1900, month-1).

    Accepts "month [year]" or "year month"; month may be a number or an
    English name/3-letter abbreviation.
    """
    argc = len(argv)
    if argc==1 or argv[1] in ('-h', '--help'):
        print __doc__; exit(0)
    #in case people input arguments as "4-digit-year month"
    if argc==3 and len(argv[1]) == 4 and len(argv[2]) in (1,2):
        argv[1], argv[2] = argv[2], argv[1]
    #Get month
    month=-1
    for iMonth in range(12):
        if argv[1].lower() == monthEn[iMonth].lower() or argv[1].lower() == monthEn[iMonth][:3].lower():
            month = iMonth+1; break
    if month==-1:
        # SECURITY NOTE: eval() on a command-line argument executes arbitrary
        # code; int(argv[1]) would be the safe equivalent here.
        month = eval(argv[1])
        if month<1 or month>12: error ("Month not within 1--12.")
    #Get year
    if argc==2: year = localtime(time())[0]   # default: current year
    else:
        if len(argv[2]) != 4: error ("Year must be 4 digits.")
        year = eval(argv[2])   # SECURITY NOTE: same eval() concern as above
        if year<1900 or year>= 1900+yearsCoded or (year==1900 and month==1):
            error ("Year must be within %d--%d, excluding 1900/1."
                % (1900, 1900 + yearsCoded-1) )
    return year-1900, month-1
# Entry point: parse arguments, decode the lunar tables, print the month.
year, month = checkArgv(argv)
calcAllLunarYearsInfo()
outputCalendar(year, month)
| [
"zer4tul@gmail.com"
] | zer4tul@gmail.com | |
20016baa26d09806a500f854ac0ec23afa286bf3 | c973eaab508bc8c8d73dac9d031b421198dfaa7d | /static/python/main.py | 5824b40e2a3b2dc24201ebc34333eb3af2847aa8 | [
"Apache-2.0"
] | permissive | synbiozis/site | 3862abcd121e9ec230e72a7133db0fcd78a34f4b | 227d062c564f9bde39cc9e1db4aee6b8cfcb1b88 | refs/heads/master | 2016-09-15T20:48:11.415573 | 2014-05-27T12:08:34 | 2014-05-27T12:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #!usr/bin/python
#coding: UTF-8
from time import time
from pop import population
# Evolve the population until an individual reaches the maximum fitness,
# timing each generation (Python 2 script).
pop = population()
print 'population start with a fittest of', pop.getFitness(pop.getFittest())
gen = 1
vals = []          # per-generation wall-clock times
fitness = 0
popMax = pop.getMaxFit()
while fitness < popMax:
    start = time()
    fittest = pop.getFittest()
    fitness = pop.getFitness(fittest)
    print 'Generation', gen, ': fittest =', fitness
    pop.evolve()
    gen += 1
    vals.append(time() - start)
# NOTE(review): if the initial population already satisfies the goal, vals is
# empty and the average below divides by zero — confirm that cannot happen.
print "\nFind solution in", sum(vals), "s."
print "Average time for a generation :", sum(vals) / len(vals), "s."
"synbiozisnetwork@gmail.com"
] | synbiozisnetwork@gmail.com |
330610aa83168adce0eca72d95abdabe6dfa1b24 | c0cb5fa9757af4e7ce73950a99c8156f889b3fc9 | /src/impl/convert.py | e3486062645ba3aecc71d568cc3de51d0e9fb366 | [] | no_license | wangpeijian/python_image_compress | 6e6d4771b34d63e83874715791cd036b05dd0b6d | 6c40da328a0cbf865fc71d2a185887781d56f334 | refs/heads/master | 2020-05-16T01:58:56.039423 | 2019-04-22T07:36:42 | 2019-04-22T07:36:42 | 182,615,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | def byte2m(size):
return size / (1024 * 1024)
| [
"wangpj@citic.com"
] | wangpj@citic.com |
271c94deb9f0b01b0ffc2866d015a0c9cf3ce405 | 74405b9d430eb33a76a7f74fdc7c01791553538a | /RangeVisualiser.py | 236e45700bacfd51e0269c658b3156258682e204 | [] | no_license | timmac98/PokerRangeVisualiser | 9416ffadf34af06215484d44ed2f612011cfaee5 | c611266e2b1386db4024e7981259686a853af520 | refs/heads/master | 2022-11-14T09:09:19.326737 | 2020-07-11T13:18:06 | 2020-07-11T13:18:06 | 278,861,045 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,745 | py | import sys
import matplotlib.pyplot as plt
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QMainWindow, QSizePolicy, QPushButton
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from RangeFunctions import *
class ApplicationWindow(QMainWindow):
    """Main window: a 'Browse' button stacked above the range-grid canvas."""

    def __init__(self):
        super().__init__()
        self.title = 'Holdem Range Visualiser'
        self.filename = 'BB3Bet'   # default range-file stem
        self.initUI()

    def initUI(self):
        """Build the central widget tree (button above canvas)."""
        self.setWindowTitle(self.title)
        self.main_widget = QtWidgets.QWidget(self)
        self.layout = QtWidgets.QVBoxLayout(self.main_widget)
        canvas = PlotCanvas(self.main_widget)
        browse_button = QPushButton('Browse', self)
        browse_button.clicked.connect(self.getfiles)
        self.layout.addWidget(browse_button)
        self.layout.addWidget(canvas)
        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)

    def getfiles(self):
        """Open a file dialog and remember the chosen .csv path."""
        self.filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            self, 'Single File', QtCore.QDir.currentPath(), '*.csv')
class PlotCanvas(FigureCanvas):
    """Matplotlib canvas rendering the 13x13 hold'em hand grid as a table
    shaded by a per-hand statistic."""
    def __init__(self, parent=None, width=5, height=4, dpi=100):
        fig = Figure(figsize=(width, height), dpi=dpi, facecolor=(53 / 255, 53 / 255, 53 / 255), tight_layout=True)
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # NOTE(review): self.filename is never read — plot_data() hard-codes
        # 'BB3bet' below; confirm whether it should use this attribute.
        self.filename = 'BB3Bet'
        self.plot_data()
        self.draw()

    def plot_data(self):
        """Draw the hand grid: suited hands above the diagonal, offsuit below,
        pairs on it, colored on a green scale by the '3Bet PF' stat."""
        columns = ['A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3', '2']
        rows = ['A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3', '2']
        # Cell label: higher rank first (e.g. 'AK'), mirrored across the diagonal.
        cell_text = [[b + a if columns.index(a) > columns.index(b) else a + b for a in columns] for b in columns]
        self.ax = self.figure.add_subplot()
        self.ax.axis('tight')
        self.ax.axis('off')
        self.ax.set_facecolor('black')
        # create_dict/create_array/hand_list come from RangeFunctions (star import).
        hand_dict = create_dict(filename='BB3bet', stat='3Bet PF')
        col_data, label = create_array(hand_dict, hand_list)
        df = pd.DataFrame(col_data, index=rows, columns=columns)
        vals = np.around(df.values, 2)
        # Normalize slightly beyond the data range so no cell is fully white/dark.
        norm = plt.Normalize(vals.min() - 1, vals.max() + 1)
        colours = plt.cm.Greens(norm(vals))
        visual = self.ax.table(cellText=cell_text, cellLoc='center', loc='center', cellColours=colours)
        visual.scale(1.0, 1.5)
if __name__ == '__main__':
    App = QApplication(sys.argv)
    App.setStyle('Fusion')
    # Dark "Fusion" theme: apply each (role, colour) pair in order.
    palette = QtGui.QPalette()
    dark_grey = QtGui.QColor(53, 53, 53)
    role_colours = [
        (QtGui.QPalette.Window, dark_grey),
        (QtGui.QPalette.WindowText, QtCore.Qt.white),
        (QtGui.QPalette.Base, QtGui.QColor(15, 15, 15)),
        (QtGui.QPalette.AlternateBase, dark_grey),
        (QtGui.QPalette.ToolTipBase, QtCore.Qt.white),
        (QtGui.QPalette.ToolTipText, QtCore.Qt.white),
        (QtGui.QPalette.Text, QtCore.Qt.white),
        (QtGui.QPalette.Button, dark_grey),
        (QtGui.QPalette.ButtonText, QtCore.Qt.white),
        (QtGui.QPalette.BrightText, QtCore.Qt.red),
        (QtGui.QPalette.Highlight, QtGui.QColor(142, 45, 197).lighter()),
        (QtGui.QPalette.HighlightedText, QtCore.Qt.black),
    ]
    for role, colour in role_colours:
        palette.setColor(role, colour)
    App.setPalette(palette)
    aw = ApplicationWindow()
    aw.show()
    sys.exit(App.exec_())
| [
"noreply@github.com"
] | noreply@github.com |
166bbf770ff6bda83d03c33605fba8e31f857028 | ba1061443f83d65033347c8e8896618005fbb32e | /236A/236A.py | 77017c03e2dc298d69ed8f4b7933cd0b98e3b6fe | [] | no_license | epidersis/olymps | 9388f690d4cc282bb5af2b8f57094a5bacce77eb | ff22f97d8cc7e6779dc8533e246d0d651e96033e | refs/heads/master | 2022-07-31T01:50:42.950753 | 2022-07-18T21:51:47 | 2022-07-18T21:51:47 | 130,722,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | print('CHAT WITH HER!' if len(set(input())) % 2 == 0 else 'IGNORE HIM!')
| [
"epidersis@gmail.com@"
] | epidersis@gmail.com@ |
9eaffcb6afaae736bde3cf8a5eae7997e0a27763 | d6a8e914bc63dbe836d2ba37d80cf1003dafcf7a | /autoencoder.py | f490e637f195b707de08da77fa20013478e5b01c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | msofiperez/Efficient-Segmentation-Networks-BDD100K | 5601c82f52ebf4928841a7993e5266139eefc5ba | 5e0e23f19e3f893716ffb56c1eb28db17a19f21e | refs/heads/master | 2023-03-16T23:03:25.722363 | 2021-01-06T12:20:59 | 2021-01-06T12:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,366 | py | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as T
from torchsummary import summary
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage.transform import resize
import pickle
from tqdm import tqdm
class CAE(nn.Module):
    """Convolutional autoencoder for 1-channel drivable-area masks.

    The decoder's output_size arguments imply a 176x320 input; the encoder
    pools down to a 5x10 single-channel map (50-value bottleneck), and the
    decoder mirrors the encoder with max-unpooling (reusing pool indices)
    and transposed convolutions.
    """
    def __init__(self):
        super().__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, return_indices=True)
        self.unpool = nn.MaxUnpool2d(kernel_size=2)
        # Encoder: 1 -> 32 -> 16 -> 8 -> 4 -> 1 channels.
        self.l1 = nn.Sequential(
            nn.Conv2d(1,32,kernel_size=3, padding=2), #out is of size (178,322)
            nn.ReLU(),
        )
        self.l2 = nn.Sequential(
            nn.Conv2d(32,16,kernel_size=3, padding=2), #
            nn.ReLU(),
        )
        self.l3 = nn.Sequential(
            nn.Conv2d(16,8,kernel_size=3, padding=2),
            nn.ReLU(),
        )
        self.l4 = nn.Sequential(
            nn.Conv2d(8,4,kernel_size=3, padding=1),
            nn.ReLU(),
        )
        self.l5 = nn.Sequential(
            nn.Conv2d(4,1,kernel_size=3, padding=1),
            nn.ReLU(),
        )
        self.drop_out = nn.Dropout(p=0.2)   # NOTE(review): unused in forward (commented out)
        # Decoder (transposed convs), mirroring the encoder channel widths.
        self.up1 = nn.ConvTranspose2d(1,4,kernel_size=3, padding=1)
        self.up2 = nn.ConvTranspose2d(4,8,kernel_size=3, padding=1)
        self.up3 = nn.ConvTranspose2d(8,16,kernel_size=3, padding=2)
        self.up4 = nn.ConvTranspose2d(16,32,kernel_size=3, padding=2)
        self.up5 = nn.ConvTranspose2d(32,1,kernel_size=3, padding=2)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return (reconstruction, bottleneck). Pool indices i1..i5 are kept
        so the decoder can unpool to the exact encoder feature positions."""
        x = self.l1(x)
        x, i1 = self.pool(x)
        x = self.l2(x)
        x, i2 = self.pool(x)
        x = self.l3(x)
        x, i3 = self.pool(x)
        x = self.l4(x)
        x, i4 = self.pool(x)
        x = self.l5(x)
        x, i5 = self.pool(x)
        #x = self.drop_out(x)
        # NOTE(review): torch.flatten with no start_dim flattens across the
        # batch dimension too — the bottleneck is only per-sample for batch=1.
        bottleneck = torch.flatten(x)
        x = self.unpool(x, i5, output_size=(11,20))
        x = self.up1(x, output_size=(11,20))
        x = self.relu(x)
        x = self.unpool(x, i4, output_size=(23,41))
        x = self.up2(x, output_size=(23,41))
        x = self.relu(x)
        x = self.unpool(x, i3, output_size=(47,83))
        x = self.up3(x, output_size=(45,81))
        x = self.relu(x)
        x = self.unpool(x, i2, output_size=(91,163))
        x = self.up4(x, output_size=(89,161))
        x = self.relu(x)
        x = self.unpool(x, i1, output_size=(178,322))
        x = self.up5(x, output_size=(176,320))
        x = self.sigmoid(x)
        return x, bottleneck
class VAE(nn.Module):
    """Variational autoencoder for 1-channel 176x320 drivable-area masks.

    The convolutional encoder pools down to a flat 50-value vector, which two
    linear heads map to the latent mean and log-variance; the decoder mirrors
    the encoder with max-unpooling and transposed convolutions.

    BUG FIX: forward() computed ``sigma = self.fc1(x)`` — fc2 was created but
    never used, so mu and sigma were always identical. sigma now uses fc2.
    """
    def __init__(self, latent_dim=32):
        super().__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, return_indices=True)
        self.unpool = nn.MaxUnpool2d(kernel_size=2)
        # Encoder: 1 -> 32 -> 16 -> 8 -> 4 -> 1 channels.
        self.l1 = nn.Sequential(
            nn.Conv2d(1,32,kernel_size=3, padding=2), #out is of size (178,322)
            nn.ReLU(),
        )
        self.l2 = nn.Sequential(
            nn.Conv2d(32,16,kernel_size=3, padding=2), #
            nn.ReLU(),
        )
        self.l3 = nn.Sequential(
            nn.Conv2d(16,8,kernel_size=3, padding=2),
            nn.ReLU(),
        )
        self.l4 = nn.Sequential(
            nn.Conv2d(8,4,kernel_size=3, padding=1),
            nn.ReLU(),
        )
        self.l5 = nn.Sequential(
            nn.Conv2d(4,1,kernel_size=3, padding=1),
            nn.ReLU(),
        )
        self.fc1 = nn.Linear(50, latent_dim)   # latent mean head
        self.fc2 = nn.Linear(50, latent_dim)   # latent log-variance head
        self.fc3 = nn.Linear(latent_dim, 50)   # latent -> decoder input
        # Decoder (transposed convs), mirroring the encoder channel widths.
        self.up1 = nn.ConvTranspose2d(1,4,kernel_size=3, padding=1)
        self.up2 = nn.ConvTranspose2d(4,8,kernel_size=3, padding=1)
        self.up3 = nn.ConvTranspose2d(8,16,kernel_size=3, padding=2)
        self.up4 = nn.ConvTranspose2d(16,32,kernel_size=3, padding=2)
        self.up5 = nn.ConvTranspose2d(32,1,kernel_size=3, padding=2)
        self.flt = nn.Sequential(nn.Flatten())
        self.unflt = nn.Sequential(nn.Unflatten(1, torch.Size([1,5,10])))
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def reparameterize(self, mu, sigma):
        """Sample z = mu + std * eps with std = exp(sigma / 2)."""
        std = sigma.mul(0.5).exp_()
        # return torch.normal(mu, std)
        esp = torch.randn(*mu.size())
        z = mu + std * esp
        return z

    def forward(self, x):
        """Return (reconstruction, mu, sigma, z). Pool indices are kept so
        the decoder can unpool to the exact encoder feature positions."""
        x = self.l1(x)
        x, i1 = self.pool(x)
        x = self.l2(x)
        x, i2 = self.pool(x)
        x = self.l3(x)
        x, i3 = self.pool(x)
        x = self.l4(x)
        x, i4 = self.pool(x)
        x = self.l5(x)
        x, i5 = self.pool(x)
        x = self.flt(x)
        mu = self.fc1(x)
        sigma = self.fc2(x)   # fix: was self.fc1, leaving fc2 unused
        bottleneck = self.reparameterize(mu, sigma)
        x = self.fc3(bottleneck)
        x = self.unflt(x)
        x = self.unpool(x, i5, output_size=(11,20))
        x = self.up1(x, output_size=(11,20))
        x = self.relu(x)
        x = self.unpool(x, i4, output_size=(23,41))
        x = self.up2(x, output_size=(23,41))
        x = self.relu(x)
        x = self.unpool(x, i3, output_size=(47,83))
        x = self.up3(x, output_size=(45,81))
        x = self.relu(x)
        x = self.unpool(x, i2, output_size=(91,163))
        x = self.up4(x, output_size=(89,161))
        x = self.relu(x)
        x = self.unpool(x, i1, output_size=(178,322))
        x = self.up5(x, output_size=(176,320))
        x = self.sigmoid(x)
        return x, mu, sigma, bottleneck
def loss_fn(recon_x, x, mu, sigma):
    """VAE objective: summed BCE reconstruction term plus mean KL divergence.

    Returns the tuple (total, reconstruction, kl).
    """
    reconstruction = F.binary_cross_entropy(recon_x, x, reduction='sum')
    kl_divergence = -0.5 * torch.mean(1 + sigma - mu.pow(2) - sigma.exp())
    return reconstruction + kl_divergence, reconstruction, kl_divergence
##### DATALOADER #####
trf = T.Compose([T.ToTensor()])
from torch.utils.data import Dataset, DataLoader, sampler
from pathlib import Path
class BDD100K(Dataset):
    """BDD100K dataset wrapper.

    With gt=True, items are (0, ground-truth drivable map); otherwise they are
    (image tensor, drivable map predicted by a segmentation model).

    BUG FIX: the seg_model argument was accepted but never stored — __getitem__
    silently used the module-level ``segmentation`` instead. The model is now
    kept on the instance (falling back to the global when None, preserving the
    old behaviour). The dataset-mean pickle is also now loaded once and cached
    instead of being re-read from disk on every item.
    """
    def __init__(self,img_dir,gt_dir,seg_model=None,gt=False,pytorch=True):
        super().__init__()

        # Loop through the files in red folder and combine, into a dictionary, the other bands
        self.files = [self.combine_files(f, gt_dir) for f in img_dir.iterdir() if not f.is_dir()]
        self.pytorch = pytorch
        self.gt = gt
        self.seg_model = seg_model   # fix: previously dropped
        self._mean = None            # lazily-loaded dataset mean

    def combine_files(self, img_file: Path, gt_dir):
        """Pair an image path with its '<stem>_drivable_id.png' label path."""
        files = {'img': img_file,
                 'gt': Path(str(gt_dir/img_file.name).split('.')[0] + '_drivable_id.png')}
        return files

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        if self.gt:
            trf2 = T.Compose([T.Resize((176,320)),T.ToTensor()])
            # NOTE(review): the image slot is a constant 0 in gt mode — confirm intended.
            return 0, trf2(Image.open(self.files[index]['gt']))
        if self._mean is None:
            with open('./dataset/inform/bdd100k_inform.pkl', "rb") as fh:
                self._mean = pickle.load(fh)['mean']
        img = Image.open(self.files[index]['img'])
        image = np.asarray(img, np.float32)
        image = resize(image, (176,320), order=1, preserve_range=True)
        image -= self._mean
        # image = image.astype(np.float32) / 255.0
        image = image[:, :, ::-1]  # revert to RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        image = torch.from_numpy(image.copy())
        model = self.seg_model if self.seg_model is not None else segmentation
        model.eval()
        y = model(image.unsqueeze(0))
        y = y.cpu().data[0].numpy()
        y = y.transpose(1, 2, 0)
        # Per-pixel argmax over classes; class 2 is merged into class 0.
        y = np.asarray(np.argmax(y, axis=2), dtype=np.float32)
        y[y==2] = 0
        y = torch.from_numpy(y.copy()).unsqueeze(0)
        return trf(img), y
##############
def train_CAE(model,dl,criterion,optimizer,epochs):
    """Train the CAE to reconstruct its input; returns per-epoch losses.

    Side effects: saves a rolling checkpoint every 50 batches and a
    per-epoch checkpoint under ../seg_weights/.
    """
    l = []
    for epoch in range(1,epochs+1):
        train_loss = 0.0
        t = tqdm(total=len(dl),desc='Episodes')
        i=0
        for _, images in dl:
            optimizer.zero_grad()
            outputs, _ = model(images)
            loss = criterion(outputs,images)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*images.size(0)
            i+=1
            if i%50 == 0:
                torch.save(model, "../seg_weights/last_CAE.pt")
            t.set_description(f'Episodes (loss: {round(float(loss),6)})')
            t.update(1)
        t.close()
        # NOTE(review): the accumulator weights each batch by its size but is
        # divided by the number of batches, not the number of samples — confirm.
        train_loss = train_loss/len(dl)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
        torch.save(model, "../seg_weights/CAE_"+str(epoch)+".pt")
        l.append(train_loss)
    return l
def train_VAE(model,dl,criterion,optimizer,epochs):
    """Train the VAE with a (total, bce, kld)-returning criterion such as
    loss_fn; returns per-epoch losses.

    Side effects: saves a rolling checkpoint every 50 batches and a
    per-epoch checkpoint under ../seg_weights/.
    """
    l = []
    for epoch in range(1,epochs+1):
        train_loss = 0.0
        t = tqdm(total=len(dl),desc='Episodes')
        i=0
        for _, images in dl:
            optimizer.zero_grad()
            outputs, mu, sigma, _ = model(images)
            loss, bce, kld = criterion(outputs, images, mu, sigma)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*images.size(0)
            i+=1
            if i%50 == 0:
                torch.save(model, "../seg_weights/last_VAE.pt")
            t.set_description("Epoch[{}/{}] Loss: {:.3f} {:.3f} {:.3f}".format(epoch,
                        epochs+1, loss.item()/images.size(0), bce.item()/images.size(0), kld.item()/images.size(0)))
            t.update(1)
        t.close()
        # NOTE(review): same batch-count normalization caveat as train_CAE.
        train_loss = train_loss/len(dl)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
        torch.save(model, "../seg_weights/VAE_"+str(epoch)+".pt")
        l.append(train_loss)
    return l
##############
if __name__ == "__main__":
from builders.model_builder import build_model
autoencoder = VAE().cpu()
#print(summary(CAE().cuda(),(1,176,320)))
segmentation = build_model('FastSCNN',num_classes=3)
checkpoint = torch.load('./checkpoint/bdd100k/FastSCNNbs200gpu1_train/model_8.pth', map_location=torch.device('cpu'))
segmentation.load_state_dict(checkpoint['model'])
train_ds = BDD100K( Path('./dataset/bdd100k/images/100k/train/'),
Path('./dataset/bdd100k/drivable_maps/labels/train/'),
segmentation,True
)
valid_ds = BDD100K( Path('./dataset/bdd100k/images/100k/val/'),
Path('./dataset/bdd100k/drivable_maps/labels/val/'),
segmentation,True
)
train_dl = DataLoader(train_ds, batch_size=32, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=12, shuffle=True)
######TRAINING##########
training_mode = False
if training_mode:
autoencoder = torch.load("../seg_weights/last_VAE.pt")
opt = torch.optim.Adam(autoencoder.parameters(),lr=0.001)
train_loss = train_VAE(autoencoder, train_dl, loss_fn, opt, epochs=15)
pickle.dump(train_loss, open("../seg_weights/loss_stats_autoencoder.pkl","wb"))
torch.save(autoencoder,"../seg_weights/last_autoencoder.pt")
#######TESTING###########
trained_autoencoder = torch.load("../seg_weights/last_VAE.pt")
trans = T.ToPILImage(mode='RGB')
trans2 =T.ToPILImage(mode='L')
x,y = train_ds[2]
#plt.imshow(trans(x.squeeze())); plt.show()
plt.imshow(trans2(y.squeeze())); plt.show()
print(y.shape,y)
start = time.time()
pred, _,_,bottleneck = autoencoder(y.unsqueeze(0))
print(time.time()-start,'seconds')
print(bottleneck)
plt.imshow(trans2(pred.squeeze())); plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
98ed9deade0bd75c9da1b0f10f1790c75fe07f02 | b96160a6f178d2e2f5b777ed3bc674febd73f764 | /website/movies/migrations/0006_auto_20170701_2355.py | e3bc7bb4f4dbdb6841a3b3db8afca74238dd4af9 | [] | no_license | kotzila/website | d7d6e5d01b695f35d397a7b9e0bd4c7e47dedf52 | 78261e32b5413986dd519a106b032f17d5ab5440 | refs/heads/master | 2020-09-15T13:57:45.924940 | 2017-08-11T22:26:37 | 2017-08-11T22:26:37 | 94,479,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-01 23:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movies', '0005_auto_20170701_2340'),
]
operations = [
migrations.AlterField(
model_name='info',
name='vote_count',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
| [
"n.kotiuk@levi9.com"
] | n.kotiuk@levi9.com |
3d426320b86cfd35d5ba9c006870dfb9a7fe29d8 | bae212aeaaa962a78fb733e59a1dccb4e5490b17 | /pay.py | d8ecc6f949df39524c2110e5f5270e7d2d31d2f1 | [] | no_license | caicaicmd/42_02 | 6153e6fbd4c57475b62015b293b063a5b24c34cf | 99a5b9e7c935cc448fc2ba35d05da3d68213cf6b | refs/heads/master | 2023-01-24T07:19:06.250492 | 2020-12-08T01:47:04 | 2020-12-08T01:47:04 | 319,214,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | pay = 1
pay =2
pay =3
| [
"317280271@qq.com"
] | 317280271@qq.com |
3b49b64e1dae8135fb4981d0de98b06174694804 | 4313db3203a9b0a7ffbe6210038c16c0ae87c3a1 | /DBMS/o.py | 8a9d023b0969a96f0049fac6339acc6848824173 | [] | no_license | krishna0631/program1 | 761e9b069e502d50a69b06700b0ff413a8ba2a32 | 577fb348d6335456015574c422a8a98e0e134a86 | refs/heads/master | 2023-07-27T21:34:23.004169 | 2021-09-12T12:34:01 | 2021-09-12T12:34:01 | 397,205,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | f = open("application.txt" , "w")
f.write("semester")
f.close() | [
"321910306039@gitam.in"
] | 321910306039@gitam.in |
61bc779a1546701f5153a4a6635e4020d619e8cb | 22c5fc7dd52149ebd4338a487ae9ab0db0e43f01 | /tests/test_dynunet.py | 39371c0e1dc31ecb08ea630eefe8443f705fb731 | [
"Apache-2.0"
] | permissive | precision-medicine-um/MONAI-Deep_Learning | 3d3f547dd9815152561a6853f8d4727b0e5ca4c4 | d94c4d3a2c465717ba3fae01b7acea7fada9885b | refs/heads/master | 2022-12-28T07:04:07.768415 | 2020-10-17T13:11:56 | 2020-10-17T13:11:56 | 305,346,962 | 3 | 0 | Apache-2.0 | 2022-12-27T15:44:13 | 2020-10-19T10:30:07 | Python | UTF-8 | Python | false | false | 5,036 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Any, Sequence, Union
import torch
from parameterized import parameterized
from monai.networks.nets import DynUNet
strides: Sequence[Union[Sequence[int], int]]
kernel_size: Sequence[Any]
expected_shape: Sequence[Any]
TEST_CASE_DYNUNET_2D = []
for kernel_size in [(3, 3, 3, 1), ((3, 1), 1, (3, 3), (1, 1))]:
for strides in [(1, 1, 1, 1), (2, 2, 2, 1)]:
for in_channels in [2, 3]:
for res_block in [True, False]:
out_channels = 2
in_size = 64
spatial_dims = 2
expected_shape = (1, out_channels, *[in_size // strides[0]] * spatial_dims)
test_case = [
{
"spatial_dims": spatial_dims,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"strides": strides,
"upsample_kernel_size": strides[1:],
"norm_name": "batch",
"deep_supervision": False,
"res_block": res_block,
},
torch.randn(1, in_channels, in_size, in_size),
expected_shape,
]
TEST_CASE_DYNUNET_2D.append(test_case)
TEST_CASE_DYNUNET_3D = [] # in 3d cases, also test anisotropic kernel/strides
for out_channels in [2, 3]:
for res_block in [True, False]:
in_channels = 1
in_size = 64
expected_shape = (1, out_channels, 64, 32, 64)
test_case = [
{
"spatial_dims": 3,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": (3, (1, 1, 3), 3, 3),
"strides": ((1, 2, 1), 2, 2, 1),
"upsample_kernel_size": (2, 2, 1),
"norm_name": "instance",
"deep_supervision": False,
"res_block": res_block,
},
torch.randn(1, in_channels, in_size, in_size, in_size),
expected_shape,
]
TEST_CASE_DYNUNET_3D.append(test_case)
TEST_CASE_DEEP_SUPERVISION = []
for spatial_dims in [2, 3]:
for res_block in [True, False]:
for deep_supr_num in [1, 2]:
for strides in [(1, 2, 1, 2, 1), (2, 2, 2, 1), (2, 1, 1, 2, 2)]:
test_case = [
{
"spatial_dims": spatial_dims,
"in_channels": 1,
"out_channels": 2,
"kernel_size": [3] * len(strides),
"strides": strides,
"upsample_kernel_size": strides[1:],
"norm_name": "group",
"deep_supervision": True,
"deep_supr_num": deep_supr_num,
"res_block": res_block,
},
torch.randn(1, 1, *[in_size] * spatial_dims),
]
scale = 1
all_expected_shapes = []
for stride in strides[: 1 + deep_supr_num]:
scale *= stride
deep_out_shape = (1, 2, *[in_size // scale] * spatial_dims)
all_expected_shapes.append(deep_out_shape)
test_case.append(all_expected_shapes)
TEST_CASE_DEEP_SUPERVISION.append(test_case)
class TestDynUNet(unittest.TestCase):
@parameterized.expand(TEST_CASE_DYNUNET_2D + TEST_CASE_DYNUNET_3D)
def test_shape(self, input_param, input_data, expected_shape):
net = DynUNet(**input_param)
net.eval()
with torch.no_grad():
result = net(input_data)
self.assertEqual(result.shape, expected_shape)
class TestDynUNetDeepSupervision(unittest.TestCase):
@parameterized.expand(TEST_CASE_DEEP_SUPERVISION)
def test_shape(self, input_param, input_data, expected_shape):
net = DynUNet(**input_param)
with torch.no_grad():
results = net(input_data)
self.assertEqual(len(results), len(expected_shape))
for idx in range(len(results)):
result, sub_expected_shape = results[idx], expected_shape[idx]
self.assertEqual(result.shape, sub_expected_shape)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
762eb5522286793c28ee067dc804473cca9f7b95 | 801f367bd19b8f2ab08669fd0a85aad7ace961ac | /project/experiments/exp_025_pns_start_identity/src/tmp_which_nodes_are_slow_results.py | 2b351dbe039f6593b70e34ad3375078ad22ad97b | [
"MIT"
] | permissive | Wendong-Huo/thesis-bodies | d91b694a6b1b6a911476573ed1ed27eb27fb000d | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | refs/heads/main | 2023-04-17T18:32:38.541537 | 2021-03-12T19:53:23 | 2021-03-12T19:53:23 | 623,471,326 | 1 | 0 | null | 2023-04-04T12:45:48 | 2023-04-04T12:45:47 | null | UTF-8 | Python | false | false | 2,040 | py | import pandas as pd
with open("output_data/tmp/which_nodes_are_slow.txt", "r") as f:
grep_results = f.readlines()
for idx, line in enumerate(grep_results):
if "1785959" in line:
print(grep_results[idx-1])
print(line)
break
# exit(0)
l = len("output_data/tensorboard/")
df_results = pd.read_pickle("output_data/tmp/which_nodes_are_slow")
df_results["node"] = ""
df_results["num_bodies"] = 0
for idx_df, row in df_results.iterrows():
path = row["path"][l:]
df_results.at[idx_df, "path"] = path
df_results.at[idx_df, "num_bodies"] = len(path.split("-"))-3
node = ""
for idx, line in enumerate(grep_results):
if path in line:
job_id = line[:7]
if int(job_id)<1785585 or int(job_id)>1786224:
continue # I started exp_012 several times
_tmp = grep_results[idx-1].split(":")[-1]
node = _tmp.split(".")[0]
break
if node=="":
print("not found.")
else:
df_results.at[idx_df, "node"] = node
df_results = df_results.sort_values(by="node")
df_results.to_csv("output_data/tmp/who_slow.csv")
# df_results = df_results[df_results["path"].str.len()>90]
# print(sorted(df_results["path"].str.len().unique()))
# print(df_results.shape)
# df_results["node_prefix"] = df_results["node"].str.slice(start=0, stop=5)
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# sns.barplot(data=df_results, x="node_prefix", y="min_fps", ax=ax)
sns.barplot(data=df_results, x="node", y="min_fps", ax=ax)
plt.xticks(rotation=45)
# ax1 = ax.twinx()
# ax.set_ylim(0,350)
# ax1.set_ylim(0,350)
# sns.lineplot(x=[-0.5,df_results.shape[0]], y=[34.7,34.7], color="black", ax=ax1)
plt.show()
df_results = df_results.sort_values(by="min_fps")
print(df_results.iloc[0])
# df_slow = df_results[df_results["min_fps"]<80]
# print(df_slow["node"].unique())
# for node in df_slow["node"].unique():
# print(df_results[df_results["node"]==node])
# print(df_results.iloc[-1]) | [
"sliu1@uvm.edu"
] | sliu1@uvm.edu |
aea833987932118bb08607201ce8e15b3c3632c2 | 02b0c773bb2c12088e9a8b6f365318d39766f10d | /python/sparkdl/utils/__init__.py | 459489e1357c778f7e6b8074d745eef6d80bb8a0 | [
"Apache-2.0"
] | permissive | mateiz/spark-deep-learning | ad08a3644d49201695f5654f4cbfa263e9b32e99 | dee8fbff74c807bdbf72edf3588d4d5f64f36b0b | refs/heads/master | 2021-01-21T22:05:22.956166 | 2017-06-20T20:29:16 | 2017-06-20T20:29:16 | 95,158,292 | 21 | 4 | null | 2017-06-22T21:32:09 | 2017-06-22T21:32:08 | null | UTF-8 | Python | false | false | 583 | py | #
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| [
"sueann@databricks.com"
] | sueann@databricks.com |
f4a3e0804a70173539fe6532e76bd4f2271b729f | 0ff6b85d089b619ab0e5f4050a899492dfb69a54 | /Shopping/forms.py | 83d9da8bb809e8614a660efb34bba86ece08934b | [
"MIT"
] | permissive | gaybro8777/EarnWhileShop | 06ff5fecac5909c2e2dcff2bdecf5f26165c777d | 87a512c8f222d84c90cb8c934c1588a7d96c9cce | refs/heads/master | 2021-09-15T14:43:22.533176 | 2018-06-04T18:43:49 | 2018-06-04T18:43:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from django import forms
from Shopping.models import Purchase
class PurchaseForm(forms.ModelForm):
class Meta:
model = Purchase
fields = ['link', 'quantity', 'paytm_number']
def __init__(self, *args, **kwargs):
super(PurchaseForm, self).__init__(*args, **kwargs)
| [
"reetikasingla@Reetikas-MacBook-Pro.local"
] | reetikasingla@Reetikas-MacBook-Pro.local |
98fe52e38140e1691a95e0a3e3e42abfcfd8ead4 | d96289f157e2bbbf6f3560f3cc327e490df84b54 | /exp_configs/__init__.py | c98ed0eabbf9a5cd320bccd9a1242a1ddc6f5ad4 | [] | no_license | IssamLaradji/semantic_segmentation_template | 74e8766ce3265ba7fc9416f9c85811d05dca39f9 | f7286eaafb5d5bc81e2f7d6bb87f6e24db026a08 | refs/heads/main | 2023-08-22T09:53:37.381702 | 2021-10-14T21:45:42 | 2021-10-14T21:45:42 | 417,287,252 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from . import issam_exps, sandra_exps
EXP_GROUPS = {}
EXP_GROUPS.update(issam_exps.EXP_GROUPS)
EXP_GROUPS.update(sandra_exps.EXP_GROUPS)
| [
"issam.laradji@gmail.com"
] | issam.laradji@gmail.com |
afe3a9ba15f4ac055951ce002500dff757f98992 | cca3251c362bbdcd7033c4839466c6afd2c1bbbb | /src/chap13-Serverless/cdk-lambda-dynamodb-fargate/lambda/decimalencoder.py | 192aacfece549673a6b1897f7732e101959f4f29 | [
"MIT"
] | permissive | paiml/python_devops_book | 6dec6929b22042a640b041674b9a07ae271e0262 | 0c8f543aa243d1484deb7f01ffe6876a4176d376 | refs/heads/master | 2023-08-17T01:21:37.838933 | 2023-08-10T12:05:22 | 2023-08-10T12:05:22 | 185,512,695 | 444 | 313 | MIT | 2020-07-20T18:26:14 | 2019-05-08T02:26:06 | Jupyter Notebook | UTF-8 | Python | false | false | 288 | py | import decimal
import json
# This is a workaround for: http://bugs.python.org/issue16535
class DecimalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return int(obj)
return super(DecimalEncoder, self).default(obj)
| [
"grig@gheorghiu.net"
] | grig@gheorghiu.net |
33324b6f85ce808ce752672c7be977d539d6b991 | 75b0e3f962ad2f2247c0ec663211711446b6f4f4 | /tempotesthaha.py | 4eaff2a135e14f64b4085a04396cadfa12767ae0 | [] | no_license | bomaru83/Diary | 71ee2187da55a39f30fd873cdbf1684a04dc3a93 | ebe1e8fa2df118d51382966e0cd94f7070ebcf2f | refs/heads/master | 2022-12-28T04:00:53.111916 | 2020-09-06T09:02:37 | 2020-09-06T09:02:37 | 380,447,555 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | import os
print() | [
"multirkh@gmail.com"
] | multirkh@gmail.com |
0e5abee4de9cf7ba42739fc0944d99ae5f5e4d29 | 9aef279969fdcede35f6656afb1b0cd2fd3e10e2 | /weather_xml_sax.py | 0dc49400e0983065689ce91d9ce0a97deccca1ac | [] | no_license | namespace123/-Python3Study | 6cd7dfa334157ac67b02279fddbc37ddc78019f8 | b151baa63c4c49ab497cc7bf0de6a841edb97697 | refs/heads/master | 2020-05-04T16:48:29.894204 | 2019-04-11T12:41:32 | 2019-04-11T12:41:32 | 179,288,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,536 | py | # -*- coding:utf-8 -*-
from xml.parsers.expat import ParserCreate
weather_dict = {} # 定义天气字典
which_day = 0 # 哪一天
# 定义解析类 这三个函数在廖雪峰老师xml这一节中介绍了
# 包括三个主要函数:start_element(),end_element(),char_data()
class WeatherSaxHandler(object):
def start_element(self, name, attrs): # 定义start_element函数
global weather_dict, which_day
if name == 'yweather:location': # 判断并获取XML文档中地理位置信息
weather_dict['city'] = attrs['city'] # 将本行XML代码中'city'属性值赋予字典weather_dict中的'city'
weather_dict['country'] = attrs['country'] # 执行结束后此时,weather_dict={'city':'Beijing','country'='China'}
if name == 'yweather:forecast': # 同理获取天气预测信息
which_day += 1 # 第一天天气,获取气温、天气
if which_day == 1:
weather_today = {'text': attrs['text'],
'low': int(attrs['low']),
'high': int(attrs['high'])
}
weather_dict['today'] = weather_today # 此时weather_dict出现二维字典
# weather_dict={'city': 'Beijing', 'country': 'China', 'today': {'text': 'Partly Cloudy', 'low': 20, 'high': 33}}
elif which_day == 2: # 第二天相关信息
weather_today = {
'text': attrs['text'],
'low': int(attrs['low']),
'high': int(attrs['high'])
}
weather_dict['tomorrow'] = weather_today
# weather_dict={'city': 'Beijing', 'country': 'China', 'today': {'text': 'Partly Cloudy', 'low': 20, 'high': 33}, 'tomorrow': {'text': 'Sunny', 'low': 21, 'high': 34}}
def end_element(self, name): # end_element函数
pass
def char_data(self, text): # char_data函数
pass
def parse_weather(xml):
handler = WeatherSaxHandler()
parser = ParserCreate()
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_data
parser.Parse(xml)
return weather_dict
# XML文档,输出结果的数据来源
# 将XML文档赋值给data
data = r'''<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0" xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#">
<channel>
<title>Yahoo! Weather - Beijing, CN</title>
<lastBuildDate>Wed, 27 May 2015 11:00 am CST</lastBuildDate>
<yweather:location city="Beijing" region="" country="China"/>
<yweather:units temperature="C" distance="km" pressure="mb" speed="km/h"/>
<yweather:wind chill="28" direction="180" speed="14.48" />
<yweather:atmosphere humidity="53" visibility="2.61" pressure="1006.1" rising="0" />
<yweather:astronomy sunrise="4:51 am" sunset="7:32 pm"/>
<item>
<geo:lat>39.91</geo:lat>
<geo:long>116.39</geo:long>
<pubDate>Wed, 27 May 2015 11:00 am CST</pubDate>
<yweather:condition text="Haze" code="21" temp="28" date="Wed, 27 May 2015 11:00 am CST" />
<yweather:forecast day="Wed" date="27 May 2015" low="20" high="33" text="Partly Cloudy" code="30" />
<yweather:forecast day="Thu" date="28 May 2015" low="21" high="34" text="Sunny" code="32" />
<yweather:forecast day="Fri" date="29 May 2015" low="18" high="25" text="AM Showers" code="39" />
<yweather:forecast day="Sat" date="30 May 2015" low="18" high="32" text="Sunny" code="32" />
<yweather:forecast day="Sun" date="31 May 2015" low="20" high="37" text="Sunny" code="32" />
</item>
</channel>
</rss>
'''
# 实例化类
weather = parse_weather(data)
# 检查条件是否为True
assert weather['city'] == 'Beijing', weather['city']
assert weather['country'] == 'China', weather['country']
assert weather['today']['text'] == 'Partly Cloudy', weather['today']['text']
assert weather['today']['low'] == 20, weather['today']['low']
assert weather['today']['high'] == 33, weather['today']['high']
assert weather['tomorrow']['text'] == 'Sunny', weather['tomorrow']['text']
assert weather['tomorrow']['low'] == 21, weather['tomorrow']['low']
assert weather['tomorrow']['high'] == 34, weather['tomorrow']['high']
# 打印到屏幕
print('Weather:', str(weather))
| [
"2319175156@qq.com"
] | 2319175156@qq.com |
226afbd5b0df5e31e93c21680612dff96643d534 | 0afddba1fff35c8045ad12d08a8818182fd0401e | /maya/plug-ins/mtoa_1.2.7.3_maya2015/scripts/arnold/ai_params.py | 161b97e36105a9b67404db7dab2ef685e1026702 | [] | no_license | zhihuijarr/th_repos | b0e648f2b55389970caa878dddebc2cc90328a6e | 013b6c8d6aa510a13c6fa507a457d5df920558fc | refs/heads/master | 2021-01-23T06:26:05.678942 | 2017-06-03T12:49:45 | 2017-06-03T12:49:45 | 93,022,517 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,073 | py |
from ctypes import *
from .arnold_common import ai, NullToNone
from .ai_matrix import *
from .ai_array import *
from .ai_enum import *
from .ai_color import *
from .ai_vector import *
from .ai_types import *
# Parameter types
#
AI_TYPE_BYTE = 0x00 ## Byte (an 8-bit sized unsigned integer)
AI_TYPE_INT = 0x01 ## Integer
AI_TYPE_UINT = 0x02 ## Unsigned integer
AI_TYPE_BOOLEAN = 0x03 ## Boolean (either TRUE or FALSE)
AI_TYPE_FLOAT = 0x04 ## Single-precision floating point number
AI_TYPE_RGB = 0x05 ## RGB struct
AI_TYPE_RGBA = 0x06 ## RGBA struct
AI_TYPE_VECTOR = 0x07 ## XYZ vector
AI_TYPE_POINT = 0x08 ## XYZ point
AI_TYPE_POINT2 = 0x09 ## XY point
AI_TYPE_STRING = 0x0A ## C-style character string
AI_TYPE_POINTER = 0x0B ## Arbitrary pointer
AI_TYPE_NODE = 0x0C ## Pointer to an Arnold node
AI_TYPE_ARRAY = 0x0D ## AtArray
AI_TYPE_MATRIX = 0x0E ## 4x4 matrix
AI_TYPE_ENUM = 0x0F ## Enumeration (see \ref AtEnum)
AI_TYPE_UNDEFINED = 0xFF ## Undefined, you should never encounter a parameter of this type
AI_TYPE_NONE = 0xFF ## No type
# Parameter categories
#
AI_USERDEF_UNDEFINED = 0 ## Undefined, you should never encounter a parameter of this category
AI_USERDEF_CONSTANT = 1 ## User-defined: per-object parameter
AI_USERDEF_UNIFORM = 2 ## User-defined: per-face parameter
AI_USERDEF_VARYING = 3 ## User-defined: per-vertex parameter
AI_USERDEF_INDEXED = 4 ## User-defined: per-face-vertex parameter
class AtParamValue(Union):
_fields_ = [("BYTE", AtByte),
("INT", c_int),
("UINT", c_uint),
("BOOL", c_bool),
("FLT", c_float),
("RGB", AtRGB),
("RGBA", AtRGBA),
("VEC", AtVector),
("PNT", AtPoint),
("PNT2", AtPoint2),
("STR", AtString),
("PTR", c_void_p),
("ARRAY", POINTER(AtArray)),
("pMTX", POINTER(AtMatrix))]
class AtParamEntry(Structure):
pass
_AiParamGetName = ai.AiParamGetName
_AiParamGetName.argtypes = [POINTER(AtParamEntry)]
_AiParamGetName.restype = AtString
def AiParamGetName(param_entry):
return AtStringToStr(_AiParamGetName(param_entry))
AiParamGetType = ai.AiParamGetType
AiParamGetType.argtypes = [POINTER(AtParamEntry)]
AiParamGetType.restype = c_int
_AiParamGetDefault = ai.AiParamGetDefault
_AiParamGetDefault.argtypes = [POINTER(AtParamEntry)]
_AiParamGetDefault.restype = c_void_p
def AiParamGetDefault(pentry):
return NullToNone(_AiParamGetDefault(pentry), POINTER(AtParamValue))
AiParamGetEnum = ai.AiParamGetEnum
AiParamGetEnum.argtypes = [POINTER(AtParamEntry)]
AiParamGetEnum.restype = AtEnum
_AiParamGetTypeName = ai.AiParamGetTypeName
_AiParamGetTypeName.argtypes = [AtByte]
_AiParamGetTypeName.restype = AtString
def AiParamGetTypeName(index):
return AtStringToStr(_AiParamGetTypeName(index))
AiParamGetTypeSize = ai.AiParamGetTypeSize
AiParamGetTypeSize.argtypes = [AtByte]
AiParamGetTypeSize.restype = c_int
class AtUserParamEntry(Structure):
pass
_AiUserParamGetName = ai.AiUserParamGetName
_AiUserParamGetName.argtypes = [POINTER(AtUserParamEntry)]
_AiUserParamGetName.restype = AtString
def AiUserParamGetName(user_param_entry):
return AtStringToStr(_AiUserParamGetName(user_param_entry))
AiUserParamGetType = ai.AiUserParamGetType
AiUserParamGetType.argtypes = [POINTER(AtUserParamEntry)]
AiUserParamGetType.restype = c_int
AiUserParamGetArrayType = ai.AiUserParamGetArrayType
AiUserParamGetArrayType.argtypes = [POINTER(AtUserParamEntry)]
AiUserParamGetArrayType.restype = c_int
AiUserParamGetCategory = ai.AiUserParamGetCategory
AiUserParamGetCategory.argtypes = [POINTER(AtUserParamEntry)]
AiUserParamGetCategory.restype = c_int
AiUserParamGetIndex = ai.AiUserParamGetIndex
AiUserParamGetIndex.argtypes = [POINTER(AtUserParamEntry)]
AiUserParamGetIndex.restype = c_int
| [
"61692940@qq.com"
] | 61692940@qq.com |
f4bff5cd10b131b2a0d7ac0bf7e2d36014f08278 | 6160586aa239eada16e735d40d57970dedbe1dfc | /modules/app_additional/app_customaction/app_customaction_delete.py | c182fad8429e4802d3dfe6058d3c4d97757f8530 | [] | no_license | showgea/AIOT | 7f9ffcd49da54836714b3342232cdba330d11e6c | fe8275aba1c4b5402c7c2c2987509c0ecf49f330 | refs/heads/master | 2020-07-23T10:19:37.478456 | 2019-09-23T12:25:59 | 2019-09-23T12:25:59 | 207,525,184 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | import requests
from config import readcfg
header_Gary = readcfg.header_Gary
header_Jenny = readcfg.header_Jenny
url = readcfg.url
def app_customaction_delete(customActionId):
url_ = url + "/app/v1.0/lumi/app/customaction/delete"
params_ = {
"customActionId": customActionId
}
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'http://127.0.0.1:8888'}
print("请求数据:%s" % params_)
r = requests.get(url=url_, params=params_, headers=header_Gary, proxies=proxies, verify=False)
return r
if __name__ == '__main__':
result_main = app_customaction_delete("123")
print(result_main.text)
| [
"tangguobing2011@163.com"
] | tangguobing2011@163.com |
d1361e5603dbcad0458945a81f77ece19988ca14 | 4e59c2444334c67e419dbc97a2fd326115f15555 | /db_orm_models/blocking/presence/browsing_intent_snapshot/methods.py | 64c2c313d23e2bb2069b8e73e40c8bdb2a79cfe0 | [
"MIT"
] | permissive | bbcawodu/nav-online-backend | cebf41fd3373606ac880b1fc4935885d13948c86 | 3085ad686b253ea82478eb2fc365f51dda6d9d96 | refs/heads/master | 2021-01-22T04:44:13.105412 | 2018-08-14T16:40:55 | 2018-08-14T16:40:55 | 102,269,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | def filter_query_obj_by_session_id(query_obj, obj_model, rqst_session_id, list_of_ids):
if isinstance(rqst_session_id, unicode) and rqst_session_id.lower() == "all":
query_obj = query_obj.order_by(obj_model.presence_browsing_session_data_id)
else:
query_obj = query_obj.filter(obj_model.presence_browsing_session_data_id.in_(list_of_ids)).\
order_by(obj_model.presence_browsing_session_data_id)
return query_obj
def filter_query_obj_by_intent(query_obj, obj_model, rqst_intent):
query_obj = query_obj.filter(obj_model.calculated_intent == rqst_intent).order_by(obj_model.presence_browsing_session_data_id)
return query_obj | [
"awodubradley@gmail.com"
] | awodubradley@gmail.com |
a67f9471dfad52bb32c3cf6d82951add395fd2e1 | 023d1e8996a1df353274ed90ca3d152b769fdba5 | /metrics-collector/utils/send_email.py | 18505be3544bbab101a390c09e77796e8d001fce | [] | no_license | vpineda7/cron-metrics | e158d9f6f1d7fe8d6694ff816ef1950ffd9e39b1 | 5d846d76d6b1612d80aefa9984e1fccaf277b617 | refs/heads/master | 2021-04-15T12:10:09.572258 | 2015-06-04T00:11:41 | 2015-06-04T00:11:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.Utils import COMMASPACE, formatdate
from config import get_io_config
env = get_io_config('email')
EMAIL_SERVER, EMAIL_PORT, EMAIL_PASS, EMAIL_USER, EMAIL_DOMAIN = env.EMAIL_SERVER,\
env.EMAIL_PORT, env.EMAIL_PASS, env.EMAIL_USER, env.EMAIL_DOMAIN
def send_email(to, subject, text, user_from, files=[], cc=[], bcc=[],
server=EMAIL_SERVER, port = EMAIL_PORT, user = EMAIL_USER,
password = EMAIL_PASS, domain = EMAIL_DOMAIN):
message = MIMEMultipart()
message['From'] = user_from
message['To'] = COMMASPACE.join(to)
message['Date'] = formatdate(localtime=True)
message['Subject'] = subject
message['Cc'] = COMMASPACE.join(cc)
message.attach(MIMEText(text))
for f in files:
part = MIMEBase('application', 'octet-stream')
part.set_payload(f)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % \
'report.csv')
message.attach(part)
addresses = []
for x in to:
addresses.append(x)
for x in cc:
addresses.append(x)
for x in bcc:
addresses.append(x)
s = smtplib.SMTP_SSL(server, port, domain)
s.login(user, password)
s.sendmail(user_from, addresses, message.as_string())
if __name__ == '__main__':
send_email(['test@test.com'], 'Test Subject', EMAIL_USER, \
files = [], server = EMAIL_SERVER, port = EMAIL_PORT)
| [
"manuel.garrido.pena@gmail.com"
] | manuel.garrido.pena@gmail.com |
640bf576ae100bad50a362e33563bbee122794d6 | f7dbe475e882e6bd74242c6ef286c17f1950727d | /logwebserver.py | d21929a738398a1c76155db5d74970e17f9b045d | [
"MIT"
] | permissive | snowytoxa/logwebserver | d1e1c99bd8f625e382c675b6c0dfa23cdd99e336 | d7089707eb45609251efbb00ebcf2ad66077444e | refs/heads/master | 2021-01-24T12:37:16.348497 | 2018-02-27T15:03:17 | 2018-02-27T15:03:17 | 123,143,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | #!/usr/bin/env python3
'''
Simple webserver that stores POST requests and let you browse it
'''
from datetime import datetime
import logging
import os
import os.path
import tempfile
import warnings
from flask import Flask, request, make_response # pylint: disable=import-error
from flask.exthook import ExtDeprecationWarning # pylint: disable=import-error
warnings.simplefilter('ignore', ExtDeprecationWarning)
from flask_autoindex import AutoIndex # pylint: disable=import-error,wrong-import-position
LOG_LEVEL = logging.INFO
logging.basicConfig(level=LOG_LEVEL,
format='%(asctime)s %(levelname)s %(name)s: %(message)s')
LOG = logging.getLogger(__name__)
LOGS_DIR = tempfile.mkdtemp()
FLASK_APP = Flask(__name__)
AutoIndex(FLASK_APP, browse_root=LOGS_DIR)
@FLASK_APP.errorhandler(405)
def log_post(e):
'''
Function will log all incoming POST requests
Args:
e (code or exception): exception
Returns:
str: always returns "OK"
'''
if request.method == 'POST':
path = request.path
logfile = os.path.join(LOGS_DIR,\
os.path.join(path,\
datetime.now().isoformat()).replace('/', '_'))
if request.is_json:
data = request.data
open(logfile + '.json', 'wb').write(data)
else:
data = request.stream.read()
open(logfile + '.bin', 'wb').write(data)
return 'OK %d'%(len(data))
return make_response(e)
if __name__ == '__main__':
LOG.info('Will store all files into %s', LOGS_DIR)
FLASK_APP.run()
| [
"asapozhnikov@spotify.com"
] | asapozhnikov@spotify.com |
d4557af3b7c995ca54723859be3c135e46354a04 | c5e9a375c56344506fceac680a9f892331a6d3c9 | /flask/venv/bin/easy_install-2.7 | bed0b5ad2f21edf4597cacd4326fa715203b69a2 | [] | no_license | JackyGuo1/python-repos | 7bca293ee0c1ee4c75a0ae97a0d60f1c116aac4d | 4e5ea0ff1479694afb6533e44a2398fe74afea95 | refs/heads/master | 2016-09-05T13:56:19.436035 | 2015-01-04T06:47:20 | 2015-01-04T06:47:20 | 26,110,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | 7 | #!/home/jiaqi/flask/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.24','console_scripts','easy_install-2.7'
__requires__ = 'distribute==0.6.24'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.24', 'console_scripts', 'easy_install-2.7')()
)
| [
"leon.k.scott@hotmail.com"
] | leon.k.scott@hotmail.com |
0687859952c1036a7b340e2b2af5c0511016df40 | 9f09ecfed34f5014116a1c7afadec2b9c07e9971 | /example_project/some_modules/third_modules/a53.py | 4271b8389a1366db2940c7006acd0fb466cfcb5a | [
"MIT"
] | permissive | Yuriy-Leonov/cython_imports_limit_issue | a04ce73e8c750f3a61d7aaacaf58665273bf4a49 | 2f9e7c02798fb52185dabfe6ce3811c439ca2839 | refs/heads/master | 2020-09-11T23:57:56.677138 | 2019-11-18T17:48:50 | 2019-11-18T17:51:07 | 222,232,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | class A53:
pass
| [
"tavoseg@gmail.com"
] | tavoseg@gmail.com |
94705d4e2128f06e29bf796bf924325c991c560f | 48b0742def8a12162b272fbf984bc33ec28cb2fb | /magedu/9.上下文.py | 49f327b3a7f767058d17b73d91abc5f7084f8c46 | [] | no_license | AprilJW/python_test | 3e6ddb90827ac6ba9d441861739d04d05cf21813 | 20510c5500d26776e33b101063998776f01f7566 | refs/heads/master | 2020-08-11T00:26:31.141732 | 2019-11-29T14:20:15 | 2019-11-29T14:20:15 | 214,453,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,470 | py | import time
import datetime
def timeit(add):
def wrapper(x, y):
t1 = datetime.datetime.now()
result = add(x, y)
delta = (datetime.datetime.now() - t1).total_seconds()
print(delta)
return result
return wrapper
@timeit #add = wrapper(add)
def add(x, y):
#time.sleep(2)
return x + y
#
# class TimeIt:
# def __init__(self, obj):
# self.obj = obj
#
# #self.calulatetime(obj)
#
# def calulatetime(self, obj):
# t1 = time.time()
# print(obj(1, 2))
# return time.time() - t1
#
#
# def __enter__(self):
# return self.calulatetime(self.obj)
#
# def __exit__(self, exc_type, exc_val, exc_tb):
# pass
#
# with TimeIt(add) as t:
# print(t)
# 方法1
class TimeIt:
def __init__(self, obj=None):
self.obj = obj
def __enter__(self):
self.start = datetime.datetime.now()
return self.obj
def __exit__(self, exc_type, exc_val, exc_tb):
delta = (datetime.datetime.now() - self.start).total_seconds()
print('time:', delta)
# with TimeIt(add):
# add(1, 2)
#print('class decorate')
# 通过类装饰器实现
from functools import update_wrapper, wraps
class TimeIt:
"""
122234
"""
def __init__(self, fn):
self.fn = fn
# self.__name__ = fn.__name__
# self.__doc__ = fn.__doc__
# update_wrapper(self, fn)
wraps(fn)(self)
def __call__(self, *args, **kwargs):
ret = self.fn(*args, **kwargs)
print(ret, self.__name__, self.__doc__)
return ret
@TimeIt # add = TimeIt(add)
def add(x, y):
""" doc add"""
#time.sleep(2)
return x + y
#print(add(1, 2))
# 魔术方法都是存在类的字典中,属于类的方法
# 实例调用类的方法,实例会作为第一参数注入
# 实例调用实例的方法,实例不会作为第一参数注入(无bound)
# 实例的方法,不可以被类的调用
#
# l
# f = open("/Users/jw/PycharmProjects/python_test/magedu/9.上下文.py", encoding='utf8')
# with f as p:
# print(f is p) # True
# print(f == p) # False
#
# class Point:
# def __init__(self):
# pass
#
# def __enter__(self):
# return self # 添加返回值
#
# def __exit__(self, exc_type, exc_val, exc_tb):
# print('1')
# return 123
#
#
# p = Point()
# with p as f:
# raise Exception('Error')
# print('2')
# class TimeIt:
# def __init__(self, fn):
# self.fn = fn
#
# def __call__(self, x, y):
# start = datetime.datetime.now()
# ret = self.fn(x, y)
# print((datetime.datetime.now() - start).total_seconds())
# return ret
#
# @TimeIt # add = TimeTt(add)
# def add(x, y):
# #time.sleep(2)
# return x + y
#
# print(add(1, 2))
# from functools import wraps, update_wrapper
# import time
# import datetime
# def timeit(fn):
# #@wraps(fn) #wraps(fn)(wrapper)
# def wrapper(*args, **kwargs):
# start = datetime.datetime.now()
# ret = fn(*args, **kwargs)
# delta = (datetime.datetime.now() - start).total_seconds()
# update_wrapper(wrapper, fn)
# print(fn.__name__, delta)
# return ret
# return wrapper
#
# @timeit
# def add(x, y):
# # time.sleep(2)
# return x + y
#
# print(add(1, 2))
| [
"1078059455@qq.com"
] | 1078059455@qq.com |
f9f3ba0f86cb31c0305308331904a6391387ee1a | 523703bf78e0199f1ad63f1a103723f1ba2452aa | /my_debugger_defines.py | 3e25f60628cba64df9ee1ff7efa42fa398628854 | [] | no_license | MaskedFox/GrayHatPython | f1ba154b27b362daa2fec4aa2e6793f1cbc81f6c | 4ab762f79c821454fcc686a5a58f1f8de35a324a | refs/heads/master | 2020-08-27T10:44:28.058881 | 2015-04-28T04:28:35 | 2015-04-28T04:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,192 | py | from ctypes import *
# Let's map the Microsoft types to ctypes for clarity
BYTE = c_ubyte
WORD = c_ushort
DWORD = c_ulong
LPBYTE = POINTER(c_ubyte)
LPTSTR = POINTER(c_char)
HANDLE = c_void_p
PVOID = c_void_p
LPVOID = c_void_p
UINT_PTR = c_ulong
SIZE_T = c_ulong
# Constants
DEBUG_PROCESS = 0x00000001
CREATE_NEW_CONSOLE = 0x00000010
PROCESS_ALL_ACCESS = 0x001F0FFF
INFINITE = 0xFFFFFFFF
DBG_CONTINUE = 0x00010002
# Debug event constants
EXCEPTION_DEBUG_EVENT = 0x1
CREATE_THREAD_DEBUG_EVENT = 0x2
CREATE_PROCESS_DEBUG_EVENT = 0x3
EXIT_THREAD_DEBUG_EVENT = 0x4
EXIT_PROCESS_DEBUG_EVENT = 0x5
LOAD_DLL_DEBUG_EVENT = 0x6
UNLOAD_DLL_DEBUG_EVENT = 0x7
OUTPUT_DEBUG_STRING_EVENT = 0x8
RIP_EVENT = 0x9
# debug exception codes.
EXCEPTION_ACCESS_VIOLATION = 0xC0000005
EXCEPTION_BREAKPOINT = 0x80000003
EXCEPTION_GUARD_PAGE = 0x80000001
EXCEPTION_SINGLE_STEP = 0x80000004
# Thread constants for CreateToolhelp32Snapshot()
TH32CS_SNAPHEAPLIST = 0x00000001
TH32CS_SNAPPROCESS = 0x00000002
TH32CS_SNAPTHREAD = 0x00000004
TH32CS_SNAPMODULE = 0x00000008
TH32CS_INHERIT = 0x80000000
TH32CS_SNAPALL = (TH32CS_SNAPHEAPLIST | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD | TH32CS_SNAPMODULE)
THREAD_ALL_ACCESS = 0x001F03FF
# Context flags for GetThreadContext()
CONTEXT_FULL = 0x00010007
CONTEXT_DEBUG_REGISTERS = 0x00010010
# Memory permissions
PAGE_EXECUTE_READWRITE = 0x00000040
# Hardware breakpoint conditions
HW_ACCESS = 0x00000003
HW_EXECUTE = 0x00000000
HW_WRITE = 0x00000001
# Memory page permissions, used by VirtualProtect()
PAGE_NOACCESS = 0x00000001
PAGE_READONLY = 0x00000002
PAGE_READWRITE = 0x00000004
PAGE_WRITECOPY = 0x00000008
PAGE_EXECUTE = 0x00000010
PAGE_EXECUTE_READ = 0x00000020
PAGE_EXECUTE_READWRITE = 0x00000040
PAGE_EXECUTE_WRITECOPY = 0x00000080
PAGE_GUARD = 0x00000100
PAGE_NOCACHE = 0x00000200
PAGE_WRITECOMBINE = 0x00000400
# Structures for CreateProcessA() function
# STARTUPINFO describes how to spawn the process
class STARTUPINFO(Structure):
    """ctypes mirror of the Win32 STARTUPINFO (ANSI) struct passed to
    kernel32.CreateProcessA. Field order/types must match the Windows ABI."""
    _fields_ = [
        ("cb", DWORD),
        ("lpReserved", LPTSTR),
        ("lpDesktop", LPTSTR),
        ("lpTitle", LPTSTR),
        ("dwX", DWORD),
        ("dwY", DWORD),
        ("dwXSize", DWORD),
        ("dwYSize", DWORD),
        ("dwXCountChars", DWORD),
        ("dwYCountChars", DWORD),
        ("dwFillAttribute",DWORD),
        ("dwFlags", DWORD),
        ("wShowWindow", WORD),
        ("cbReserved2", WORD),
        ("lpReserved2", LPBYTE),
        ("hStdInput", HANDLE),
        ("hStdOutput", HANDLE),
        ("hStdError", HANDLE),
    ]
# PROCESS_INFORMATION receives its information
# after the target process has been successfully
# started.
class PROCESS_INFORMATION(Structure):
    """ctypes mirror of the Win32 PROCESS_INFORMATION struct (out-param of
    CreateProcessA): process/thread handles and their IDs."""
    _fields_ = [
        ("hProcess", HANDLE),
        ("hThread", HANDLE),
        ("dwProcessId", DWORD),
        ("dwThreadId", DWORD),
    ]
# When the dwDebugEventCode is evaluated
class EXCEPTION_RECORD(Structure):
    # Declared empty first so _fields_ can contain a POINTER to the class
    # itself (self-referential struct); fields are attached just below.
    pass
EXCEPTION_RECORD._fields_ = [
        ("ExceptionCode", DWORD),
        ("ExceptionFlags", DWORD),
        ("ExceptionRecord", POINTER(EXCEPTION_RECORD)),
        ("ExceptionAddress", PVOID),
        ("NumberParameters", DWORD),
        ("ExceptionInformation", UINT_PTR * 15),
        ]
class _EXCEPTION_RECORD(Structure):
    # NOTE(review): duplicates EXCEPTION_RECORD field-for-field and is not
    # referenced anywhere in this chunk -- confirm before removing.
    _fields_ = [
        ("ExceptionCode", DWORD),
        ("ExceptionFlags", DWORD),
        ("ExceptionRecord", POINTER(EXCEPTION_RECORD)),
        ("ExceptionAddress", PVOID),
        ("NumberParameters", DWORD),
        ("ExceptionInformation", UINT_PTR * 15),
        ]
# Exceptions
class EXCEPTION_DEBUG_INFO(Structure):
    """Payload of an EXCEPTION_DEBUG_EVENT: the exception record plus a
    first-chance flag."""
    _fields_ = [
        ("ExceptionRecord", EXCEPTION_RECORD),
        ("dwFirstChance", DWORD),
    ]
# it populates this union appropriately
class DEBUG_EVENT_UNION(Union):
    """Union of per-event payloads; only the exception member is defined
    here, the remaining Win32 members are left as placeholders."""
    _fields_ = [
        ("Exception", EXCEPTION_DEBUG_INFO),
        # ("CreateThread", CREATE_THREAD_DEBUG_INFO),
        # ("CreateProcessInfo", CREATE_PROCESS_DEBUG_INFO),
        # ("ExitThread", EXIT_THREAD_DEBUG_INFO),
        # ("ExitProcess", EXIT_PROCESS_DEBUG_INFO),
        # ("LoadDll", LOAD_DLL_DEBUG_INFO),
        # ("UnloadDll", UNLOAD_DLL_DEBUG_INFO),
        # ("DebugString", OUTPUT_DEBUG_STRING_INFO),
        # ("RipInfo", RIP_INFO),
    ]
# DEBUG_EVENT describes a debugging event
# that the debugger has trapped
class DEBUG_EVENT(Structure):
    """Top-level event struct filled in by WaitForDebugEvent; the union `u`
    is interpreted according to dwDebugEventCode."""
    _fields_ = [
        ("dwDebugEventCode", DWORD),
        ("dwProcessId", DWORD),
        ("dwThreadId", DWORD),
        ("u", DEBUG_EVENT_UNION),
    ]
# Used by the CONTEXT structure
class FLOATING_SAVE_AREA(Structure):
    """x87 FPU save area embedded in the 32-bit CONTEXT struct."""
    _fields_ = [
        ("ControlWord", DWORD),
        ("StatusWord", DWORD),
        ("TagWord", DWORD),
        ("ErrorOffset", DWORD),
        ("ErrorSelector", DWORD),
        ("DataOffset", DWORD),
        ("DataSelector", DWORD),
        ("RegisterArea", BYTE * 80),
        ("Cr0NpxState", DWORD),
    ]
# The CONTEXT structure which holds all of the
# register values after a GetThreadContext() call
class CONTEXT(Structure):
    """32-bit (x86) thread CONTEXT: debug registers, FPU state, segment and
    general-purpose registers. Populated by GetThreadContext()."""
    _fields_ = [
        ("ContextFlags", DWORD),
        ("Dr0", DWORD),
        ("Dr1", DWORD),
        ("Dr2", DWORD),
        ("Dr3", DWORD),
        ("Dr6", DWORD),
        ("Dr7", DWORD),
        ("FloatSave", FLOATING_SAVE_AREA),
        ("SegGs", DWORD),
        ("SegFs", DWORD),
        ("SegEs", DWORD),
        ("SegDs", DWORD),
        ("Edi", DWORD),
        ("Esi", DWORD),
        ("Ebx", DWORD),
        ("Edx", DWORD),
        ("Ecx", DWORD),
        ("Eax", DWORD),
        ("Ebp", DWORD),
        ("Eip", DWORD),
        ("SegCs", DWORD),
        ("EFlags", DWORD),
        ("Esp", DWORD),
        ("SegSs", DWORD),
        ("ExtendedRegisters", BYTE * 512),
    ]
# THREADENTRY32 contains information about a thread
# we use this for enumerating all of the system threads
class THREADENTRY32(Structure):
    """One entry of a CreateToolhelp32Snapshot thread walk; dwSize must be
    initialized to sizeof(THREADENTRY32) before use."""
    _fields_ = [
        ("dwSize", DWORD),
        ("cntUsage", DWORD),
        ("th32ThreadID", DWORD),
        ("th32OwnerProcessID", DWORD),
        ("tpBasePri", DWORD),
        ("tpDeltaPri", DWORD),
        ("dwFlags", DWORD),
    ]
# Supporting struct for the SYSTEM_INFO_UNION union
class PROC_STRUCT(Structure):
    """Processor-architecture half of the SYSTEM_INFO anonymous union."""
    _fields_ = [
        ("wProcessorArchitecture", WORD),
        ("wReserved", WORD),
    ]
# Supporting union for the SYSTEM_INFO struct
class SYSTEM_INFO_UNION(Union):
    """Anonymous union at the start of SYSTEM_INFO (legacy OEM id vs.
    processor-architecture struct)."""
    _fields_ = [
        ("dwOemId", DWORD),
        ("sProcStruc", PROC_STRUCT),
    ]
# SYSTEM_INFO structure is populated when a call to
# kernel32.GetSystemInfo() is made. We use the dwPageSize
# member for size calculations when setting memory breakpoints
class SYSTEM_INFO(Structure):
    """ctypes mirror of Win32 SYSTEM_INFO (out-param of GetSystemInfo)."""
    _fields_ = [
        ("uSysInfo", SYSTEM_INFO_UNION),
        ("dwPageSize", DWORD),
        ("lpMinimumApplicationAddress", LPVOID),
        ("lpMaximumApplicationAddress", LPVOID),
        ("dwActiveProcessorMask", DWORD),
        ("dwNumberOfProcessors", DWORD),
        ("dwProcessorType", DWORD),
        ("dwAllocationGranularity", DWORD),
        ("wProcessorLevel", WORD),
        ("wProcessorRevision", WORD),
    ]
# MEMORY_BASIC_INFORMATION contains information about a
# particular region of memory. A call to kernel32.VirtualQuery()
# populates this structure.
class MEMORY_BASIC_INFORMATION(Structure):
    """ctypes mirror of Win32 MEMORY_BASIC_INFORMATION (out-param of
    VirtualQuery): base/allocation addresses, size, state and protection."""
    _fields_ = [
        ("BaseAddress", PVOID),
        ("AllocationBase", PVOID),
        ("AllocationProtect", DWORD),
        ("RegionSize", SIZE_T),
        ("State", DWORD),
        ("Protect", DWORD),
        ("Type", DWORD),
    ]
| [
"grazfather@gmail.com"
] | grazfather@gmail.com |
59700c65c91e5ce73f50aa34b200bbc3f41af236 | 93deecf5c4faf15658ec0739d8e8f852d50bb0d9 | /djangosite/djangosite/wsgi.py | eecc35b59cc6a646755cd1af960f27f663fa833a | [] | no_license | nurnabilarzar/task | fe4e70d417e9ed7ad2585c4cdd3d446a290e57f2 | d308d05f37bab2348efd03642af04892e6896282 | refs/heads/master | 2020-06-10T06:42:19.948871 | 2019-06-25T01:49:57 | 2019-06-25T01:49:57 | 174,452,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for djangosite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangosite.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"nurnabilarzar@gmail.com"
] | nurnabilarzar@gmail.com |
fe67cbd2fbdca0fb9203371b298604412056b63b | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1904/py01/day02/game.py | da175d732f5814bc887260e614a5f974e7b8ad95 | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 778 | py | import random # 导入random模块
# random.choice()从一个序列对象中随机选择一项
computer = random.choice(['石头', '剪刀', '布'])
player = input('请出拳(石头/剪刀/布): ')
print("Your choice: %s, Computer's choice: %s" % (player, computer))
if player == '石头':
if computer == '石头':
print('平局')
elif computer == '剪刀':
print('You WIN!!!')
else:
print('You LOSE!!!')
elif player == '剪刀':
if computer == '石头':
print('You LOSE!!!')
elif computer == '剪刀':
print('平局')
else:
print('You WIN!!!')
else:
if computer == '石头':
print('You WIN!!!')
elif computer == '剪刀':
print('You LOSE!!!')
else:
print('平局')
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
7470470953c66bdaa8f8188857937ce8f7659290 | 7878fe3b15df39aeeab4b4deb5b123e77c77ad6d | /1. Introduction to Deep Reinforcement Learning/monte-carlo/plot_utils.py | 6efc564210fb9726d2ee90247a525dadedc323fa | [] | no_license | st2yang/udacity-deep-reinforcement-learning | 44902094eb4e8403655699a32408022af700173a | 88775432751d1346d912a256b32446e6863e9b72 | refs/heads/master | 2022-04-24T16:44:18.743421 | 2020-04-23T22:04:04 | 2020-04-23T22:04:04 | 246,714,829 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_blackjack_values(V):
    """Render the Blackjack state-value function V as two 3-D surfaces
    (usable ace vs. no usable ace) over player sum and dealer card.

    V maps (player_sum, dealer_card, usable_ace) -> value.
    """
    def get_Z(x, y, usable_ace):
        # States missing from V default to a value of 0.
        if (x,y,usable_ace) in V:
            return V[x,y,usable_ace]
        else:
            return 0
    def get_figure(usable_ace, ax):
        # Player sums 11..21 on x, dealer showing card 1..10 on y.
        x_range = np.arange(11, 22)
        y_range = np.arange(1, 11)
        X, Y = np.meshgrid(x_range, y_range)
        Z = np.array([get_Z(x,y,usable_ace) for x,y in zip(np.ravel(X), np.ravel(Y))]).reshape(X.shape)
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.coolwarm, vmin=-1.0, vmax=1.0)
        ax.set_xlabel('Player\'s Current Sum')
        ax.set_ylabel('Dealer\'s Showing Card')
        ax.set_zlabel('State Value')
        ax.view_init(ax.elev, -120)
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(211, projection='3d')
    ax.set_title('Usable Ace')
    get_figure(True, ax)
    ax = fig.add_subplot(212, projection='3d')
    ax.set_title('No Usable Ace')
    get_figure(False, ax)
    plt.show()
def plot_policy(policy):
    """Show a Blackjack policy as two STICK/HIT heat maps (usable ace vs.
    no usable ace) over player sum and dealer card.

    policy maps (player_sum, dealer_card, usable_ace) -> 0 (STICK) or 1 (HIT).
    """
    def get_Z(x, y, usable_ace):
        # States missing from the policy default to action 1 (HIT).
        if (x,y,usable_ace) in policy:
            return policy[x,y,usable_ace]
        else:
            return 1
    def get_figure(usable_ace, ax):
        x_range = np.arange(11, 22)
        y_range = np.arange(10, 0, -1)
        X, Y = np.meshgrid(x_range, y_range)
        Z = np.array([[get_Z(x,y,usable_ace) for x in x_range] for y in y_range])
        # Two-color map: 0 -> STICK, 1 -> HIT.
        surf = ax.imshow(Z, cmap=plt.get_cmap('Pastel2', 2), vmin=0, vmax=1, extent=[10.5, 21.5, 0.5, 10.5])
        plt.xticks(x_range)
        plt.yticks(y_range)
        plt.gca().invert_yaxis()
        ax.set_xlabel('Player\'s Current Sum')
        ax.set_ylabel('Dealer\'s Showing Card')
        ax.grid(color='w', linestyle='-', linewidth=1)
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.1)
        cbar = plt.colorbar(surf, ticks=[0,1], cax=cax)
        cbar.ax.set_yticklabels(['0 (STICK)','1 (HIT)'])
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(121)
    ax.set_title('Usable Ace')
    get_figure(True, ax)
    ax = fig.add_subplot(122)
    ax.set_title('No Usable Ace')
    get_figure(False, ax)
    plt.show()
| [
"st2yang@gmail.com"
] | st2yang@gmail.com |
e15797bc9cb101f1d16201755b202b999d36cca0 | a8f91abeeb3898a9fb9525e6c319c500e2f7abba | /Project 5 - Classification/dataClassifier.py | 7ac5c219d40e8f92f71f9f43b5431477ea288a22 | [] | no_license | ThanhThuUet/INT3401-AI1920-Pac-Man-Projects | 28cd954a82c15c9d13b548104c7831bb688c2664 | 8537dae38836ab08468dcfdb1a0b383f9282e854 | refs/heads/master | 2020-07-25T22:59:05.203924 | 2019-11-18T16:47:46 | 2019-11-18T16:47:46 | 208,450,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,090 | py | # dataClassifier.py
# -----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# This file contains feature extraction methods and harness
# code for data classification
import mostFrequent
import naiveBayes
import perceptron
import perceptron_pacman
import mira
import samples
import sys
import util
from pacman import GameState
# Default test-set size and raw image dimensions for each dataset.
TEST_SET_SIZE = 100
DIGIT_DATUM_WIDTH=28
DIGIT_DATUM_HEIGHT=28
FACE_DATUM_WIDTH=60
FACE_DATUM_HEIGHT=70
def basicFeatureExtractorDigit(datum):
    """
    Returns a util.Counter mapping each (x, y) pixel coordinate to 1 when
    the pixel is gray/black (value > 0) and to 0 when it is white.
    """
    datum.getPixels()  # kept from the original (return value unused)
    features = util.Counter()
    for col in range(DIGIT_DATUM_WIDTH):
        for row in range(DIGIT_DATUM_HEIGHT):
            features[(col, row)] = 1 if datum.getPixel(col, row) > 0 else 0
    return features
def basicFeatureExtractorFace(datum):
    """
    Returns a util.Counter mapping each (x, y) pixel coordinate to 1 when
    the pixel is an edge (value > 0) and to 0 otherwise.
    """
    datum.getPixels()  # kept from the original (return value unused)
    features = util.Counter()
    for col in range(FACE_DATUM_WIDTH):
        for row in range(FACE_DATUM_HEIGHT):
            features[(col, row)] = 1 if datum.getPixel(col, row) > 0 else 0
    return features
def enhancedFeatureExtractorDigit(datum):
    """
    Basic pixel features plus a one-hot encoding (feature keys 0..9) of the
    number of enclosed background regions found by checkDarkSpace(datum).
    """
    features = basicFeatureExtractorDigit(datum)
    regions = checkDarkSpace(datum)
    for label in xrange(10):
        features[label] = 1 if label == regions else 0
    return features
def checkDarkSpace(datum):
    """
    Count connected regions of background pixels (value 0) in the image,
    using the diagonal connectivity defined by getPixelNeighbors.

    Improvements over the original:
    - iterative flood fill instead of recursion (a 28x28 region could
      exceed Python's default recursion limit);
    - a set for visited pixels instead of a list (O(1) membership
      instead of O(n) per test);
    - explores ALL background neighbors of each pixel (the original
      recursive helper aborted its neighbor loop at the first
      non-background neighbor, which could fragment regions and
      over-count them).
    """
    visited = set()
    count = 0
    for x in range(DIGIT_DATUM_WIDTH):
        for y in range(DIGIT_DATUM_HEIGHT):
            if datum.getPixel(x, y) == 0 and (x, y) not in visited:
                # Found an unvisited background pixel: new region.
                count += 1
                stack = [(x, y)]
                visited.add((x, y))
                while stack:
                    pixel = stack.pop()
                    for neighbor in getPixelNeighbors(pixel):
                        if neighbor not in visited and datum.getPixel(neighbor[0], neighbor[1]) == 0:
                            visited.add(neighbor)
                            stack.append(neighbor)
    return count
def darkSpaceHelper(pixel, visitedList, datum):
    """
    Recursive flood fill: mark `pixel` visited, then recurse into every
    neighboring background (value 0) pixel.

    Bug fix: the original had ``else: return`` inside the loop, which
    aborted the scan at the first non-background neighbor and left the
    remaining neighbors unexplored, so a single region could be counted
    several times by checkDarkSpace.
    """
    if pixel in visitedList:
        return
    visitedList.append(pixel)
    for neighbor in getPixelNeighbors(pixel):
        if datum.getPixel(neighbor[0], neighbor[1]) == 0:
            darkSpaceHelper(neighbor, visitedList, datum)
def getPixelNeighbors(pixel):
    """Return the in-bounds diagonal neighbors of a (x, y) pixel.

    NOTE(review): only the four diagonal neighbors are produced, so the
    flood fill's connectivity is diagonal-only -- confirm this is intended.
    """
    x, y = pixel
    candidates = [(x + 1, y - 1), (x + 1, y + 1), (x - 1, y - 1), (x - 1, y + 1)]
    return [(nx, ny) for (nx, ny) in candidates
            if nx in range(DIGIT_DATUM_WIDTH) and ny in range(DIGIT_DATUM_HEIGHT)]
def basicFeatureExtractorPacman(state):
    """
    For every legal action, record a util.Counter holding the remaining
    food count of the successor state. Returns (features, legal actions).
    """
    features = util.Counter()
    for action in state.getLegalActions():
        nextState = state.generateSuccessor(0, action)
        actionFeatures = util.Counter()
        actionFeatures['foodCount'] = nextState.getFood().count()
        features[action] = actionFeatures
    return features, state.getLegalActions()
def enhancedFeatureExtractorPacman(state):
    """
    Basic Pacman features merged with the hand-tuned enhancedPacmanFeatures
    for each legal action. Returns (features, legal actions).
    """
    features, _ = basicFeatureExtractorPacman(state)
    for action in state.getLegalActions():
        merged = util.Counter(features[action], **enhancedPacmanFeatures(state, action))
        features[action] = merged
    return features, state.getLegalActions()
def enhancedPacmanFeatures(state, action):
    """
    Hand-tuned features for a (state, action) pair: inverse distance to the
    closest ghost, inverse distance to the closest capsule, and weighted
    distances to the five closest food pellets (weights 1.5 / 0.5).
    """
    features = util.Counter()
    successor = state.generateSuccessor(0, action)
    pacman = successor.getPacmanPosition()
    # Sorted Manhattan distances from Pacman's successor position.
    ghostDists = sorted(util.manhattanDistance(pacman, g) for g in successor.getGhostPositions())
    capsuleDists = sorted(util.manhattanDistance(pacman, c) for c in successor.getCapsules())
    # Food is read from the CURRENT state (as in the original).
    grid = state.getFood()
    foodCells = [(a, b) for a, row in enumerate(grid) for b, hasFood in enumerate(row) if hasFood]
    foodDists = sorted(util.manhattanDistance(pacman, f) for f in foodCells)
    if ghostDists:
        features[("ghost", 0)] = 1 / (0.1 + ghostDists[0])
    if capsuleDists:
        features[("capsule", 0)] = 10 / (1 + capsuleDists[0])
    for i in xrange(min(len(foodDists), 5)):
        weight = 1.5 if i < 2 else 0.5
        features[("food", i)] = weight * foodDists[i]
    return features
def contestFeatureExtractorDigit(datum):
    """
    Feature extractor used for the minicontest (currently just the basic
    pixel features).
    """
    return basicFeatureExtractorDigit(datum)
def enhancedFeatureExtractorFace(datum):
    """
    Playground for enhanced face features (currently just the basic edge
    features).
    """
    return basicFeatureExtractorFace(datum)
def analysis(classifier, guesses, testLabels, testData, rawTestData, printImage):
    """
    This function is called after learning.
    Include any code that you want here to help you analyze your results.
    Use the printImage(<list of pixels>) function to visualize features.
    An example of use has been given to you.
    - classifier is the trained classifier
    - guesses is the list of labels predicted by your classifier on the test set
    - testLabels is the list of true labels
    - testData is the list of training datapoints (as util.Counter of features)
    - rawTestData is the list of training datapoints (as samples.Datum)
    - printImage is a method to visualize the features
    (see its use in the odds ratio part in runClassifier method)
    This code won't be evaluated. It is for your own optional use
    (and you can modify the signature if you want).
    """
    # Intentionally empty hook: uncomment the example below to inspect the
    # first misclassified test example.
    # Put any code here...
    # Example of use:
    # for i in range(len(guesses)):
    #     prediction = guesses[i]
    #     truth = testLabels[i]
    #     if (prediction != truth):
    #         print "==================================="
    #         print "Mistake on example %d" % i
    #         print "Predicted %d; truth is %d" % (prediction, truth)
    #         print "Image: "
    #         print rawTestData[i]
    #         break
## =====================
## You don't have to modify any code below.
## =====================
class ImagePrinter:
    """Visualizes lists of (x, y) pixel features as an ASCII image."""
    def __init__(self, width, height):
        self.width = width
        self.height = height
    def printImage(self, pixels):
        """
        Prints a Datum object that contains all pixels in the
        provided list of pixels. This will serve as a helper function
        to the analysis function you write.
        Pixels should take the form
        [(2,2), (2, 3), ...]
        where each tuple represents a pixel.
        """
        image = samples.Datum(None,self.width,self.height)
        for pix in pixels:
            try:
                # Features that are not (x, y) pairs (or are out of range)
                # cannot be drawn; report and skip them.
                x,y = pix
                image.pixels[x][y] = 2
            # Bug fix: the original bare `except:` swallowed EVERY exception
            # (including KeyboardInterrupt/SystemExit); catch only the
            # failures an odd feature key can actually cause.
            except (ValueError, TypeError, IndexError):
                # print form works identically under Python 2 and 3
                print("new features: %s" % (pix,))
                continue
        print(image)
def default(str):
    """Append optparse's '[Default: %default]' marker to a help string."""
    return '{0} [Default: %default]'.format(str)
USAGE_STRING = """
USAGE: python dataClassifier.py <options>
EXAMPLES: (1) python dataClassifier.py
- trains the default mostFrequent classifier on the digit dataset
using the default 100 training examples and
then test the classifier on test data
(2) python dataClassifier.py -c naiveBayes -d digits -t 1000 -f -o -1 3 -2 6 -k 2.5
- would run the naive Bayes classifier on 1000 training examples
using the enhancedFeatureExtractorDigits function to get the features
on the faces dataset, would use the smoothing parameter equals to 2.5, would
test the classifier on the test data and performs an odd ratio analysis
with label1=3 vs. label2=6
"""
def readCommand( argv ):
    "Processes the command used to run from the command line."
    from optparse import OptionParser
    parser = OptionParser(USAGE_STRING)
    # Command-line options: classifier, dataset, sizes and analysis flags.
    parser.add_option('-c', '--classifier', help=default('The type of classifier'), choices=['mostFrequent', 'nb', 'naiveBayes', 'perceptron', 'mira', 'minicontest'], default='mostFrequent')
    parser.add_option('-d', '--data', help=default('Dataset to use'), choices=['digits', 'faces', 'pacman'], default='digits')
    parser.add_option('-t', '--training', help=default('The size of the training set'), default=100, type="int")
    parser.add_option('-f', '--features', help=default('Whether to use enhanced features'), default=False, action="store_true")
    parser.add_option('-o', '--odds', help=default('Whether to compute odds ratios'), default=False, action="store_true")
    parser.add_option('-1', '--label1', help=default("First label in an odds ratio comparison"), default=0, type="int")
    parser.add_option('-2', '--label2', help=default("Second label in an odds ratio comparison"), default=1, type="int")
    parser.add_option('-w', '--weights', help=default('Whether to print weights'), default=False, action="store_true")
    parser.add_option('-k', '--smoothing', help=default("Smoothing parameter (ignored when using --autotune)"), type="float", default=2.0)
    parser.add_option('-a', '--autotune', help=default("Whether to automatically tune hyperparameters"), default=False, action="store_true")
    parser.add_option('-i', '--iterations', help=default("Maximum iterations to run training"), default=3, type="int")
    parser.add_option('-s', '--test', help=default("Amount of test data to use"), default=TEST_SET_SIZE, type="int")
    parser.add_option('-g', '--agentToClone', help=default("Pacman agent to copy"), default=None, type="str")
    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0: raise Exception('Command line input not understood: ' + str(otherjunk))
    args = {}
    # Set up variables according to the command line input.
    print "Doing classification"
    print "--------------------"
    print "data:\t\t" + options.data
    print "classifier:\t\t" + options.classifier
    if not options.classifier == 'minicontest':
        print "using enhanced features?:\t" + str(options.features)
    else:
        print "using minicontest feature extractor"
    print "training set size:\t" + str(options.training)
    # Select the feature extractor (and image printer) for the dataset.
    if(options.data=="digits"):
        printImage = ImagePrinter(DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT).printImage
        if (options.features):
            featureFunction = enhancedFeatureExtractorDigit
        else:
            featureFunction = basicFeatureExtractorDigit
        if (options.classifier == 'minicontest'):
            featureFunction = contestFeatureExtractorDigit
    elif(options.data=="faces"):
        printImage = ImagePrinter(FACE_DATUM_WIDTH, FACE_DATUM_HEIGHT).printImage
        if (options.features):
            featureFunction = enhancedFeatureExtractorFace
        else:
            featureFunction = basicFeatureExtractorFace
    elif(options.data=="pacman"):
        printImage = None
        if (options.features):
            featureFunction = enhancedFeatureExtractorPacman
        else:
            featureFunction = basicFeatureExtractorPacman
    else:
        print "Unknown dataset", options.data
        print USAGE_STRING
        sys.exit(2)
    # Legal labels: digit classes 0-9, otherwise Pacman actions.
    if(options.data=="digits"):
        legalLabels = range(10)
    else:
        legalLabels = ['Stop', 'West', 'East', 'North', 'South']
    # Validate numeric options before instantiating the classifier.
    if options.training <= 0:
        print "Training set size should be a positive integer (you provided: %d)" % options.training
        print USAGE_STRING
        sys.exit(2)
    if options.smoothing <= 0:
        print "Please provide a positive number for smoothing (you provided: %f)" % options.smoothing
        print USAGE_STRING
        sys.exit(2)
    if options.odds:
        if options.label1 not in legalLabels or options.label2 not in legalLabels:
            print "Didn't provide a legal labels for the odds ratio: (%d,%d)" % (options.label1, options.label2)
            print USAGE_STRING
            sys.exit(2)
    # Instantiate the requested classifier.
    if(options.classifier == "mostFrequent"):
        classifier = mostFrequent.MostFrequentClassifier(legalLabels)
    elif(options.classifier == "naiveBayes" or options.classifier == "nb"):
        classifier = naiveBayes.NaiveBayesClassifier(legalLabels)
        classifier.setSmoothing(options.smoothing)
        if (options.autotune):
            print "using automatic tuning for naivebayes"
            classifier.automaticTuning = True
        else:
            print "using smoothing parameter k=%f for naivebayes" % options.smoothing
    elif(options.classifier == "perceptron"):
        if options.data != 'pacman':
            classifier = perceptron.PerceptronClassifier(legalLabels,options.iterations)
        else:
            classifier = perceptron_pacman.PerceptronClassifierPacman(legalLabels,options.iterations)
    elif(options.classifier == "mira"):
        if options.data != 'pacman':
            classifier = mira.MiraClassifier(legalLabels, options.iterations)
        if (options.autotune):
            print "using automatic tuning for MIRA"
            classifier.automaticTuning = True
        else:
            print "using default C=0.001 for MIRA"
    elif(options.classifier == 'minicontest'):
        import minicontest
        classifier = minicontest.contestClassifier(legalLabels)
    else:
        print "Unknown classifier:", options.classifier
        print USAGE_STRING
        sys.exit(2)
    args['agentToClone'] = options.agentToClone
    args['classifier'] = classifier
    args['featureFunction'] = featureFunction
    args['printImage'] = printImage
    return args, options
# Dictionary containing full path to .pkl file that contains the agent's training, validation, and testing data.
# Keys are agent names accepted via --agentToClone; each value is a
# (training, validation, test) path triple.
MAP_AGENT_TO_PATH_OF_SAVED_GAMES = {
    'FoodAgent': ('pacmandata/food_training.pkl','pacmandata/food_validation.pkl','pacmandata/food_test.pkl' ),
    'StopAgent': ('pacmandata/stop_training.pkl','pacmandata/stop_validation.pkl','pacmandata/stop_test.pkl' ),
    'SuicideAgent': ('pacmandata/suicide_training.pkl','pacmandata/suicide_validation.pkl','pacmandata/suicide_test.pkl' ),
    'GoodReflexAgent': ('pacmandata/good_reflex_training.pkl','pacmandata/good_reflex_validation.pkl','pacmandata/good_reflex_test.pkl' ),
    'ContestAgent': ('pacmandata/contest_training.pkl','pacmandata/contest_validation.pkl', 'pacmandata/contest_test.pkl' )
}
# Main harness code
def runClassifier(args, options):
    """Load the selected dataset, extract features, train the classifier,
    report validation/test accuracy, and optionally run odds-ratio or
    weight analyses."""
    featureFunction = args['featureFunction']
    classifier = args['classifier']
    printImage = args['printImage']
    # Load data
    numTraining = options.training
    numTest = options.test
    if(options.data=="pacman"):
        # Pick the saved-game files for the agent being cloned, falling
        # back to the ContestAgent data when none are specified.
        agentToClone = args.get('agentToClone', None)
        trainingData, validationData, testData = MAP_AGENT_TO_PATH_OF_SAVED_GAMES.get(agentToClone, (None, None, None))
        trainingData = trainingData or args.get('trainingData', False) or MAP_AGENT_TO_PATH_OF_SAVED_GAMES['ContestAgent'][0]
        validationData = validationData or args.get('validationData', False) or MAP_AGENT_TO_PATH_OF_SAVED_GAMES['ContestAgent'][1]
        testData = testData or MAP_AGENT_TO_PATH_OF_SAVED_GAMES['ContestAgent'][2]
        rawTrainingData, trainingLabels = samples.loadPacmanData(trainingData, numTraining)
        rawValidationData, validationLabels = samples.loadPacmanData(validationData, numTest)
        rawTestData, testLabels = samples.loadPacmanData(testData, numTest)
    else:
        rawTrainingData = samples.loadDataFile("digitdata/trainingimages", numTraining,DIGIT_DATUM_WIDTH,DIGIT_DATUM_HEIGHT)
        trainingLabels = samples.loadLabelsFile("digitdata/traininglabels", numTraining)
        rawValidationData = samples.loadDataFile("digitdata/validationimages", numTest,DIGIT_DATUM_WIDTH,DIGIT_DATUM_HEIGHT)
        validationLabels = samples.loadLabelsFile("digitdata/validationlabels", numTest)
        rawTestData = samples.loadDataFile("digitdata/testimages", numTest,DIGIT_DATUM_WIDTH,DIGIT_DATUM_HEIGHT)
        testLabels = samples.loadLabelsFile("digitdata/testlabels", numTest)
    # Extract features
    print "Extracting features..."
    trainingData = map(featureFunction, rawTrainingData)
    validationData = map(featureFunction, rawValidationData)
    testData = map(featureFunction, rawTestData)
    # Conduct training and testing
    print "Training..."
    classifier.train(trainingData, trainingLabels, validationData, validationLabels)
    print "Validating..."
    guesses = classifier.classify(validationData)
    correct = [guesses[i] == validationLabels[i] for i in range(len(validationLabels))].count(True)
    print str(correct), ("correct out of " + str(len(validationLabels)) + " (%.1f%%).") % (100.0 * correct / len(validationLabels))
    print "Testing..."
    guesses = classifier.classify(testData)
    correct = [guesses[i] == testLabels[i] for i in range(len(testLabels))].count(True)
    print str(correct), ("correct out of " + str(len(testLabels)) + " (%.1f%%).") % (100.0 * correct / len(testLabels))
    analysis(classifier, guesses, testLabels, testData, rawTestData, printImage)
    # do odds ratio computation if specified at command line
    if((options.odds) & (options.classifier == "naiveBayes" or (options.classifier == "nb")) ):
        label1, label2 = options.label1, options.label2
        features_odds = classifier.findHighOddsFeatures(label1,label2)
        if(options.classifier == "naiveBayes" or options.classifier == "nb"):
            string3 = "=== Features with highest odd ratio of label %d over label %d ===" % (label1, label2)
        else:
            string3 = "=== Features for which weight(label %d)-weight(label %d) is biggest ===" % (label1, label2)
        print string3
        printImage(features_odds)
    if((options.weights) & (options.classifier == "perceptron")):
        for l in classifier.legalLabels:
            features_weights = classifier.findHighWeightFeatures(l)
            print ("=== Features with high weight for label %d ==="%l)
            printImage(features_weights)
if __name__ == '__main__':
    # Read input (skip argv[0], the script name)
    args, options = readCommand( sys.argv[1:] )
    # Run classifier
    runClassifier(args, options)
| [
"thanhthu97uet@gmai.com"
] | thanhthu97uet@gmai.com |
3b9ad6f19a771db88721d9a80879d06260a092fb | 0de146cde9cbbb8c731f5b923025e7087beb3d87 | /chaines.py | ac3dd7142e58a26a3e7754b122faaba9f4842fa7 | [] | no_license | Dante126/learnpython | d0a97c467c5476db328946a331883cf3848fa4aa | b47a695bbda20f8b37e73da54b7790a5e7af6dfd | refs/heads/master | 2021-01-13T21:25:28.791368 | 2020-02-23T11:10:27 | 2020-02-23T11:10:27 | 242,499,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py |
def compte_car(chaine):
    """Return the number of characters in ``chaine``.

    Replaces the original manual counting loop with the built-in len(),
    which is O(1) for strings.
    """
    return len(chaine)
def compte_voyelles(chaine):
    """Return the number of vowels (a, e, i, o, u, y) in ``chaine``.

    Only lowercase unaccented vowels are counted, as in the original.
    Uses a frozenset for O(1) membership and sum() over a generator
    instead of a manual counter loop.
    """
    voyelles = frozenset('aeiouy')
    return sum(1 for c in chaine if c in voyelles)
def compte_espaces(chaine):
    """Return the number of space characters in ``chaine``.

    Replaces the manual loop with the built-in str.count().
    """
    return chaine.count(' ')
def remplace_espaces(chaine, car):
    """Return a copy of ``chaine`` with every space replaced by ``car``.

    Replaces the character-by-character rebuild (quadratic string
    concatenation) with the built-in str.replace(), which behaves
    identically including for multi-character ``car``.
    """
    return chaine.replace(' ', car)
# Demo / manual test of the string helpers above.
chaine = "moi c'est Diallo, moi c'est Diallo"
print(compte_car(chaine))
print(compte_voyelles(chaine))
print(compte_espaces(chaine))
# Replace every space with a dash and show the result.
chaine2 = remplace_espaces(chaine, '-')
print(chaine2)
| [
""
] | |
5493b3064d9dd52325a5cf37847a6698fe1676b3 | a079f2221d810291659601c570f87a2a65898404 | /controller.py | d31a870823ad5553540838bb13025ecd966129b5 | [] | no_license | tchatzis/xbox | cad6f8bf33afea6b4089d94905f3260f90ec45d5 | 5bd8960443f8e873a72fba22a8c5d16a2f44972d | refs/heads/master | 2021-01-22T01:05:00.307467 | 2015-01-15T06:04:49 | 2015-01-15T06:04:49 | 29,283,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,044 | py | import ctypes
import sys
import time
from operator import itemgetter, attrgetter
from itertools import count, starmap
from pyglet import event
# structs according to
# http://msdn.microsoft.com/en-gb/library/windows/desktop/ee417001%28v=vs.85%29.aspx
class XINPUT_GAMEPAD(ctypes.Structure):
    """ctypes mirror of the Win32 XINPUT_GAMEPAD struct (see the MSDN link
    above): button bitmask, trigger positions and thumb-stick axes."""
    _fields_ = [
        ('buttons', ctypes.c_ushort),  # wButtons
        ('left_trigger', ctypes.c_ubyte),  # bLeftTrigger
        ('right_trigger', ctypes.c_ubyte),  # bLeftTrigger
        ('l_thumb_x', ctypes.c_short),  # sThumbLX
        ('l_thumb_y', ctypes.c_short),  # sThumbLY
        ('r_thumb_x', ctypes.c_short),  # sThumbRx
        ('r_thumb_y', ctypes.c_short),  # sThumbRy
    ]
class XINPUT_STATE(ctypes.Structure):
    """ctypes mirror of XINPUT_STATE: a packet counter (increments whenever
    the controller state changes) plus the gamepad snapshot."""
    _fields_ = [
        ('packet_number', ctypes.c_ulong),  # dwPacketNumber
        ('gamepad', XINPUT_GAMEPAD),  # Gamepad
    ]
class XINPUT_VIBRATION(ctypes.Structure):
    """ctypes mirror of XINPUT_VIBRATION: left/right rumble motor speeds
    (0-65535), passed to XInputSetState."""
    _fields_ = [("wLeftMotorSpeed", ctypes.c_ushort),
                ("wRightMotorSpeed", ctypes.c_ushort)]
# Load the XInput DLL via ctypes (Windows-only; fails on other platforms).
xinput = ctypes.windll.xinput9_1_0  # this is the Win 8 version ?
# xinput1_2, xinput1_1 (32-bit Vista SP1)
# xinput1_3 (64-bit Vista SP1)
def struct_dict(struct):
    """
    Return the field/value pairs of a ctypes.Structure (class or instance)
    as a dict keyed by field name.

    >>> 'buttons' in struct_dict(XINPUT_GAMEPAD)
    True
    """
    return dict((field[0], getattr(struct, field[0]))
                for field in struct._fields_)
def get_bit_values(number, size=32):
    """
    Return the bits of ``number`` as a list, most significant bit first,
    left-padded with zeros to ``size`` entries.

    >>> get_bit_values(0x3, 4)
    [0, 0, 1, 1]

    Override the default 32-bit word size to match your application.
    """
    bits = list(gen_bit_values(number))[::-1]
    return [0] * (size - len(bits)) + bits
def gen_bit_values(number):
    """
    Yield the bits of ``number`` least-significant first, stopping after
    the highest set bit (nothing is yielded for 0).
    """
    n = int(number)
    while n:
        n, bit = n >> 1, n & 0x1
        yield bit
# Win32 error codes returned by XInputGetState.
ERROR_DEVICE_NOT_CONNECTED = 1167
ERROR_SUCCESS = 0
class XInputJoystick(event.EventDispatcher):
"""
XInputJoystick
A stateful wrapper, using pyglet event model, that binds to one
XInput device and dispatches events when states change.
Example:
controller_one = XInputJoystick(0)
"""
max_devices = 4
def __init__(self, device_number, normalize_axes=True):
values = vars()
del values['self']
self.__dict__.update(values)
super(XInputJoystick, self).__init__()
self._last_state = self.get_state()
self.received_packets = 0
self.missed_packets = 0
# Set the method that will be called to normalize
# the values for analog axis.
choices = [self.translate_identity, self.translate_using_data_size]
self.translate = choices[normalize_axes]
def translate_using_data_size(self, value, data_size):
# normalizes analog data to [0,1] for unsigned data
# and [-0.5,0.5] for signed data
data_bits = 8 * data_size
return float(value) / (2 ** data_bits - 1)
    def translate_identity(self, value, data_size=None):
        # No-op normalization: return the raw axis value unchanged.
        return value
def get_state(self):
"Get the state of the controller represented by this object"
state = XINPUT_STATE()
res = xinput.XInputGetState(self.device_number, ctypes.byref(state))
if res == ERROR_SUCCESS:
return state
if res != ERROR_DEVICE_NOT_CONNECTED:
raise RuntimeError(
"Unknown error %d attempting to get state of device %d" % (res, self.device_number))
# else return None (device is not connected)
def is_connected(self):
return self._last_state is not None
@staticmethod
def enumerate_devices():
"Returns the devices that are connected"
devices = list(
map(XInputJoystick, list(range(XInputJoystick.max_devices))))
return [d for d in devices if d.is_connected()]
def set_vibration(self, left_motor, right_motor):
"Control the speed of both motors seperately"
# Set up function argument types and return type
XInputSetState = xinput.XInputSetState
XInputSetState.argtypes = [ctypes.c_uint, ctypes.POINTER(XINPUT_VIBRATION)]
XInputSetState.restype = ctypes.c_uint
vibration = XINPUT_VIBRATION(
int(left_motor * 65535), int(right_motor * 65535))
XInputSetState(self.device_number, ctypes.byref(vibration))
def dispatch_events(self):
"The main event loop for a joystick"
state = self.get_state()
if not state:
raise RuntimeError(
"Joystick %d is not connected" % self.device_number)
if state.packet_number != self._last_state.packet_number:
# state has changed, handle the change
self.update_packet_count(state)
self.handle_changed_state(state)
self._last_state = state
def update_packet_count(self, state):
"Keep track of received and missed packets for performance tuning"
self.received_packets += 1
missed_packets = state.packet_number - \
self._last_state.packet_number - 1
if missed_packets:
self.dispatch_event('on_missed_packet', missed_packets)
self.missed_packets += missed_packets
def handle_changed_state(self, state):
"Dispatch various events as a result of the state changing"
self.dispatch_event('on_state_changed', state)
self.dispatch_axis_events(state)
self.dispatch_button_events(state)
def dispatch_axis_events(self, state):
# axis fields are everything but the buttons
axis_fields = dict(XINPUT_GAMEPAD._fields_)
axis_fields.pop('buttons')
for axis, type in list(axis_fields.items()):
old_val = getattr(self._last_state.gamepad, axis)
new_val = getattr(state.gamepad, axis)
data_size = ctypes.sizeof(type)
old_val = self.translate(old_val, data_size)
new_val = self.translate(new_val, data_size)
# an attempt to add deadzones and dampen noise
# done by feel rather than following http://msdn.microsoft.com/en-gb/library/windows/desktop/ee417001%28v=vs.85%29.aspx#dead_zone
# ags, 2014-07-01
if ((old_val != new_val and (new_val > 0.08000000000000000 or new_val < -0.08000000000000000) and abs(old_val - new_val) > 0.00000000500000000) or
(axis == 'right_trigger' or axis == 'left_trigger') and new_val == 0 and abs(old_val - new_val) > 0.00000000500000000):
self.dispatch_event('on_axis', axis, new_val)
def dispatch_button_events(self, state):
changed = state.gamepad.buttons ^ self._last_state.gamepad.buttons
changed = get_bit_values(changed, 16)
buttons_state = get_bit_values(state.gamepad.buttons, 16)
changed.reverse()
buttons_state.reverse()
button_numbers = count(1)
changed_buttons = list(
filter(itemgetter(0), list(zip(changed, button_numbers, buttons_state))))
tuple(starmap(self.dispatch_button_event, changed_buttons))
def dispatch_button_event(self, changed, number, pressed):
self.dispatch_event('on_button', number, pressed)
# stub methods for event handlers
def on_state_changed(self, state):
pass
def on_axis(self, axis, value):
pass
def on_button(self, button, pressed):
pass
def on_missed_packet(self, number):
pass
# Declare the event types XInputJoystick instances may dispatch.
for _event_type in ('on_state_changed', 'on_axis', 'on_button',
                    'on_missed_packet'):
    XInputJoystick.register_event_type(_event_type)
def determine_optimal_sample_rate(joystick=None):
    """
    Poll the joystick slowly (beginning at 1 sample per second)
    and monitor the packet stream for missed packets, indicating
    that the sample rate is too slow to avoid missing packets.
    Missed packets will translate to a lost information about the
    joystick state.
    As missed packets are registered, increase the sample rate until
    the target reliability is reached.

    Interactive: blocks until button 6 ('Back') is pressed or Ctrl-C.
    """
    # in my experience, you want to probe at 200-2000Hz for optimal
    # performance
    if joystick is None:
        joystick = XInputJoystick.enumerate_devices()[0]
    j = joystick
    print("Move the joystick or generate button events characteristic of your app")
    print("Hit Ctrl-C or press button 6 (<, Back) to quit.")
    # here I use the joystick object to store some state data that
    # would otherwise not be in scope in the event handlers
    # begin at 1Hz and work up until missed messages are eliminated
    j.probe_frequency = 1  # Hz
    j.quit = False
    j.target_reliability = .99  # okay to lose 1 in 100 messages
    @j.event
    def on_button(button, pressed):
        # flag the process to quit if the < button ('back') is pressed.
        j.quit = (button == 6 and pressed)
    @j.event
    def on_missed_packet(number):
        # Raise the probe frequency by 50% whenever reliability drops below
        # target, resetting the counters so the new rate is measured afresh.
        print('missed %(number)d packets' % vars())
        total = j.received_packets + j.missed_packets
        reliability = j.received_packets / float(total)
        if reliability < j.target_reliability:
            j.missed_packets = j.received_packets = 0
            j.probe_frequency *= 1.5
    while not j.quit:
        j.dispatch_events()
        time.sleep(1.0 / j.probe_frequency)
    print("final probe frequency was %s Hz" % j.probe_frequency)
def sample_first_joystick():
    """
    Grab 1st available gamepad, logging changes to the screen.
    L & R analogue triggers set the vibration motor speed.

    Interactive: loops forever polling the device at ~100Hz.
    """
    joysticks = XInputJoystick.enumerate_devices()
    device_numbers = list(map(attrgetter('device_number'), joysticks))
    print('found %d devices: %s' % (len(joysticks), device_numbers))
    if not joysticks:
        sys.exit(0)
    j = joysticks[0]
    print('using %d' % j.device_number)
    @j.event
    def on_button(button, pressed):
        print('button', button, pressed)
    # NOTE(review): these outer variables are shadowed by the locals inside
    # on_axis below, so only the trigger moved in the current event drives
    # the motors — the other motor is always reset to 0.
    left_speed = 0
    right_speed = 0
    @j.event
    def on_axis(axis, value):
        left_speed = 0
        right_speed = 0
        print('axis', axis, value)
        if axis == "left_trigger":
            left_speed = value
        elif axis == "right_trigger":
            right_speed = value
        j.set_vibration(left_speed, right_speed)
    while True:
        j.dispatch_events()
        time.sleep(.01)
# Manual smoke test: echo events from the first connected controller.
if __name__ == "__main__":
    sample_first_joystick()
| [
"tito.chatzis@gmail.com"
] | tito.chatzis@gmail.com |
f17c25ba0925b314dae43bd4723d1a285cc2c8d4 | 6f8540ba015c224827739ed8f06f3b313be8cda4 | /src/main.py | 493e500cc6b1b7ae21cdc51f65794a992682de0a | [] | no_license | qwteng/Eagle | f84cda7ffb64c5c08029fc807da7892c968633ea | b714ef32c066524c8ee3e561df8237eca40750ca | refs/heads/master | 2020-04-12T19:58:33.447621 | 2016-12-04T09:04:39 | 2016-12-04T09:04:39 | 2,213,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | # coding=utf-8
import sys
import time
from pymongo import MongoClient
from util.parser import *
from util import utils
import json
reload(sys)
sys.setdefaultencoding('utf-8')
def get_stocklist():
    """Crawl the eastmoney index page and parse it into a list of stocks."""
    listing_url = 'http://quote.eastmoney.com/stocklist.html'
    source = DataSource()
    parser = SockListParser()
    page_content = source.crawl(listing_url)
    return parser.parse(page_content)
def get_stockdata(stockcode):
    """Fetch parsed data for one stock code.

    Only codes starting with '0', '3' or '6' are supported; returns None for
    other prefixes or when parsing fails (best-effort).

    Fixed two defects in the original: the parse call referenced an undefined
    name ``code`` (NameError) instead of ``stockcode``, and the except branch
    contained ``pas`` (a SyntaxError) instead of ``pass``.
    """
    if stockcode[0] not in '036':
        return None
    stock = Stock()
    stock_data = None
    try:
        stock_data = stock.parse(stockcode)
    except Exception:
        # Deliberate best-effort: fall through and return None on failure.
        pass
    return stock_data
def get_holders(stockdata):
    """Flatten the top-ten tradable holders of a stock into per-holder dicts.

    Each record carries the stock's basic fields plus one holder entry and an
    'md5' fingerprint of the record computed before the fingerprint key is
    added. Returns None when stockdata is None.
    """
    if stockdata is None:
        return None
    base_fields = {}
    base_fields['code'] = stockdata['code']
    base_fields['net'] = stockdata['net']
    base_fields['earnings'] = stockdata['earnings']
    base_fields['cash'] = stockdata['cash']
    records = []
    for entry in stockdata[u'十大流通股东']:
        record = dict(base_fields)
        record.update(entry)
        # Fingerprint the record *before* the md5 key is inserted.
        record['md5'] = utils.md5(str(record))
        records.append(record)
    return records
def main():
    """Crawl all stocks and upsert their info and holder records into MongoDB.

    Python 2 code (uses `print code` and dict.has_key). Writes to the local
    Mongo 'stock' database: collections 'stockinfo' and 'stockholders'.
    """
    # NOTE(review): timestr is computed but never used.
    timestr = time.strftime("%Y%m%d%H%M%S", time.localtime())
    client = MongoClient()
    stockdb = client['stock']
    c_stockinfo = stockdb['stockinfo']
    c_holders = stockdb['stockholders']
    stock_list = get_stocklist()
    print(stock_list)
    c_stockinfo.insert(stock_list)
    stock = Stock()
    st_list = c_stockinfo.find()
    for st in st_list:
        if not st.has_key('code'):
            continue
        code = st['code']
        print code
        try:
            stock_info = stock.parse(code)
        except:
            # Best-effort crawl: skip stocks that fail to parse.
            continue
        c_stockinfo.update({'code':code}, {"$set":stock_info})
        try:
            holders = get_holders(stock_info)
        except:
            continue
        for h in holders:
            # Deduplicate holder rows by their md5 fingerprint.
            r = c_holders.find_one({'md5':h['md5']})
            if r is None:
                c_holders.insert(h)
def main_file():
    """Crawl all stocks and append one JSON line per holder to rslt.txt.

    Python 2 code (`print code`, str.decode). Output lines have shape
    {"code": ..., "holder": {date, holdername, number, rate, change}}.
    """
    output = open('rslt.txt','w+')
    stock_list = get_stocklist()
    #print(stock_list)
    stock = Stock()
    for st in stock_list:
        if not st.has_key('code'):
            continue
        code = st['code']
        print code
        try:
            stock_info = stock.parse(code)
        except:
            # Best-effort crawl: skip stocks that fail to parse.
            continue
        #print(stock_info)
        try:
            holders = get_holders(stock_info)
        except:
            continue
        for h in holders:
            # Re-shape each flat holder record into {code, holder:{...}}.
            s = {}
            holder = {}
            s['code'] = h['code']
            holder['date'] = h['date']
            holder['holdername'] = h['name']
            holder['number'] = h['account']
            holder['rate'] = h['rate']
            holder['change'] = h['change']
            s['holder'] = holder
            output.write(json.dumps(s, ensure_ascii=False).decode('utf8')+"\n")
# Entry point: crawl stock data and dump holder records to rslt.txt.
if __name__ == "__main__":
    main_file()
| [
"qwteng@qq.com"
] | qwteng@qq.com |
aa6592a35e47611f56c8d2d3ecc1af822bb35f40 | c33e52d03deb509952e4dfe0ebe8ce535d076113 | /validationservice.py | 34509025b69787d99d7b4127b5e253b28278d9e1 | [] | no_license | timonology/paymentprocess | 246fa3a2aa83a14758b4993fcc3dfa8c2dc638d7 | 96b47c681753ad97239c08c7bb58cc465c8ba217 | refs/heads/master | 2023-02-25T14:15:06.264760 | 2021-01-26T17:25:03 | 2021-01-26T17:25:03 | 333,157,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | import creditcardvalidator
from datetime import datetime
def creditcard_checker(card_number):
    """Validate a credit card number via creditcardvalidator.

    Returns a {'Status', 'Message'} dict; Status is the string 'true' or
    'false'. Fixed: removed the unreachable `return response` that followed
    an if/else in which both branches already returned.
    """
    response = creditcardvalidator.ValidateCard(card_number)
    print(response)
    if response == True:
        return { 'Status': 'true', 'Message': "Credit Card Valid" }
    return { 'Status': 'false', 'Message': "Credit Card Number Invalid" }
def date_checker(expirationdate):
    """Check that the card's expiration date has not passed.

    NOTE(review): compares the expiration string lexicographically against
    str(datetime.now()) — this assumes an ISO-like 'YYYY-MM-DD...' format;
    confirm against the caller's date format.
    """
    current = datetime.now()
    print(expirationdate)
    print(current)
    if expirationdate < str(current):
        return { 'Status': 'false', 'Message': "You cannot proceed because your Card has already expired" }
    return { 'Status': 'true', 'Message': "Expiration date is OK" }
def securitycode_checker(securitycode):
    """Validate that the security code (CVV) is exactly 3 characters long.

    Fixed: the failure message claimed the code "cannot be greater than 3
    digits", but the check also rejects shorter codes — the message now
    states the actual rule.
    """
    if len(securitycode) == 3:
        return { 'Status': 'true', 'Message': "Security Code Valid" }
    return { 'Status': 'false', 'Message': "Security Code must be exactly 3 digits" }
def field_isrequired(request):
    """Check that all required payment fields are non-empty.

    Fields are checked in a fixed order; the first empty one wins. Returns a
    {'Status', 'Message'} dict with Status 'true' only when all are present.

    Fixed two defects: the ExpirationDate branch returned Status 'true'
    despite the field being missing, and its message read
    "ExpirationDateis required" (missing space).
    """
    required_fields = ['ExpirationDate', 'CreditCardNumber', 'SecurityCode',
                       'CardHolder', 'Amount']
    for field in required_fields:
        if request[field] == "":
            return { 'Status': 'false', 'Message': f"{field} is required" }
    return { 'Status': 'true', 'Message': "OK" }
"Timothy.Babalola@ubagroup.com"
] | Timothy.Babalola@ubagroup.com |
df5d74665f7e253a5707711a3a7f978bebb10b96 | 50e375bdc8affc1a8c09aa567a740fa19df7d5a6 | /DSBQ/deployment/fixtures_old/test_Oracle_pytest_new.py | cbacf2973495522b4d34ecada22a816bff063a78 | [] | no_license | michTalebzadeh/SparkStructuredStreaming | ca7a257626e251c7b03a9844cfd229fa8ea95af5 | 87ef34ffe52061fcbb4f22fcd97764037717696a | refs/heads/master | 2023-07-13T00:49:10.753863 | 2021-07-12T16:39:50 | 2021-07-12T16:39:50 | 364,826,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | from pyspark.sql import SparkSession
import pytest
from sparkutils import sparkstuff as s
from src.config import ctest, test_url
from src.CreateSampleDataInMysql import extractHiveData, loadIntoMysqlTable, readSourceData, transformData, saveData, readSavedData
"""
@pytest.fixtures_old(scope = "session")
def initParameters():
# Prepare test data here in this fixtures_old
appName = ctest['common']['appName']
spark_session = s.spark_session(appName)
# create sample data
# read Hive source table and select read_df number of rows (see config_test.yml)
house_df = extractHiveData() ## read Hive table as sample source
# write to Mysql DB
loadIntoMysqlTable(house_df)
# data is ready to be tested in mysql
read_df = readSourceData()
# do Transform part of ETL (Extract, Transform, Load)
transformation_df = transformData()
# save data to target test table in mysql
saveData()
# read that data saved to ensure that the rows will tally
readSavedData_df = readSavedData()
return [read_df, transformation_df, readSavedData_df]
"""
def test_validity():
    """End-to-end ETL check: extract from Hive, load into MySQL, transform,
    save, and verify row counts against the figures in the test config.

    Fixed: stray extraction garbage appended to the final assert line made it
    a syntax error.
    """
    house_df = extractHiveData()
    loadIntoMysqlTable(house_df)
    # Assert that data read from source table is what is expected
    read_df = readSourceData()
    assert read_df.count() == ctest['statics']['read_df_rows']
    # Assert data written to target table is what it should be
    transformation_df = transformData()
    assert transformation_df.count() == ctest['statics']['transformation_df_rows']
    # Assert what is written tallies with the number of rows transformed:
    # subtract() yields rows in one frame but not the other, so an empty
    # difference means saved data equals transformed data exactly.
    readSavedData_df = readSavedData()
    assert readSavedData_df.subtract(transformation_df).count() == 0
"mich.talebzadeh@gmail.com"
] | mich.talebzadeh@gmail.com |
eb2b9cbc7dcb2e45e3686d9f629a4a03d6867c1d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_Class841.py | 4f84993699cbd099216e52673eb203502687df81 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=24
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit Cirq circuit for this generated test case.

    The gate sequence (Hadamards, CZs, CNOTs, Xs) is auto-generated; the
    numbered comments are the generator's gate indices.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=18
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=19
    c.append(cirq.H.on(input_qubit[0])) # number=20
    c.append(cirq.X.on(input_qubit[2])) # number=21
    c.append(cirq.H.on(input_qubit[0])) # number=10
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
    c.append(cirq.H.on(input_qubit[0])) # number=12
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
    c.append(cirq.H.on(input_qubit[0])) # number=13
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=14
    c.append(cirq.H.on(input_qubit[0])) # number=15
    c.append(cirq.X.on(input_qubit[2])) # number=16
    c.append(cirq.X.on(input_qubit[2])) # number=17
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=22
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=23
    # circuit end
    return c
def bitstring(bits):
    """Join a sequence of bit values into a compact '0'/'1' string."""
    digits = []
    for bit in bits:
        digits.append(str(int(bit)))
    return ''.join(digits)
# Entry point: simulate the circuit and dump the state-vector probabilities,
# circuit length and diagram to ../data/startCirq_Class841.csv.
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Rewrite the circuit for the Sycamore gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =0
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    # Probability of each basis state: |amplitude|^2, rounded to 3 places.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    writefile = open("../data/startCirq_Class841.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
67711448c51b3aa2c18dbd24c029ab0a57c28569 | 9df89a1652d183d8fc654acd728f9a578d6d1912 | /cli/tests/psym_tests/test_user.py | 400718eaa8635641ddbdad811c5aae5771aba6a4 | [
"BSD-3-Clause"
] | permissive | duranrojasm/symphony | b37d54a134e29093edacb80442e204fc71a37fbe | 55b3d0c20b669374303bafb10e9c96c734647c9c | refs/heads/main | 2023-08-24T02:00:33.433220 | 2021-10-28T20:35:23 | 2021-10-28T20:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | #!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import random
import string
from unittest import skip
from psym import UserDeactivatedException
from psym.api.user import (
activate_user,
add_user,
deactivate_user,
edit_user,
get_active_users,
)
from psym.graphql.enum.user_role import UserRole
from psym.graphql.enum.user_status import UserStatus
from ..utils import init_client
from ..utils.base_test import BaseTest
class TestUser(BaseTest):
    """Integration tests for psym user CRUD: create, edit, deactivate,
    reactivate. Each test starts from a BaseTest-provided client; the
    fixture user plus the created one explain the expected counts of 2."""
    @staticmethod
    def random_string(length: int = 10) -> str:
        # Random lowercase identifier used to build unique test emails.
        letters = string.ascii_lowercase
        return "".join(random.choices(letters, k=length))
    def test_user_created(self) -> None:
        # New users come back active and can immediately authenticate.
        user_name = f"{self.random_string()}@fb.com"
        u = add_user(client=self.client, email=user_name, password=user_name)
        self.assertEqual(user_name, u.email)
        self.assertEqual(UserStatus.ACTIVE, u.status)
        active_users = get_active_users(client=self.client)
        self.assertEqual(2, len(active_users))
        client2 = init_client(email=user_name, password=user_name)
        active_users = get_active_users(client=client2)
        self.assertEqual(2, len(active_users))
    def test_user_edited(self) -> None:
        # Password/role edits take effect for subsequent logins.
        user_name = f"{self.random_string()}@fb.com"
        new_password = self.random_string()
        u = add_user(client=self.client, email=user_name, password=user_name)
        edit_user(
            client=self.client,
            user=u,
            new_password=new_password,
            new_role=UserRole.OWNER,
        )
        client2 = init_client(email=user_name, password=new_password)
        active_users = get_active_users(client=client2)
        self.assertEqual(2, len(active_users))
    def test_user_deactivated(self) -> None:
        # Deactivated users disappear from the active list and cannot log in.
        user_name = f"{self.random_string()}@fb.com"
        u = add_user(client=self.client, email=user_name, password=user_name)
        deactivate_user(client=self.client, user=u)
        active_users = get_active_users(client=self.client)
        self.assertEqual(1, len(active_users))
        with self.assertRaises(UserDeactivatedException):
            init_client(email=user_name, password=user_name)
    def test_user_reactivated(self) -> None:
        # Reactivation restores the user to the active list.
        user_name = f"{self.random_string()}@fb.com"
        u = add_user(client=self.client, email=user_name, password=user_name)
        deactivate_user(client=self.client, user=u)
        activate_user(client=self.client, user=u)
        active_users = get_active_users(client=self.client)
        self.assertEqual(2, len(active_users))
| [
"jcaroper@everis.com"
] | jcaroper@everis.com |
5275bb4a909af270b63538be9fb5d34655c19fe3 | baa69790a09fd017bcac5f8a0efb3b4be0b0984f | /exercises/ex12.py | 3e1e0248ec07d43c957e05c606305e1abe029022 | [] | no_license | dmcxblue/LearnPython3TheHardWay | cbf848fcf49c8e57c52f7bc3ad7affab3fda25cc | 571d7b082f117933f494b506e7fc73920bfb6b85 | refs/heads/master | 2020-05-01T19:43:16.931672 | 2019-05-21T18:24:37 | 2019-05-21T18:24:37 | 177,655,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | age = input("How old are you? ")
height = input("How tall are you? ")
weight = input("How much do you weight? ")
print(f"So, you're {age} years old, {height} tall and {weight} heavy.")
| [
"noreply@github.com"
] | noreply@github.com |
f3f3135e5cae2a247eb3a0c638ef4ee23f51d6d7 | 4105bbc19a6c17e9655a1c6b6d1c926683c505ae | /mission/wizauto.py | 062974754c000c0a4c7b8f721ef85516e1008523 | [] | no_license | lichao20000/mbp | 044ca609d8281b543ac687fe28ec845c8ea1e6b8 | e7dd55a7c320be04b24f6b48d5ec9b506c88ea18 | refs/heads/master | 2021-12-03T17:19:18.535170 | 2014-09-09T02:58:04 | 2014-09-09T02:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # coding: utf-8
'''
description:
Created by weibaohui on 14-5-18.
'''
import requests
import time
from datetime import datetime
# Entry point: once per hour between 06:00 and 23:00, would ping the wiz
# autostart endpoint (the actual request is currently commented out).
if __name__=="__main__":
    url="http://dyit.org:8806/wiz/autostart/"
    while(True):
        if (datetime.now().time().hour>6 and datetime.now().time().hour <23):
            #r=requests.get(url)
            #print(r.text)
            pass
        # Sleep one hour between checks.
        time.sleep(60*60)
| [
"zihuxinyu@163.com"
] | zihuxinyu@163.com |
a4a5217a92054490d85cba7f63ef1acb282a4847 | 989bb5d2d3e89db21fcbeac91a1e64967ea6377b | /sagemaker_neo_compilation_jobs/deploy_pytorch_model_on_Inf1_instance/resnet18.py | 9421aea5af6f0bf1ea89a34f99bc2cb5dcbceb35 | [
"Apache-2.0"
] | permissive | araitats/amazon-sagemaker-examples | 7cec9ea5822f0469d5dfabbcf3cab62ce9c0f0d1 | 512cb3b6310ae812c6124a451751237d98a109b1 | refs/heads/master | 2023-04-19T05:54:47.334359 | 2021-04-27T21:04:33 | 2021-04-27T21:04:33 | 338,094,683 | 2 | 1 | Apache-2.0 | 2021-04-27T15:35:14 | 2021-02-11T17:07:39 | Jupyter Notebook | UTF-8 | Python | false | false | 565 | py | def input_fn(request_body, request_content_type):
import torch
import torchvision.transforms as transforms
from PIL import Image
import io
f = io.BytesIO(request_body)
input_image = Image.open(f).convert('RGB')
preprocess = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)
return input_batch | [
"noreply@github.com"
] | noreply@github.com |
afec961b7f4d3f04c9086399486a0dd42176e3dd | 53e85505b12ec83917d739369e9ace57222ffd17 | /env/toy_landmark_env.py | 2c0e968ecc0c02ff1bc5f768258ad5df7bef681b | [] | no_license | ashish-kmr/vmsr | b64805e5789d8fd4b150686f9dcac07fb086c4ce | 102a7b12632094d3a81bbc0918676eec7737c693 | refs/heads/master | 2022-04-11T23:50:07.533280 | 2020-03-18T21:48:49 | 2020-03-18T21:48:49 | 241,758,684 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61,512 | py | from __future__ import print_function
import logging
import numpy as np, os, cv2, os, scipy, skimage, itertools, copy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy import ndimage
from src import utils
from src import rotation_utils as ru
from src import map_utils as mu
from copy import deepcopy
#from env.mp_env import expert_navigator
def get_top_view_discrete_env_task_params(prob_random=0.0, fovs=[128], view_scales=[0.25],
  batch_size=32, ignore_roads=True, output_roads=False, road_dilate_disk_size=0,
  map_max_size=None, base_resolution=1.0, step_size=1, top_view=False, perturb_views=False,
  graph_type='ogrid',t_prob_noise=0.0,task_typ='forward',replay_buffer=0,tf_distort=False,
  minQ=20,reuseCount=4,spacious=False,multi_act=False, dilation_cutoff=4, nori = 12):
  """Bundle TopViewDiscreteEnv configuration into a utils.Foo task_params.

  Wraps prob_random into a noise model, the top_view/roads flags into an
  outputs spec, and passes everything else through. Only 'ogrid' and 'null'
  graph types are allowed.
  NOTE(review): fovs/view_scales are mutable default arguments — safe only
  as long as callers never mutate the returned lists.
  """
  noise_model = utils.Foo(prob_random=prob_random)
  outputs = utils.Foo(top_view=top_view, top_view_roads=output_roads, loc_on_map=True)
  task_params = utils.Foo(noise_model=noise_model, batch_size=batch_size,
    ignore_roads=ignore_roads, view_scales=view_scales, fovs=fovs,
    outputs=outputs, relight=True, relight_fast=False,
    road_dilate_disk_size=road_dilate_disk_size,
    map_max_size=map_max_size, base_resolution=base_resolution, step_size=step_size,
    perturb_views=perturb_views, graph_type=graph_type,t_prob_noise=t_prob_noise,
    replay_buffer=replay_buffer,tf_distort=tf_distort,minQ=minQ,reuseCount=reuseCount,
    spacious=spacious,multi_act=multi_act, dilation_cutoff=dilation_cutoff, nori = nori)
  assert(graph_type in ['ogrid', 'null'])
  return task_params
def _get_relative_goal_loc(goal_loc, loc, theta):
r = np.sqrt(np.sum(np.square(goal_loc - loc), axis=1))
t = np.arctan2(goal_loc[:,1] - loc[:,1], goal_loc[:,0] - loc[:,0])
t = t-theta[:,0] + np.pi/2
return np.expand_dims(r,axis=1), np.expand_dims(t, axis=1)
def perturb_images(imgs, rngs, noise):
  """Randomly rescale the color channels of each image, one RNG per image.

  With probability 0.5 per image (and only for 3-channel images), each
  channel is multiplied by a gain drawn from exp(U(-1,1)*log(1+noise)).
  Output is always cast back to uint8.
  """
  ff = 0.5
  perturbed = []
  for i, rng in enumerate(rngs):
    out = imgs[i, ...] * 1
    # The coin flip consumes one sample from the RNG regardless of whether
    # the image qualifies for color perturbation.
    flip = rng.rand() < ff
    if flip and out.shape[2] == 3:
      # Only messes with color images: per-channel multiplicative gains.
      gains = np.exp(2 * (rng.rand(1, 1, 3) - 0.5) * np.log(1. + noise))
      out = out * gains
    out = out.astype(np.uint8)
    perturbed.append(out)
  return np.array(perturbed)
class EnvMultiplexer():
  # Samples an environment at each iteration.
  def __init__(self, args, worker_id=0, num_workers=1):
    # Copy every field of args onto self (names, dataset, env_class, ...).
    params = vars(args)
    self.r_obj = None
    for k in params.keys():
      setattr(self, k, params[k])
    self._pick_data(worker_id, num_workers)
    self._setup_data()
    self.batch = -1
  def _pick_data(self, worker_id, num_workers):
    # Re does self.names to only train on data this worker should train on.
    # Each entry becomes (name, flip); flipped copies double the pool when
    # add_flips is set, and the pool is tiled up if smaller than num_workers.
    names = [(x, False) for x in self.names]
    if self.env_task_params_2.add_flips:
      names += [(x, True) for x in self.names]
    out_names = []
    if len(names) < num_workers:
      while len(out_names) < num_workers:
        logging.error('#Env: %d, #workers: %d', len(names), num_workers)
        out_names = out_names + names
      names = out_names[:num_workers]
    # Round-robin assignment: this worker takes every num_workers-th entry.
    to_pick = range(worker_id, len(names), num_workers)
    logging.error('All Data: %s', str(names))
    logging.error('worker_id: %d, num_workers: %d', worker_id, num_workers)
    logging.error('Picking data: %s', str(to_pick))
    self.names = [names[i] for i in to_pick]
    logging.error('Picked Data: %s', str(self.names))
  def _setup_data(self):
    # Load building env class.
    es = []
    # Setup renderer if necessary.
    if self.camera_param: r_obj = self.get_r_obj(self.camera_param)
    else: r_obj = None
    for b, flip in self.names:
      logging.error('Loading %s with flip %d.', b, flip)
      e = self.env_class(b, self.dataset, flip, self.env_task_params,
        rng=np.random.RandomState(0), r_obj=r_obj)
      # env_class_2 wraps the base environment with the task logic.
      obj = self.env_class_2(e, self.env_task_params_2)
      es.append(obj)
      logging.error('Loaded %s with flip %d.', b, flip)
    self.envs = es
    # Kill the renderer
    self.r_obj = None
  def sample_env(self, rng):
    # Pick a random environment and stamp it with the global batch counter.
    env_id = rng.choice(len(self.envs))
    self.batch = self.batch+1
    self.envs[env_id].batch = self.batch
    return self.envs[env_id], env_id
  def get_env(self, env_id):
    return self.envs[env_id]
  def gen_data(self, rng):
    """Used for generating data for a simple CNN."""
    env_id = rng.choice(len(self.envs))
    e = self.envs[env_id]
    self._last_env = self.envs[env_id]
    self.batch = self.batch+1
    e.batch = self.batch
    return e.gen_data(rng)
  def get_r_obj(self, camera_param):
    # Lazily create (and cache) the swiftshader renderer for camera_param.
    if self.r_obj is None:
      from render import swiftshader_renderer_gpu as sru
      cp = camera_param
      rgb_shader, d_shader = sru.get_shaders(cp.modalities)
      r_obj = sru.SwiftshaderRenderer()
      fov_vertical = cp.fov_vertical
      r_obj.init_display(width=cp.width, height=cp.height,
        fov_vertical=fov_vertical, fov_horizontal=cp.fov_horizontal,
        z_near=cp.z_near, z_far=cp.z_far, rgb_shader=rgb_shader,
        d_shader=d_shader, im_resize=cp.im_resize)
      r_obj.clear_scene()
      self.r_obj = r_obj
    return self.r_obj
class DiscreteEnv():
"""Observation is the top-view of the environment.
Actions are simple grid world actions.
- Rotate left, right, move straight stay in place.
- With some probability it stays in place.
"""
def __init__(self, task_params):
# The Expectation is the fill this function with code to fill up task and
# task_params.
raise NotImplemented
def _setup_noise(self):
self.take_action_kwargs = {
'prob_random': self.task_params.noise_model.prob_random }
def _compute_graph(self):
"""Computes traversibility and then uses it to compute the graph."""
if self.task_params.road_dilate_disk_size > 0:
disk = skimage.morphology.disk(dtype=np.bool,
radius=self.task_params.road_dilate_disk_size)
self.task.road = skimage.morphology.binary_dilation(self.task.road, disk)
if self.task_params.ignore_roads:
self.task.traversable = self.task.road == True
self.task.traversable[:] = True
else:
self.task.traversable = self.task.road == True
#print(self.task.traversable.shape)
# Build a grid graph on space for fast shortest path queries.
if self.task_params.graph_type == 'null':
self.task.graph = graphs.NullGraph(self.task.traversable, True,
self.task_params.step_size, 0)
elif self.task_params.graph_type == 'ogrid':
self.task.graph = graphs.OrientedGridGraph(self.task.traversable,
force=True, step_size=self.task_params.step_size)
# Compute embedding for nodes, compute an id for x coordinate, y-coordinate and theta.
node_embedding = np.zeros((self.task.graph.nodes.shape[0], 3), dtype=np.int32)
for i in range(3):
_, node_embedding[:,i] = np.unique(self.task.graph.nodes[:,i], return_inverse=True)
self.task.node_embedding = node_embedding
def get_loc_axis(self, states):
"""Based on the node orientation returns X, and Y axis. Used to sample the
map in egocentric coordinate frame.
"""
loc = states[:,0:2]*1.
theta = states[:,-1:]*np.pi/2.
x_axis = np.concatenate((np.cos(theta), np.sin(theta)), axis=1)
y_axis = np.concatenate((np.cos(theta+np.pi/2.), np.sin(theta+np.pi/2.)),
axis=1)
return loc, x_axis, y_axis, theta
def get_relative_coordinates(self, target_states, ref_states):
"""Given reference nodes (and not ids) [N] returns the relative
coordinates of the targets [N x K] wrt reference nodes."""
loc, x_axis, y_axis, theta = self.get_loc_axis(ref_states)
#print(target_states.shape)
rel_goal_orientation_, goal_dist_, goal_theta_ = [], [], []
for i in range(target_states.shape[1]):
goal_loc, _, _, _theta = self.get_loc_axis(np.array(target_states[:,i]))
rel_goal_orientation = 4*np.mod(theta-_theta, 2*np.pi) / (2*np.pi)
# rel_goal_orientation = np.mod(np.int32((theta - _theta)/(np.pi/2)), 4)
goal_dist, goal_theta = _get_relative_goal_loc(goal_loc, loc, theta)
goal_theta_.append(goal_theta)
rel_goal_orientation_.append(rel_goal_orientation)
goal_dist_.append(goal_dist)
goal_dist = np.array(goal_dist_)[...,0].T
goal_theta = np.array(goal_theta_)[...,0].T
rel_goal_orientation = np.array(rel_goal_orientation_)[...,0].T
#print(goal_dist.shape, goal_theta.shape, rel_goal_orientation.shape)
return goal_dist, goal_theta, rel_goal_orientation
def reset(self, rng, init_states=None, batch_size=None):
if batch_size is None:
batch_size = self.task_params.batch_size
assert(init_states is None or batch_size == len(init_states))
episodes = []
out_init_states = []
for i in range(batch_size):
# Generate seeds for each new episode.
rng_i = np.random.RandomState(rng.randint(np.iinfo(np.uint32).max))
rng_noise = np.random.RandomState(rng.randint(np.iinfo(np.uint32).max))
# Initialize the agent somewhere on the map (grid location and a fixed
# orientation).
if init_states is None:
waypoints, path = self.task.graph.sample_random_path_waypoints(rng_i, 1,
min_dist=4, max_dist=200)
init_state = waypoints[0]
else:
init_state = init_states[i]
# Reset position
episode = utils.Foo(rng=rng_i, rng_noise=rng_noise, states=[init_state],
executed_actions=[], action_status=[])
episodes.append(episode)
out_init_states.append(init_state)
self.episodes = episodes
# State for the agent is the 2D location on the map, (x,y,theta).
return out_init_states
def take_action(self, states, actions, sim=False):
"""Actions are discrete [0 (stay in place), 1(turn left), 2(turn right),
3(straight ahead)].
"""
out_states = []
episodes = self.episodes
batch_size = len(states)
prob_random = self.task_params.noise_model.prob_random
action_status = []
for i in range(batch_size):
action = actions[i]*1
state = states[i]*1
rng = episodes[i].rng_noise
u = rng.rand()
status = True
if u < prob_random and action == 3:
action = 0
status = False
if action == 3:
_ = state
for k in range(1):
nn = self.task.graph.get_neighbours([_])
__ = nn[0, 3]
if __ == -1:
break
_ = __
out_state = _
elif action == 0:
out_state = state
else:
nn = self.task.graph.get_neighbours([state])
out_state = nn[0, action]
if False:
nn = self.task.graph.get_neighbours([out_state])
out_state = nn[0, 3] if nn[0, 3] != -1 else out_state
assert(out_state != -1)
out_states.append(out_state)
if not sim:
episodes[i].states.append(out_state*1.)
episodes[i].executed_actions.append(actions[i]*1.)
episodes[i].action_status.append(status)
return out_states
def render_views(self, states):
  """Render the egocentric top-down view(s) for each state in `states`."""
  state_arr = np.array(states)
  loc, x_axis, y_axis, _ = self.get_loc_axis(state_arr)
  return mu.generate_egocentric_maps(
      self.task.scaled_views, self.task_params.view_scales,
      self.task_params.fovs, loc, x_axis, y_axis)
def get_features(self, states):
  """Computes the observation dict for a batch of states.

  Depending on task_params.outputs, emits 'views_{i}' (egocentric top view,
  optionally perturbed), 'roads_{i}', 'loc_on_map_{i}' and always
  'views_xyt' (the raw (x,y,theta) states).
  """
  to_output = self.task_params.outputs
  np_states = np.array(states)
  loc, x_axis, y_axis, theta = self.get_loc_axis(np_states)
  outputs = {}
  if to_output.top_view:
    views = self.render_views(states)
    # ATTENTION. Len of Views will always be 1
    for i in range(len(views)):
      if self.task_params.perturb_views:
        # Perturbation strength is hard-coded to 0.1 here, not tied to the
        # noise model (see the commented alternative below).
        views[i] = perturb_images(views[i], [e.rng_noise for e in self.episodes], 0.1)
        # self.task_params.noise_model.prob_random)
    for i, _ in enumerate(views):
      outputs['views_{:d}'.format(i)] = views[i]
  if to_output.top_view_roads:
    roads = mu.generate_egocentric_maps(self.task.scaled_roads,
      self.task_params.view_scales, self.task_params.fovs, loc, x_axis, y_axis)
    for i, _ in enumerate(roads):
      outputs['roads_{:d}'.format(i)] = np.expand_dims(roads[i], -1)
  if to_output.loc_on_map:
    # Location is scaled to each view scale; theta is appended unscaled.
    for i, sc in enumerate(self.task_params.view_scales):
      outputs['loc_on_map_{:d}'.format(i)] = np.concatenate((loc*sc, theta), axis=1)
  outputs['views_xyt'] = np_states
  # if to_output.executed_action:
  return outputs
class TopViewDiscreteEnv(DiscreteEnv):
  """Observation is the top-view of the environment.
  Actions are simple grid world actions.
  - Rotate left, right, move straight stay in place.
  - With some probability it stays in place.
  """
  def __init__(self, name, dataset, flip, task_params, road=None, view=None, rng=None, r_obj=None):
    """Loads (or takes pre-loaded) view/road maps and builds the env.

    Either pass `road` and `view` directly (then dataset/flip must be None),
    or pass a `dataset` from which they are loaded by `name`.
    """
    self.task = utils.Foo()
    if road is not None and view is not None:
      assert(dataset is None)
      assert(flip is None)
      self.task.view = view
      self.task.road = road
    else:
      self.task.view, self.task.road = dataset.load_data(name, flip=flip,
        map_max_size=task_params.map_max_size, rng=rng,
        base_resolution=task_params.base_resolution)
    self.task_params = task_params
    assert(self.task_params.ignore_roads == True)
    # Order matters: noise setup, then graph construction, then view scaling.
    self._setup_noise()
    self._compute_graph()
    self._preprocess_for_task()
  def _preprocess_for_task(self):
    # Resize views with antialiasing.
    self.task.scaled_views = mu.resize_maps(self.task.view,
      self.task_params.view_scales, 'antialiasing')
    self.task.scaled_roads = mu.resize_maps((self.task.road*255).astype(np.uint8),
      self.task_params.view_scales, 'antialiasing')
    self.task.view = None #Takes a lot of memory so remove if not needed.
    self.task.road = None
  def render_views(self, states):
    # Renders out the egocentric view from each state.
    np_states = np.array(states)
    loc, x_axis, y_axis, theta = self.get_loc_axis(np_states)
    views = mu.generate_egocentric_maps(self.task.scaled_views,
      self.task_params.view_scales, self.task_params.fovs, loc, x_axis, y_axis)
    return views
# def get_follower_task_params(batch_size=4, gt_delta_to_goal=False,
# terminate_on_complete=False, compute_optimal_actions=False,
# compute_optimal_actions_steps=2, gamma=0.99, add_flips=True,
# max_dist=200, min_dist=25, act_f=[0., 2., 4., 8., 16.],
# act_r=[15., 30., 45., 90.], history=0, teacher_steps=20,
# rejection_sampling=False):
# rl_params = utils.Foo(dense_wt=1.0, complete_reward=1.0, time_penalty=-0.01,
# dist_thresh=25., gamma=gamma, terminate_on_complete=terminate_on_complete)
# task_params = utils.Foo(min_dist=min_dist, max_dist=max_dist,
# batch_size=batch_size, act_f=act_f, act_r=act_r, rl_params=rl_params,
# gt_delta_to_goal=gt_delta_to_goal, compute_optimal_actions_step_size=0.9,
# compute_optimal_actions_steps=compute_optimal_actions_steps,
# compute_optimal_actions=compute_optimal_actions, add_flips=add_flips,
# history=history, teacher_steps=teacher_steps,
# rejection_sampling=rejection_sampling)
# return task_params
def get_follower_task_params(batch_size=4, max_dist=200, min_dist=25,
    num_waypoints=1, path_length=20, history=0, add_flips=True, typ='sp',
    data_typ='demonstartion', mapping_samples=20, share_start=False, dist_type='traj_dists',
    extent_samples=200, plan_type='opt', plan_path=None,task_typ='forward',replay_buffer=0,tf_distort=False,
    minQ=20,reuseCount=4,spacious=False,multi_act=False):
  """Builds the parameter bag (utils.Foo) for the Follower task.

  Validates the categorical options and packs every argument, unchanged,
  into the returned Foo. (Note: 'demonstartion' is the spelling used
  throughout this codebase.)
  """
  assert(data_typ in ['mapping', 'demonstartion'])
  assert(typ in ['sp', 'U'])
  assert(task_typ in ['return', 'forward'])
  params = utils.Foo(
      min_dist=min_dist, max_dist=max_dist, batch_size=batch_size,
      num_waypoints=num_waypoints, path_length=path_length, history=history,
      add_flips=add_flips, typ=typ, data_typ=data_typ,
      mapping_samples=mapping_samples, share_start=share_start,
      dist_type=dist_type, extent_samples=extent_samples,
      plan_type=plan_type, plan_path=plan_path, task_typ=task_typ,
      replay_buffer=replay_buffer, tf_distort=tf_distort, minQ=minQ,
      reuseCount=reuseCount, spacious=spacious, multi_act=multi_act)
  return params
class Follower():
"""Provides data for the follower leader style problem. The leader generates
a target trajectory, and outputs the set of images seen in going from the
starting location to the goal location. Follower has noisy actuators and has
to be able to get to the goal location the picked out by the leader."""
def __init__(self, env, task_params):
  """Wraps `env` for follower-style data generation.

  Args:
    env: underlying navigation environment.
    task_params: Foo from get_follower_task_params().
  """
  self.task = utils.Foo()
  self.task.env = env
  self.task_params = task_params
  if self.task_params.data_typ == 'mapping':
    # Initialize a mapper that can be used to generate images for the mapping
    # part of things. Imported lazily so the dependency is only needed for
    # the 'mapping' data type.
    from env import mapper_env
    mapper_task_params = mapper_env.get_mapper_task_params(
      batch_size=task_params.batch_size, num_samples=task_params.mapping_samples,
      extent_samples=task_params.extent_samples,
      add_flips=task_params.add_flips, mapper_noise=0.,
      output_optimal_actions=True)
    self.task.env_mapper = mapper_env.MapperPlannerEnv(env, mapper_task_params)
def reset(self, rng):
  """Samples a new batch of follower episodes.

  Rejection-samples a goal point and, per batch element, a start whose
  geodesic distance to the goal is long enough (and not too straight-line),
  then plans a (possibly noisy) shortest path to the goal. Populates
  self.task with paths/actions/goal distance fields and resets the
  underlying env at the sampled starts.

  Returns:
    List of initial states (first state of each planned path).
  """
  rng_ = rng
  actions, waypoints, path_dists, traj_dists, goal_dists, paths = [], [], [], [], [], []
  task_params = self.task_params
  batch_size = self.task_params.batch_size
  spacious=task_params.spacious
  #print(self.task.env.task_params.t_prob_noise)
  # Generates the problem.
  start_id = None
  #print(batch_size)
  # One shared goal for the batch; its distance field is reused for all
  # starts (and resampled if sampling keeps failing below).
  goal_point=self.task.env._sample_point_on_map(np.random.RandomState(rng.randint(1e6)),in_room=True,spacious=spacious)
  goal_dist=self.task.env.exp_nav._compute_distance_field(goal_point)
  path_length=self.task_params.path_length
  ### how do we pick interesting goals
  for i in range(batch_size):
    start_id = start_id if task_params.share_start else None
    #if self.task_params.typ == 'sp':
    #ATTENTION
    path,act=None,None
    l2_dist=1.0
    geo_dist=1.00
    count_attempt=0
    # Rejection sampling: prefer starts whose geodesic distance is at least
    # ~80% of the requested path length and whose straight-line/geodesic
    # ratio < 0.8 (i.e. the path has to bend). After 100 tries accept
    # anything; after 200 tries resample the goal itself.
    while(path is None):
      if (count_attempt > 200):
        goal_point=self.task.env._sample_point_on_map(np.random.RandomState(rng.randint(1e6)),in_room=True,spacious=spacious)
        goal_dist=self.task.env.exp_nav._compute_distance_field(goal_point)
        count_attempt=0
      count_attempt+=1
      start_id=self.task.env._sample_point_on_map(np.random.RandomState(rng.randint(1e6)),spacious=spacious)
      geo_dist=goal_dist[int(start_id[0]),int(start_id[1])]
      l2_dist=((start_id[0]-goal_point[0])**2 + (start_id[1]-goal_point[1])**2)**0.5
      #print(geo_dist,l2_dist,self.task.env.task.step_size,path_length)
      if (count_attempt > 100 or \
          (geo_dist/self.task.env.task.step_size > 0.8*float(path_length) \
          and geo_dist < 1000 and l2_dist/geo_dist < 0.8)):
        path,act=self.task.env.exp_nav._find_shortest_path([start_id],goal_point,goal_dist,path_length+1,
          noise_prob=self.task.env.task_params.t_prob_noise,rng=np.random.RandomState(rng.randint(1e6)),spacious=spacious)
        #_,path_t_noise=self.task.env.exp_nav._virtual_steps(act,path[0],goal_dist,noise=0.0,check_collision=False)
        #path=[path[0]]+path_t_noise
        #print(len(path),len(act),act[-1])
    #waypoint, path = graph.sample_random_path_waypoints(
    #    rng_, num_waypoints=task_params.num_waypoints,
    #    min_dist=task_params.min_dist, max_dist=task_params.max_dist,
    #    path_length=task_params.path_length, start_id=start_id)
    #start_id = waypoint[0]
    #traj_dist = graph.get_trajectory_distance(path.tolist(), 0.1,
    traj_dist=0
    #    max_dist=task_params.path_length*4)
    #goal_dist = self.task.env.exp_nav.goal_dist
    # FIXME: goal_dist = graph.get_path_distance(path[-1:])
    action = deepcopy(act)
    # path_dist = graph.get_path_distance(path.tolist())
    # NOTE(review): waypoints and paths store the same deep-copied path.
    waypoints.append(deepcopy(path))
    traj_dists.append(deepcopy(traj_dist))
    goal_dists.append(deepcopy(goal_dist))
    paths.append(deepcopy(path))
    #print(np.array(path).shape)
    #print(path)
    actions.append(deepcopy(action))
    # path_dists.append(path_dist)
  task = self.task
  task.paths, task.actions, task.traj_dists, task.waypoints, task.goal_dists = \
    paths, actions, traj_dists, waypoints, goal_dists
  # task.path_dists = path_dists
  task.history_f = []
  completed = [False for _ in paths]
  task.completed = completed
  # Set up problem for the follower agent.
  init_states = [x[0] for x in paths]
  task.init_states = init_states
  env = self.task.env
  _ = env.reset(np.random.RandomState(rng.randint(1e6)), init_states=init_states, batch_size=batch_size,spacious=spacious)
  if self.task_params.data_typ == 'mapping':
    task.mapper_rng = utils.copy_rng(rng)
  return init_states
def gen_data(self, rng):
  """IGNORE"""
  # Generates the same episodes used for trajectory following, but emits
  # the mapping/planning variant of the data instead.
  self.reset(rng)
  planning_outputs = self._get_planning_data()
  return planning_outputs
def _get_planning_data(self):
  """IGNORE"""
  """Generate data for training the path planner.
  1. Call the _get_mapping_data function
  2. Also compute ground truth for planning problems.

  Requires all episodes in the batch to share the same start state.
  Returns the mapping outputs augmented with 'valid' (mask of graph cells),
  'gt_actions' (one-hot-ish optimal action volume) and 'goal_imgs'.
  """
  init_states = self.task.init_states
  graph = self.task.env.task.graph
  outputs, _ = self._get_mapping_data()
  for s in init_states:
    assert(s == init_states[0]), \
      'init_states are not all the same {:s}.'.format(str(init_states))
  goal_dists = self.task.goal_dists
  goal_nn = self.task.env_mapper._get_node_nn(init_states[0])
  all_oas = []
  goal_imgs = []
  for i in range(len(init_states)):
    _ = goal_nn[goal_nn > -1]
    _n, _d = graph.get_action_distance_vec(goal_dists[i], _)
    # Optimal actions are the argmin (ties allowed) of the per-action
    # distance-to-goal.
    _oa = _d == np.min(_d, 1)[:,np.newaxis]
    # np.bool / np.float were removed in NumPy 1.24; use the builtins.
    oas = np.zeros((goal_nn.shape[0], goal_nn.shape[1], goal_nn.shape[2], 4), dtype=bool)
    goal_img = np.ones((goal_nn.shape[0], goal_nn.shape[1], goal_nn.shape[2]), dtype=float)*np.inf
    goal_img[goal_nn>-1] = _d[:,0]
    goal_img = goal_img == 0
    goal_imgs.append(goal_img)
    oas[goal_nn > -1, :] = _oa
    all_oas.append(oas)
  # Flip the y axis and transpose x/y so the volumes line up with the
  # image coordinate convention used elsewhere.
  goal_imgs = np.array(goal_imgs)
  goal_imgs = goal_imgs[:,::-1,:,:]
  goal_imgs = np.transpose(goal_imgs, [0,2,1,3])
  all_oas = np.array(all_oas)
  all_oas = all_oas[:,::-1,:,:,:]
  all_oas = np.transpose(all_oas, [0,2,1,3,4])
  all_oas = all_oas.astype(np.float32)
  goal_nn = goal_nn[::-1,:,:]
  goal_nn = np.transpose(goal_nn, [1,0,2])
  valid_mask = goal_nn[:,:,:1] > -1
  valid_mask = np.logical_or(np.zeros((len(init_states),1,1,1), dtype=bool),
    valid_mask[np.newaxis,...])
  outputs['valid'] = valid_mask.astype(np.float32)
  outputs['gt_actions'] = all_oas
  outputs['goal_imgs'] = goal_imgs.astype(np.float32)
  self.task.planning = utils.Foo(goal_nn=goal_nn, gt_actions=all_oas)
  return outputs
def execute_actions(self, action_volume, output_dir, global_step):
  """Given the action volume, executes the actions in open loop on the
  episode once to save the trajectories."""
  """IGNORE"""
  # Unroll the trajectory from the start state.
  num_steps = self.task_params.path_length
  d_starts, d_ends = self.task.env_mapper.execute_actions_1(action_volume,
    self.task.init_states, self.task.goal_dists, self.task.planning.goal_nn,
    num_steps)
  # For debugging.
  # _all_actions, _state_dist, _state_theta, _state_rel_orient = \
  #   self.task.env_mapper.decode_plan(self.task.planning.gt_actions,
  #     self.task.init_states, self.task.goal_dists,
  #     self.task.planning.goal_nn, num_steps)
  # _state_dist = _state_dist*5
  # Decode the plan into per-step actions and relative poses.
  _all_actions, _state_dist, _state_theta, _state_rel_orient = \
    self.task.env_mapper.decode_plan(action_volume, self.task.init_states,
      self.task.goal_dists, self.task.planning.goal_nn, num_steps)
  # NOTE(review): magic factor 5 — presumably converts decoded units to map
  # resolution; confirm against decode_plan.
  _state_dist = _state_dist*5
  # Save mapping samples, init_states, goal_states
  if True:
    goal_states = [np.where(self.task.goal_dists[i] == 0)[0][0]
      for i,_ in enumerate(self.task.init_states)]
    # goal_states = np.array(goal_states)
    # init_states = np.array(self.task.init_states)
    batch_data = utils.Foo(map_id_samples=self.task.map_id_samples,
      init_states=self.task.init_states, goal_states=goal_states,
      teacher_actions=_all_actions, teacher_dist=_state_dist,
      teacher_theta=_state_theta, teacher_rel_orient=_state_rel_orient,
      batch=self.batch)
    out_dir = os.path.join(output_dir, 'plans', '{:08d}'.format(global_step))
    utils.mkdir_if_missing(out_dir)
    file_name = os.path.join(out_dir, '{:08d}.pkl'.format(self.batch))
    print(file_name)
    tt = vars(batch_data)
    utils.save_variables(file_name, tt.values(), tt.keys(), overwrite=True)
  # oo = self._get_demonstration()
  # Save data here for loading into the class for processing.
  return d_starts[:,np.newaxis], d_ends[:,np.newaxis], None
def get_common_data(self):
outputs = {}
#print(self.task_params.data_typ)
if self.task_params.task_typ=='return' and self.task_params.data_typ == 'mapping':
o1 = self._get_demonstration_flipped()
# print([(k, v.dtype) for k, v in zip(o1.keys(), o1.values())])
outputs.update(o1)
if self.task_params.plan_type == 'opt':
o1 = self._get_demonstration()
# print([(k, v.dtype) for k, v in zip(o1.keys(), o1.values())])
outputs.update(o1)
elif self.task_params.plan_type == 'custom':
o2 = self._get_demonstration_from_plan()
# print([(k, v.dtype) for k, v in zip(o2.keys(), o2.values())])
outputs.update(o2)
return outputs
def _get_demonstration_from_plan(self):
  """Load the data from file."""
  """Returns a demonstration of the trajectory that provides images and
  actions taken to convey the robot to the target location.
  Adds teacher_actions, teacher_xyt, teacher_dist, teacher_theta,
  teacher_rel_orient, teacher_views to the dictionary.

  Loads a previously saved plan (see execute_actions) for this batch id and
  sanity-checks that it was produced for the same starts/goals.
  """
  assert(self.task_params.plan_path is not None)
  file_name = os.path.join(self.task_params.plan_path, '{:08d}.pkl'.format(self.batch))
  tt = utils.load_variables(file_name)
  logging.error('%s', file_name)
  goal_states = [np.where(self.task.goal_dists[i] == 0)[0][0]
    for i,_ in enumerate(self.task.init_states)]
  assert(np.allclose(np.array(self.task.init_states), tt['init_states']))
  assert(np.allclose(np.array(goal_states), tt['goal_states']))
  # assert(np.allclose(self.task.map_id_samples, tt['map_id_samples']))
  outputs = {}
  for k in ['teacher_actions', 'teacher_dist', 'teacher_theta', 'teacher_rel_orient']:
    outputs[k] = tt[k]
  outputs['teacher_rel_orient'] = outputs['teacher_rel_orient']*1.
  # NOTE(review): hard-coded placeholder shapes (8 episodes x 20 steps);
  # views/xyt are zero-filled dummies — presumably unused downstream when
  # plan_type == 'custom'. Confirm before changing batch size/path length.
  outputs['teacher_views'] = np.zeros((8,20,224,224,3), dtype=np.uint8)
  outputs['teacher_xyt'] = np.zeros((8,20,3), dtype=np.int32)
  return outputs
def _get_demonstration(self,name='teacher'):
  """Returns a demonstration of the trajectory that provides images and
  actions taken to convey the robot to the target location.
  Adds teacher_actions, teacher_xyt, teacher_dist, teacher_theta,
  teacher_rel_orient, teacher_views to the dictionary.

  Args:
    name: key prefix for the output dict ('teacher' by default).
  """
  task = self.task
  task_params = self.task_params
  env = task.env
  outputs = {}
  teacher_actions = np.array(task.actions)
  teacher_states = np.array(task.paths)
  init_states = np.array([x[0] for x in task.paths])
  #print((teacher_states))
  #print(teacher_states[0][0])
  #print(teacher_states[0].shape)
  # Drop the final state so states align 1:1 with actions.
  teacher_states = teacher_states[:,:-1]
  #print((teacher_states).shape)
  outputs[name+'_actions'] = teacher_actions
  outputs[name+'_xyt'] = teacher_states #env.task.node_embedding[teacher_states,:]
  # Teacher locations wrt the map, expressed relative to the start state.
  teacher_dist, teacher_theta, teacher_rel_orient = \
    env.get_relative_coordinates(teacher_states, init_states)
  outputs[name+'_dist'] = teacher_dist * env.task.building.env.resolution
  outputs[name+'_theta'] = teacher_theta
  outputs[name+'_rel_orient'] = teacher_rel_orient
  # Render out the views.
  if self.task.env.task_params.outputs.top_view:
    intermediate_views = env.render_views(teacher_states.reshape([-1,3]))[0]
    sh = [teacher_states.shape[0], teacher_states.shape[1],
      intermediate_views.shape[1], intermediate_views.shape[2],
      intermediate_views.shape[3]]
    intermediate_views = np.reshape(intermediate_views, sh)
    outputs[name+'_views'] = intermediate_views.astype(np.uint8)
  return outputs
def _get_demonstration_flipped(self):
  """Returns a demonstration of the trajectory that provides images and
  actions taken to convey the robot to the target location.
  Adds teacher_actions, teacher_xyt, teacher_dist, teacher_theta,
  teacher_rel_orient, teacher_views to the dictionary.

  Same as _get_demonstration but with every heading rotated by pi
  (agent looking back along the path) and keys prefixed 'mapping_'.
  """
  task = self.task
  task_params = self.task_params
  env = task.env
  outputs = {}
  # Deep-copy so the pi rotation does not mutate task.paths.
  inverted_paths=copy.deepcopy(task.paths)
  for i1 in range(len(inverted_paths)):
    for i2 in range(len(inverted_paths[i1])):
      inverted_paths[i1][i2][2]+=np.pi
  teacher_actions = np.array(task.actions)
  teacher_states = np.array(inverted_paths)
  init_states = np.array([x[0] for x in inverted_paths])
  #print((teacher_states))
  #print(teacher_states[0][0])
  #print(teacher_states[0].shape)
  # Drop the final state so states align 1:1 with actions.
  teacher_states = teacher_states[:,:-1]
  #print((teacher_states).shape)
  outputs['mapping_actions'] = teacher_actions
  outputs['mapping_xyt'] = teacher_states #env.task.node_embedding[teacher_states,:]
  # Teacher locations wrt the map, expressed relative to the start state.
  teacher_dist, teacher_theta, teacher_rel_orient = \
    env.get_relative_coordinates(teacher_states, init_states)
  outputs['mapping_dist'] = teacher_dist * env.task.building.env.resolution
  outputs['mapping_theta'] = teacher_theta
  outputs['mapping_rel_orient'] = teacher_rel_orient
  # Render out the views.
  if self.task.env.task_params.outputs.top_view:
    intermediate_views = env.render_views(teacher_states.reshape([-1,3]))[0]
    sh = [teacher_states.shape[0], teacher_states.shape[1],
      intermediate_views.shape[1], intermediate_views.shape[2],
      intermediate_views.shape[3]]
    intermediate_views = np.reshape(intermediate_views, sh)
    outputs['mapping_views'] = intermediate_views.astype(np.uint8)
  return outputs
def _get_mapping_data(self):
  """Returns set of image, pose pairs around the current location of the
  agent that are going to be used for mapping. Calls appropriate functions
  from the mapper_env class.

  Returns:
    (outputs, id_samples): mapping tensors and the sampled node ids. When
    task_params.share_start is set, only the first episode's samples are
    kept and broadcast to the rest of the batch.
  """
  task, task_params = self.task, self.task_params
  env, env_mapper = task.env, task.env_mapper
  init_states = [x[0] for x in task.paths]
  rng = task.mapper_rng
  id_samples = env_mapper._sample_mapping_nodes(init_states, rng)
  outputs = env_mapper._gen_mapping_data(id_samples, init_states)
  # FIXME?: Rename things in outputs.
  if task_params.share_start:
    # Check if the starting point is the same or not.
    for s in init_states:
      assert(s == init_states[0]), \
        'init_states are not all the same {:s}.'.format(str(init_states))
    for k in outputs.keys():
      outputs[k] = outputs[k][:1,...]
    # All rows after the first reuse the first episode's samples.
    id_samples[1:,:] = id_samples[:1,:]
  self.task.map_id_samples = id_samples
  return outputs, id_samples
def pre_common_data(self, inputs):
  """Identity hook for pre-processing the common data; returns it as-is."""
  return inputs
def pre_features(self, f):
  """Identity hook for pre-processing the features; returns them as-is."""
  return f
def get_features(self, states, step_number=None):
  """Computes tensors that get fed into tensorflow at each time step.

  Stacks the last `history`+1 frames along the channel axis, adds the
  agent's map location, the ground-truth distance to the teacher
  trajectory, and chamfer precision/recall so far.
  """
  task = self.task; env = task.env; task_params = self.task_params
  f = env.get_features(states)
  # Pad history with copies of the current frame on the first call(s).
  while len(task.history_f) < task_params.history:
    task.history_f.insert(0, copy.deepcopy(f))
  # Insert the latest frame.
  task.history_f.insert(0, copy.deepcopy(f))
  if self.task.env.task_params.outputs.top_view:
    view = np.concatenate([np.expand_dims(x['views_0'], -1) for x in task.history_f], -1)
    view = np.expand_dims(view, axis=1)
    f['view'] = view
    f.pop('views_0', None);
  if self.task.env.task_params.outputs.top_view_roads:
    road = np.concatenate([np.expand_dims(x['roads_0'], -1) for x in task.history_f], -1)
    road = np.expand_dims(road, axis=1)
    f['road'] = road
    f.pop('roads_0', None);
  f['loc_on_map'] = np.expand_dims(f['loc_on_map_0'], axis=1)
  f.pop('loc_on_map_0', None);
  f['view_xyt'] = np.expand_dims(f['views_xyt'], axis=1)
  # Compute distance from trajectory from current state (looked up in the
  # per-episode distance field selected by task_params.dist_type).
  gt_dist = np.array([getattr(task, task_params.dist_type)[i][int(x[0])][int(x[1])] for i,x in enumerate(states)],
    dtype=np.float32)
  #print(gt_dist.shape)
  f['gt_dist'] = np.reshape(gt_dist*1., [-1,1,1])
  cd1s, cd2s, _ = self._comptue_chamfer_distance()
  f['cd_prec'] = cd1s
  f['cd_recall'] = cd2s
  # Compute chamfer distance between trajectories.
  # Drop the oldest history frame so the window size stays fixed.
  task.history_f.pop()
  return f
def _comptue_chamfer_distance(self):
episodes = self.task.env.episodes
teacher_states = self.task.paths
#nodes = self.task.env.task.graph.nodes
cd1s = []; cd2s = [];
for i in range(len(teacher_states)):
teacher_traj = np.array(teacher_states[i])[0:2]*1.
student_traj = np.array(episodes[i].states)[0:2]*1.
tt = np.expand_dims(teacher_traj, 1) - np.expand_dims(student_traj, 0)
tt = np.sqrt(np.sum(tt**2, 2)) / self.task.env.task_params.step_size
cd1s.append(np.mean(np.min(tt,0)))
cd2s.append(np.mean(np.min(tt,1)))
cd1s = np.expand_dims(np.array(cd1s), 1)
cd1s = np.expand_dims(np.array(cd1s), 2)
cd2s = np.expand_dims(np.array(cd2s), 1)
cd2s = np.expand_dims(np.array(cd2s), 2)
task = self.task; task_params = self.task_params;
episodes = self.task.env.episodes
states = [(e.states[-1]) for e in episodes]
gt_dist = [getattr(task, task_params.dist_type)[i][int(x[0])][int(x[1])] for i, x in enumerate(states)]
gt_dist = np.array(gt_dist, dtype=np.float32)
gt_dist = np.reshape(gt_dist, [-1,1,1])
return cd1s, cd2s, gt_dist
def take_freespace_action(self, states, actions, step_number=None):
"""Given states, executes actions. Returns the reward for each time step.
"""
new_states = self.task.env.take_freespace_action(states, actions)
rewards = [0 for _ in states]
return new_states, rewards
def take_action(self, states, actions, step_number=None):
"""Given states, executes actions. Returns the reward for each time step.
"""
new_states = self.task.env.take_action(states, actions)
rewards = [0 for _ in states]
return new_states, rewards
def get_optimal_action(self, states, j):
  """Is used to execute actions that an expert would have taken.
  Input:
    states: Whatever reset returns TODO.
  Output:
    acts is one-hot encoding of optimal action from states (multi-hot when
    task_params.multi_act is set).
  Side effect: caches the result in self.get_opt_act, which get_targets
  consumes (and clears) on its next call.
  """
  task = self.task; env = task.env; task_params = self.task_params
  acts = []
  for i in range(task_params.batch_size):
    d, n = env.exp_nav.find_best_action_set(states[i],getattr(task, task_params.dist_type)[i]\
      ,spacious=task_params.spacious,multi_act=task_params.multi_act)
    a=np.zeros([4])
    if task_params.multi_act:
      # Mark every action in the optimal set.
      for d_i in d:
        a[int(d_i[0])]=1
    else:
      a[int(d[0])]=1
    #print(a)
    acts.append(a)
  acts = np.array(acts)*1
  self.get_opt_act=acts
  return acts
def get_targets(self, states, j):
"""Used to compute ground truth for things that the network should produce
at each time step.
gt_action: probability of taking each action
gt_q_value: q-value for different actions
"""
task = self.task; env = task.env; task_params = self.task_params
# a = np.zeros((len(states), 1, self.task.actions.shape[0]), dtype=np.int32);
# a[:,:,0] = 1;
#a = self.get_optimal_action(states, j)
assert(self.get_opt_act is not None)
a=self.get_opt_act
self.get_opt_act=None
ds = []
for i in range(self.task_params.batch_size):
#n, d = self.task.env.task.graph.get_action_distance(getattr(task, task_params.dist_type)[i], states[i])
#d[n == -1] = d[0]*1.+1
d=np.array([0,0,0,0])
ds.append(np.reshape(d, [1,-1]))
ds = np.concatenate(ds, 0)
ds = -1.*ds
a = np.expand_dims(a, axis=1)*1
ds = np.expand_dims(ds, axis=1)*1
return {'gt_action': a, 'gt_q_value': ds}
def get_gamma(self):
  """Discount factor for RL returns; fixed at 0.99 for this task."""
  return 0.99
def make_vis_paper_wts(self, out_dir, suffix='', prefix='', pointer=None,
    map_wt=None, rel_pose_teacher_mapping=None, mapping_view=None, teacher_views=None):
  """ Visualizes the best reference view for each location.

  For every (episode i, time t) picks the mapping sample with the highest
  attention weight in map_wt and saves a side-by-side figure of the teacher
  view and that reference view, titled with the relative pose, under
  out_dir/corres_vis/. Figures only; no return value.
  """
  # Find the best view for each thing and write down the thingy.
  bs, ts = map_wt.shape[:2]
  ind = np.argmax(map_wt, 2)
  # Quantize relative orientation to one of four 90-degree bins.
  theta = np.mod(np.round(np.arctan2(rel_pose_teacher_mapping[...,3], rel_pose_teacher_mapping[...,2])/np.pi*2.), 4)
  for i in range(bs):
    for t in range(ts):
      fig, _, axes = utils.subplot2(plt, (1,2), (5,5))
      ax = axes.pop()
      ax.imshow(teacher_views[i,t,:,:,:].astype(np.uint8))
      ax.axis('off')
      ax = axes.pop()
      ax.imshow(mapping_view[0,ind[i,t],:,:,:].astype(np.uint8))
      ax.axis('off')
      _p = rel_pose_teacher_mapping[i,t,ind[i,t],:]
      ax.set_title('({:.0f}, {:.0f}, {:.0f}$^\circ$)'.format(round(_p[0]), round(_p[1]), 90*theta[i,t,ind[i,t]]))
      # NOTE(review): assumes out_dir/corres_vis already exists.
      out_file_name = os.path.join(out_dir, 'corres_vis',
        '{:s}corres_vis{:s}_{:02d}_{:02d}.png'.format(prefix, suffix, i, t))
      fig.savefig(out_file_name, bbox_inches='tight')
      plt.close(fig)
def make_vis_video(self, out_dir, suffix, prefix, view, teacher_views,
    mapping_view, rel_pose_teacher_mapping, pointer, map_wt):
  """Renders one mp4 per episode showing the planned vs. executed
  trajectory on the map, the robot's first-person view, the teacher image
  at the current plan pointer, and the attended visual memory."""
  import matplotlib.animation as manimation
  FFMpegWriter = manimation.writers['ffmpeg']
  metadata = dict(title='Full Visualization', artist='matplotlib',
    comment='Visualization')
  fps = 2
  """Visualizes the optimal and executed trajectories, action failure and the
  steps."""
  # Best-matching mapping sample per (episode, plan step).
  match_ind = np.argmax(map_wt, 2)
  # Make a plot of the episode for environments in this batch.
  map_id_samples = self.task.map_id_samples
  ind = np.argmax(map_wt, 2)
  # Quantize relative orientation to four 90-degree bins.
  theta = np.mod(np.round(np.arctan2(rel_pose_teacher_mapping[...,3],
    rel_pose_teacher_mapping[...,2])/np.pi*2.), 4)
  full_view = self.task.env.task.scaled_views[0]
  vs = self.task.env.task_params.view_scales[0]
  step_size = self.task.env.task_params.step_size
  task = self.task
  env = task.env
  plt.style.use('fivethirtyeight')
  cm = utils.get_538_cm()
  def _get_trajectory_data(task, i):
    # Planned (o_), executed (e_) and mapping-sample (m_) locations for
    # episode i, scaled to view coordinates.
    # NOTE(review): m_loc/m_theta are unbound if map_id_samples is None.
    vs = task.env.task_params.view_scales[0]
    env = task.env
    optimal = task.paths[i]
    executed = env.episodes[i].states
    o_loc, _, _, o_theta = env.get_loc_axis(np.array(optimal).astype(np.int32))
    o_loc = o_loc*vs;
    e_loc, _, _, e_theta = env.get_loc_axis(np.array(executed).astype(np.int32))
    e_loc = e_loc*vs
    action_status = np.array(env.episodes[i].action_status)
    map_id_samples = task.map_id_samples
    if map_id_samples is not None:
      m_loc, _, _, m_theta = env.get_loc_axis(np.array(map_id_samples[i,:]).astype(np.int32))
      m_loc = m_loc*vs
    return o_loc, e_loc, m_loc, m_theta, action_status
  def _adjust_size(ax, o_loc, e_loc):
    # Zoom the map axis to the trajectories, with a minimum half-size.
    min_size = 12
    all_locs = np.concatenate([o_loc, e_loc], axis=0)
    min_ = np.min(all_locs, axis=0)
    max_ = np.max(all_locs, axis=0)
    mid_ = (min_+max_)/2.
    sz = np.maximum(1.2*np.max(max_-min_)/2., min_size)
    ax.set_xlim([mid_[0]-sz, mid_[0]+sz])
    ax.set_ylim([mid_[1]-sz, mid_[1]+sz])
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
  def _reset_figs(axes):
    for a in axes:
      a.clear()
      a.axis('off')
  matplotlib.rcParams['axes.titlesize'] = 8
  for i in range(self.task_params.batch_size):
    writer = FFMpegWriter(fps=fps, metadata=metadata)
    # Executed trajectory is drawn shifted by 1px so it doesn't hide the plan.
    offset = 1
    o_loc, e_loc, m_loc, m_theta, action_status = _get_trajectory_data(task, i)
    # Pointer into the plan, prepended with 0 for the initial frame.
    pointer_i = np.concatenate([np.array(0)[np.newaxis], pointer[i,:]*1], 0)
    plt.style.use('fivethirtyeight')
    fig = plt.figure(figsize=(10,6.6))
    gs = gridspec.GridSpec(3,5)
    gs.update(left=0.0, right=1.0, top=0.95, bottom=0.05, wspace=0.05, hspace=0.05)
    ax_view = plt.subplot(gs[:3,:3]) # First person view
    ax_teacher = plt.subplot(gs[0,3]) # Reference image
    ax_synth = plt.subplot(gs[0,4]) # Reference image
    # Location on map (low alpha for the whole trajectory and full alpha for parts traversed already)
    ax_map = plt.subplot(gs[-2:,-2:])
    out_file_name = os.path.join(out_dir, '{:s}env_vis{:s}_{:02d}.mp4'.format(prefix, suffix, i))
    with writer.saving(fig, out_file_name, 100):
      _reset_figs([ax_view, ax_teacher, ax_synth, ax_map])
      # Display the constant things with the map; each grab_frame() below
      # adds one build-up frame to the video.
      map_legend_bbox_to_anchor = (0.0, 0.7)
      map_legend_loc = 'upper right'
      ref_imgs_label = 'Ref. Images '
      ax_map.imshow(1-full_view[:,:,0].astype(np.float32)/255.,
        vmin=0., vmax=2.5, cmap='Greys', origin='lower')
      ax_map.imshow(full_view, alpha=0.6, origin='lower')
      _adjust_size(ax_map, o_loc, e_loc)
      ax_map.text(.5, 1., 'Overhead View (Visualization Only)',
        verticalalignment='top', horizontalalignment='center', transform=ax_map.transAxes,
        fontdict={'fontsize': 10, 'color': 'red'}, bbox=dict(facecolor='white', alpha=0.9, lw=0))
      writer.grab_frame(**{'facecolor':'black'})
      # Mapping-sample poses as green arrows.
      for k in range(m_loc.shape[0]):
        s = 4; t = m_theta[k,0]
        arrow = ax_map.arrow(m_loc[k,0], m_loc[k,1], s*np.cos(t), s*np.sin(t),
          head_width=2, head_length=2, fc='g', ec='g', alpha=0.8, width=.5)
      ref_img = arrow
      ax_map.legend([ref_img], [ref_imgs_label],
        loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      writer.grab_frame(**{'facecolor':'black'})
      ax_map.plot(o_loc[0,0], o_loc[0,1], cm[0], alpha=1.0, ms=20, marker='.', ls='none', label='Start')
      handles, labels = ax_map.get_legend_handles_labels()
      ax_map.legend([ref_img]+handles, [ref_imgs_label]+labels,
        loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      writer.grab_frame(**{'facecolor':'black'})
      ax_map.plot(o_loc[-1,0], o_loc[-1,1], cm[0], alpha=1.0, ms=20, marker='*', ls='none', label='Goal')
      handles, labels = ax_map.get_legend_handles_labels()
      ax_map.legend([ref_img] + handles, [ref_imgs_label]+labels,
        loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      writer.grab_frame(**{'facecolor':'black'})
      ax_map.plot(o_loc[:,0], o_loc[:,1], cm[0], alpha=0.5, label='Planned')
      handles, labels = ax_map.get_legend_handles_labels()
      ax_map.legend([ref_img] + handles, [ref_imgs_label]+labels,
        loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      writer.grab_frame(**{'facecolor':'black'})
      ax_map.plot(e_loc[:,0]-offset, e_loc[:,1]-offset, cm[1], alpha=0.5, label='Executed')
      handles, labels = ax_map.get_legend_handles_labels()
      ax_map.legend([ref_img] + handles, [ref_imgs_label]+labels,
        loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      writer.grab_frame(**{'facecolor':'black'})
      # Handles updated in-place each animation step below.
      o_loc_handle = ax_map.plot([], [], cm[0], alpha=1.0)[0]
      o_loc_point_handle = ax_map.plot([], [], cm[0], label='Loc. on Plan',
        marker='.', ms=12, alpha=1.0, ls='none')[0]
      e_loc_handle = ax_map.plot([], [], cm[1], alpha=1.0)[0]
      e_loc_point_handle = ax_map.plot([], [], cm[1], label='Actual Loc.',
        marker='.', ms=12, alpha=1.0, ls='none')[0]
      failed_handle = ax_map.plot([], [], 'kx')[0]
      handles, labels = ax_map.get_legend_handles_labels()
      ax_map.legend([ref_img] + handles + [failed_handle],
        [ref_imgs_label] + labels +['Noisy Acts ( 0)'],
        loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      # ax_map.legend([ref_img] + handles + [failed_handle, rel_mem_handle],
      #   [ref_imgs_label] + labels +['Noisy Actions (0)', 'Rel. Mem.'],
      #   loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
      ax_teacher.text(.5, 1., 'Actual Image on Path Plan\n(Vis Only)',
        verticalalignment='top', horizontalalignment='center', transform=ax_teacher.transAxes,
        fontdict={'fontsize': 10, 'color': 'red'}, bbox=dict(facecolor='white', alpha=0.9, lw=0))
      ax_synth.text(.5, 1., 'Relevant Visual Memory',
        verticalalignment='top', horizontalalignment='center', transform=ax_synth.transAxes,
        fontdict={'fontsize': 10, 'color': 'red'}, bbox=dict(facecolor='white', alpha=0.9, lw=0))
      ax_view.text(0., 1., "Robot's View",
        verticalalignment='top', horizontalalignment='left', transform=ax_view.transAxes,
        fontdict={'fontsize': 20, 'color': 'red'}, bbox=dict(facecolor='white', alpha=0.9, lw=0))
      view_text_handle = ax_view.text(1., 1., 't = {:2d}, $\eta_t$ = {:4.1f}'.format(0, 0),
        verticalalignment='top', horizontalalignment='right', transform=ax_view.transAxes,
        fontdict={'fontsize': 20, 'color': 'red'}, bbox=dict(facecolor='white', alpha=0.9, lw=0))
      writer.grab_frame(**{'facecolor':'black'})
      writer.grab_frame(**{'facecolor':'black'})
      logging.error('%d', action_status.shape[0])
      # One frame per executed step (plus the initial frame at j=0).
      for j in range(1+action_status.shape[0]):
        # reset axes and figures
        p = int(np.round(pointer_i[j]))
        o_loc_handle.set_data(o_loc[:(p+1),0], o_loc[:(p+1),1])
        e_loc_handle.set_data(e_loc[:(j+1),0]-offset, e_loc[:(j+1),1]-offset)
        e_loc_point_handle.set_data(e_loc[j,0]-offset, e_loc[j,1]-offset)
        failed_actions = np.where(np.invert(action_status[:j]))[0]
        failed_handle.set_data(e_loc[failed_actions,0]-offset, e_loc[failed_actions,1]-offset)
        rel_mem_handle = None
        if p <= match_ind.shape[1]-1:
          # Highlight the attended mapping sample with a magenta arrow.
          k = match_ind[i,p]; s = 6; t = m_theta[k,0];
          rel_mem_handle = ax_map.arrow(m_loc[k,0], m_loc[k,1], s*np.cos(t), s*np.sin(t),
            head_width=4, head_length=4, fc='m', ec='m', alpha=1.0, width=1.)
          ax_synth.imshow(mapping_view[0,match_ind[i,p],:,:,:].astype(np.uint8))
          ax_teacher.imshow(teacher_views[0,p,:,:,:].astype(np.uint8))
          o_loc_point_handle.set_data(o_loc[p,0], o_loc[p,1])
        else:
          ax_synth.imshow(np.zeros((1,1,3), dtype=np.uint8))
          ax_teacher.imshow(np.zeros((1,1,3), dtype=np.uint8))
        noise_action_str = 'Noisy Acts ({:2d})'.format(np.sum(np.invert(action_status[:j])))
        ax_map.legend([ref_img] + handles + [failed_handle, rel_mem_handle],
          [ref_imgs_label] + labels +[noise_action_str, 'Rel. Mem.'],
          loc=map_legend_loc, bbox_to_anchor=map_legend_bbox_to_anchor)
        view_text_handle.set_text('t = {:2d}, $\eta_t$ = {:4.1f}'.format(j, pointer_i[j]))
        ax_view.imshow(view[i,j,:,:,:,0].astype(np.uint8))
        writer.grab_frame(**{'facecolor':'black'})
        # The arrow is per-frame; remove it before drawing the next step.
        if rel_mem_handle is not None:
          rel_mem_handle.remove()
    plt.close()
def make_vis_paper(self, out_dir, suffix='', prefix='', pointer=None, map_wt=None):
"""Visualizes the optimal and executed trajectories, action failure and the
steps."""
min_size = 12
# Make a plot of the episode for environments in this batch.
cd_prec, cd_recall, gt_dist = self._comptue_chamfer_distance()
map_id_samples = self.task.map_id_samples
full_view = self.task.env.task.scaled_views[0]
vs = self.task.env.task_params.view_scales[0]
step_size = self.task.env.task_params.step_size
env = self.task.env
plt.style.use('fivethirtyeight')
for i in range(self.task_params.batch_size):
fig = plt.figure(figsize=(6,10));
gs = gridspec.GridSpec(5,3)
ax = plt.subplot(gs[:3, :3])
ax1 = plt.subplot(gs[3, :3])
ax2 = plt.subplot(gs[4, :3])
# Plot 1 with the trajectory on the map.
ax.imshow(full_view, alpha=0.6, origin='lower')
all_locs = []
optimal = self.task.paths[i]
executed = env.episodes[i].states
offset = 1
for j, (states, label) in enumerate(zip([optimal, executed], ['planned', 'executed'])):
loc, _, _, theta = env.get_loc_axis(np.array(states).astype(np.int32))
loc = loc*vs;
loc = loc - j*offset
ax.plot(loc[:,0], loc[:,1], label=label);
all_locs.append(loc)
if j == 0:
ax.plot(loc[0,0], loc[0,1], 'm.', ms=20)
ax.plot(loc[-1,0], loc[-1,1], 'm*', ms=20)
if j == 1:
# Plot where it got stuck.
action_status = np.array(env.episodes[i].action_status)
failed_actions = np.where(np.invert(action_status))[0]
ax.plot(loc[failed_actions,0], loc[failed_actions,1], 'kx', label='failed')
ax.legend()
all_locs = np.concatenate(all_locs, axis=0)
min_ = np.min(all_locs, axis=0)
max_ = np.max(all_locs, axis=0)
mid_ = (min_+max_)/2.
sz = np.maximum(1.2*np.max(max_-min_)/2., min_size)
ax.set_xlim([mid_[0]-sz, mid_[0]+sz])
ax.set_ylim([mid_[1]-sz, mid_[1]+sz])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if map_id_samples is not None:
map_loc, _, _, theta = env.get_loc_axis(np.array(map_id_samples[i,:]).astype(np.int32))
map_loc = map_loc*vs
for k in range(map_loc.shape[0]):
s = 4; t = theta[k,0]
ax.arrow(map_loc[k,0], map_loc[k,1], s*np.cos(t), s*np.sin(t),
head_width=2, head_length=2, fc='g', ec='g', alpha=0.2)
# Plot 2 with the executed actions.
ax = ax1
teacher_actions = np.array(self.task.actions[i])
t_ = ax.plot(teacher_actions, 'g.-', label='planned')
executed_actions = np.array(env.episodes[i].executed_actions)
e_ = ax.plot(executed_actions-0.2, 'b.-', alpha=0.5, label='executed')
action_status = np.array(env.episodes[i].action_status)
failed_actions = np.where(np.invert(action_status))[0]
nn = len(teacher_actions)
ax.plot(failed_actions, executed_actions[failed_actions]-0.2, 'kx', label='failed')
ax.set_ylim([-0.25, 3.2])
ax.get_yaxis().set_ticks([0,1,2,3])
ax.get_yaxis().set_ticklabels(['Stay', 'Left', 'Right', 'Forward'])
ax.get_xaxis().set_ticks(np.arange(0, nn, nn/10))
ax.legend()
ax.axhline(-0.2, color='k')
ax = ax2
pointer_i = pointer[i,:]*1
pointer_i[1:] = pointer_i[1:] - pointer_i[:-1]
ax.bar(np.arange(len(pointer_i)), height=pointer_i)
ax.get_xaxis().set_ticks(np.arange(0, nn, nn/10))
ax.set_ylabel('$\eta$')
ax.axhline(0.0, color='k')
# ax.axvline(-0.5, color='k')
out_file_name = os.path.join(out_dir, '{:s}env_vis{:s}_{:02d}.png'.format(prefix, suffix, i))
fig.savefig(out_file_name, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
def save_vis(self, out_dir, suffix='', prefix=''):
student_states = np.concatenate([e.states for e in self.task.env.episodes], 0)[np.newaxis,:,:]
teacher_states = np.concatenate(self.task.paths, 0)[np.newaxis,...]
return [[student_states, teacher_states]]
def make_vis(self, out_dir, suffix='', prefix=''):
min_size = 12
# Make a plot of the episode for environments in this batch.
cd_prec, cd_recall, gt_dist = self._comptue_chamfer_distance()
map_id_samples = None #self.task.map_id_samples
fig, _, axes = utils.subplot2(plt, (2, self.task_params.batch_size), (5,5))
full_view = self.task.env.task.scaled_views[0]
vs = self.task.env.task_params.view_scales[0]
step_size = self.task.env.task_params.step_size
env = self.task.env
full_view_file = os.path.join(out_dir, 'full_view.png')
if not os.path.exists(full_view_file):
cv2.imwrite(full_view_file, full_view)
for i in range(self.task_params.batch_size):
ax = axes.pop()
# Plot 1 with the trajectory on the map.
ax.imshow(full_view, alpha=0.6, origin='lower')
if map_id_samples is not None:
map_loc = env.get_loc_axis(np.array(map_id_samples[i,:]).astype(np.int32))[0]
map_loc = map_loc*vs
ax.plot(map_loc[:,0], map_loc[:,1], 'g*', alpha=0.5)
all_locs = []
for j, (states, cmap, sz, m, lw) in enumerate(zip(
[self.task.paths[i], env.episodes[i].states],
['copper', 'cool'], [40, 10], ['o', 'o'], [0, 0])):
loc = env.get_loc_axis(np.array(states).astype(np.int32))[0]
loc = loc*vs
loc = loc[:,::-1]*1
ax.scatter(loc[:,0], loc[:,1], c=np.arange(loc.shape[0]), s=sz,
cmap=cmap, marker=m, edgecolor='k', lw=lw)
all_locs.append(loc)
all_locs = np.concatenate(all_locs, axis=0)
min_ = np.min(all_locs, axis=0)
max_ = np.max(all_locs, axis=0)
mid_ = (min_+max_)/2.
sz = np.maximum(1.2*np.max(max_-min_)/2., min_size)
ax.set_xlim([mid_[0]-sz, mid_[0]+sz])
ax.set_ylim([mid_[1]-sz, mid_[1]+sz])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title('pre: {:0.2f}, rec: {:0.2f}, dist: {:0.2f}'.format(
cd_prec[i,0,0], cd_recall[i,0,0], gt_dist[i,0,0]))
# Plot 2 with the executed actions.
ax = axes.pop()
teacher_actions = np.array(self.task.actions[i])
t_ = ax.plot(teacher_actions, 'g.-', label='teacher')
executed_actions = np.array(env.episodes[i].executed_actions)
e_ = ax.plot(executed_actions-0.1, 'b.-', alpha=0.5, label='student')
ax.set_ylim([-0.2, 3.2])
ax.get_yaxis().set_ticks([0,1,2,3])
ax.legend() #[t_, e_])
out_file_name = os.path.join(out_dir,
'{:s}env_vis{:s}.png'.format(prefix, suffix))
fig.savefig(out_file_name, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
def test_follower_noise_continuous():
from env import factory
d = factory.get_dataset('campus', 'small')
name = d.get_imset()[0]
logging.error(name)
top_view_param = get_top_view_discrete_env_task_params(
prob_random=0.2, batch_size=4, map_max_size=200, step_size=1)
e = TopViewDiscreteEnv(dataset=d, name='small', task_params=top_view_param,
flip=False, rng=np.random.RandomState(0))
follower_task_param = get_follower_task_params(batch_size=4, min_dist=4,
max_dist=20, path_length=40, num_waypoints=8)
f = Follower(e, follower_task_param)
rng = np.random.RandomState(0)
init_states = f.reset(rng)
f.get_common_data()
states = init_states
for i in range(20):
feats = f.get_features(states)
logging.error('%s', feats.keys())
acts = f.get_optimal_action(states, 0)
gt_actions = f.get_targets(states, 0)
acts = np.argmax(acts, axis=1)
states, reward = f.take_action(states, acts)
logging.error('%s, %s', str(acts), str(states))
f.make_vis('tmp', '_1000_test')
def test_follower():
from env import factory
d = factory.get_dataset('campus', 'small')
name = d.get_imset()[0]
logging.error(name)
top_view_param = get_top_view_discrete_env_task_params(
prob_random=0.2, batch_size=4, map_max_size=200, step_size=1)
e = TopViewDiscreteEnv(dataset=d, name='small', task_params=top_view_param,
flip=False, rng=np.random.RandomState(0))
follower_task_param = get_follower_task_params(batch_size=4, min_dist=4,
max_dist=20, path_length=40, num_waypoints=8)
f = Follower(e, follower_task_param)
rng = np.random.RandomState(0)
init_states = f.reset(rng)
f.get_common_data()
states = init_states
for i in range(20):
feats = f.get_features(states)
logging.error('%s', feats.keys())
acts = f.get_optimal_action(states, 0)
gt_actions = f.get_targets(states, 0)
acts = np.argmax(acts, axis=1)
states, reward = f.take_action(states, acts)
logging.error('%s, %s', str(acts), str(states))
f.make_vis('tmp', '_1000_test')
def test_discrete_env():
from env import factory
d = factory.get_dataset('campus', 'small')
name = d.get_imset()[0]
logging.error(name)
top_view_param = get_top_view_discrete_env_task_params(
prob_random=0.1, batch_size=4, map_max_size=200, step_size=8)
e = TopViewDiscreteEnv(dataset=d, name='small', task_params=top_view_param,
flip=False, rng=np.random.RandomState(0))
# Try to take random actions inside this thing.
rng = np.random.RandomState(0)
init_states = e.reset(rng)
locs = []
states = init_states
for i in range(20):
states = e.take_action(states, [3,3,3,3])
loc, _, _, _ = e.get_loc_axis(states)
locs.append(loc)
print(np.array(locs)[:,0,:])
def test_discrete_env_noise():
from env import factory
import matplotlib.pyplot as plt
fig, _, axes = utils.subplot2(plt, (1,4))
d = factory.get_dataset('campus', 'small')
name = d.get_imset()[0]
logging.error(name)
for n in [0., 0.1, 0.2, 0.5]:
top_view_param = get_top_view_discrete_env_task_params(
prob_random=n, batch_size=32, map_max_size=200)
e = TopViewDiscreteEnv(dataset=d, name='small', task_params=top_view_param,
flip=False, rng=np.random.RandomState(0))
# Try to take random actions inside this thing.
rng = np.random.RandomState(0)
init_states = e.reset(rng)
locs = []
states = [init_states[0] for _ in init_states]
# states = init_states
actions = np.ones((20,), dtype=np.uint8)*3
actions[5] = 1; actions[15] = 1
for i in range(20):
loc, _, _, _ = e.get_loc_axis(states)
states = e.take_action(states, [actions[i]]*32)
locs.append(loc)
locs = np.array(locs)*1.
# Plot all these different trajectories and see what they look like
ax = axes.pop()
logging.error('%s', str(locs.shape))
print(locs[0,:,:])
for l in range(locs.shape[1]):
loc = locs[:,l,:]
r = rng.randn()*0.001
ax.plot(r + loc[:,0], r + loc[:,1])
ax.plot(loc[0,0], loc[0,1], 'rx')
ax.plot(loc[-1,0], loc[-1,1], 'gx')
plt.savefig('/tmp/sgupta-tmp-a.png')
plt.close(fig)
def test_follower_2():
from env import factory
d = factory.get_dataset('campus', 'mnist1')
name = d.get_imset()[0]
logging.error(name)
top_view_param = get_top_view_discrete_env_task_params(
prob_random=0.2, batch_size=4, view_scales=[0.125], fovs=[64],
base_resolution=1.0, step_size=128, top_view=True)
e = TopViewDiscreteEnv(dataset=d, name=name, task_params=top_view_param,
flip=False, rng=np.random.RandomState(0))
follower_task_param = get_follower_task_params(
batch_size=4, min_dist=4, max_dist=20, path_length=40,
num_waypoints=8, typ='U')
f = Follower(e, follower_task_param)
rng = np.random.RandomState(0)
init_states = f.reset(rng)
common_data = f.get_common_data()
states = init_states
feats = []
for i in range(80):
feats.append(f.get_features(states))
acts = f.get_optimal_action(states, 0)
gt_actions = f.get_targets(states, 0)
acts = np.argmax(acts, axis=1)
states, reward = f.take_action(states, acts)
| [
"ashish_kumar@berkeley.edu"
] | ashish_kumar@berkeley.edu |
9d271bad43590d0385529bc485e0fd4d18fa1faf | e38f7b5d46fd8a65c15e49488fc075e5c62943c9 | /pychron/core/ui/qt/custom_label_editor.py | af48d20e9e521f5da679a7aaf6a049248224d552 | [
"Apache-2.0"
] | permissive | INGPAN/pychron | 3e13f9d15667e62c347f5b40af366096ee41c051 | 8592f9fc722f037a61b0b783d587633e22f11f2f | refs/heads/master | 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | #===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from traits.etsconfig.etsconfig import ETSConfig
ETSConfig.toolkit = 'qt4'
#============= enthought library imports =======================
from traits.api import HasTraits, Str, Int, Color, \
Button, Any, Instance, on_trait_change
from traitsui.api import View, UItem
from traitsui.qt4.editor import Editor
from traitsui.basic_editor_factory import BasicEditorFactory
#============= standard library imports ========================
import random
from PySide.QtGui import QLabel
#============= local library imports ==========================
class _CustomLabelEditor(Editor):
# txtctrl = Any
color = Any
bgcolor = Any
weight = Any
text_size = Any
def init(self, parent):
self.control = self._create_control(parent)
# self.item.on_trait_change(self._set_color, 'color')
self.sync_value(self.factory.color, 'color', mode='from')
self.sync_value(self.factory.bgcolor, 'bgcolor', mode='from')
self.sync_value(self.factory.weight, 'weight', mode='from')
self.sync_value(self.factory.text_size, 'text_size', mode='from')
@on_trait_change('color, bgcolor, weight, text_size')
def _update_style(self):
self._set_style()
def _set_style(self, control=None,
color=None, bgcolor=None,
size=None, weight=None):
if control is None:
control = self.control
if color is None:
color = self.color.name()
if bgcolor is None:
if self.bgcolor is None:
bgcolor = 'transparent'
else:
bgcolor = self.bgcolor.name()
if size is None:
size = self.text_size
if not size:
size = self.item.size
if weight is None:
weight = self.weight
if not weight:
weight = self.item.weight
css = '''QLabel {{color:{};
background-color:{};
font-size:{}px;
font-weight:{};}}
'''.format(color,
bgcolor,
size,
weight)
control.setStyleSheet(css)
def update_editor(self):
if self.control:
# print self.object, self.value
if isinstance(self.value, (str, int, float, long, unicode)):
self.control.setText(str(self.value))
# self.control.SetLabel(self.value)
def _create_control(self, parent):
control = QLabel()
color = self.item.color.name()
self._set_style(color=color,
control=control)
control.setMargin(5)
parent.setSpacing(0)
return control
class CustomLabelEditor(BasicEditorFactory):
klass = _CustomLabelEditor
color = Str
bgcolor = Str
weight = Str
text_size = Str
class CustomLabel(UItem):
editor = Instance(CustomLabelEditor, ())
size = Int(12)
size_name = Str
color = Color('black')
color_name = Str
bgcolor = Color('transparent')
bgcolor_name = Str
weight = Str('normal')
top_padding = Int(5)
bottom_padding = Int(5)
left_padding = Int(5)
right_padding = Int(5)
def _size_name_changed(self):
self.editor.text_size = self.size_name
def _color_name_changed(self):
self.editor.color = self.color_name
def _bgcolor_name_changed(self):
self.editor.bgcolor = self.bgcolor_name
#===============================================================================
# demo
#===============================================================================
class Demo(HasTraits):
a = Str('asdfsdf')
foo = Button
color = Color('blue')
bgcolor = Color('green')
cnt = 0
size = Int(12)
def _foo_fired(self):
self.a = 'fffff {}'.format(random.random())
if self.cnt % 2 == 0:
self.color = 'red'
self.bgcolor = 'blue'
else:
self.bgcolor = 'red'
self.color = 'blue'
self.cnt += 1
def traits_view(self):
v = View(
UItem('size'),
'foo',
CustomLabel('a',
# color='blue',
size=24,
size_name='size',
top_padding=10,
left_padding=10,
color_name='color',
bgcolor_name='bgcolor'
),
resizable=True,
width=400,
height=100)
return v
if __name__ == '__main__':
d = Demo()
d.configure_traits()
#============= EOF =============================================
# css = '''QLabel {{ color:{}; font-size:{}px; font-weight:{};}}
# # '''.format(self.item.color.name(), self.item.size, self.item.weight)
# control.setStyleSheet(css)
# control.setAlignment(Qt.AlignCenter)
# control.setGeometry(0, 0, self.item.width, self.item.height)
# vbox = QVBoxLayout()
# vbox.setSpacing(0)
# hbox = QHBoxLayout()
# hbox.addLayout(vbox)
# parent.addLayout(vbox)
# print vbox.getContentsMargins()
# vbox.setContentsMargins(5, 5, 5, 5)
# vbox.setSpacing(-1)
# vbox.addSpacing(5)
# vbox.addSpacing(10)
# vbox.addWidget(control)
# vbox.addSpacing(5)
# vbox.addStretch()
# vbox.setSpacing(-1)
# vbox.setMargin(10)
# control.setLayout(vbox)
# parent.addWidget(control) | [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
3a55c09604726ec3ec95a699a4a3521b11473c76 | e9e8c200cb712e68b597875d3b860d0893074f4f | /utility_bills/migrations/0005_auto_20210214_2350.py | f8c859fd946d48f45c54b583ae546d41b8a2d564 | [] | no_license | noolfon/utility_bills | c31fd475dc0a2d1546efb6880dad44dcadc66597 | da919e0d50baaf1190be858b1382ec138e367c19 | refs/heads/master | 2023-03-07T10:49:45.166608 | 2021-02-22T18:14:34 | 2021-02-22T18:14:34 | 340,852,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 3.1.6 on 2021-02-14 20:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('utility_bills', '0004_auto_20210214_2104'),
]
operations = [
migrations.AlterModelOptions(
name='payments',
options={'ordering': ['date_create'], 'verbose_name': 'Платеж', 'verbose_name_plural': 'Платежи'},
),
]
| [
"noolfon@yandex.ru"
] | noolfon@yandex.ru |
df62bf280fad0e420569d63af99d4c8a66b7b79c | 9e749ebdcbf0554e895c97e641fd858c0e1ddd5a | /fintech_startup/payment/migrations/0001_initial.py | 93af8c02d1be35a462dbd74e384878ba3b7fd247 | [] | no_license | ericovva/python-test-account-payments | f1b7a0e4b3e611bae14b1b0e5d3638277ea06f08 | 603ce8626434f2093a61d87e1215bd2bd13bc4b9 | refs/heads/master | 2020-05-03T22:52:52.969647 | 2019-04-04T17:18:48 | 2019-04-04T17:18:48 | 178,852,301 | 0 | 1 | null | 2021-02-14T21:32:19 | 2019-04-01T11:51:05 | JavaScript | UTF-8 | Python | false | false | 1,079 | py | # Generated by Django 2.2 on 2019-04-04 17:13
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=100)),
('direction', models.IntegerField(choices=[(-1, 'outgoing'), (1, 'incoming')])),
('tr_hash', models.UUIDField(default=uuid.UUID('82d65f38-b5c3-40e7-a0fa-1c75f49a793d'))),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accounts', to='account.Account')),
('to_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_accounts', to='account.Account')),
],
),
]
| [
"e.ovsepyan@eovsepyan.local"
] | e.ovsepyan@eovsepyan.local |
d9a938d0a99764aa8df2b3e6a4fa0134d103ce25 | eb96fbb3b234eb9a555592795092a27057c3b97e | /tests/tests_redis.py | e536f2bff2bd7e4dcc0a53532bee84478de5985f | [] | no_license | ronething/snap-server | 9a32e175ca9d1d45d45c0b146b90e8b3ba2af340 | 20350e8e1c8a847d23acce33396acd4317ceab42 | refs/heads/master | 2020-11-29T01:42:06.272141 | 2019-12-24T17:45:09 | 2019-12-24T17:45:09 | 229,980,981 | 0 | 0 | null | 2020-11-24T07:47:18 | 2019-12-24T17:36:19 | Python | UTF-8 | Python | false | false | 610 | py | # -*- coding:utf-8 _*-
"""
@author: ronething
@time: 2019-01-27 11:10
@mail: axingfly@gmail.com
Less is more.
"""
from app.utils.conn_redis import ConnRedis
from app.utils.get_config import get_config
import time
snap_config = get_config()
redis_cli = ConnRedis(snap_config=snap_config)
if __name__ == '__main__':
print(redis_cli.set_value("jiaoyixia", "ronething"))
time.sleep(1)
print(redis_cli.get_value("jiaoyixia")) # ronething
time.sleep(1)
print(redis_cli.get_value_and_del("jiaoyixia")) # ronething
time.sleep(1)
print(redis_cli.get_value("jiaoyixia")) # None
| [
"axingfly@gmail.com"
] | axingfly@gmail.com |
dff0712580958054035ccdbe31de97a3e8ae818f | fb43bcc0a95d88e23d425dd3b8eb5aecfdaa2e29 | /Classification/SVMClassifier.py | be9ea0b9cb9913d2dcdbced45ab1a33e4af4be98 | [] | no_license | sarathspillai11/All_Machine_Learning_Algorithms | 6f4dc43e8c71dce1497528a6e3a1a9e1d325cf98 | 06223d024ece64f8a1413ff9ad721969a1658ca6 | refs/heads/master | 2023-05-15T02:32:28.189317 | 2021-06-05T03:01:25 | 2021-06-05T03:01:25 | 373,810,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py |
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
# Fitting SVM to the Training set
from sklearn.svm import SVC
from savemodel import saveas_sav
def svmClassification(dataframe,x_train, y_train, x_test,ticketId,cList,gammaList,kernelList):
if(len(cList) == 0):
cList = [0.5]
if (len(gammaList) == 0):
gammaList = [0.1]
if (len(kernelList) == 0):
kernelList = ['rbf']
#param_grid = {'C': [0.1, 0.5, 1, 1.5, 10], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001], 'kernel': ['rbf']}
param_grid = {'C': cList, 'gamma': gammaList, 'kernel': kernelList}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=3)
grid.fit(x_train, y_train)
print('best parameters :')
print(grid.best_params_)
print('how model looks after tuning :')
print(grid.best_estimator_)
# classifier = LinearSVC(C=0.5, class_weight=None, dual=True, fit_intercept=True,
# intercept_scaling=1, loss='squared_hinge', max_iter=10000,
# multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
# verbose=0)
classifier = grid.best_estimator_
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(x_test)
dataframe['predicted'] = y_pred
saveas_sav(classifier, 'svm_' + ticketId + '.sav')
return dataframe
# if __name__ == '__main__':
# data = pd.read_csv(r"D:\Personal\SmartIT\data\BankNote_Authentication.csv")
# # test = pd.read_excel(r"D:\Personal\SmartIT\data\hematological malignancies bayesian.xls",sheet_name='BCCA')
#
# trainingColumns = (list(data.columns))[:-2]
# print('training col :', trainingColumns)
# outputColumn = (list(data.columns))[-1]
# print('output column :', outputColumn)
# X = data.iloc[:, :-1]
# y = data.iloc[:, -1]
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
#
# dataframe = svmClassification(X_test, X_train, y_train, X_test, '123', cList = [] , gammaList = [], kernelList= [])
#
# from sklearn.metrics import confusion_matrix
# from sklearn.metrics import accuracy_score
# from sklearn.metrics import classification_report
#
# y_pred = dataframe['predicted']
#
# cm = confusion_matrix(y_test, y_pred)
# # Accuracy
#
# accuracy = accuracy_score(y_test, y_pred)
#
# print(cm)
#
# print(accuracy)
#
# print(classification_report(y_test, y_pred))
| [
"sarathspillai11@gmail.com"
] | sarathspillai11@gmail.com |
3cb486ec73cbcd31ee29a67e9891a012efe50a48 | 13d3f1f4508e0c74f9db7d8fd418a7bcfa954bc9 | /preprocess.py | 94db9b867d8416c0c60dc1a4ec2635019b91487e | [
"MIT"
] | permissive | Simenglv/HetGT | 03e026623730fb4aea1e3662b486578c1b3b2386 | 8f29ffa86a40b09261092726b87608661139eec0 | refs/heads/master | 2023-01-10T19:27:54.082689 | 2020-11-07T07:22:37 | 2020-11-07T07:22:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,705 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pre-process Data / features files and build vocabulary
"""
import codecs
import glob
import sys
import gc
import torch
from functools import partial
from collections import Counter, defaultdict
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import split_corpus
import onmt.inputters as inputters
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import _build_fields_vocab,\
_load_vocab
def check_existing_pt_files(opt):
""" Check if there are existing .pt files to avoid overwriting them """
pattern = opt.save_data + '.{}*.pt'
for t in ['train', 'valid']:
path = pattern.format(t)
if glob.glob(path):
sys.stderr.write("Please backup existing pt files: %s, "
"to avoid overwriting them!\n" % path)
sys.exit(1)
def build_save_dataset(corpus_type, fields, src_reader, tgt_reader, grh_reader, opt):
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
counters = defaultdict(Counter)
srcs = opt.train_src
tgts = opt.train_tgt
grhs = opt.train_graph
ids = opt.train_ids
else:
srcs = [opt.valid_src]
tgts = [opt.valid_tgt]
grhs = [opt.valid_graph]
ids = [None]
for src, tgt, grh, maybe_id in zip(srcs, tgts, grhs, ids):
logger.info("Reading source&target and edge files: %s %s %s." % (src, tgt, grh))
src_shards = split_corpus(src, opt.shard_size)
tgt_shards = split_corpus(tgt, opt.shard_size)
grh_shards = split_corpus(grh, opt.shard_size)
shard_pairs = zip(src_shards, tgt_shards, grh_shards)
dataset_paths = []
if (corpus_type == "train" or opt.filter_valid) and tgt is not None:
filter_pred = partial(
inputters.filter_example, use_src_len=opt.data_type == "text",
max_src_len=opt.src_seq_length, max_tgt_len=opt.tgt_seq_length)
else:
filter_pred = None
if corpus_type == "train":
existing_fields = None
if opt.src_vocab != "":
try:
logger.info("Using existing vocabulary...")
existing_fields = torch.load(opt.src_vocab)
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
src_vocab, src_vocab_size = _load_vocab(
opt.src_vocab, "src", counters,
opt.src_words_min_frequency)
else:
src_vocab = None
if opt.tgt_vocab != "":
tgt_vocab, tgt_vocab_size = _load_vocab(
opt.tgt_vocab, "tgt", counters,
opt.tgt_words_min_frequency)
else:
tgt_vocab = None
for i, (src_shard, tgt_shard, grh_shard) in enumerate(shard_pairs):
assert len(src_shard) == len(tgt_shard)
logger.info("Building shard %d." % i)
dataset = inputters.Dataset(
fields,
readers=([src_reader, tgt_reader, grh_reader]
if tgt_reader else [src_reader, grh_reader]),
data=([("src", src_shard), ("tgt", tgt_shard), ('grh', grh_shard)]
if tgt_reader else [("src", src_shard), ('grh', grh_shard)]),
dirs=([opt.src_dir, None, None] # Cannot use _dir with TextDataReader
if tgt_reader else [opt.src_dir, None]),
sort_key=inputters.str2sortkey[opt.data_type],
filter_pred=filter_pred
)
if corpus_type == "train" and existing_fields is None:
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and
src_vocab is not None) or \
(sub_n == 'tgt' and
tgt_vocab is not None)
if (hasattr(sub_f, 'sequential')
and sub_f.sequential and not has_vocab):
val = fd
counters[sub_n].update(val)
if maybe_id:
shard_base = corpus_type + "_" + maybe_id
else:
shard_base = corpus_type
data_path = "{:s}.{:s}.{:d}.pt".\
format(opt.save_data, shard_base, i)
dataset_paths.append(data_path)
logger.info(" * saving %sth %s data shard to %s."
% (i, shard_base, data_path))
dataset.save(data_path)
del dataset.examples
gc.collect()
del dataset
gc.collect()
if corpus_type == "train":
vocab_path = opt.save_data + '.vocab.pt'
if existing_fields is None:
fields = _build_fields_vocab(
fields, counters, opt.data_type,
opt.share_vocab, opt.vocab_size_multiple,
opt.src_vocab_size, opt.src_words_min_frequency,
opt.tgt_vocab_size, opt.tgt_words_min_frequency,
opt.shared_vocab_size)
else:
fields = existing_fields
torch.save(fields, vocab_path)
def build_save_vocab(train_dataset, fields, opt):
fields = inputters.build_vocab(
train_dataset, fields, opt.data_type, opt.share_vocab,
opt.src_vocab, opt.src_vocab_size, opt.src_words_min_frequency,
opt.tgt_vocab, opt.tgt_vocab_size, opt.tgt_words_min_frequency,
vocab_size_multiple=opt.vocab_size_multiple
)
vocab_path = opt.save_data + '.vocab.pt'
torch.save(fields, vocab_path)
def count_features(path):
"""
path: location of a corpus file with whitespace-delimited tokens and
│-delimited features within the token
returns: the number of features in the dataset
"""
with codecs.open(path, "r", "utf-8") as f:
first_tok = f.readline().split(None, 1)[0]
return len(first_tok.split(u"│")) - 1
def main(opt):
ArgumentParser.validate_preprocess_args(opt)
torch.manual_seed(opt.seed)
if not(opt.overwrite):
check_existing_pt_files(opt)
init_logger(opt.log_file)
logger.info("Extracting features...")
src_nfeats = 0
tgt_nfeats = 0
for src, tgt in zip(opt.train_src, opt.train_tgt):
src_nfeats += count_features(src) if opt.data_type == 'text' \
else 0
tgt_nfeats += count_features(tgt) # tgt always text so far
logger.info(" * number of source features: %d." % src_nfeats)
logger.info(" * number of target features: %d." % tgt_nfeats)
logger.info("Building `Fields` object...")
fields = inputters.get_fields(
opt.data_type,
src_nfeats,
tgt_nfeats,
dynamic_dict=opt.dynamic_dict,
src_truncate=opt.src_seq_length_trunc,
tgt_truncate=opt.tgt_seq_length_trunc,
edges_vocab=opt.edges_vocab)
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)
grh_reader = inputters.str2reader["grh"].from_opt(opt)
logger.info("Building & saving training data...")
build_save_dataset(
'train', fields, src_reader, tgt_reader, grh_reader, opt)
if opt.valid_src and opt.valid_tgt:
logger.info("Building & saving validation data...")
build_save_dataset('valid', fields, src_reader, tgt_reader, grh_reader, opt)
def _get_parser():
parser = ArgumentParser(description='preprocess.py')
opts.config_opts(parser)
opts.preprocess_opts(parser)
return parser
if __name__ == "__main__":
parser = _get_parser()
opt = parser.parse_args()
main(opt)
| [
"noreply@github.com"
] | noreply@github.com |
46605b75b0b45618bf617cab05f1b68b03b6d0c9 | 9833f3a70637d4e3cb848423c9bd34694dab2bd7 | /formularios.py | 53bfc976c5ebe50f298fd5500c1d0f5d349b375a | [] | no_license | AlemanLucas/LucasAleman | 970ce1fe9c7ac0028629d9d8c608e55a73715fcd | 6be8c84791c9f9af9646621901e91c145db247d4 | refs/heads/master | 2021-08-27T20:14:54.382953 | 2017-11-28T06:41:39 | 2017-11-28T06:41:39 | 112,295,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, PasswordField
from wtforms.validators import DataRequired,EqualTo,Regexp,Length
#Acá se encuentran todas las clases que se utilizaron para el proyecto.
#Estas clases sirven para validar campos y tener los parametros.
class SearchCliente(FlaskForm):
parametro = StringField('Escriba el Nombre del Cliente que desea buscar: ', validators=[Length(min=3, max=100, message="Debe ingresar como minimo 3 caracteres"),DataRequired(message="Debe escribir valor")])
class SearchProd(FlaskForm):
parametro = StringField('Escriba el Nombre del Producto que desea buscar: ', validators=[Length(min=3, max=100, message="Debe ingresar como minimo 3 caracteres"),DataRequired(message="Debe escribir un valor")])
class SearchCant(FlaskForm):
parametro = StringField('Escriba la Cantidad de Stock que desea buscar: ', validators=[DataRequired(message="Debe escribir un valor"),Regexp(regex="\d+", message="Solo nùmeros enteros por favor")])
class SearchPrecio(FlaskForm):
parametro = StringField('Escriba el Precio que busca: ', validators=[DataRequired(message="Debe escribir un valor"),Regexp(regex="^(\d|-)?(\d|,)*\.?\d*$", message="Ingrese un precio valido")])
#Clases para validar usuarios y contraseñas.
class Checkeo_Log(FlaskForm):
name = StringField('Usuario:', validators=[DataRequired(message="Debe escribir un nombre de usuario")])
password = PasswordField('Contraseña:', validators=[DataRequired(message="Debe escribir una contraseña")])
# Clase para tener el nuevo usuario y checkeo de contraseña..
class CreaUsuario(FlaskForm):
name = StringField('Usuario:', validators=[DataRequired(message="Debe escribir un nombre de usuario")])
pass1 = PasswordField('Contraseña:', validators=[DataRequired(message="Debe escribir una contraseña")])
pass2 = PasswordField('Repita Contraseña:', validators=[DataRequired(message="Debe escribir de nuevo su contraseña"),EqualTo('pass1', message='Las contraseñas deben coincidir')])
| [
"lucas.h.aleman@gmail.com"
] | lucas.h.aleman@gmail.com |
90cde6d32a8640fa14a55510dc8681a377c7fdcc | f15ed6c14059a8fb810f06f68f339ecdbeef67b7 | /map_list.py | 154ff69dcf7a3e65a1ceb85042ccebbefe3b4dba | [] | no_license | Arjunb79/arjun_futura | 2babb6c179afcc263e3457a8409549fd39c6881f | e2cd6150263f3f9b538995c9fef2f5ec20749c17 | refs/heads/main | 2023-01-05T07:47:34.363497 | 2020-11-07T16:20:21 | 2020-11-07T16:20:21 | 310,884,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | keys = ['red', 'green', 'blue']
values = ['amal','gopal', 'manu']
color_dictionary = dict(zip(keys, values))
print(color_dictionary) | [
"arjunb0320@gmail.com"
] | arjunb0320@gmail.com |
547dc2c51cd7e4130340b227393dc5244bbb1f95 | 281eb7745511fc68a6f5342b5fc20af1d0864533 | /Calculations and Plots_Ho Wing Wong.py | 39b725190ecca82fd3aa905ac29f19d5ab0f9399 | [] | no_license | Karagul/Financial_Market_Index_Analysis | d85cdda1124a7fa929df7cdfff3160da33568175 | d3513d4175257e66c7316df79ad3a48e0c229eda | refs/heads/master | 2020-05-25T12:25:58.446864 | 2017-09-20T22:09:24 | 2017-09-20T22:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,879 | py | #==============================================================================
# Lists of stock index values
#
# By Johnny Lin
#
# There are three lists defined here:
# * nasdaq: Close of index at the end of each trading day in trading_days.
# * sp500: Close of index at the end of each trading day in trading_days.
# * djia: Close of index at the end of each trading day in trading_days.
# * trading_days: Number of trading days since Jun 1, 2016. Jun 1 is
# trading day 0.
#
# Values downloaded from http://finance.yahoo.com. Values of all lists are
# daily trading day closings from Jun 1-Aug 31, 2016, inclusive.
#==============================================================================
nasdaq = [4952.25,
4971.359863,
4942.52002,
4968.709961,
4961.75,
4974.640137,
4958.620117,
4894.549805,
4848.439941,
4843.549805,
4834.930176,
4844.919922,
4800.339844,
4837.209961,
4843.759766,
4833.319824,
4910.040039,
4707.97998,
4594.439941,
4691.870117,
4779.25,
4842.669922,
4862.569824,
4822.899902,
4859.160156,
4876.810059,
4956.759766,
4988.640137,
5022.819824,
5005.72998,
5034.060059,
5029.589844,
5055.779785,
5036.370117,
5089.930176,
5073.899902,
5100.160156,
5097.629883,
5110.049805,
5139.810059,
5154.97998,
5162.129883,
5184.200195,
5137.72998,
5159.740234,
5166.25,
5221.120117,
5213.140137,
5225.47998,
5204.580078,
5228.399902,
5232.890137,
5262.02002,
5227.109863,
5228.660156,
5240.149902,
5238.379883,
5244.600098,
5260.080078,
5217.689941,
5212.200195,
5218.919922,
5232.330078,
5222.990234,
5213.220215]
sp500 = [2099.330078,
2105.26001,
2099.129883,
2109.409912,
2112.129883,
2119.120117,
2115.47998,
2096.070068,
2079.060059,
2075.320068,
2071.5,
2077.98999,
2071.219971,
2083.25,
2088.899902,
2085.449951,
2113.320068,
2037.410034,
2000.540039,
2036.089966,
2070.77002,
2098.860107,
2102.949951,
2088.550049,
2099.72998,
2097.899902,
2129.899902,
2137.159912,
2152.139893,
2152.429932,
2163.75,
2161.73999,
2166.889893,
2163.780029,
2173.02002,
2165.169922,
2175.030029,
2168.47998,
2169.179932,
2166.580078,
2170.060059,
2173.600098,
2170.840088,
2157.030029,
2163.790039,
2164.25,
2182.870117,
2180.889893,
2181.73999,
2175.48999,
2185.790039,
2184.050049,
2190.149902,
2178.149902,
2182.219971,
2187.02002,
2183.870117,
2182.639893,
2186.899902,
2175.439941,
2172.469971,
2169.040039,
2180.379883,
2176.120117,
2170.949951]
djia = [17789.669922,
17838.560547,
17807.060547,
17920.330078,
17938.279297,
18005.050781,
17985.189453,
17865.339844,
17732.480469,
17674.820312,
17640.169922,
17733.099609,
17675.160156,
17804.869141,
17829.730469,
17780.830078,
18011.070312,
17400.75,
17140.240234,
17409.720703,
17694.679688,
17929.990234,
17949.369141,
17840.619141,
17918.619141,
17895.880859,
18146.740234,
18226.929688,
18347.669922,
18372.119141,
18506.410156,
18516.550781,
18533.050781,
18559.009766,
18595.029297,
18517.230469,
18570.849609,
18493.060547,
18473.75,
18472.169922,
18456.349609,
18432.240234,
18404.509766,
18313.769531,
18355,
18352.050781,
18543.529297,
18529.289062,
18533.050781,
18495.660156,
18613.519531,
18576.470703,
18636.050781,
18552.019531,
18573.939453,
18597.699219,
18552.570312,
18529.419922,
18547.300781,
18481.480469,
18448.410156,
18395.400391,
18502.990234,
18454.300781,
18400.880859]
trading_days = range(len(nasdaq))
#- Check lengths of all lists are the same: if (len(nasdaq) != len(sp500)) or (len(sp500) != len(djia)):
# raise ValueError, "bad data"
import scipy as sci
import matplotlib.pyplot as plt
#1
naMean = sci.mean(nasdaq)
naStd = sci.std(nasdaq)
spMean = sci.mean(sp500)
spStd = sci.std(sp500)
djMean = sci.mean(djia)
djStd = sci.std(djia)
print('Nasdaq mean= ', naMean)
print('Nasdaq s.d.= ', naStd)
print('Nasdaq s.d./mean=', naStd/naMean)
print('Sp500 mean= ', spMean)
print('spStd= ', spStd)
print('sp500 s.d./mean=', spStd/spMean)
print('djia mean= ', djMean)
print('djia Std= ', djStd)
print('djia s.d./mean=', djStd/djMean)
#2
plt.figure(1)
plt.plot(trading_days, nasdaq, 'r*--')
plt.ylabel('Index')
plt.xlabel('Time')
plt.title('NASDAQ', size = 24.0)
plt.show()
plt.figure(2)
plt.plot(trading_days, sp500, 'b*--')
plt.ylabel('Index')
plt.xlabel('Time')
plt.title('S&P 500 Index', size = 24.0)
plt.show()
plt.figure(3)
plt.plot(trading_days, djia, 'k*--')
plt.ylabel('Index')
plt.xlabel('Time')
plt.title('Dow Jones Industrial Index', size = 24.0)
plt.show()
#3
plt.figure(4)
#plt.plot(trading_days, nasdaq, 'r*-.',label="NASDAQ", trading_days, sp500, 'b*--',label= "S&P 500", trading_days, djia, 'k*-',label= "Dow Jones")
plt.plot(trading_days, nasdaq, 'r*--',label="NASDAQ")
plt.plot(trading_days, sp500, 'b*--',label= "S&P 500")
plt.plot(trading_days, djia, 'k*--',label= "Dow Jones")
plt.axis([0,100, 0, 20000])
plt.ylabel('Index')
plt.xlabel('Time')
plt.title('DJIA, SP500, NASDAQ Combined', size = 20.0)
plt.legend()
plt.show()
#===== end file =====
| [
"louiewhw@gmail.com"
] | louiewhw@gmail.com |
dbee63b54c7ff57f5bd7606d871b0a192dba6f78 | 51c37cc30c8a5bf3d2cb04f8eadde248ea431154 | /meiduo_mall/meiduo_mall/apps/verifications/views.py | 404518200600e01ff319b76fe42d06aa99626718 | [] | no_license | kkx001/meiduo_admin | b548ed8255c73863ad1917719a6ce2b82ac9a75a | 84dc7a76b6253a78e2d8f03e3066f52497c9d32d | refs/heads/main | 2023-02-16T00:51:22.121120 | 2021-01-11T06:51:21 | 2021-01-11T06:51:21 | 328,567,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,733 | py | from django.shortcuts import render
from django.views import View
from .libs.captcha.captcha import captcha
from django_redis import get_redis_connection
from . import constants
from django import http
from django_redis import get_redis_connection
from meiduo_mall.utils.response_code import RETCODE
from verifications.libs.yuntongxun.sms import CCP
from celery_tasks.sms.tasks import send_sms_code
import logging, random
#创建日志输出器
logger = logging.getLogger('django')
class ImageCodeView(View):
"""图形验证码"""
def get(self, request, uuid):
"""
:param request: 请求对象
:param uuid: 唯一标识图形验证码所属用户的id
:return: image/jpg
"""
#生成图片验证码
text, image = captcha.generate_captcha()
#保存图片验证码
redis_conn = get_redis_connection('verify_code')
redis_conn.setex('img_%s' % uuid, constants.IMAGE_CODE_REDIS_EXPIRES, text)
#响应图片验证码
return http.HttpResponse(image, content_type='/image/jpg')
class SMSCodeView(View):
"""短信验证码"""
def get(self, request, mobile):
"""
:param request: 请求对象
:param mobile: 手机号
:return: JSON
"""
#接收参数
image_code_client = request.GET.get('image_code')
uuid = request.GET.get('uuid')
#校验参数
if not all([image_code_client, uuid]):
return http.JsonResponse({'code': RETCODE.NECESSARYPARAMERR, 'errmsg':'缺少必传参数'})
#创建连接到redis
redis_conn = get_redis_connection('verify_code')
#判断用户是否频繁发送短信验证码
send_flag = redis_conn.get('send_flag_%s' % mobile)
if send_flag:
return http.JsonResponse({'code': RETCODE.THROTTLINGERR, 'errmsg': '短信验证码发送过于频繁'})
#提取图形验证码
image_code_server = redis_conn.get('img_%s' % uuid)
if image_code_server is None:
#图形验证码过期或不存在
return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'ermsg': '图形验证码失效'})
#删除图形验证码,避免恶意测试验证码
try:
redis_conn.delete('img_%s' % uuid)
except Exception as e:
logger.error(e)
#对比图形验证码
image_code_server = image_code_server.decode() #bytes转成字符
if image_code_client.lower() != image_code_server.lower(): #转化为小写进行比较
return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'errmsg': '验证码错误'})
#生成短信验证码:生成6位数的短信验证码
sms_code = '%06d' %random.randint(0, 999999)
logger.info(sms_code)
# #保存短信验证码
# redis_conn.setex('sms_%s' % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)
# #重新写入send_flag
# redis_conn.setex('send_flag_%s' % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)
#创建redis管道
pl = redis_conn.pipeline()
#将redis请求添加到队列中
pl.setex('sms_%s' % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)
pl.setex('send_flag_%s' % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)
#执行请求
pl.execute()
#发送短信验证码
# ccp = CCP()
# ccp.send_template_sms(mobile, [sms_code, constants.SMS_CODE_REDIS_EXPIRES // 60], constants.SEND_SMS_TEMPLATE_ID)
send_sms_code.delay(mobile, sms_code)
#响应结果
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '短信发送成功', })
| [
"40197330+kkx001@users.noreply.github.com"
] | 40197330+kkx001@users.noreply.github.com |
43988ddab4e642d7ffb5838eee6a82751456edd5 | 2a62d9376e381ed7b08fcc61a99b5dc96bd6f331 | /Ships.py | 4ac52f8cafce2d5b6dbaa2afac9c913bc775d811 | [] | no_license | dexterland99/space-control | 6a6f58b85953019572c30eb9383ae80a9d1b687b | 985fdcb5053654ebab88ce090edf5c4fe14461e2 | refs/heads/master | 2020-03-29T10:34:08.596011 | 2018-10-04T16:48:59 | 2018-10-04T16:48:59 | 149,811,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | '''
Ship Classes
'''
import pygame
class Ship(pygame.sprite.Sprite):
'''
Ship Class
'''
def __init__(self,width,height,screen):
super().__init__()
self.image = pygame.image.load("Ship.png")
self.rect = self.image.get_rect()
| [
"noreply@github.com"
] | noreply@github.com |
37ba1aa162d25931444ba005344100537f2992fa | 4dfc7fc9b84f76d690e33414610bc59a9b07001a | /bds/views/municipality.py | f7a746ad648d09ab470d311aed22e6dbf6fdd580 | [] | no_license | pythondev0101/-j-natividad-web-billing | e62da9ac943a74d2e1e9416d553fd3baafd3937f | 39f7b0d60d86a08d1c5d40cacf9904b28dc2355c | refs/heads/main | 2023-08-28T00:34:43.435674 | 2021-05-24T12:37:54 | 2021-05-24T12:37:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | from datetime import datetime
from flask import redirect, url_for, request, current_app, flash
from flask_login import current_user, login_required
from app import db
from app.admin.templating import admin_table, admin_edit
from bds import bp_bds
from bds.models import Municipality
from bds.forms import MunicipalityForm, MunicipalityEditForm
@bp_bds.route('/municipalities')
@login_required
def municipalities():
fields = [Municipality.id, Municipality.name, Municipality.description, Municipality.created_at, Municipality.updated_at]
form = MunicipalityForm()
return admin_table(Municipality, fields=fields,form=form,\
create_url='bp_bds.create_municipality', edit_url='bp_bds.edit_municipality')
@bp_bds.route('/municipalities/create', methods=['POST'])
@login_required
def create_municipality():
form = MunicipalityForm()
if not form.validate_on_submit():
for key, value in form.errors.items():
flash(str(key) + str(value), 'error')
return redirect(url_for('bp_bds.municipalities'))
try:
new = Municipality()
new.name = form.name.data
new.description = form.description.data
db.session.add(new)
db.session.commit()
flash('New municipality added successfully!')
except Exception as exc:
flash(str(exc), 'error')
return redirect(url_for('bp_bds.municipalities'))
@bp_bds.route('/municipalities/<int:oid>/edit', methods=['GET', 'POST'])
@login_required
def edit_municipality(oid):
ins = Municipality.query.get_or_404(oid)
form = MunicipalityEditForm(obj=ins)
if request.method == "GET":
return admin_edit(Municipality, form,'bp_bds.edit_municipality', oid, 'bp_bds.municipalities')
if not form.validate_on_submit():
for key, value in form.errors.items():
flash(str(key) + str(value), 'error')
return redirect(url_for('bp_bds.municipalities'))
try:
ins.name = form.name.data
ins.description = form.description.data
ins.updated_at = datetime.now()
ins.updated_by = "{} {}".format(current_user.fname,current_user.lname)
db.session.commit()
flash('Municipality update Successfully!','success')
except Exception as exc:
flash(str(exc),'error')
return redirect(url_for('bp_bds.municipalities'))
| [
"rmontemayor0101@gmail.com"
] | rmontemayor0101@gmail.com |
cb4bec1c5c9f2e7faae17939c77ca7c5189da426 | 7ede001485ce68aebcd185f9f7b01b1196e8900d | /forex/env-python2/lib/python2.7/site-packages/v20/user.py | 32388c147cd659635b7adaa22c68fbd120b48212 | [
"MIT"
] | permissive | phroiland/forex_algos | 971f04ebceb579a761dca6c1184fc14d1e78f9d1 | 055f51e55c52d6dd5cfd38550a48892a0fb09b0d | refs/heads/master | 2023-05-29T00:28:19.350350 | 2022-05-12T21:16:35 | 2022-05-12T21:16:35 | 92,301,496 | 1 | 0 | null | 2023-05-22T20:44:39 | 2017-05-24T14:28:29 | Python | UTF-8 | Python | false | false | 8,342 | py | import ujson as json
from v20.base_entity import BaseEntity
from v20.base_entity import EntityDict
from v20.request import Request
from v20 import spec_properties
class UserInfo(BaseEntity):
"""
A representation of user information, as provided to the user themself.
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.user_UserInfo
def __init__(self, **kwargs):
"""
Create a new UserInfo instance
"""
super(UserInfo, self).__init__()
#
# The user-provided username.
#
self.username = kwargs.get("username")
#
# The user's OANDA-assigned user ID.
#
self.userID = kwargs.get("userID")
#
# The country that the user is based in.
#
self.country = kwargs.get("country")
#
# The user's email address.
#
self.emailAddress = kwargs.get("emailAddress")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new UserInfo from a dict (generally from loading a JSON
response). The data used to instantiate the UserInfo is a shallow copy
of the dict passed in, with any complex child types instantiated
appropriately.
"""
data = data.copy()
return UserInfo(**data)
class UserInfoExternal(BaseEntity):
"""
A representation of user information, as available to external (3rd party)
clients.
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.user_UserInfoExternal
def __init__(self, **kwargs):
"""
Create a new UserInfoExternal instance
"""
super(UserInfoExternal, self).__init__()
#
# The user's OANDA-assigned user ID.
#
self.userID = kwargs.get("userID")
#
# The country that the user is based in.
#
self.country = kwargs.get("country")
#
# Flag indicating if the the user's Accounts adhere to FIFO execution
# rules.
#
self.FIFO = kwargs.get("FIFO")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new UserInfoExternal from a dict (generally from loading
a JSON response). The data used to instantiate the UserInfoExternal is
a shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
return UserInfoExternal(**data)
class EntitySpec(object):
"""
The user.EntitySpec wraps the user module's type definitions
and API methods so they can be easily accessed through an instance of a v20
Context.
"""
UserInfo = UserInfo
UserInfoExternal = UserInfoExternal
def __init__(self, ctx):
self.ctx = ctx
def get_info(
self,
userSpecifier,
**kwargs
):
"""
Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/users/{userSpecifier}'
)
request.set_path_param(
'userSpecifier',
userSpecifier
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('userInfo') is not None:
parsed_body['userInfo'] = \
self.ctx.user.UserInfo.from_dict(
jbody['userInfo'],
self.ctx
)
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
def get_external_info(
self,
userSpecifier,
**kwargs
):
"""
Fetch the externally-available user information for the specified user.
This endpoint is intended to be used by 3rd parties that have been
authorized by a user to view their personal information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/users/{userSpecifier}/externalInfo'
)
request.set_path_param(
'userSpecifier',
userSpecifier
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('userInfo') is not None:
parsed_body['userInfo'] = \
self.ctx.user.UserInfoExternal.from_dict(
jbody['userInfo'],
self.ctx
)
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
| [
"jon.froiland@gmail.com"
] | jon.froiland@gmail.com |
94bfa0658cfe9f7260b5af4a419628a7876e5df1 | 363da960114de5c1397ff223c5a0c22945e5a924 | /src/streamlit_web_application.py | 5813ce9764f40fc4caf996f2e758b6b6ae4b308d | [] | no_license | saumith/twitter-hashtag-recommendation | 6a1f5d9cfeea280e103749d2e0621a0159252386 | 350c4eb13e0e5b1caf6883ea2348d5ed5c1f9075 | refs/heads/main | 2023-06-29T23:56:24.942026 | 2021-08-07T17:50:22 | 2021-08-07T17:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | import streamlit as st
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import nltk
import tweepy
import re
import pandas as pd
from textblob import TextBlob
import csv
from collections import Counter
from tweet_api import *
from extract_keywords import *
from model import *
import itertools
st.title('Twitter Hashtag Recommendation')
st.sidebar.header("Trending Twitter hashtags (US)")
api = get_api()
trends_US = api.trends_place(23424977)
trends_world = api.trends_place(1)
topic_US = []
topic_World = []
for i in trends_US:
for trend in i['trends']:
topic_US.append(trend['name'])
for i in topic_US:
if i.startswith('#'):
st.sidebar.write(i)
st.sidebar.header("Trending Twitter hashtags (World)")
for i in trends_world:
for trend in i['trends']:
topic_World.append(trend['name'])
for i in topic_World:
if i.startswith('#'):
st.sidebar.write(i)
st.subheader('Write a tweet:')
tweet_input = st.text_input("")
appropriate_hashtag_list = tweet_similarity_model(tweet_input)
if tweet_input != "":
st.subheader('Recommended hashtag using Tweet similarity method:')
recommended_hashtags = " ".join(appropriate_hashtag_list)
st.write(recommended_hashtags)
| [
"dpoudel18@earlham.edu"
] | dpoudel18@earlham.edu |
f20fff98b3f5558fa62679cc38213bfe8b3fd252 | 03271803f9cdb665104788d00d1d2cc4eab27f38 | /views/htmltext.py | 58d52972011c19f7300b640f790b89c0bebc0e90 | [] | no_license | bancaldo/auction3 | 6a89166023fe8585e73c8153da5b6df5cf819089 | ae87e53d6631c84bf33259bdf58a06f090d02dca | refs/heads/master | 2022-12-20T12:09:46.648249 | 2020-10-24T16:02:41 | 2020-10-24T16:02:41 | 306,648,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # -*- coding: utf-8 -*-#
ABOUT = """
<html>
<center><h1>Auction</h1> versione 2.0<br>
by Bancaldo</td></center><br><br>
<b>Auction 2.0</b> is a simple app to manage a 'FantaLeague Auction'.<br>
At the auction End it is possible to export on a csv file<br>
<br>
<b>pacchetti utilizzati:</b><br>
- Python 3.7.1
- wxPython 4.0.3</b> for Graphics<br>
- Django 2.1.3</b> for database and Object Ralation Mapping<br>
<br>
<b>link utili:</b><br>
web-site: www.bancaldo.wordpress.com<br>
web-site: www.bancaldo.altervista.org<br>
<br>
<b>last revision:</b> Nov 26, 2018</p><br>
<b>author:</b> bancaldo
</html>
"""
HELP = """
<html>
<center><b>Auction 2.0</b></center><br><br>
<b>1) Import players</b><br>
Player evaluations file is available on my blog: www.bancaldo.wordpress.com<br>
From 'Players' menu chose 'import Players'<br>
<b>2) Create at least 2 teams</b><br>
From 'Team' menu chose 'New Team'<br>
<b>3) Start auction</b><br>
From 'Auction' menu chose 'New auction'<br>
Use the filter to find the player<br>.
Fill 'cost' field with the final auction value and set the Buyer team.<br>
</html>
"""
| [
"bancaldo@gmail.com"
] | bancaldo@gmail.com |
2ba938b829b5293d85393bdbabcce4e6f8a94016 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/game_control/AOGAS.py | d7b1cef8d85ccd796edb079ec9bcef0a8e803485 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 8,091 | py | # 2017.05.04 15:21:37 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/game_control/AOGAS.py
import time
import weakref
import BigWorld
import Event
from constants import AOGAS_TIME, ACCOUNT_ATTR
from debug_utils import LOG_ERROR, LOG_DEBUG
from enumerations import AttributeEnumItem, Enumeration
from helpers import time_utils
from skeletons.gui.game_control import IAOGASController
TIME_MODIFER = 3600
AOGAS_FORCE_START_NOTIFY = False
_DEFAULT_AOGAS_NOTIFY_TIMEOUT = 5000.0
AOGAS_NOTIFY_MSG = Enumeration('Notification message for Anti-online game addiction system', [('AOND_1', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
('AOND_2', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
('AOND_3', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
('AOND_MORE_3', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
('AOND_MORE_5', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
('RESET', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT})], instance=AttributeEnumItem)
class AOGAS_NOTIFY_TIME(object):
AOND_1 = 1 * TIME_MODIFER - 600
AOND_2 = 2 * TIME_MODIFER - 600
AOND_3 = AOGAS_TIME.REDUCED_GAIN
AOND_5 = AOGAS_TIME.NO_GAIN
class AOGAS_NOTIFY_PERIOD(object):
AOND_START = 5 * TIME_MODIFER / 6
AOND_2_3 = 1 * TIME_MODIFER
AOND_3_5 = 0.5 * TIME_MODIFER
AOND_END = 0.25 * TIME_MODIFER
class AOGASController(IAOGASController):
def __init__(self):
super(AOGASController, self).__init__()
self.onNotifyAccount = Event.Event()
self.__isNotifyAccount = False
self.__lastNotifyMessages = []
self.__aogasStartedAt = 0
self.__isAogasEnabled = True
self.__notificator = _AOGASNotificator(self, '_AOGASController__notifyAccount')
def fini(self):
self.__notificator.stop()
self.onNotifyAccount.clear()
super(AOGASController, self).fini()
def onLobbyStarted(self, ctx):
serverTime = ctx.get('aogasStartedAt')
if serverTime is not None:
self.__aogasStartedAt = time_utils.makeLocalServerTime(serverTime)
else:
self.__aogasStartedAt = time.time()
self.__isAogasEnabled = ctx.get('isAogasEnabled', True)
if not self.__notificator.isStarted():
self.__requestRequiredInfo()
return
def onDisconnected(self):
self.__notificator.stop()
self.__isNotifyAccount = False
self.__lastNotifyMessages = []
def onLobbyInited(self, event):
LOG_DEBUG('enableNotifyAccount ', self.__lastNotifyMessages)
self.__isNotifyAccount = True
for message in self.__lastNotifyMessages:
self.onNotifyAccount(message)
self.__lastNotifyMessages = []
def onAvatarBecomePlayer(self):
LOG_DEBUG('disableNotifyAccount')
self.__isNotifyAccount = False
def __notifyAccount(self, message, collect = False):
if self.__isNotifyAccount:
self.onNotifyAccount(message)
elif collect:
self.__lastNotifyMessages.append(message)
else:
self.__lastNotifyMessages = [message]
def __requestRequiredInfo(self):
BigWorld.player().stats.get('attrs', self.__receiveAccountAttrs)
def __receiveAccountAttrs(self, resultID, attrs):
if resultID < 0:
LOG_ERROR('Server return error: ', resultID, attrs)
return
if self.__isAogasEnabled and ACCOUNT_ATTR.AOGAS & attrs != 0 or AOGAS_FORCE_START_NOTIFY:
BigWorld.player().stats.get('accOnline', self.__receiveAccOnline)
elif self.__notificator.isStarted():
self.__notificator.stop()
def __receiveAccOnline(self, resultID, accOnline):
if resultID < 0:
LOG_ERROR('Server return error: ', resultID, accOnline)
return
if not accOnline:
self.__notifyAccount(AOGAS_NOTIFY_MSG.RESET)
delta = round(time.time() - self.__aogasStartedAt)
AOND = delta + accOnline
LOG_DEBUG('Calculate AOND (seconds,seconds,seconds) : ', AOND, delta, accOnline)
self.__notificator.start(AOND)
class _AOGASNotificator(object):
def __init__(self, scope, function):
self.__scope = weakref.ref(scope)
self.__function = function
self.__started = False
self.__AOND = 0
self.__callbackID = None
return
def start(self, AOND):
if self.__started:
return
self.__started = True
self.__AOND = AOND
notificated = False
if AOND > AOGAS_NOTIFY_TIME.AOND_1:
prevAOND = self.__getPrevNotifyTime(AOND)
self.__doNotify(self.__getNotifyMessages(prevAOND))
notificated = prevAOND == AOND
if notificated:
notifyPeriod = self.__getNotifyPeriod(self.__AOND)
LOG_DEBUG('AOGAS started (seconds,seconds)', self.__AOND, notifyPeriod)
self.__callbackID = BigWorld.callback(notifyPeriod, lambda : self.__notify(notifyPeriod))
else:
notifyTime = self.__getNextNotifyTime(AOND)
nextNotifyDelay = abs(notifyTime - AOND)
LOG_DEBUG('AOGAS started (seconds,seconds,seconds)', self.__AOND, notifyTime, nextNotifyDelay)
self.__callbackID = BigWorld.callback(nextNotifyDelay, lambda : self.__notify(nextNotifyDelay))
def stop(self):
self.__started = False
if self.__callbackID is not None:
BigWorld.cancelCallback(self.__callbackID)
self.__callbackID = None
return
def isStarted(self):
return self.__started
def __getNotifyPeriod(self, AOND):
if AOND < AOGAS_NOTIFY_TIME.AOND_1:
notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_START
elif AOND < AOGAS_NOTIFY_TIME.AOND_3:
notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_2_3
elif AOND < AOGAS_NOTIFY_TIME.AOND_5:
notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_3_5
else:
notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_END
return notifyPeriod
def __getNextNotifyTime(self, AOND):
notifyTime = 0
while notifyTime < AOND:
notifyPeriod = self.__getNotifyPeriod(notifyTime)
notifyTime += notifyPeriod
return notifyTime
def __getPrevNotifyTime(self, AOND):
notifyTime = 0
notifyPeriod = 0
while notifyTime < AOND:
notifyPeriod = self.__getNotifyPeriod(notifyTime)
notifyTime += notifyPeriod
return notifyTime - notifyPeriod
def __getNotifyMessages(self, AOND):
if AOND == AOGAS_NOTIFY_TIME.AOND_1:
messages = (AOGAS_NOTIFY_MSG.AOND_1,)
elif AOND == AOGAS_NOTIFY_TIME.AOND_2:
messages = (AOGAS_NOTIFY_MSG.AOND_2,)
elif AOND == AOGAS_NOTIFY_TIME.AOND_3:
messages = (AOGAS_NOTIFY_MSG.AOND_3, AOGAS_NOTIFY_MSG.AOND_MORE_3)
elif AOND < AOGAS_NOTIFY_TIME.AOND_5:
messages = (AOGAS_NOTIFY_MSG.AOND_MORE_3,)
else:
messages = (AOGAS_NOTIFY_MSG.AOND_MORE_5,)
return messages
def __doNotify(self, messages):
notifyHandler = getattr(self.__scope(), self.__function, None)
if notifyHandler is not None and callable(notifyHandler):
collect = len(messages) > 1
for message in messages:
notifyHandler(message, collect)
LOG_DEBUG('notify (seconds, message)', self.__AOND, message)
else:
LOG_ERROR('Not found notify handler ', self.__scope(), self.__function)
return
def __notify(self, notifyPeriod):
self.__AOND += notifyPeriod
self.__doNotify(self.__getNotifyMessages(self.__AOND))
notifyPeriod = self.__getNotifyPeriod(self.__AOND)
self.__callbackID = BigWorld.callback(notifyPeriod, lambda : self.__notify(notifyPeriod))
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\game_control\AOGAS.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:21:38 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
327914a84e501df8aa4e30d0ab286a73a37f1b35 | dc75370390e821b857b327100f0d2e9a60f34f89 | /chat/migrations/0001_initial.py | 477def66b7a7e08f361a5435958dcb17e478690a | [] | no_license | DontTouchMyMind/OnlineChat_Udemy | 018e24f6dfe7c1c2d1f37540f219f7b652987666 | 77ee36d89adbf71d07b6f73f9b6757aacabde939 | refs/heads/master | 2023-01-28T16:56:40.070478 | 2020-12-07T11:40:11 | 2020-12-07T11:40:11 | 315,873,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 3.1.3 on 2020-11-25 07:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Online',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=100)),
],
),
]
| [
"tobigface@gmail.com"
] | tobigface@gmail.com |
e4b50200cdcfab029ada56611d23bd13fb829714 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/42e441b34ef3b68f657a5e36027aaa21ff0b4d84-<run_bottleneck_on_image>-bug.py | 8ad0925041a855fb4f37bc20e4557601749d4a45 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py |
def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):
    """Runs inference on an image to extract the 'bottleneck' summary layer.

    Args:
      sess: Current active TensorFlow Session.
      image_data: Numpy array of image data.
      image_data_tensor: Input data layer in the graph.
      bottleneck_tensor: Layer before the final softmax.

    Returns:
      Numpy array of bottleneck values.
    """
    feed = {image_data_tensor: image_data}
    raw_values = sess.run(bottleneck_tensor, feed)
    # Drop singleton dimensions (e.g. the batch axis) from the result.
    return np.squeeze(raw_values)
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
49be6f19af78663962e559d4140c0414b52e4836 | b5644b71eade9abd62e3cd8747808b8edeea8ee1 | /movies/admin.py | 25365dac1ee29104521aa3d036714f35d6767529 | [] | no_license | HSx3/project_UBD | 68aa8dd1a3a2bf9c3523967a3c489a51c4bdac04 | fcc2b035dac07376ddb0e6c1eceb4544e3415455 | refs/heads/master | 2020-05-24T06:09:14.730903 | 2019-05-17T00:34:29 | 2019-05-17T00:34:29 | 187,133,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.contrib import admin
from .models import Movie, Genre, Actor, Director, Cast, Score
# Register your models here.
# Each movie-domain model is registered with the default ModelAdmin so it
# can be inspected and edited through the Django admin site.
admin.site.register(Movie)
admin.site.register(Genre)
admin.site.register(Director)
admin.site.register(Actor)
admin.site.register(Cast)
admin.site.register(Score)
"hs.ssafy@gmail.com"
] | hs.ssafy@gmail.com |
17b2457239c0753f00d775c707faee923df59847 | 0e76cdc762ce938e1b8e458722f7efd6ecc3a11f | /modules/eb/MapEventModule.py | b8a5a482c6b60dc8f038ad5c7d99735bcb4dd814 | [] | no_license | NichtJens/CoilSnake | 878a2d975ca6b2e3e8616fbdec694d2f7bacf3cd | 843daf0245e9d68ed8aeca422564f377ef182568 | refs/heads/master | 2021-01-21T00:36:25.741872 | 2013-02-28T14:23:21 | 2013-02-28T14:23:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,702 | py | import EbModule
from EbTablesModule import EbTable
from EbDataBlocks import DataBlock
from modules.Progress import updateProgress
import yaml
from re import sub
class MapEventModule(EbModule.EbModule):
    """Reads and writes EarthBound's map event (flag-triggered tile change) data.

    For each of the game's 20 map tilesets there is a list of
    ``(event flag, changes)`` pairs, where ``changes`` is a list of
    ``(before, after)`` tile arrangement numbers swapped once the flag is set.
    """
    _name = "Map Events"
    # ROM offset of the 3-byte pointer to the per-tileset pointer table.
    _PTR_LOC = 0x70d
    # ROM offset of the bank byte combined with the 16-bit table entries
    # to form full addresses.
    _PTR_BANK_LOC = 0x704

    def __init__(self):
        self._ptrTbl = EbTable(0xD01598)
        self._entries = []

    def freeRanges(self):
        # Range occupied by the vanilla data; reusable once relocated.
        return [(0x101598, 0x10187f)]

    def readFromRom(self, rom):
        """Parse the pointer table and all 20 tileset entries from the ROM."""
        self._ptrTbl.readFromRom(rom,
            EbModule.toRegAddr(rom.readMulti(self._PTR_LOC, 3)))
        updateProgress(5)
        bank = (rom[self._PTR_BANK_LOC] - 0xc0) << 16
        pct = 45.0 / 20
        for i in range(20):
            addr = bank | self._ptrTbl[i, 0].val()
            tsetEntry = []
            # Each tileset's list is terminated by a zero event flag.
            while rom.readMulti(addr, 2) != 0:
                flag = rom.readMulti(addr, 2)
                num = rom.readMulti(addr + 2, 2)
                addr += 4
                changes = []
                for j in range(num):
                    changes.append((rom.readMulti(addr, 2),
                        rom.readMulti(addr + 2, 2)))
                    addr += 4
                tsetEntry.append((flag, changes))
            self._entries.append(tsetEntry)
            updateProgress(pct)

    def writeToProject(self, resourceOpener):
        """Dump the entries to ``map_changes.yml`` with hex event flags."""
        out = dict()
        for i, entry in enumerate(self._entries):
            entryOut = []
            for (flag, changes) in entry:
                changeOut = {"Event Flag": flag}
                changeOut["Changes"] = changes
                entryOut.append(changeOut)
            # Tilesets with no changes are written as explicit nulls.
            out[i] = entryOut if entryOut else None
        updateProgress(25)
        with resourceOpener("map_changes", "yml") as f:
            s = yaml.dump(out, Dumper=yaml.CSafeDumper)
            # Rewrite decimal flag values as hex for readability.  The raw
            # string avoids the invalid "\d" escape (SyntaxWarning on
            # Python >= 3.12); the slice skips the 12-char "Event Flag: ".
            s = sub(r"Event Flag: (\d+)",
                lambda m: "Event Flag: " + hex(int(m.group(0)[12:])), s)
            f.write(s)
        updateProgress(25)

    def readFromProject(self, resourceOpener):
        """Load entries from ``map_changes.yml`` into ``self._entries``."""
        with resourceOpener("map_changes", "yml") as f:
            # Renamed from ``input`` to avoid shadowing the builtin.
            doc = yaml.load(f, Loader=yaml.CSafeLoader)
            for mtset in doc:
                entry = []
                entryIn = doc[mtset]
                if entryIn is not None:
                    for csetIn in entryIn:
                        entry.append((csetIn["Event Flag"],
                            csetIn["Changes"]))
                self._entries.append(entry)
                updateProgress(50.0 / 20)

    def writeToRom(self, rom):
        """Serialize the entries back into free ROM space and fix pointers."""
        self._ptrTbl.clear(20)
        # Size: 4 bytes per (flag, count) header, 4 per change, plus a
        # 2-byte zero terminator per tileset.
        blockSize = 0
        for entry in self._entries:
            for (flag, changes) in entry:  # renamed from ``set`` (builtin)
                blockSize += 4 + 4 * len(changes)
            blockSize += 2
        if blockSize > 0xffff:
            raise RuntimeError("Too many map changes")
        loc = rom.getFreeLoc(blockSize)
        rom[self._PTR_BANK_LOC] = (loc >> 16) + 0xc0
        for i, entry in enumerate(self._entries):
            self._ptrTbl[i, 0].setVal(loc & 0xffff)
            for (flag, changes) in entry:
                rom.writeMulti(loc, flag, 2)
                rom.writeMulti(loc + 2, len(changes), 2)
                loc += 4
                for (before, after) in changes:
                    rom.writeMulti(loc, before, 2)
                    rom.writeMulti(loc + 2, after, 2)
                    loc += 4
            # Zero flag terminates this tileset's list.
            rom[loc] = 0
            rom[loc + 1] = 0
            loc += 2
            updateProgress(45.0 / 20)
        ptrTblLoc = self._ptrTbl.writeToFree(rom)
        rom.writeMulti(self._PTR_LOC, EbModule.toSnesAddr(ptrTblLoc), 3)
        updateProgress(5)
| [
"arewereally@gmail.com"
] | arewereally@gmail.com |
94b1fde9f448d75e37bbbfe1ee2754a031bc206b | daebdc3a9c236c82f1be0ec480f19ebc29f4a23c | /pdfcrawler/spiders/llnl.py | 4131776eba62022a3e3a673288f52308ef04287a | [] | no_license | jwendyr/pdfcrawler | 8c1f3e51e139f6131eb41eb71fe609388dac0874 | 6725bef4cd7ee2cd83f9f7e63446a1c9294af1a2 | refs/heads/master | 2020-03-13T10:34:04.884461 | 2018-04-26T02:20:37 | 2018-04-26T02:20:37 | 131,085,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | # -*- coding: utf-8 -*-
import scrapy
import re
import sys
import json
import os
#import urlparse2
from scrapy.http import Request
from scrapy.conf import settings
class LlnlSpider(scrapy.Spider):
    """Crawls the seed publication pages and downloads any linked PDFs.

    Links ending in ``.pdf`` are fetched and saved to disk; every other
    link is followed and parsed recursively.
    """
    name = 'llnl'
    # allowed_domains = ['github.com', 'arxiv.org', 'cv-foundation.org']
    start_urls = ['https://www.researchgate.net/publication/247949042_Large-Scale_Synthesis_of_Uniform_Silver_Nanowires_Through_a_Soft_Self-Seeding_Polyol_Process',
    'http://pubs.acs.org/doi/abs/10.1021/nl048912c',
    'http://pubs.acs.org/doi/abs/10.1021/nn400414h',
    'http://pubs.acs.org/doi/full/10.1021/acs.nanolett.5b02582',
    'http://onlinelibrary.wiley.com/doi/10.1002/cjoc.201400518/abstract',
    'http://pubs.acs.org/doi/abs/10.1021/cr100275d',
    'http://onlinelibrary.wiley.com/doi/10.1002/anie.201100087/abstract',
    'http://pubs.acs.org/doi/abs/10.1021/acs.jpclett.5b02123',
    'https://www.researchgate.net/publication/230739689_Defining_Rules_for_the_Shape_Evolution_of_Gold_Nanoparticles',
    'http://pubs.acs.org/doi/abs/10.1021/ac0702084',
    'http://pubs.rsc.org/en/Content/ArticleLanding/2012/RA/c2ra21224b#!di-vAbstract',
    'http://www.mdpi.com/1996-1944/3/9/4626',
    'http://pubs.acs.org/doi/abs/10.1021/la050220w']

    def __init__(self):
        # Limit recursive crawling depth from the seed pages.
        settings.overrides['DEPTH_LIMIT'] = 2

    def parse(self, response):
        """Follow every anchor: PDFs go to save_pdf, pages back to parse."""
        for link in response.xpath('//a/@href').extract():
            handler = self.save_pdf if link.endswith('.pdf') else self.parse
            yield Request(url=response.urljoin(link), callback=handler)

    def save_pdf(self, response):
        """Save pdf files"""
        filename = response.url.split('/')[-1]
        self.log('.pdf file found')
        self.logger.info('Saving PDF %s', filename)
        with open(filename, 'wb') as out:
            out.write(response.body)
| [
"jwendyr@yahoo.com"
] | jwendyr@yahoo.com |
68e40e88264e8b1809d47877f41a6d0cb8c43f90 | 4f837a0ebab53ef8a438268c38c7f9790f50c3b2 | /flask_app.py | 64a116124caf089fc3b534d66069a06dda19962a | [] | no_license | RomanRodionov/GeoFinder | a000e062678ce4b7d9f235d3e52c4f090dd710f0 | aeb86980e4a2cc8e631183e497c77f7d576372b0 | refs/heads/master | 2020-05-04T22:06:03.310759 | 2019-05-08T14:46:10 | 2019-05-08T14:46:10 | 179,499,509 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,497 | py | from flask import Flask, request
import requests
import logging
import json
from geo import is_address, find_coords, find_object, get_image_id, path
from random import choice
app = Flask(__name__)
# Log all requests/responses to app.log for debugging skill interactions.
logging.basicConfig(level=logging.INFO, filename='app.log', format='%(asctime)s %(levelname)s %(name)s %(message)s')
# Per-user dialog state keyed by Yandex user_id; lives for the process lifetime.
sessionStorage = {}
# Load Dialogs API credentials from token.json.
file = open(path('token.json'), 'r', encoding="utf-8")
tokens = json.loads(file.read())
file.close()
# Load the list of known municipalities used for free-form city matching.
file = open(path('municipals.json'), 'r', encoding="utf-8-sig")
data = json.loads(file.read())
file.close()
municipals = []
for city in data:
    municipals.append(city)
token = tokens["token"]
skill_id = tokens["skill_id"]
search_api_key = tokens["search_api_key"]
del tokens  # drop the raw credentials dict once values are extracted
def delete_image(id):
    """Delete a previously uploaded image from Yandex.Dialogs skill storage.

    id: image identifier returned by the Dialogs image upload API.
    (Parameter name shadows the ``id`` builtin; kept for backward
    compatibility with existing callers.)
    """
    url = f'https://dialogs.yandex.net/api/v1/skills/{skill_id}/images/' + str(id)
    # The DELETE response body is unused; the previous ``.json()`` call on it
    # was discarded and could raise ValueError on an empty/non-JSON body.
    requests.delete(url, headers={'Authorization': f'OAuth {token}'})
@app.route('/post', methods=['POST'])
def main():
    """Webhook endpoint for Yandex.Dialogs: builds and returns the reply."""
    payload = request.json
    logging.info('Request: %r', payload)
    response = {
        'session': payload['session'],
        'version': payload['version'],
        'response': {'end_session': False},
    }
    handle_dialog(response, payload)
    logging.info('Request: %r', response)
    return json.dumps(response)
def handle_dialog(res, req):
    """Core dialog state machine of the skill.

    Mutates ``res['response']`` in place based on the utterance in ``req``
    and the per-user state kept in ``sessionStorage``.  Flow: greet a new
    session -> learn city -> learn address -> answer search queries, with
    dedicated branches for the suggest buttons (help, change city/address,
    show on map, show another result, site, contacts).
    """
    user_id = req['session']['user_id']
    if req['session']['new']:
        # Brand-new session: greet the user and create empty per-user state.
        res['response'][
            'text'] = 'Привет! Я могу найти ближайшую интересующую тебя организацию, например,' \
                      ' магазин, аптеку или кинотеатр, и показать её на карте! Мне нужно знать твой город.'
        sessionStorage[user_id] = {
            'city': None,         # confirmed city name
            'coords': None,       # geocoded coordinates of the user's address
            'object_name': None,  # last search query text
            'result': None,       # info dict of the last found organization
            'image_id': None,     # Dialogs image id of the rendered map, if uploaded
            'point': 0,
            'ignore': 0,          # how many top results to skip ("show another result")
            'buttons': {},        # suggest buttons currently offered
            'contact': None       # contact details of the last result
        }
        return
    if req['request']['original_utterance'].lower() in ['помощь', 'помоги', 'что ты умеешь', 'что ты умеешь?']:
        # Help request: serve the canned help text from dialogs.json.
        file = open(path('dialogs.json'), 'r', encoding="utf-8")
        text = json.loads(file.read())['help']
        file.close()
        res['response']['text'] = text
    elif sessionStorage[user_id]['city'] is None:
        # City unknown yet: try to extract one from this utterance.
        city = get_city(req)
        if not city:
            res['response']['text'] = 'Я не расслышала город. Можешь повторить?'
        else:
            sessionStorage[user_id]['city'] = city
            if not sessionStorage[user_id]['coords']:
                res['response']['text'] = f'Теперь мне нужно знать твой адрес'
            else:
                res['response']['text'] = f'Что надо найти?'
                # Location fully known: offer the "change" suggest buttons.
                sessionStorage[user_id]['buttons']['change_address'] = {
                    'title': 'Изменить адрес',
                    'hide': True
                }
                sessionStorage[user_id]['buttons']['change_city'] = {
                    'title': 'Изменить город',
                    'hide': True
                }
    elif sessionStorage[user_id]['coords'] is None:
        # Address unknown: try to geocode one from this utterance.
        address = get_address(sessionStorage[user_id]['city'], req)
        if not address:
            res['response']['text'] = 'Мне кажется, адрес какой-то неправильный. Можешь повторить?'
        else:
            sessionStorage[user_id]['coords'] = address
            res['response']['text'] = f'Отлично, теперь я могу тебе помочь. Что надо найти поблизости?'
            sessionStorage[user_id]['buttons']['change_address'] = {
                'title': 'Изменить адрес',
                'hide': True
            }
            sessionStorage[user_id]['buttons']['change_city'] = {
                'title': 'Изменить город',
                'hide': True
            }
    elif req['request']['original_utterance'].lower() == 'изменить город':
        # Reset both city and address; drop every result-related button.
        sessionStorage[user_id]['buttons'].pop('change_address', None)
        sessionStorage[user_id]['buttons'].pop('show_map', None)
        sessionStorage[user_id]['buttons'].pop('skip', None)
        sessionStorage[user_id]['buttons'].pop('site', None)
        sessionStorage[user_id]['buttons'].pop('contact', None)
        if sessionStorage[user_id]['image_id']:
            # Free the previously uploaded map image in Dialogs storage.
            delete_image(sessionStorage[user_id]['image_id'])
        sessionStorage[user_id]['buttons'].pop('change_city', None)
        sessionStorage[user_id]['city'] = None
        sessionStorage[user_id]['coords'] = None
        res['response']['text'] = 'Хорошо, где же ты теперь?'
    elif req['request']['original_utterance'].lower() == 'изменить адрес':
        # Reset only the address; the city is kept.
        sessionStorage[user_id]['buttons'].pop('change_city', None)
        sessionStorage[user_id]['buttons'].pop('show_map', None)
        sessionStorage[user_id]['buttons'].pop('skip', None)
        sessionStorage[user_id]['buttons'].pop('site', None)
        sessionStorage[user_id]['buttons'].pop('contact', None)
        if sessionStorage[user_id]['image_id']:
            delete_image(sessionStorage[user_id]['image_id'])
        sessionStorage[user_id]['buttons'].pop('change_address', None)
        sessionStorage[user_id]['coords'] = None
        res['response']['text'] = 'Хорошо, где же ты теперь?'
    elif sessionStorage[user_id]['result'] and req['request']['original_utterance'].lower() == 'сайт организации':
        # The "site" button is a URL button handled client-side; just acknowledge.
        sessionStorage[user_id]['buttons'].pop('site', None)
        res['response']['text'] = choice(['Ок', 'Хорошо', 'Ладно', 'Окей'])
    elif sessionStorage[user_id]['result'] and req['request']['original_utterance'].lower() == 'контактные данные':
        sessionStorage[user_id]['buttons'].pop('contact', None)
        res['response']['text'] = sessionStorage[user_id]['contact']
    elif sessionStorage[user_id]['result'] and req['request']['original_utterance'].lower() == 'показать на карте':
        sessionStorage[user_id]['buttons'].pop('show_map', None)
        object_name = sessionStorage[user_id]['object_name']
        coords = sessionStorage[user_id]['result']['coords']
        coords_hrf = sessionStorage[user_id]['result']['coords_hrf']
        if sessionStorage[user_id]['image_id'] is None:
            # Render and upload the static map image only once per result.
            sessionStorage[user_id]['image_id'] = get_image_id(sessionStorage[user_id]['result'],
                                                               sessionStorage[user_id]['coords'])
        res['response']['text'] = f'Объект "{object_name}" на карте'
        # BigImage card with a deep link into Yandex.Maps.
        res['response']['card'] = {}
        res['response']['card']['type'] = 'BigImage'
        res['response']['card']['title'] = f'Результат по запросу "{object_name}"'
        res['response']['card']['image_id'] = sessionStorage[user_id]['image_id']
        res['response']['card']['button'] = {}
        res['response']['card']['button']['text'] = 'Найти в Яндекс.Картах'
        res['response']['card']['button'][
            'url'] = f'https://yandex.ru/maps/?clid=9403&ll={str(coords[0])},' \
                     f'{str(coords[1])}&z=14,8&pt={str(coords_hrf)},pm2bm'
    elif sessionStorage[user_id]['result'] and req['request'][
            'original_utterance'].lower() == 'показать другой результат':
        if sessionStorage[user_id]['image_id']:
            delete_image(sessionStorage[user_id]['image_id'])
        # Skip one more of the top results on the next search.
        sessionStorage[user_id]['ignore'] += 1
        object_name = sessionStorage[user_id]['object_name']
        info = find_object(object_name, sessionStorage[user_id]['coords'], sessionStorage[user_id]['ignore'])
        sessionStorage[user_id]['image_id'] = None
        if not info:
            res['response'][
                'text'] = f'Больше объектов "{object_name}" не найдено. Попробуй изменить запрос или адрес.'
            sessionStorage[user_id]['buttons'].pop('show_map', None)
            sessionStorage[user_id]['buttons'].pop('skip', None)
            sessionStorage[user_id]['buttons'].pop('contact', None)
            # NOTE(review): this pops 'url', but the site button is stored
            # under the key 'site' -- looks like a stale key; confirm.
            sessionStorage[user_id]['buttons'].pop('url', None)
            sessionStorage[user_id]['ignore'] = 0
        else:
            text = f'название: {info["name"]}; адрес: {info["address"]};' \
                   f' время работы: {info["hours"]}; расстояние до объекта: {info["distance"]}'
            res['response']['text'] = f'Объект "{object_name}" найден: ' + text
            sessionStorage[user_id]['buttons']['show_map'] = {
                'title': 'Показать на карте',
                'hide': True
            }
            if info['url']:
                sessionStorage[user_id]['buttons']['site'] = {
                    'title': 'Сайт организации',
                    "url": info['url'],
                    'hide': True
                }
            if info['contact']:
                sessionStorage[user_id]['contact'] = info['contact']
                sessionStorage[user_id]['buttons']['contact'] = {
                    'title': 'Контактные данные',
                    'hide': True
                }
            sessionStorage[user_id]['buttons']['skip'] = {
                'title': 'Показать другой результат',
                'hide': True
            }
            sessionStorage[user_id]['result'] = info
    else:
        # Any other utterance is treated as a fresh search query.
        sessionStorage[user_id]['buttons'].pop('show_map', None)
        sessionStorage[user_id]['buttons'].pop('skip', None)
        sessionStorage[user_id]['buttons'].pop('site', None)
        sessionStorage[user_id]['buttons'].pop('contact', None)
        if sessionStorage[user_id]['image_id']:
            delete_image(sessionStorage[user_id]['image_id'])
        object_name = req['request']['original_utterance']
        sessionStorage[user_id]['object_name'] = object_name
        sessionStorage[user_id]['ignore'] = 0
        info = find_object(object_name, sessionStorage[user_id]['coords'])
        sessionStorage[user_id]['image_id'] = None
        if not info:
            res['response'][
                'text'] = f'К сожалению, объект "{object_name}" не найден. Попробуй изменить запрос или адрес.'
            sessionStorage[user_id]['buttons'].pop('show_map', None)
        else:
            text = f'название: {info["name"]}; адрес: {info["address"]}; время работы:' \
                   f' {info["hours"]}; расстояние до объекта: {info["distance"]}'
            res['response']['text'] = f'Объект "{object_name}" найден: ' + text
            sessionStorage[user_id]['buttons']['show_map'] = {
                'title': 'Показать на карте',
                'hide': True
            }
            if info['url']:
                sessionStorage[user_id]['buttons']['site'] = {
                    'title': 'Сайт организации',
                    'url': info['url'],
                    'hide': True
                }
            if info['contact']:
                sessionStorage[user_id]['contact'] = info['contact']
                sessionStorage[user_id]['buttons']['contact'] = {
                    'title': 'Контактные данные',
                    'hide': True
                }
            sessionStorage[user_id]['buttons']['skip'] = {
                'title': 'Показать другой результат',
                'hide': True
            }
            sessionStorage[user_id]['result'] = info
    # Always re-emit the current button set plus a persistent Help button.
    res['response']['buttons'] = list(sessionStorage[user_id]['buttons'].values()) + [{
        'title': 'Помощь',
        'hide': True
    }]
def get_first_name(req):
    """Return the first name from the first YANDEX.FIO entity, if any.

    Returns None when no FIO entity is present or when the matched entity
    carries no first name.
    """
    entities = req['request']['nlu']['entities']
    fio = next((e for e in entities if e['type'] == 'YANDEX.FIO'), None)
    if fio is None:
        return None
    return fio['value'].get('first_name', None)
def get_city(req):
    """Extract a city from the request.

    Prefers the city of the last YANDEX.GEO entity that carries one;
    otherwise falls back to substring-matching the raw utterance against
    the known municipalities.  Returns a falsy value when nothing matches.
    """
    city = False
    # Keep overwriting so the last GEO entity with a city wins.
    for ent in req['request']['nlu']['entities']:
        if ent['type'] == 'YANDEX.GEO' and 'city' in ent['value']:
            city = ent['value']['city']
    if not city:
        utterance = req['request']['original_utterance']
        for municipal in municipals:
            if utterance.lower() in municipal.lower():
                city = utterance
                break
    return city
def get_address(city, req):
    """Build an address string from GEO entities and geocode it.

    Collects street, house number and airport parts from the request's
    YANDEX.GEO entities, prefixes the city, and returns the coordinates
    from find_coords().  Returns False when no address parts are present
    or the combined string does not pass is_address().
    """
    parts = []
    for entity in req['request']['nlu']['entities']:
        if entity['type'] == 'YANDEX.GEO':
            value = entity['value']
            if 'street' in value:
                parts.append(value['street'])
            if 'house_number' in value:
                parts.append(value['house_number'])
            if 'airport' in value:
                parts.append(value['airport'])
    if not parts:
        return False
    # Join with spaces, including one between the city and the first part.
    # Previously the city was concatenated directly onto the street
    # ("city + ' '.join(address)"), producing fused strings like
    # "москватверская 7" that the geocoder may misinterpret.
    address = ' '.join([city] + parts)
    if is_address(address):
        return find_coords(address)
    return False
# Run the Flask development server only when executed directly
# (not when imported by a WSGI host).
if __name__ == '__main__':
    app.run()
| [
"rodionovv22@gmail.com"
] | rodionovv22@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.