index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
16,600 | 8968a15aaa1f8a31caf5108240f3740944def166 | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for code coverage tools."""
import os
import re
import shutil
import subprocess
import sys
import unittest
import coverage_utils
def _RecursiveDirectoryListing(dirpath):
"""Returns a list of relative paths to all files in a given directory."""
result = []
for root, _, files in os.walk(dirpath):
for f in files:
result.append(os.path.relpath(os.path.join(root, f), dirpath))
return result
def _ReadFile(filepath):
"""Returns contents of a given file."""
with open(filepath) as f:
return f.read()
class CoverageTest(unittest.TestCase):
  """End-to-end tests for the Chromium code coverage tools.

  setUp() produces an instrumented GN build of crypto_unittests and
  libpng_read_fuzzer; the single test method then exercises several report
  generation workflows and cross-checks that they produce equivalent output.
  """

  def setUp(self):
    """Create the instrumented build shared by all testcases."""
    self.maxDiff = 1000
    self.COVERAGE_TOOLS_DIR = os.path.abspath(os.path.dirname(__file__))
    self.COVERAGE_SCRIPT = os.path.join(self.COVERAGE_TOOLS_DIR, 'coverage.py')
    self.COVERAGE_UTILS = os.path.join(self.COVERAGE_TOOLS_DIR,
                                       'coverage_utils.py')
    self.CHROMIUM_SRC_DIR = os.path.dirname(
        os.path.dirname(self.COVERAGE_TOOLS_DIR))
    self.BUILD_DIR = os.path.join(self.CHROMIUM_SRC_DIR, 'out',
                                  'code_coverage_tools_test')
    self.REPORT_DIR_1 = os.path.join(self.BUILD_DIR, 'report1')
    self.REPORT_DIR_1_NO_COMPONENTS = os.path.join(self.BUILD_DIR,
                                                   'report1_no_components')
    self.REPORT_DIR_2 = os.path.join(self.BUILD_DIR, 'report2')
    self.REPORT_DIR_3 = os.path.join(self.BUILD_DIR, 'report3')
    self.REPORT_DIR_4 = os.path.join(self.BUILD_DIR, 'report4')
    self.LLVM_COV = os.path.join(self.CHROMIUM_SRC_DIR, 'third_party',
                                 'llvm-build', 'Release+Asserts', 'bin',
                                 'llvm-cov')
    self.PYTHON = 'python3'
    self.PLATFORM = coverage_utils.GetHostPlatform()
    if self.PLATFORM == 'win32':
      self.LLVM_COV += '.exe'
      self.PYTHON += '.exe'
    # Even though 'is_component_build=false' is recommended, we intentionally
    # use 'is_component_build=true' to test handling of shared libraries.
    self.GN_ARGS = ('use_clang_coverage=true '
                    'dcheck_always_on=true '
                    'ffmpeg_branding=\"ChromeOS\" '
                    'is_component_build=true '
                    'is_debug=false '
                    'proprietary_codecs=true '
                    'use_libfuzzer=true')
    # Start from a clean build directory so stale artifacts cannot leak in.
    shutil.rmtree(self.BUILD_DIR, ignore_errors=True)
    gn_gen_cmd = ['gn', 'gen', self.BUILD_DIR, '--args=%s' % self.GN_ARGS]
    self.run_cmd(gn_gen_cmd)
    build_cmd = [
        'autoninja', '-C', self.BUILD_DIR, 'crypto_unittests',
        'libpng_read_fuzzer'
    ]
    self.run_cmd(build_cmd)

  def tearDown(self):
    """Delete the build directory and everything generated into it."""
    shutil.rmtree(self.BUILD_DIR, ignore_errors=True)

  def run_cmd(self, cmd):
    """Run *cmd* from the Chromium src dir; return its stdout, raise on failure."""
    return subprocess.check_output(cmd, cwd=self.CHROMIUM_SRC_DIR)

  def verify_component_view(self, filepath):
    """Asserts that a given component view looks correct."""
    # There must be several Blink and Internals components.
    with open(filepath) as f:
      data = f.read()
    counts = data.count('Blink') + data.count('Internals')
    self.assertGreater(counts, 5)

  def verify_directory_view(self, filepath):
    """Asserts that a given directory view looks correct."""
    # Directory view page does a redirect to another page, extract its URL.
    with open(filepath) as f:
      data = f.read()
    url = re.search(r'.*refresh.*url=([a-zA-Z0-9_\-\/.]+).*', data).group(1)
    directory_view_path = os.path.join(os.path.dirname(filepath), url)
    # There must be at least 'crypto' and 'third_party' directories.
    with open(directory_view_path) as f:
      data = f.read()
    self.assertTrue('crypto' in data and 'third_party' in data)

  def verify_file_view(self, filepath):
    """Asserts that a given file view looks correct."""
    # There must be hundreds of '.*crypto.*' files and 10+ of '.*libpng.*'.
    with open(filepath) as f:
      data = f.read()
    self.assertGreater(data.count('crypto'), 100)
    self.assertGreater(data.count('libpng'), 10)

  def verify_lcov_file(self, filepath):
    """Asserts that a given lcov file looks correct."""
    with open(filepath) as f:
      data = f.read()
    self.assertGreater(data.count('SF:'), 100)
    self.assertGreater(data.count('crypto'), 100)
    self.assertGreater(data.count('libpng'), 10)

  def test_different_workflows_and_cross_check_the_results(self):
    """Test a few different workflows and assert that the results are the same
    and look legit.
    """
    # Testcase 1. End-to-end report generation using coverage.py script. This is
    # the workflow of a regular user.
    cmd = [
        self.COVERAGE_SCRIPT,
        'crypto_unittests',
        'libpng_read_fuzzer',
        '-v',
        '-b',
        self.BUILD_DIR,
        '-o',
        self.REPORT_DIR_1,
        # Keep each '-c' flag and its command as separate list elements. The
        # original list was missing this comma, so implicit string
        # concatenation fused the flag with its value into one argument.
        '-c',
        '%s/crypto_unittests' % self.BUILD_DIR,
        '-c',
        '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,
    ]
    self.run_cmd(cmd)
    output_dir = os.path.join(self.REPORT_DIR_1, self.PLATFORM)
    self.verify_component_view(
        os.path.join(output_dir, 'component_view_index.html'))
    self.verify_directory_view(
        os.path.join(output_dir, 'directory_view_index.html'))
    self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))

    # Also try generating a report without components view. Useful for cross
    # checking with the report produced in the testcase #3.
    cmd = [
        self.COVERAGE_SCRIPT,
        'crypto_unittests',
        'libpng_read_fuzzer',
        '-v',
        '-b',
        self.BUILD_DIR,
        '-o',
        self.REPORT_DIR_1_NO_COMPONENTS,
        '-c',
        '%s/crypto_unittests' % self.BUILD_DIR,
        '-c',
        '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,
        '--no-component-view',
    ]
    self.run_cmd(cmd)
    output_dir = os.path.join(self.REPORT_DIR_1_NO_COMPONENTS, self.PLATFORM)
    self.verify_directory_view(
        os.path.join(output_dir, 'directory_view_index.html'))
    self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))
    self.assertFalse(
        os.path.exists(os.path.join(output_dir, 'component_view_index.html')))

    # Testcase #2. Run the script for post processing in Chromium tree. This is
    # the workflow of the code coverage bots.
    instr_profile_path = os.path.join(self.REPORT_DIR_1, self.PLATFORM,
                                      'coverage.profdata')
    cmd = [
        self.COVERAGE_SCRIPT,
        'crypto_unittests',
        'libpng_read_fuzzer',
        '-v',
        '-b',
        self.BUILD_DIR,
        '-p',
        instr_profile_path,
        '-o',
        self.REPORT_DIR_2,
    ]
    self.run_cmd(cmd)
    # Verify that the output dirs are the same except of the expected diff.
    report_1_listing = set(_RecursiveDirectoryListing(self.REPORT_DIR_1))
    report_2_listing = set(_RecursiveDirectoryListing(self.REPORT_DIR_2))
    logs_subdir = os.path.join(self.PLATFORM, 'logs')
    self.assertEqual(
        set([
            os.path.join(self.PLATFORM, 'coverage.profdata'),
            os.path.join(logs_subdir, 'crypto_unittests_output.log'),
            os.path.join(logs_subdir, 'libpng_read_fuzzer_output.log'),
        ]), report_1_listing - report_2_listing)
    output_dir = os.path.join(self.REPORT_DIR_2, self.PLATFORM)
    self.verify_component_view(
        os.path.join(output_dir, 'component_view_index.html'))
    self.verify_directory_view(
        os.path.join(output_dir, 'directory_view_index.html'))
    self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))
    # Verify that the file view pages are binary equal.
    report_1_file_view_data = _ReadFile(
        os.path.join(self.REPORT_DIR_1, self.PLATFORM, 'file_view_index.html'))
    report_2_file_view_data = _ReadFile(
        os.path.join(self.REPORT_DIR_2, self.PLATFORM, 'file_view_index.html'))
    self.assertEqual(report_1_file_view_data, report_2_file_view_data)

    # Testcase #3, run coverage_utils.py on manually produced report and summary
    # file. This is the workflow of OSS-Fuzz code coverage job.
    objects = [
        '-object=%s' % os.path.join(self.BUILD_DIR, 'crypto_unittests'),
        '-object=%s' % os.path.join(self.BUILD_DIR, 'libpng_read_fuzzer'),
    ]
    cmd = [
        self.PYTHON,
        self.COVERAGE_UTILS,
        '-v',
        'shared_libs',
        '-build-dir=%s' % self.BUILD_DIR,
    ] + objects
    shared_libraries = self.run_cmd(cmd)
    objects.extend(shared_libraries.split())
    instr_profile_path = os.path.join(self.REPORT_DIR_1_NO_COMPONENTS,
                                      self.PLATFORM, 'coverage.profdata')
    cmd = [
        self.LLVM_COV,
        'show',
        '-format=html',
        '-output-dir=%s' % self.REPORT_DIR_3,
        '-instr-profile=%s' % instr_profile_path,
    ] + objects
    if self.PLATFORM in ['linux', 'mac']:
      cmd.extend(['-Xdemangler', 'c++filt', '-Xdemangler', '-n'])
    self.run_cmd(cmd)
    cmd = [
        self.LLVM_COV,
        'export',
        '-summary-only',
        '-instr-profile=%s' % instr_profile_path,
    ] + objects
    summary_output = self.run_cmd(cmd)
    summary_path = os.path.join(self.REPORT_DIR_3, 'summary.json')
    # run_cmd returns bytes, hence the binary mode.
    with open(summary_path, 'wb') as f:
      f.write(summary_output)
    cmd = [
        self.PYTHON,
        self.COVERAGE_UTILS,
        '-v',
        'post_process',
        '-src-root-dir=%s' % self.CHROMIUM_SRC_DIR,
        '-summary-file=%s' % summary_path,
        '-output-dir=%s' % self.REPORT_DIR_3,
    ]
    self.run_cmd(cmd)
    output_dir = os.path.join(self.REPORT_DIR_3, self.PLATFORM)
    self.verify_directory_view(
        os.path.join(output_dir, 'directory_view_index.html'))
    self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))
    self.assertFalse(
        os.path.exists(os.path.join(output_dir, 'component_view_index.html')))
    # Verify that the file view pages are binary equal.
    report_1_file_view_data_no_component = _ReadFile(
        os.path.join(self.REPORT_DIR_1_NO_COMPONENTS, self.PLATFORM,
                     'file_view_index.html'))
    report_3_file_view_data = _ReadFile(
        os.path.join(self.REPORT_DIR_3, self.PLATFORM, 'file_view_index.html'))
    self.assertEqual(report_1_file_view_data_no_component,
                     report_3_file_view_data)

    # Testcase 4. Export coverage data in lcov format using coverage.py script.
    cmd = [
        self.COVERAGE_SCRIPT,
        'crypto_unittests',
        'libpng_read_fuzzer',
        '--format',
        'lcov',
        '-v',
        '-b',
        self.BUILD_DIR,
        '-o',
        self.REPORT_DIR_4,
        '-c',
        '%s/crypto_unittests' % self.BUILD_DIR,
        '-c',
        '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,
    ]
    self.run_cmd(cmd)
    output_dir = os.path.join(self.REPORT_DIR_4, self.PLATFORM)
    self.verify_lcov_file(os.path.join(output_dir, 'coverage.lcov'))
# Allow running this file directly as a standalone test suite.
if __name__ == '__main__':
  unittest.main()
|
16,601 | d68f1dc1a7b8f16102579df40bc60c44fe2a0e7c | import pandas as pd
# Load the Fandango movie-score comparison dataset into a DataFrame.
fandango = pd.read_csv('fandango_score_comparison.csv')
# Use the head method to return the first two rows in the dataframe, then display them with the print function.
print('\nPeak at 1st 2 rows using head\n')
print(fandango.head(2))
# Use the index attribute to return the index of the dataframe, and display it with the print function.
# .values yields the underlying numpy array of index labels.
print('\nHere\'s fandano\'s Indexes\n')
print(fandango.index.values)
# tolist() converts the pandas Index object into a plain Python list.
print('\nHere\'s fandango\'s Indexes as a list\n')
print(fandango.index.tolist())
16,602 | 579638fac54f2e891b8ea137f4f34f37b00407a5 | from django.contrib.sitemaps import Sitemap
from guardian.shortcuts import get_anonymous_user
from .models import Case
class CaseSitemap(Sitemap):
    """Sitemap listing every Case visible to the anonymous (public) user."""

    def items(self):
        """Return the queryset of publicly visible cases."""
        anonymous = get_anonymous_user()
        return Case.objects.all().for_user(anonymous)
|
16,603 | 8826b367a92d9e3eef24a9e8a6bea8738747eb4a | # -*- coding: utf-8 -*-
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
def redirect(request, mode=1):
    """Redirect the user after an inscription step.

    If the session stored an explicit post-inscription target under
    'redirect_inscription', consume it (one-shot) and redirect there.
    Otherwise fall back to a page chosen by *mode*:
    1 (default) -> personal c2v page, 2 -> job list.

    Always returns an HttpResponseRedirect; the original silently
    returned None for any other *mode*.
    """
    if 'redirect_inscription' in request.session:
        try:
            redirect_inscription = request.session['redirect_inscription']
            del request.session['redirect_inscription']
            return HttpResponseRedirect(redirect_inscription)
        except KeyError:
            # Entry vanished between the membership check and the read
            # (e.g. a concurrent request already consumed it); the original
            # used a bare except here, which also hid real errors.
            return HttpResponseRedirect(reverse('c2v:myc2v'))
    if mode == 2:
        return HttpResponseRedirect(reverse('members:list-job'))
    # mode == 1 and any unexpected value fall back to the default page so
    # the view always returns a response.
    return HttpResponseRedirect(reverse('c2v:myc2v'))
|
16,604 | 70a0c111bc56ff0955c7b489b7c300d043b19a1a | import sys
# Make the nose egg (installed outside Maya's bundled Python) importable.
pp = "C:\\Python27\\lib\\site-packages\\nose-1.3.7-py2.7.egg"
sys.path.append(pp)
# Initialize Maya in standalone (batch) mode so scene commands work headlessly.
import maya.standalone
maya.standalone.initialize(name='python')
# Discover and run the test suite with nose.
import nose
nose.run()
|
16,605 | 9193ad1f278effbc6c75a8265552820096fe1b01 | # -*- coding: utf-8 -*-
import os
# flask
# NOTE(review): the hard-coded fallback key is not safe for production --
# confirm CG_SECRET_KEY is always set in deployed environments.
SECRET_KEY = os.environ.get("CG_SECRET_KEY") or "thisIsNotASafeKey"
TEMPLATES_AUTO_RELOAD = True
# sqlalchemy
# Direct indexing raises KeyError at import time if unset: fail-fast for
# mandatory settings.
SQLALCHEMY_DATABASE_URI = os.environ["CG_SQL_DATABASE_URI"]
# Recycle pooled DB connections every 2 hours to avoid stale sockets.
SQLALCHEMY_POOL_RECYCLE = 7200
SQLALCHEMY_TRACK_MODIFICATIONS = "FLASK_DEBUG" in os.environ
# server
# Admin UI is on in debug mode or when explicitly enabled via env var.
CG_ENABLE_ADMIN = ("FLASK_DEBUG" in os.environ) or (os.environ.get("CG_ENABLE_ADMIN") == "1")
# lims
LIMS_HOST = os.environ["LIMS_HOST"]
LIMS_USERNAME = os.environ["LIMS_USERNAME"]
LIMS_PASSWORD = os.environ["LIMS_PASSWORD"]
# support/ticketing settings are optional -- None when unset.
OSTICKET_API_KEY = os.environ.get("OSTICKET_API_KEY")
OSTICKET_DOMAIN = os.environ.get("OSTICKET_DOMAIN")
SUPPORT_SYSTEM_EMAIL = os.environ.get("SUPPORT_SYSTEM_EMAIL")
EMAIL_URI = os.environ.get("EMAIL_URI")
# oauth
GOOGLE_OAUTH_CLIENT_ID = os.environ["GOOGLE_OAUTH_CLIENT_ID"]
GOOGLE_OAUTH_CLIENT_SECRET = os.environ["GOOGLE_OAUTH_CLIENT_SECRET"]
# invoice
TOTAL_PRICE_THRESHOLD = 750000
|
16,606 | 39cc3b60e5b5aef5d5a91f59d5cc347e367a290d | import time
t_0 = time.time()
print('a')
def rgb_tiles_ii(x, len_colour):
    """Iteratively count tilings of a 1 x *x* row using unit tiles and
    tiles of length *len_colour*.

    Bug fix: the original ignored ``len_colour`` entirely and always summed
    the previous four terms, so calls with 2, 3 and 4 returned the same
    (wrong) sequence.  The correct recurrence, matching the recursive
    ``rgb_tiles(x, 0, len_colour)``, is:

        f(0) = 1
        f(n) = f(n-1) + f(n-len_colour)   (second term only when n >= len_colour)

    Returns:
        A one-element list ``[f(x)]`` -- the list shape is kept for backward
        compatibility with the original, which returned a slice.
    """
    f = [1]  # f(0): the single empty tiling
    for n in range(1, x + 1):
        count = f[n - 1]  # place a unit tile last
        if n >= len_colour:
            count += f[n - len_colour]  # place a coloured tile last
        f.append(count)
    return f[-1:]
def rgb_tiles(x, i, len_colour):
    """Recursively count tilings from position *i* to *x* using steps of
    size 1 and *len_colour* (each distinct ordering counts once)."""
    if i > x:
        # Overshot the row: not a valid tiling.
        return 0
    if i == x:
        # Filled exactly: one complete tiling.
        return 1
    return (rgb_tiles(x, i + len_colour, len_colour)
            + rgb_tiles(x, i + 1, len_colour))
test_n = 50
sum_ii = 0  # NOTE(review): unused accumulator, presumably left over
sum_i = 0   # from an earlier cross-checking version -- confirm before removing.
# Iterative counts for tile lengths 2, 3 and 4.
print(rgb_tiles_ii(test_n, 2))
print(rgb_tiles_ii(test_n, 3))
print(rgb_tiles_ii(test_n, 4))
# Recursive cross-check, disabled because it is exponential in test_n.
"""print(rgb_tiles(test_n, 0, 2) - 1)
print(rgb_tiles(test_n, 0, 3) - 1)
print(rgb_tiles(test_n, 0, 4) - 1)"""
# Elapsed wall-clock time since t_0 was recorded at script start.
print(time.time() - t_0)
# a generator that yields items instead of returning a list
"""def firstn(n):
num = 0
while num < n:
yield num
num += 1
sum_of_first_n = sum(firstn(1000000))""" |
16,607 | cff81b7b2ec761871ae8f6d4ff7ad5ad900a6fc0 | import requests
import json
import time
import random
# Target service to receive the generated records.
url = "http://119.3.209.144:6200"
# Post 10 randomly generated student records and time each request.
for i in range(0,10):
    # Pool of name fragments; two are concatenated to form each name.
    names=['Kelly','Addison','Alex','Tommy','Joyce','Andrew','Alonso','Karen','Denny',
    'Kenney','Colin','Warren','Ben','Carl','Charles','Easter','Bill','Glen','Alva',
    'Roger','Solomon','Paul','Randy','Tina','Wesley','Fred','Leon','James','Bruce',
    'Benson','Barry','Amy','Nico','Ekko','Zed','Xin','Yi','Master','Aattrox','Blues','Gloria','Emma',
    'Lucy','Jenney','May','Kate','Sophia','Leon']
    # Randomized student payload; numeric scores are sent as strings.
    postdata = {'name':str(random.choice(names)+random.choice(names)),'age':random.randint(6, 70),'parentName':str(random.choice(names)+random.choice(names)),
    'address':str(random.choice(names)+" St."+ str(random.randint(1,10)))+"Apt.",'parentPhone':str(random.randint(1000000000,9999999999)),'gender':str(random.choice(['male','female'])),
    'read':str(random.randint(0,100)),'sport':str(random.randint(0,100)),'math':str(random.randint(0,100)),'algorithm':'0'}
    print(postdata)
    headers = {
    'Accept': '*/*',
    'Accept-Language': 'zh-CN',
    'Content-Type': 'application/json',
    'Accept-Encoding': 'gzip, deflate',
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)',
    'Pragma': 'no-cache',
    # NOTE(review): len(postdata) is the number of dict keys, not the byte
    # length of the JSON body -- requests computes the real Content-Length
    # itself, so this header is wrong and redundant; confirm and remove.
    'Content-Length': str(len(postdata))
    }
    # Time the round trip of the POST request.
    time1 = time.time()
    req = requests.post(url, data=json.dumps(postdata), headers=headers)
    print(req.status_code)
    print(req.text)
    time2=time.time()
    print(time2-time1)
16,608 | 25c90b3be2cad27bc9b6e3b234a6019faf779d46 | """
Author : A. Emerick
Assoc : Columbia University - American Museum of Natural History
Date : April 2016
Code to extract (most) of the SED from binned OSTAR2002 data
and repack with first column as the frequency, and each subsequent
column a separate OSTAR2002 model. Due to the silliness of the
SED files, this cuts out the last three frequencey bins in every
case, corresponding to very low energy photons (well outside the UV
region we care about)
This should be run on the OSTAR2002 Cloudy-binned models
"""
import numpy as np
import sys # Z/Z_sun
def extract_sed(filepath='./ostar2002_sed/'):
    """
    Extract the OSTAR2002 SEDs for the listed identifier names in a
    format that's a bit easier to handle.

    Parameters
    ----------
    filepath : str
        Path prefix (directory ending in a separator) containing the
        Cloudy-binned 'ostar2002_<id>.ascii' files.  Extracted tables are
        written alongside them as 'ostar2002_<id>_extracted.dat', with
        frequency as the first column and one column per model.

        Bug fix: the original default ('./ostar2002_sed') was missing the
        trailing '/' and produced paths like
        './ostar2002_sedostar2002_p03.ascii', which could never exist.
    """
    # Metallicity identifiers, from 2 Z_sun down to Z = 0 ('m99').
    identifier_names = ['p03',  # 2
                        'p00',  # 1
                        'm03',  # 1/2
                        'm07',  # 1/5
                        'm10',  # 1/10
                        'm15',  # 1/30
                        'm17',  # 1/50
                        'm20',  # 1/100
                        'm30',  # 1/1000
                        'm99']  # -- don't use z = 0

    header_count = 29  # rows of header metadata to skip in each file

    for identifier in identifier_names:
        # Each model occupies 'full_rows' rows of 5 columns in the file; the
        # last row only has 3 columns so it is dropped (very low energy
        # photons, well outside the UV region of interest).  The Z = 0 file
        # ('m99') is shorter than the others.
        if identifier == 'm99':
            full_rows = 1000
        else:
            full_rows = 4000
        rows_per_model = full_rows - 1  # cut out last row since it only has 3 col
        nbins = rows_per_model * 5      # number of frequency bins kept
        nmodel = 69                     # number of OSTAR2002 models per file

        OSTAR_file = filepath + 'ostar2002_' + identifier + '.ascii'
        outname = filepath + 'ostar2002_' + identifier + '_extracted.dat'

        # SED for each star; the first row holds the frequency grid.
        SED_data = np.zeros((nmodel + 1, nbins))

        # load frequency
        data = np.genfromtxt(OSTAR_file, max_rows=rows_per_model,
                             skip_header=header_count)
        SED_data[0] = data.flatten()
        print("extracting data for ostar file", OSTAR_file)

        # loop over every model block in the file
        for i in np.arange(nmodel):
            data = np.genfromtxt(OSTAR_file,
                                 skip_header=(full_rows) * (i + 1) + header_count,
                                 max_rows=(rows_per_model))
            SED_data[i + 1] = data.flatten()

        # write out transposed: frequency first column, one model per column
        np.savetxt(outname, np.transpose(SED_data), fmt="%.5E")

    return
if __name__ == "__main__":
    # Optional CLI argument: directory prefix of the OSTAR2002 ascii files.
    if len(sys.argv) > 1:
        extract_sed(filepath=sys.argv[1])
    else:
        extract_sed()
|
16,609 | f91a665c70848771ddae2030404b38b26f851ec8 | # Highest repeated word (histogram program)
# Histogram of word frequencies: report the most repeated word in a file.
name = input("Enter file: ")
p = open(name, "r")

# Tally how often each lower-cased word appears.
counts = dict()
for line in p:
    for token in line.split():
        token = token.lower()  # normalize case so "The" and "the" match
        counts[token] = counts.get(token, 0) + 1

print("\nHistogram:\n", counts)

# Find the word with the highest count (first one wins on ties).
big_word, big_count = None, None
for candidate, frequency in counts.items():
    if big_count is None or frequency > big_count:
        big_word, big_count = candidate, frequency

print("\nData ===>", '"' + big_word + '"', 'repeated', '"' + str(big_count) + '"', 'time(s)')
|
16,610 | bce4fd832ec222f75f62a770c21b968823dcf6be | from math import *
# Split a decimal number into its integer part and rounded fractional
# percentage, e.g. 3.14 -> "3 14".
n = float(input())
a = n - int(n)  # fractional part; carries binary float representation error
print(a)
a = a * 100
# NOTE(review): round() on the scaled fraction may not match the typed
# decimal digits for inputs that are not exactly representable -- confirm.
print(floor(n), round(a), end=' ')
|
16,611 | c5a9123cf9920ec6c97752f417daf88ae8adb474 | import os
from cm.util import paths
from cm.util import misc
from cm.services import service_states
from cm.services import ServiceRole
from cm.services import ServiceDependency
from cm.services.apps import ApplicationService
import logging
log = logging.getLogger('cloudman')
INVOKE_SUCCESS = "Successfully invoked LWR."
INVOKE_FAILURE = "Error invoking LWR."
DEFAULT_LWR_PORT = 8913
class LwrService(ApplicationService):
    """CloudMan service managing an LWR (Light Weight Runner) server.

    Wraps setup, daemon start/stop and status polling for the LWR
    application installed at ``lwr_home``.
    """

    def __init__(self, app):
        super(LwrService, self).__init__(app)
        self.lwr_home = self.app.path_resolver.lwr_home
        self.lwr_port = DEFAULT_LWR_PORT
        self.name = ServiceRole.to_string(ServiceRole.LWR)
        self.svc_roles = [ServiceRole.LWR]
        self.dependencies = [
            ServiceDependency(self, ServiceRole.SGE),  # Well someday anyway :)
            ServiceDependency(self, ServiceRole.GALAXY_TOOLS)  # Anyway to make this depend on where LWR installed?
        ]

    def __rel_path(self, *args):
        """Join *args* onto the LWR home directory."""
        return os.path.join(self.lwr_home, *args)

    def __ini_path(self):
        """Return the path of LWR's server.ini configuration file."""
        return self.__rel_path("server.ini")

    def _check_lwr_running(self):
        """Return True if something is listening on the LWR port."""
        return self._port_bound(self.lwr_port)

    def start(self):
        """Set up (if necessary) and launch the LWR daemon."""
        self.state = service_states.STARTING
        self.status()
        if not self.state == service_states.RUNNING:
            self._setup()
            started = self._run("--daemon")
            if not started:
                log.warn("Failed to setup or run LWR server.")
                # Bug fix: the original assigned ``self.start = ...`` here,
                # clobbering this method instead of recording the error state.
                self.state = service_states.ERROR

    def _setup(self):
        """Create server.ini from its sample if it does not exist yet."""
        ini_path = self.__ini_path()
        if not os.path.exists(ini_path):
            misc.run("cp '%s.sample' '%s'" % (ini_path, ini_path))
        # TODO: Configure LWR.

    def remove(self, synchronous=False):
        """Stop the LWR daemon and record the resulting service state."""
        log.info("Removing '%s' service" % self.name)
        super(LwrService, self).remove(synchronous)
        self.state = service_states.SHUTTING_DOWN
        log.info("Shutting down LWR service...")
        if self._run("--stop-daemon"):
            self.state = service_states.SHUT_DOWN
            # TODO: Handle log files.
        else:
            log.info("Failed to shutdown down LWR service...")
            self.state = service_states.ERROR

    def _run(self, args):
        """Invoke LWR's run.sh with *args* as the 'galaxy' user."""
        command = '%s - galaxy -c "bash %s/run.sh %s"' % (
            paths.P_SU, self.lwr_home, args)
        return misc.run(command, INVOKE_FAILURE, INVOKE_SUCCESS)

    def status(self):
        """Poll the daemon/port and update ``self.state`` accordingly."""
        if self.state == service_states.SHUTTING_DOWN or \
           self.state == service_states.SHUT_DOWN or \
           self.state == service_states.UNSTARTED or \
           self.state == service_states.WAITING_FOR_USER_ACTION:
            # Terminal/idle states are left untouched.
            pass
        elif self._check_daemon('lwr'):
            if self._check_lwr_running():
                self.state = service_states.RUNNING
        elif self.state != service_states.STARTING:
            # Fixed log message typo ("runnnig" -> "running").
            log.error("LWR error; LWR not running")
            self.state = service_states.ERROR
|
16,612 | d09052c22b7598db0ebaf1db0716e626a6ec5a91 | def func(param, **kwargs):
pass
func(param=1, param2=1) |
16,613 | 95ab2802615bc47447294d52d4333bcec170872b | '''
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = True # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
set_session(sess)
global graph
graph = tf.get_default_graph()
'''
from keras import backend as K
from flask_cors import CORS
from flask import Flask, render_template , request , jsonify, Response
from PIL import Image
import scipy
from glob import glob
import numpy as np
import cv2
import base64
from discogan_final import Discogan
from data_loader import DataLoader
from keras.layers import Dense, Activation
from keras.models import Sequential, Model,load_model
from keras.optimizers import Adam
import datetime
import matplotlib.pyplot as plt
import sys
import io
import os
import random
app=Flask(__name__)
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
        'Access-Control-Allow-Methods': 'GET,PUT,POST,DELETE',
    }
    # Dicts preserve insertion order, so headers are added in the same
    # sequence as the original explicit calls.
    for header_name, header_value in cors_headers.items():
        response.headers.add(header_name, header_value)
    return response
@app.route('/test',methods=['GET','POST'])
def test():
    """Health-check endpoint confirming the API is reachable."""
    print("log:secces")
    payload = {'status': 'success'}
    return jsonify(payload)
@app.route('/genColor')
def home():
    """Serve the main color-generation page."""
    return render_template('index.jinja2')
@app.route('/maskImage' , methods=['POST'])
def discogan_mask_generate_image():
    """Create a black/white mask from an uploaded image.

    Decodes the POSTed image, boosts local contrast (CLAHE on the LAB
    lightness channel), binarizes at a fixed threshold and returns the
    mask as a base64-encoded JPEG inside a JSON payload.
    """
    raw = request.files['image'].read()  # renamed from 'file' (shadowed builtin)
    # np.frombuffer replaces the deprecated np.fromstring for binary input;
    # behavior is identical here.
    npimg = np.frombuffer(raw, np.uint8)
    img1 = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
    lab = cv2.cvtColor(img1, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
    im_gray = clahe.apply(l)
    # The original also ran an Otsu threshold first but immediately
    # discarded the result; only this fixed threshold is used.
    im_bw = cv2.threshold(im_gray, 127, 255, cv2.THRESH_BINARY)[1]
    img = np.stack((im_bw,)*3, -1)
    img = Image.fromarray(img.astype("uint8"))
    rawBytes = io.BytesIO()
    img.save(rawBytes, "JPEG")
    rawBytes.seek(0)
    img_base64 = base64.b64encode(rawBytes.read())
    return jsonify({'status':str(img_base64)})
@app.route('/colorImage' , methods=['POST'])
def discogan_color_generate_image():
    """Colorize an uploaded image with the trained DiscoGAN generator.

    Pipeline: decode upload -> CLAHE contrast boost -> fixed-threshold
    binarization -> write to a temp file (DataLoader reads from disk) ->
    run through the saved model -> return the rendered result as a
    base64-encoded JPEG inside a JSON payload.
    """
    # Reset Keras state so repeated requests do not accumulate graphs.
    K.clear_session()
    raw = request.files['image'].read()  # renamed from 'file' (shadowed builtin)
    # np.frombuffer replaces the deprecated np.fromstring for binary input.
    npimg = np.frombuffer(raw, np.uint8)
    img1 = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
    lab = cv2.cvtColor(img1, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
    im_gray = clahe.apply(l)
    r, c = 1, 1  # subplot grid used to render the generated image
    # The original also ran an Otsu threshold whose result was immediately
    # overwritten; only this fixed threshold is used.  Unused locals
    # (img_rows, img_cols, channels) were removed.
    im_bw = cv2.threshold(im_gray, 127, 255, cv2.THRESH_BINARY)[1]
    img = np.stack((im_bw,)*3, -1)
    # Round-trip through disk because DataLoader reads from a file path.
    cv2.imwrite("img.jpg", img)
    data_loader = DataLoader(dataset_name="img.jpg",
                             img_res=(256, 256))
    imgs_A = data_loader.load_data(batch_size=1, is_testing=True)
    model = load_model('saved_model/actual_model16.h5')
    optimizer = Adam(0.0002, 0.5)
    model.compile(loss='mse', optimizer=optimizer)
    # Translate images to the other domain
    print(imgs_A.shape)
    img = model.predict(imgs_A)
    gen_imgs = np.concatenate([img])
    fig, axs = plt.subplots(r, c)
    axs.imshow(gen_imgs[0])
    axs.axis('off')
    fig.savefig("img.jpg")
    plt.close()
    with open("img.jpg", "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read())
    return jsonify({'status':str(encoded_string)})
if __name__ == '__main__':
    # Listen on all interfaces; debug mode enables the reloader and tracebacks.
    app.run(host='0.0.0.0',port=9000,debug=True)
16,614 | 98fa948765ed6a71b82548753105f7086fb43550 | import math
class Problem:
    """One test case of the omino game: an R x C board and X-cell pieces."""

    def __init__(self):
        # X: omino size, R/C: grid dimensions; populated by readProblem().
        self.data = {"X": 0, "R": 0, "C": 0}
        # Kept for backward compatibility; the original assigned this twice.
        self.X = 0
def readProblem(input):
    """Read one "X R C" line from the open file *input*; return a Problem."""
    tokens = input.readline().split()
    problem = Problem()
    problem.data["X"] = int(tokens[0])
    problem.data["R"] = int(tokens[1])
    problem.data["C"] = int(tokens[2])
    return problem
def solveProblem(problem):
    """Decide the omino-game winner for one case: "GABRIEL" or "RICHARD"."""
    x = problem.data["X"]
    r = problem.data["R"]
    c = problem.data["C"]
    narrow = min(r, c)

    # The board area must divide evenly into X-cell pieces.
    if (r * c) % x != 0:
        return "RICHARD"
    # The narrow dimension must fit at least half a piece (rounded up).
    if int(math.ceil(x / 2.0)) > narrow:
        return "RICHARD"
    # if the minimum dimension isn't wider than half the length
    # you can force the user to create a region that isn't even in number
    # and this can never be filled
    if (x in {4, 6}) and (narrow <= x / 2):
        return "RICHARD"
    # as long as there is enough space above and below you can make this work
    if (x == 5) and (narrow == 2) and (r * c < 3 * x):
        return "RICHARD"
    # if the minimum dimension isn't wider than the ceiling of half the length
    # you can force the user to create a region that isn't odd in number
    # and this can never be filled
    if (x == 5) and (narrow == 3) and (narrow <= int(math.ceil(x / 2.0))):
        return "RICHARD"
    # X-ominoes of greater than 6 cells all have one shape that has a hole in
    # it; if this piece is picked then the other player can never win
    if x > 6:
        return "RICHARD"
    return "GABRIEL"
# Driver: read the case count, solve each case, write "Case #i: ANSWER" lines.
input = open('input.in')  # NOTE: shadows the builtin input()
output = open('output.out', 'w')
cases = int(input.readline())
for i in range(cases):
    problem = readProblem(input)
    answer = solveProblem(problem)
    output.write("Case #" + str(i+1) + ": " + str(answer) + "\n")
|
16,615 | 152a21ad011bfe4912fdba4fda48acc98b939521 | """Support for Netatmo Smart thermostats."""
from datetime import timedelta
import logging
from typing import List, Optional
import pyatmo
import requests
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
DEFAULT_MIN_TEMP,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_TEMPERATURE,
PRECISION_HALVES,
STATE_OFF,
TEMP_CELSIUS,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.util import Throttle
from .const import (
ATTR_HOME_NAME,
ATTR_SCHEDULE_NAME,
AUTH,
DOMAIN,
MANUFACTURER,
MODELS,
SERVICE_SETSCHEDULE,
)
_LOGGER = logging.getLogger(__name__)
PRESET_FROST_GUARD = "Frost Guard"
PRESET_SCHEDULE = "Schedule"
PRESET_MANUAL = "Manual"
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_PRESET = [PRESET_AWAY, PRESET_BOOST, PRESET_FROST_GUARD, PRESET_SCHEDULE]
STATE_NETATMO_SCHEDULE = "schedule"
STATE_NETATMO_HG = "hg"
STATE_NETATMO_MAX = "max"
STATE_NETATMO_AWAY = PRESET_AWAY
STATE_NETATMO_OFF = STATE_OFF
STATE_NETATMO_MANUAL = "manual"
STATE_NETATMO_HOME = "home"
PRESET_MAP_NETATMO = {
PRESET_FROST_GUARD: STATE_NETATMO_HG,
PRESET_BOOST: STATE_NETATMO_MAX,
PRESET_SCHEDULE: STATE_NETATMO_SCHEDULE,
PRESET_AWAY: STATE_NETATMO_AWAY,
STATE_NETATMO_OFF: STATE_NETATMO_OFF,
}
NETATMO_MAP_PRESET = {
STATE_NETATMO_HG: PRESET_FROST_GUARD,
STATE_NETATMO_MAX: PRESET_BOOST,
STATE_NETATMO_SCHEDULE: PRESET_SCHEDULE,
STATE_NETATMO_AWAY: PRESET_AWAY,
STATE_NETATMO_OFF: STATE_NETATMO_OFF,
STATE_NETATMO_MANUAL: STATE_NETATMO_MANUAL,
}
HVAC_MAP_NETATMO = {
PRESET_SCHEDULE: HVAC_MODE_AUTO,
STATE_NETATMO_HG: HVAC_MODE_AUTO,
PRESET_FROST_GUARD: HVAC_MODE_AUTO,
PRESET_BOOST: HVAC_MODE_HEAT,
STATE_NETATMO_OFF: HVAC_MODE_OFF,
STATE_NETATMO_MANUAL: HVAC_MODE_AUTO,
PRESET_MANUAL: HVAC_MODE_AUTO,
STATE_NETATMO_AWAY: HVAC_MODE_AUTO,
}
CURRENT_HVAC_MAP_NETATMO = {True: CURRENT_HVAC_HEAT, False: CURRENT_HVAC_IDLE}
CONF_HOMES = "homes"
CONF_ROOMS = "rooms"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
DEFAULT_MAX_TEMP = 30
NA_THERM = "NATherm1"
NA_VALVE = "NRV"
SCHEMA_SERVICE_SETSCHEDULE = vol.Schema(
{
vol.Required(ATTR_SCHEDULE_NAME): cv.string,
vol.Required(ATTR_HOME_NAME): cv.string,
}
)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the Netatmo energy platform."""
    auth = hass.data[DOMAIN][entry.entry_id][AUTH]
    home_data = HomeData(auth)

    def get_entities():
        """Retrieve Netatmo entities."""
        entities = []
        try:
            home_data.setup()
        except pyatmo.NoDevice:
            # No Netatmo energy devices on this account; nothing to add.
            return
        home_ids = home_data.get_all_home_ids()
        for home_id in home_ids:
            _LOGGER.debug("Setting up home %s ...", home_id)
            try:
                room_data = ThermostatData(auth, home_id)
            except pyatmo.NoDevice:
                # Skip homes that expose no thermostat data.
                continue
            for room_id in room_data.get_room_ids():
                room_name = room_data.homedata.rooms[home_id][room_id]["name"]
                _LOGGER.debug("Setting up room %s (%s) ...", room_name, room_id)
                entities.append(NetatmoThermostat(room_data, room_id))
        return entities

    # Device discovery performs blocking network I/O, so run it in the
    # executor rather than on the event loop.
    async_add_entities(await hass.async_add_executor_job(get_entities), True)

    def _service_setschedule(service):
        """Service to change current home schedule."""
        home_name = service.data.get(ATTR_HOME_NAME)
        schedule_name = service.data.get(ATTR_SCHEDULE_NAME)
        home_data.homedata.switchHomeSchedule(schedule=schedule_name, home=home_name)
        _LOGGER.info("Set home (%s) schedule to %s", home_name, schedule_name)

    # Only register the schedule service when home data actually loaded.
    if home_data.homedata is not None:
        hass.services.async_register(
            DOMAIN,
            SERVICE_SETSCHEDULE,
            _service_setschedule,
            schema=SCHEMA_SERVICE_SETSCHEDULE,
        )
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Netatmo energy sensors.

    Legacy YAML setup is a no-op; configuration happens through config
    entries (see async_setup_entry).
    """
    return
class NetatmoThermostat(ClimateEntity):
    """Representation a Netatmo thermostat."""
    def __init__(self, data, room_id):
        """Initialize the sensor.

        data: shared ThermostatData instance wrapping the pyatmo API.
        room_id: identifier of the Netatmo room this entity represents.
        """
        self._data = data
        self._state = None
        self._room_id = room_id
        self._room_name = self._data.homedata.rooms[self._data.home_id][room_id]["name"]
        self._name = f"{MANUFACTURER} {self._room_name}"
        self._current_temperature = None
        self._target_temperature = None
        self._preset = None
        self._away = None
        self._operation_list = [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
        self._support_flags = SUPPORT_FLAGS
        self._hvac_mode = None
        self._battery_level = None
        self._connected = None
        # When True, the next update() bypasses the API throttle.
        self.update_without_throttle = False
        # Fall back to NA_VALVE when the room is not present in room_status yet.
        self._module_type = self._data.room_status.get(room_id, {}).get(
            "module_type", NA_VALVE
        )
        # Only NA_THERM modules gain the "off" HVAC mode.
        if self._module_type == NA_THERM:
            self._operation_list.append(HVAC_MODE_OFF)
        self._unique_id = f"{self._room_id}-{self._module_type}"
    @property
    def device_info(self):
        """Return the device info for the thermostat/valve."""
        return {
            "identifiers": {(DOMAIN, self._room_id)},
            "name": self._room_name,
            "manufacturer": MANUFACTURER,
            "model": MODELS[self._module_type],
        }
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags
    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._name
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature
    @property
    def target_temperature_step(self) -> Optional[float]:
        """Return the supported step of target temperature."""
        return PRECISION_HALVES
    @property
    def hvac_mode(self):
        """Return hvac operation ie. heat, cool mode."""
        return self._hvac_mode
    @property
    def hvac_modes(self):
        """Return the list of available hvac operation modes."""
        return self._operation_list
    @property
    def hvac_action(self) -> Optional[str]:
        """Return the current running hvac operation if supported."""
        if self._module_type == NA_THERM:
            # NOTE(review): assumes boilerstatus is always a valid key of
            # CURRENT_HVAC_MAP_NETATMO -- confirm it cannot be None here.
            return CURRENT_HVAC_MAP_NETATMO[self._data.boilerstatus]
        # Maybe it is a valve
        if self._room_id in self._data.room_status:
            if (
                self._data.room_status[self._room_id].get("heating_power_request", 0)
                > 0
            ):
                return CURRENT_HVAC_HEAT
        return CURRENT_HVAC_IDLE
    def set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode."""
        if hvac_mode == HVAC_MODE_OFF:
            self.turn_off()
        elif hvac_mode == HVAC_MODE_AUTO:
            # Leaving "off" requires an explicit turn_on before scheduling.
            if self.hvac_mode == HVAC_MODE_OFF:
                self.turn_on()
            self.set_preset_mode(PRESET_SCHEDULE)
        elif hvac_mode == HVAC_MODE_HEAT:
            self.set_preset_mode(PRESET_BOOST)
    def set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        # Presumably a target of 0 marks the room as switched off; reset it
        # to the "home" setpoint first -- TODO confirm against pyatmo docs.
        if self.target_temperature == 0:
            self._data.homestatus.setroomThermpoint(
                self._data.home_id, self._room_id, STATE_NETATMO_HOME,
            )
        if (
            preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]
            and self._module_type == NA_VALVE
        ):
            # Valves get "boost" emulated via a manual max-temperature setpoint.
            self._data.homestatus.setroomThermpoint(
                self._data.home_id,
                self._room_id,
                STATE_NETATMO_MANUAL,
                DEFAULT_MAX_TEMP,
            )
        elif preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]:
            self._data.homestatus.setroomThermpoint(
                self._data.home_id, self._room_id, PRESET_MAP_NETATMO[preset_mode]
            )
        elif preset_mode in [PRESET_SCHEDULE, PRESET_FROST_GUARD, PRESET_AWAY]:
            self._data.homestatus.setThermmode(
                self._data.home_id, PRESET_MAP_NETATMO[preset_mode]
            )
        else:
            _LOGGER.error("Preset mode '%s' not available", preset_mode)
        self.update_without_throttle = True
        self.schedule_update_ha_state()
    @property
    def preset_mode(self) -> Optional[str]:
        """Return the current preset mode, e.g., home, away, temp."""
        return self._preset
    @property
    def preset_modes(self) -> Optional[List[str]]:
        """Return a list of available preset modes."""
        return SUPPORT_PRESET
    def set_temperature(self, **kwargs):
        """Set new target temperature for 2 hours."""
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is None:
            return
        self._data.homestatus.setroomThermpoint(
            self._data.home_id, self._room_id, STATE_NETATMO_MANUAL, temp
        )
        self.update_without_throttle = True
        self.schedule_update_ha_state()
    @property
    def device_state_attributes(self):
        """Return the state attributes of the thermostat."""
        attr = {}
        if self._battery_level is not None:
            attr[ATTR_BATTERY_LEVEL] = self._battery_level
        return attr
    def turn_off(self):
        """Turn the entity off."""
        if self._module_type == NA_VALVE:
            # Valves cannot be switched off; drop them to the minimum instead.
            self._data.homestatus.setroomThermpoint(
                self._data.home_id,
                self._room_id,
                STATE_NETATMO_MANUAL,
                DEFAULT_MIN_TEMP,
            )
        elif self.hvac_mode != HVAC_MODE_OFF:
            self._data.homestatus.setroomThermpoint(
                self._data.home_id, self._room_id, STATE_NETATMO_OFF
            )
        self.update_without_throttle = True
        self.schedule_update_ha_state()
    def turn_on(self):
        """Turn the entity on."""
        self._data.homestatus.setroomThermpoint(
            self._data.home_id, self._room_id, STATE_NETATMO_HOME
        )
        self.update_without_throttle = True
        self.schedule_update_ha_state()
    @property
    def available(self) -> bool:
        """If the device hasn't been able to connect, mark as unavailable."""
        return bool(self._connected)
    def update(self):
        """Get the latest data from NetAtmo API and updates the states."""
        try:
            if self.update_without_throttle:
                # Bypass the throttle right after a user-triggered change.
                self._data.update(no_throttle=True)
                self.update_without_throttle = False
            else:
                self._data.update()
        except AttributeError:
            _LOGGER.error("NetatmoThermostat::update() got exception")
            return
        try:
            if self._module_type is None:
                self._module_type = self._data.room_status[self._room_id]["module_type"]
            self._current_temperature = self._data.room_status[self._room_id][
                "current_temperature"
            ]
            self._target_temperature = self._data.room_status[self._room_id][
                "target_temperature"
            ]
            self._preset = NETATMO_MAP_PRESET[
                self._data.room_status[self._room_id]["setpoint_mode"]
            ]
            self._hvac_mode = HVAC_MAP_NETATMO[self._preset]
            self._battery_level = self._data.room_status[self._room_id].get(
                "battery_level"
            )
            self._connected = True
        except KeyError as err:
            # Missing keys mean the module did not report; log once per outage.
            if self._connected is not False:
                _LOGGER.debug(
                    "The thermostat in room %s seems to be out of reach. (%s)",
                    self._room_name,
                    err,
                )
            self._connected = False
        self._away = self._hvac_mode == HVAC_MAP_NETATMO[STATE_NETATMO_AWAY]
class HomeData:
    """Representation Netatmo homes."""
    def __init__(self, auth, home=None):
        """Initialize the HomeData object."""
        self.auth = auth
        self.home = home
        self.homedata = None
        self.home_id = None
        self.home_ids = []
        self.home_names = []
        self.room_names = []
        self.schedules = []
    def get_all_home_ids(self):
        """Get all the home ids returned by NetAtmo API."""
        if self.homedata is None:
            return []
        # Only homes that carry both schedules and modules are usable.
        for home_info in self.homedata.homes.values():
            if "therm_schedules" in home_info and "modules" in home_info:
                self.home_ids.append(home_info["id"])
        return self.home_ids
    def setup(self):
        """Retrieve HomeData by NetAtmo API."""
        try:
            self.homedata = pyatmo.HomeData(self.auth)
            self.home_id = self.homedata.gethomeId(self.home)
        except TypeError:
            _LOGGER.error("Error when getting home data")
        except AttributeError:
            _LOGGER.error("No default_home in HomeData")
        except pyatmo.NoDevice:
            _LOGGER.debug("No thermostat devices available")
        except pyatmo.InvalidHome:
            _LOGGER.debug("Invalid home %s", self.home)
class ThermostatData:
    """Get the latest data from Netatmo."""
    def __init__(self, auth, home_id=None):
        """Initialize the data object."""
        self.auth = auth
        self.homedata = None
        self.homestatus = None
        self.room_ids = []
        # Per-room dict of parsed status values, keyed by room id.
        self.room_status = {}
        self.schedules = []
        self.home_id = home_id
        self.home_name = None
        self.away_temperature = None
        self.hg_temperature = None
        self.boilerstatus = None
        self.setpoint_duration = None
    def get_room_ids(self):
        """Return all module available on the API as a list."""
        # Returns an empty list when the initial API setup fails.
        if not self.setup():
            return []
        for room in self.homestatus.rooms:
            self.room_ids.append(room)
        return self.room_ids
    def setup(self):
        """Retrieve HomeData and HomeStatus by NetAtmo API.

        Returns True on success, False when the API call failed or the
        home has no climate devices.
        """
        try:
            self.homedata = pyatmo.HomeData(self.auth)
            self.homestatus = pyatmo.HomeStatus(self.auth, home_id=self.home_id)
            self.home_name = self.homedata.getHomeName(self.home_id)
            self.update()
        except TypeError:
            _LOGGER.error("ThermostatData::setup() got error")
            return False
        except pyatmo.exceptions.NoDevice:
            _LOGGER.debug(
                "No climate devices for %s (%s)", self.home_name, self.home_id
            )
            return False
        return True
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Call the NetAtmo API to update the data."""
        try:
            self.homestatus = pyatmo.HomeStatus(self.auth, home_id=self.home_id)
        except pyatmo.exceptions.NoDevice:
            _LOGGER.error("No device found")
            return
        except TypeError:
            _LOGGER.error("Error when getting homestatus")
            return
        except requests.exceptions.Timeout:
            _LOGGER.warning("Timed out when connecting to Netatmo server")
            return
        for room in self.homestatus.rooms:
            try:
                roomstatus = {}
                homestatus_room = self.homestatus.rooms[room]
                homedata_room = self.homedata.rooms[self.home_id][room]
                roomstatus["roomID"] = homestatus_room["id"]
                # Only reachable rooms report temperatures and setpoints.
                if homestatus_room["reachable"]:
                    roomstatus["roomname"] = homedata_room["name"]
                    roomstatus["target_temperature"] = homestatus_room[
                        "therm_setpoint_temperature"
                    ]
                    roomstatus["setpoint_mode"] = homestatus_room["therm_setpoint_mode"]
                    roomstatus["current_temperature"] = homestatus_room[
                        "therm_measured_temperature"
                    ]
                    roomstatus["module_type"] = self.homestatus.thermostatType(
                        home_id=self.home_id, rid=room, home=self.home_name
                    )
                    roomstatus["module_id"] = None
                    roomstatus["heating_status"] = None
                    roomstatus["heating_power_request"] = None
                    batterylevel = None
                    # Prefer a NA_THERM module id; otherwise keep the first one seen.
                    for module_id in homedata_room["module_ids"]:
                        if (
                            self.homedata.modules[self.home_id][module_id]["type"]
                            == NA_THERM
                            or roomstatus["module_id"] is None
                        ):
                            roomstatus["module_id"] = module_id
                    if roomstatus["module_type"] == NA_THERM:
                        self.boilerstatus = self.homestatus.boilerStatus(
                            rid=roomstatus["module_id"]
                        )
                        roomstatus["heating_status"] = self.boilerstatus
                        batterylevel = self.homestatus.thermostats[
                            roomstatus["module_id"]
                        ].get("battery_level")
                    elif roomstatus["module_type"] == NA_VALVE:
                        roomstatus["heating_power_request"] = homestatus_room[
                            "heating_power_request"
                        ]
                        roomstatus["heating_status"] = (
                            roomstatus["heating_power_request"] > 0
                        )
                        # A valve only heats when the boiler is running too.
                        if self.boilerstatus is not None:
                            roomstatus["heating_status"] = (
                                self.boilerstatus and roomstatus["heating_status"]
                            )
                        batterylevel = self.homestatus.valves[
                            roomstatus["module_id"]
                        ].get("battery_level")
                    # Keep the lowest battery percentage seen for the room.
                    if batterylevel:
                        batterypct = interpolate(
                            batterylevel, roomstatus["module_type"]
                        )
                        if roomstatus.get("battery_level") is None:
                            roomstatus["battery_level"] = batterypct
                        elif batterypct < roomstatus["battery_level"]:
                            roomstatus["battery_level"] = batterypct
                self.room_status[room] = roomstatus
            except KeyError as err:
                _LOGGER.error("Update of room %s failed. Error: %s", room, err)
        self.away_temperature = self.homestatus.getAwaytemp(home_id=self.home_id)
        self.hg_temperature = self.homestatus.getHgtemp(home_id=self.home_id)
        self.setpoint_duration = self.homedata.setpoint_duration[self.home_id]
def interpolate(batterylevel, module_type):
    """Map a raw battery reading to a 0-100 percentage for *module_type*."""
    # Raw voltage thresholds per device type (thermostat vs. valve).
    thresholds = {
        NA_THERM: {
            "full": 4100,
            "high": 3600,
            "medium": 3300,
            "low": 3000,
            "empty": 2800,
        },
        NA_VALVE: {
            "full": 3200,
            "high": 2700,
            "medium": 2400,
            "low": 2200,
            "empty": 2200,
        },
    }
    device_levels = thresholds[module_type]
    sorted_levels = sorted(device_levels.values())
    steps = [20, 50, 80, 100]
    # Clamp the extremes, then pick the band the reading falls into.
    if batterylevel >= device_levels["full"]:
        return 100
    if batterylevel >= device_levels["high"]:
        band = 3
    elif batterylevel >= device_levels["medium"]:
        band = 2
    elif batterylevel >= device_levels["low"]:
        band = 1
    else:
        return 0
    # Linear interpolation inside the selected band.
    span = sorted_levels[band + 1] - sorted_levels[band]
    offset = batterylevel - sorted_levels[band]
    pct = steps[band - 1] + (steps[band] - steps[band - 1]) * offset / span
    return int(pct)
|
16,616 | 3dc8fd9243467e234fe915093bed7dc8e8015c6b | from .declarations import (
result_type, nothing, requires as RequiresType,
returns as Returns, returns_result_type as ReturnsType
)
class CallPoint(object):
    """One callable in a runner's chain, with requires/returns declarations."""
    # Linked-list pointers to the neighbouring call points.
    next = None
    previous = None
    # Class-level defaults for the declarations.
    requires = nothing
    returns = result_type
    def __init__(self, obj, requires=None, returns=None):
        """Wrap *obj*, normalising its requires/returns declarations."""
        self.obj = obj
        # Normalise `requires`: explicit argument wins over the attribute
        # a decorator may have stashed on the callable.
        if requires is None:
            requires = getattr(obj, '__mush_requires__', nothing)
        elif isinstance(requires, (list, tuple)):
            requires = RequiresType(*requires)
        elif not isinstance(requires, RequiresType):
            requires = RequiresType(requires)
        self.requires = requires
        # Same normalisation for `returns`.
        if returns is None:
            returns = getattr(obj, '__mush_returns__', result_type)
        elif isinstance(returns, (list, tuple)):
            returns = Returns(*returns)
        elif not isinstance(returns, ReturnsType):
            returns = Returns(returns)
        self.returns = returns
        self.labels = set()
        self.added_using = set()
    def __call__(self, context):
        """Execute the wrapped object within *context*."""
        return context.call(self.obj, self.requires, self.returns)
    def __repr__(self):
        """Show the wrapped object, its declarations and any labels."""
        txt = '%r %r %r' % (self.obj, self.requires, self.returns)
        if self.labels:
            txt += ' <-- ' + ', '.join(sorted(self.labels))
        return txt
|
16,617 | b7ed62e478a72c716c3530687ac5b1e38b52a1b5 | """
在CSDN上看到“不脱发的程序猿"的文章《基于Python的人脸自动戴口罩系统》
https://blog.csdn.net/m0_38106923/article/details/104174562
里面介绍了怎么利用Dlib模块的landmark人脸68个关键点识别人脸五官数据,从而实现带口罩。
本文是在作者的基础上,增加了添加眼镜的部分。
"""
# _*_ coding:utf-8 _*_
from PIL import Image, ImageTk
from tkinter.filedialog import askopenfilename
import cv2
import tkinter as tk
import PIL
import dlib
class AddMask(object):
    # Dialog application that overlays a mask or glasses onto a portrait photo
    # using dlib's 68-point face landmarks.
    def __init__(self):
        """Build the main window, load the thumbnails and wire up the widgets."""
        self.root = tk.Tk()
        self.root.title('基于Pyhon的人脸自动戴口罩系统')
        self.root.geometry('1200x500')
        self.path1_ = None
        self.path2_ = None
        self.seg_img_path = None
        self.mask = None
        self.glass=None
        self.label_Img_seg = None
        # Background decoration image for the whole window.
        decoration = PIL.Image.open('C:/Achillesccj/Work/AI/MaskandGlass/pic/bg.png').resize((1200, 500))
        render = ImageTk.PhotoImage(decoration)
        img = tk.Label(image=render)
        img.image = render
        img.place(x=0, y=0)
        # Panel showing the original portrait (picture 1)
        tk.Button(self.root, text="打开头像", command=self.show_original1_pic).place(x=50, y=120)
        # tk.Button(self.root, text="退出软件", command=quit).place(x=900, y=40)
        tk.Label(self.root, text="头像", font=10).place(x=280, y=120)
        self.cv_orinial1 = tk.Canvas(self.root, bg='white', width=270, height=270)
        self.cv_orinial1.create_rectangle(8, 8, 260, 260, width=1, outline='red')
        self.cv_orinial1.place(x=180, y=150)
        self.label_Img_original1 = tk.Label(self.root)
        self.label_Img_original1.place(x=180, y=150)
        tk.Label(self.root,text="选择口罩",font=8).place(x=550,y=120)
        tk.Label(self.root,text="选择眼镜",font=8).place(x=650,y=120)
        # Mask thumbnails -- clicking one applies that mask to the portrait.
        first_pic = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Mask.png")
        first_pic = first_pic.resize((60, 60), Image.ANTIALIAS)
        first_pic = ImageTk.PhotoImage(first_pic)
        self.first = tk.Label(self.root, image=first_pic)
        self.first.place(x=550,y=160, width=60, height=60)
        self.first.bind("<Button-1>", self.mask0)
        second_pic = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Mask1.png")
        second_pic = second_pic.resize((60, 60), Image.ANTIALIAS)
        second_pic = ImageTk.PhotoImage(second_pic)
        self.second_pic = tk.Label(self.root, image=second_pic)
        self.second_pic.place(x=550, y=230, width=60, height=60)
        self.second_pic.bind("<Button-1>", self.mask1)
        third_pic = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Mask3.png")
        third_pic = third_pic.resize((60, 60), Image.ANTIALIAS)
        third_pic = ImageTk.PhotoImage(third_pic)
        self.third_pic = tk.Label(self.root, image=third_pic)
        self.third_pic.place(x=550, y=300, width=60, height=60)
        self.third_pic.bind("<Button-1>", self.mask3)
        forth_pic = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Mask4.png")
        forth_pic = forth_pic.resize((60, 60), Image.ANTIALIAS)
        forth_pic = ImageTk.PhotoImage(forth_pic)
        self.forth_pic = tk.Label(self.root, image=forth_pic)
        self.forth_pic.place(x=550, y=370, width=60, height=60)
        self.forth_pic.bind("<Button-1>", self.mask4)
        # Glasses thumbnails -- clicking one applies those glasses.
        first_glass = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Glass1.png")
        first_glass = first_glass.resize((60, 60), Image.ANTIALIAS)
        first_glass = ImageTk.PhotoImage(first_glass)
        self.first_glass = tk.Label(self.root, image=first_glass)
        self.first_glass.place(x=650,y=160, width=60, height=60)
        self.first_glass.bind("<Button-1>", self.glass1)
        second_glass = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Glass2.png")
        second_glass = second_glass.resize((60, 60), Image.ANTIALIAS)
        second_glass = ImageTk.PhotoImage(second_glass)
        self.second_glass = tk.Label(self.root, image=second_glass)
        self.second_glass.place(x=650, y=230, width=60, height=60)
        self.second_glass.bind("<Button-1>", self.glass2)
        third_glass = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Glass3.png")
        third_glass = third_glass.resize((60, 60), Image.ANTIALIAS)
        third_glass = ImageTk.PhotoImage(third_glass)
        self.third_pic = tk.Label(self.root, image=third_glass)
        self.third_pic.place(x=650, y=300, width=60, height=60)
        self.third_pic.bind("<Button-1>", self.glass3)
        forth_glass = Image.open("C:/Achillesccj/Work/AI/MaskandGlass/pic/Glass4.png")
        forth_glass = forth_glass.resize((60, 60), Image.ANTIALIAS)
        forth_glass = ImageTk.PhotoImage(forth_glass)
        self.forth_glass = tk.Label(self.root, image=forth_glass)
        self.forth_glass.place(x=650, y=370, width=60, height=60)
        self.forth_glass.bind("<Button-1>", self.glass4)
        # Result panel showing the composited image.
        tk.Label(self.root, text="佩戴效果", font=10).place(x=920, y=120)
        self.cv_seg = tk.Canvas(self.root, bg='white', width=270, height=270)
        self.cv_seg.create_rectangle(8, 8, 260, 260, width=1, outline='red')
        self.cv_seg.place(x=820, y=150)
        self.label_Img_seg = tk.Label(self.root)
        self.label_Img_seg.place(x=820, y=150)
        self.root.mainloop()
    # Display the selected original portrait (picture 1)
    def show_original1_pic(self):
        """Ask for an image file and show it in the left-hand panel."""
        self.path1_ = askopenfilename(title='选择文件')
        print(self.path1_)
        self.Img = PIL.Image.open(r'{}'.format(self.path1_))
        Img = self.Img.resize((270,270),PIL.Image.ANTIALIAS)  # resize to 270x270 (original comment said 256x256)
        img_png_original = ImageTk.PhotoImage(Img)
        # Keep a reference on the label so the PhotoImage is not garbage-collected.
        self.label_Img_original1.config(image=img_png_original)
        self.label_Img_original1.image = img_png_original  # keep a reference
        self.cv_orinial1.create_image(5, 5,anchor='nw', image=img_png_original)
    # Show the portrait with the selected mask applied
    def show_morpher_pic(self):
        # Load the original image
        img1 = cv2.imread(self.path1_)
        # Get the bounding box of the mouth region
        x_min, x_max, y_min, y_max, size = self.get_mouth(img1)
        # Scale the mask to fit the detected region
        adding = self.mask.resize(size)
        im = Image.fromarray(img1[:, :, ::-1])  # switch BGR to RGB
        # Composite the two images (the mask's alpha is used as the paste mask)
        im.paste(adding, (int(x_min), int(y_min)), adding)
        # im.show()
        save_path = self.path1_.split('.')[0]+'_result.jpg'
        im.save(save_path)
        Img = im.resize((270, 270), PIL.Image.ANTIALIAS)  # resize to 270x270
        img_png_seg = ImageTk.PhotoImage(Img)
        self.label_Img_seg.config(image=img_png_seg)
        self.label_Img_seg.image = img_png_seg  # keep a reference
    # Show the portrait with the selected glasses applied
    def show_glass_pic(self):
        # Load the original image
        img1 = cv2.imread(self.path1_)
        # Get the bounding box of the eye region
        x_min, x_max, y_min, y_max, size = self.get_eye(img1)
        # Scale the glasses to fit the detected region
        adding = self.glass.resize(size)
        im = Image.fromarray(img1[:, :, ::-1])  # switch BGR to RGB
        # Composite the two images
        im.paste(adding, (int(x_min), int(y_min)), adding)
        # im.show()
        save_path = self.path1_.split('.')[0]+'_result.jpg'
        im.save(save_path)
        Img = im.resize((270, 270), PIL.Image.ANTIALIAS)  # resize to 270x270
        img_png_seg = ImageTk.PhotoImage(Img)
        self.label_Img_seg.config(image=img_png_seg)
        self.label_Img_seg.image = img_png_seg  # keep a reference
    # Click handlers: each selects one overlay asset and re-renders.
    def mask0(self, event):
        self.mask = Image.open('pic/mask.png')
        self.show_morpher_pic()
    def mask1(self, event):
        self.mask = Image.open('pic/mask1.png')
        self.show_morpher_pic()
    def mask3(self, event):
        self.mask = Image.open('pic/mask3.png')
        self.show_morpher_pic()
    def mask4(self, event):
        self.mask = Image.open('pic/mask4.png')
        self.show_morpher_pic()
    def glass1(self, event):
        self.glass = Image.open('pic/Glass1.png')
        self.show_glass_pic()
    def glass2(self, event):
        self.glass = Image.open('pic/Glass2.png')
        self.show_glass_pic()
    def glass3(self, event):
        self.glass = Image.open('pic/Glass3.png')
        self.show_glass_pic()
    def glass4(self, event):
        self.glass = Image.open('pic/Glass4.png')
        self.show_glass_pic()
    def get_mouth(self, img):
        """Locate the mouth region of the detected face.

        Returns (x_min, x_max, y_min, y_max, size) where *size* is the
        (width, height) the overlay should be resized to.
        """
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
        faces = detector(img_gray, 0)
        for k, d in enumerate(faces):
            x = []
            y = []
            # Height of the detected face
            height = d.bottom() - d.top()
            # Width of the detected face
            width = d.right() - d.left()
            shape = predictor(img_gray, d)
            # Landmarks 48-67 outline the lips
            for i in range(48, 68):
                x.append(shape.part(i).x)
                y.append(shape.part(i).y)
            # Grow the lip box by a third of the face size so the mask covers it
            y_max = (int)(max(y) + height / 3)
            y_min = (int)(min(y) - height / 3)
            x_max = (int)(max(x) + width / 3)
            x_min = (int)(min(x) - width / 3)
            size = ((x_max - x_min), (y_max - y_min))
            return x_min, x_max, y_min, y_max, size
    def get_eye(self, img):
        """Locate the eye region of the detected face (same shape as get_mouth)."""
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
        faces = detector(img_gray, 0)
        for k, d in enumerate(faces):
            x = []
            y = []
            # Height of the detected face
            height = d.bottom() - d.top()
            # Width of the detected face
            width = d.right() - d.left()
            shape = predictor(img_gray, d)
            # Landmarks 36-45 cover the eyes.
            # NOTE(review): eye landmarks are 36-47 in the 68-point model;
            # range(36, 45) stops at point 44 and misses part of the left eye.
            # Likely should be range(36, 48) -- confirm before changing.
            for i in range(36, 45):
                x.append(shape.part(i).x)
                y.append(shape.part(i).y)
            # Expand the eye box so the glasses fit naturally
            y_max = (int)(max(y) + height / 3)
            y_min = (int)(min(y) - height / 3)
            x_max = (int)(max(x) + width / 3)
            x_min = (int)(min(x) - width / 3)
            size = ((x_max - x_min), (y_max - y_min))
            return x_min, x_max, y_min, y_max, size
    def quit(self):
        """Close the main window."""
        self.root.destroy()
if __name__ == '__main__':
    # Launch the GUI when run as a script (mainloop runs inside __init__).
    AddMask()
|
16,618 | 2e1b4d4aca0d4424d8c1a205fa1168acea3de80c | from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
class BookAd(models.Model):
CAT_CHOICES = (
('FANTASY', 'fantasy'),
('BIOGRAPHY', 'biography'),
('CLASSIC', 'classic'),
('COMMIC', 'commic'),
('HORROR', 'horror'),
('ROMANCE', 'romance'),
('OTHER', 'other'),
)
title = models.CharField(max_length=40)
author = models.CharField(max_length=40)
price = models.IntegerField(default=10000)
category = models.CharField(choices=CAT_CHOICES, default='OTHER', max_length=30)
publication = models.CharField(max_length=50)
def __str__(self):
return str(self.title) + ' / ' + str(self.author)
def get_absolute_url(self):
return reverse('ad', kwargs={'pk': self.pk})
def save(self, *args, **kwargs):
self.full_clean()
return super(BookAd, self).save(*args, **kwargs)
|
16,619 | 622d21f44c91eb87608495e9519d0cd7e9f58d3f | class board:
def __init__(InitMap,self):
|
16,620 | 7f4f3f38b8126a404953bbd0016561d5ae714907 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selectlines.ui'
#
# Created: Sun Oct 30 19:53:08 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # Qt4 API-1 builds expose QString.fromUtf8; API-2 builds (plain Python
    # strings) raise AttributeError, in which case identity is sufficient.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 removed UnicodeUTF8; translate() then takes no encoding arg.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_SelectLines(object):
    # Generated by pyuic4 from selectlines.ui -- regenerate rather than edit.
    def setupUi(self, SelectLines):
        """Create and lay out the dialog's widgets."""
        SelectLines.setObjectName(_fromUtf8("SelectLines"))
        SelectLines.resize(202, 218)
        self.buttonBox = QtGui.QDialogButtonBox(SelectLines)
        self.buttonBox.setGeometry(QtCore.QRect(-150, 180, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.lineslist = QtGui.QListWidget(SelectLines)
        self.lineslist.setGeometry(QtCore.QRect(10, 10, 181, 161))
        self.lineslist.setAlternatingRowColors(True)
        self.lineslist.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        self.lineslist.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
        self.lineslist.setProperty("isWrapping", False)
        self.lineslist.setLayoutMode(QtGui.QListView.SinglePass)
        self.lineslist.setSpacing(0)
        self.lineslist.setSelectionRectVisible(True)
        self.lineslist.setObjectName(_fromUtf8("lineslist"))
        self.retranslateUi(SelectLines)
        # Wire OK/Cancel to the dialog's accept/reject slots.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), SelectLines.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), SelectLines.reject)
        QtCore.QMetaObject.connectSlotsByName(SelectLines)
    def retranslateUi(self, SelectLines):
        """Apply the translated window and widget texts."""
        SelectLines.setWindowTitle(_translate("SelectLines", "Select Lines", None))
        self.lineslist.setSortingEnabled(False)
16,621 | 1fa9291f928c1a65483dc382870cce517ec2885a | from django.contrib import admin
from .models import Teacher, Student
# Register your models here.
@admin.register(Teacher)
class TeacherAdmin(admin.ModelAdmin):
    """Expose Teacher in the Django admin with the default options."""
    pass
@admin.register(Student)
class StudentAdmin(admin.ModelAdmin):
    """Expose Student in the Django admin with the default options."""
    pass
16,622 | b9f0f5846d3f0a09451a77ff78642f0439eda0eb | import argparse
import json
import re
import sys
import threading
import time
from socket import AF_INET, SOCK_STREAM, socket
from common.variables import DEFAULT_PORT, ENCODING, MAX_PACKAGE_LENGTH
from log.client_log_config import LOG
def log(func):
    """Decorator that logs every call of *func* with its args and result.

    Fix: the original wrapper did not use functools.wraps, so every
    decorated function reported __name__ == 'wrap_log' and lost its
    docstring -- the log line below relies on func.__name__ staying intact.
    """
    from functools import wraps  # local import keeps the module import block untouched

    @wraps(func)
    def wrap_log(*args, **kwargs):
        res = func(*args, **kwargs)
        LOG.debug(f'Log: {func.__name__}({args},{kwargs}) = {res}')
        return res
    return wrap_log
@log
def parse_args():
    """Parse command-line options: -a address, -n name, -p port."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-a', default='localhost')
    arg_parser.add_argument('-n', default='Guest')
    arg_parser.add_argument('-p', type=int, default=DEFAULT_PORT)
    parsed = arg_parser.parse_args(sys.argv[1:])
    return parsed.a, parsed.p, parsed.n
@log
def parse_answer(jim_obj):
    """Pretty-print a JIM server response dictionary to stdout."""
    if not isinstance(jim_obj, dict):
        print('Server answer not dict')
        return
    # A well-formed answer always carries a 'response' code.
    if 'response' in jim_obj:
        print(f'Server answer: {jim_obj["response"]}')
    else:
        print('Answer has not "response" code')
    # Optional diagnostic fields.
    if 'error' in jim_obj:
        print(f'Server error message: {jim_obj["error"]}')
    if 'alert' in jim_obj:
        print(f'Server alert message: {jim_obj["alert"]}')
@log
def make_presence_message(client_name, status):
    """Build a JIM 'presence' message announcing *client_name*."""
    presence = {
        'action': 'presence',
        'time': time.time(),
        'type': 'status',
    }
    presence['user'] = {
        'client_name': client_name,
        'status': status,
    }
    return presence
@log
def make_msg_message(client_name, msg, to='#'):
    """Build a JIM 'msg' message; *to* defaults to '#' (the common chat)."""
    message = {'action': 'msg', 'time': time.time()}
    message['to'] = to
    message['from'] = client_name
    message['encoding'] = 'utf-8'
    message['message'] = msg
    return message
@log
def send_message_take_answer(sock, msg):
    """Send *msg* as compact JSON over *sock* and return the decoded reply.

    Returns an empty dict when the server's reply is not valid JSON.
    """
    payload = json.dumps(msg, separators=(',', ':'))
    try:
        sock.send(payload.encode(ENCODING))
        raw = sock.recv(MAX_PACKAGE_LENGTH)
        return json.loads(raw.decode(ENCODING))
    except json.JSONDecodeError:
        LOG.error('Answer JSON broken')
        return {}
@log
def cmd_help():
    """Print the list of supported chat commands."""
    for line in (
        'Поддерживаемые команды:',
        'm [сообщение] - отправить сообщение в общий чат.',
        'p [получатель] [сообщение] - отправить приватное сообщение.',
        'help - вывести подсказки по командам',
        'exit - выход из программы',
    ):
        print(line)
@log
def user_input(sock, client_name):
    """Read commands from stdin and send chat messages over *sock*.

    Runs until the user types 'exit' or an unexpected error occurs.
    """
    try:
        cmd_help()
        while True:
            msg = input('Введите команду: \n')
            msg = msg.strip()
            # Split the command line into whitespace-separated tokens.
            msg = re.split('\\s+', msg)
            if msg[0] == 'exit':
                break
            elif msg[0] == 'help':
                cmd_help()
                continue
            elif msg[0] == 'm':
                # 'm <text...>' -- broadcast to the common chat.
                if len(msg) < 2:
                    print('Неверное количество аргументов команды.'
                          'Введите "help" для вывода списка команд')
                    continue
                msg = make_msg_message(client_name, ' '.join(msg[1:]))
            elif msg[0] == 'p':
                # 'p <recipient> <text...>' -- private message.
                if len(msg) < 3:
                    print('Неверное количество аргументов команды.'
                          'Введите "help" для вывода списка команд')
                    continue
                msg = make_msg_message(client_name, ' '.join(msg[2:]), msg[1])
            else:
                print('Команда не распознана. '
                      'Введите "help" для вывода списка команд')
                continue
            # Serialize compactly and push to the server.
            msg = json.dumps(msg, separators=(',', ':'))
            sock.send(msg.encode(ENCODING))
    except Exception as e:
        LOG.debug(f'Ошибка выходного потока {e}')
@log
def user_output(sock, client_name):
    """Receive messages from *sock* and print chat lines to stdout.

    Runs until the socket is closed (empty recv) or an error occurs.
    """
    try:
        while True:
            data = sock.recv(MAX_PACKAGE_LENGTH)
            # An empty read means the server closed the connection.
            if not data:
                break
            try:
                jim_obj = json.loads(data.decode(ENCODING))
            except json.JSONDecodeError:
                LOG.error(f'Brocken jim {data}')
                continue
            if not isinstance(jim_obj, dict):
                LOG.error(f'Data not dict {jim_obj}')
                continue
            # Plain server responses are only logged, not shown to the user.
            if 'response' in jim_obj.keys():
                LOG.debug(f'Получен ответ сервера {jim_obj["response"]}')
                continue
            if 'action' in jim_obj.keys():
                if jim_obj['action'] == 'msg':
                    if 'from' in jim_obj.keys() \
                            and 'message' in jim_obj.keys():
                        # 'to' == '#' marks a common-chat broadcast.
                        if 'to' in jim_obj.keys() \
                                and jim_obj['to'] == '#':
                            print(
                                f'{jim_obj["from"]}> {jim_obj["message"]}'
                            )
                        else:
                            print(
                                f'{jim_obj["from"]} (private)> '
                                f'{jim_obj["message"]}'
                            )
    except Exception as e:
        LOG.debug(f'Ошибка входного потока{e}')
def main():
    """Entry point: connect to the server and run the reader/writer threads."""
    address, port, client_name = parse_args()
    try:
        print('Консольный месседжер. Клиентский модуль.')
        sock = socket(AF_INET, SOCK_STREAM)
        sock.connect((address, port))
        message = make_presence_message(client_name, 'I am here!')
        answer = send_message_take_answer(sock, message)
        # NOTE(review): the presence message was already sent (and answered)
        # by send_message_take_answer above -- this second send looks like an
        # accidental duplicate; confirm against the server protocol.
        message = json.dumps(message, separators=(',', ':'))
        sock.send(message.encode(ENCODING))
        print('Установлено соединение с сервером.')
        LOG.info(
            f'Запущен клиент с парамертами: адрес сервера: {address}, '
            f'порт: {port}, имя пользователя: {client_name}')
        LOG.info(f'Установлено соединение с сервером. Ответ сервера: {answer}')
        print(f'\nПривет {client_name}!\n')
    except Exception as e:
        print('Соединение с сервером не установлено.')
        LOG.error(f'Соединение с сервером не установлено. Ошибка {e}')
    else:
        # Input and output run as daemon threads; the main thread just
        # watches them and exits once either one dies.
        sender = threading.Thread(
            target=user_input, args=(sock, client_name))
        sender.daemon = True
        sender.start()
        receiver = threading.Thread(
            target=user_output, args=(sock, client_name))
        receiver.daemon = True
        receiver.start()
        LOG.debug('Запущены процессы')
        while True:
            time.sleep(10)
            if sender.is_alive() and receiver.is_alive():
                continue
            break
if __name__ == '__main__':
    main()
|
16,623 | 5f794bf2ad428e216548c832bf5030c93827b36d | task_string = 'Hello, Python my name is Bahram' #print the third character of this string
print(task_string[2]) #1 task
print(task_string[-1]) #2task
print(task_string[0:5]) #3 task
print(task_string[:-2]) #4 task
print(task_string[::2]) #5task
print(task_string[-1::2]) #6 task
print(task_string[::-1]) #7task
print(task_string[::-2]) #8task
print(len(task_string)) #9task
|
16,624 | 46cdf2b96388729f001fc0933534740049fe62a9 | import pulp as _pulp
import numpy as _np
'''
Graph description
v0 -> v2
v0 -> v1
v0 -> v3
v1 -> v3
v2 -> v3
v0
/ |\
/ | \
v1 | v2
\ | /
\ |/
v3
All the initial capacities are 1. Max flow is 3
Cost function
Cost of increasing capacity on any edge by factor of x is x.
-----------------------------------
For our purposes, assume we have some transition matrix for this graph
This will be 4x4 matrix, which looks like the following:
[0 1 1 1
0 0 0 1
0 0 0 1
0 0 0 0]
This next part is simply to give an example.
In the future, we assume we're given this matrix
Note that, since initial capacities are all 1, the adjacency/transition matrix encodes this automatically
if there is an edge
transition_matrix is an np.arrays
'''
# Adjacency/capacity matrix for the 4-node example graph described above.
transition_matrix = _np.zeros((4,4))
transition_matrix[0,1] = 1
transition_matrix[0,2] = 1
transition_matrix[0,3] = 1
transition_matrix[1,3] = 1
transition_matrix[2,3] = 1
dimension = transition_matrix.shape[0] # in this case, (4,4) = 4
'''We will simulate trauma by removing the edge from v1 -> v3 by simply saying there are no transitions'''
transition_matrix[1,3] = 0
'''Make the LP problem'''
lp_problem = _pulp.LpProblem("Mincost Brain",_pulp.LpMinimize)
# FIX: the original built the labels with `sequence += str(i)` in a loop,
# which extends the list with the individual *characters* of str(i).
# That happens to work while dimension <= 10 but splits multi-digit labels
# (str(10) -> '1', '0'). Build exactly one label per node instead; the
# result is identical for the current 4-node graph.
sequence = [str(i) for i in range(dimension)]
rows = sequence
columns = sequence
'''
First, lets specify the scaling factor variables.
Lets make it simple and have a scaling factor for each edge
'''
scaling_factors = _pulp.LpVariable.dicts("scaling_factors",(rows,columns),lowBound=1,cat="Continuous")
'''
Next up, lets create the flow variables.
Again, for simplicity, one for each edge (regardless of capacity)
'''
flow_values = _pulp.LpVariable.dicts("flow_values",(rows,columns),lowBound=0,cat="Continuous")
'''
Now its time to add the constraints
First, the objective function
in this case, just use lpsum over scaling_factors which takes in the list of variables
scaling_factors_list = a list of all of the scaling factor variables in this case
e.g.,
lp_problem += scaling_factor[0][0] + scaling_factors[0][1] + .... + scaling_fators[n][n]
'''
'''
Now lets add the Flow constraints
for each flow variable
lp_problem += flow_variable[i][j] <= scaling_factor[i][j] * transition_matrix[i][j]
'''
'''
Finally, lets solve it!
lp_problem.solve()
'''
16,625 | b0e1b793c28f3ee013cb31f4ef037ab04e52ff2c | # -*- coding: utf-8 -*-
import os
import re
import xlwt
from PyQt5.QtWidgets import QWizardPage, QWidgetItem, QSpacerItem, QComboBox
from PyQt5.QtGui import QColor
from PyQt5.QtCore import Qt
class QIWizardPage(QWizardPage):
    """Base wizard page that carries the shared application settings."""
    def __init__(self, settings, parent=None):
        """Store *settings* for subclasses and initialize the QWizardPage."""
        super(QIWizardPage, self).__init__(parent)
        self.settings = settings
def clearLayout(layout):
    """Recursively close widgets and remove every item from *layout*.

    Iterates backwards so that removing an item does not shift the
    indices of the items still to be visited.
    """
    for idx in reversed(range(layout.count())):
        entry = layout.itemAt(idx)
        if isinstance(entry, QWidgetItem):
            entry.widget().close()
        elif isinstance(entry, QSpacerItem):
            # Spacers hold no widgets; removing them below is enough.
            pass
        else:
            # Nested layout: clear its contents first.
            clearLayout(entry.layout())
        layout.removeItem(entry)
class ColorListWidget(QComboBox):
    """Combo box listing the xlwt palette colours, each with a colour swatch."""

    # Raw RGB triples for xlwt's 64-entry legacy palette, indexed by
    # xlwt colour index.
    _xlwt_rgbcolors = [
        (0, 0, 0), (255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255),
        (255, 255, 0),
        (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255), (255, 0, 0),
        (0, 255, 0),
        (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (128, 0, 0),
        (0, 128, 0),
        (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128),
        (192, 192, 192),
        (128, 128, 128), (153, 153, 255), (153, 51, 102), (255, 255, 204),
        (204, 255, 255), (102, 0, 102), (255, 128, 128), (0, 102, 204),
        (204, 204, 255),
        (0, 0, 128), (255, 0, 255), (255, 255, 0), (0, 255, 255), (128, 0, 128),
        (128, 0, 0), (0, 128, 128), (0, 0, 255), (0, 204, 255), (204, 255, 255),
        (204, 255, 204), (255, 255, 153), (153, 204, 255), (255, 153, 204),
        (204, 153, 255), (255, 204, 153), (51, 102, 255), (51, 204, 204),
        (153, 204, 0),
        (255, 204, 0), (255, 153, 0), (255, 102, 0), (102, 102, 153),
        (150, 150, 150),
        (0, 51, 102), (51, 153, 102), (0, 51, 0), (51, 51, 0), (153, 51, 0),
        (153, 51, 102),
        (51, 51, 153), (51, 51, 51)
    ]

    def __init__(self):
        """Populate the combo box with every named xlwt colour."""
        super(ColorListWidget, self).__init__()
        cmap = xlwt.Style.colour_map
        self.xlwt_colornames = []
        self.xlwt_color_index = []
        self.xlwt_rgbcolors = []
        # self._xlwt_colorlabels = []
        for i in list(range(64)):
            # All xlwt colour names that map to palette index i (aliases share
            # one entry, joined with commas).
            cnames = [name for (name, index) in list(cmap.items()) if index == i]
            # self._xlwt_colorlabels.append(cnames[0] if len(cnames)>0 else '')
            if cnames != []:
                self.xlwt_colornames.append(', '.join(cnames))
                self.xlwt_color_index.append(i)
                self.xlwt_rgbcolors.append(self._xlwt_rgbcolors[i])
        for i, xlwtcolor in enumerate(self.xlwt_colornames):
            self.insertItem(i, xlwtcolor)
            # Show the colour itself as the item's decoration swatch.
            self.setItemData(i, QColor(*self.xlwt_rgbcolors[i]),
                             Qt.DecorationRole)

    def get_current_rgb(self):
        """Return the (r, g, b) tuple of the currently selected entry."""
        return self.xlwt_rgbcolors[self.currentIndex()]
#######################################################
# Supplementary functions
def shorten_path(path):
    """Abbreviate *path* to '...' plus its last two components.

    Paths with two or fewer separators are returned whole (still prefixed
    with '...'); the empty string is returned unchanged.
    """
    if path == '':
        return path
    sep = os.path.sep
    cut = 0
    if path.count(sep) > 2:
        # Locate the separator two levels up from the end.
        cut = path.rfind(sep, 0, path.rfind(sep))
    return "...%s" % path[cut:]
def get_property(style, property):
    """Return the value of *property* from a CSS-like 'k:v;k:v' string, or ''.

    Surrounding whitespace inside the value is preserved; only the
    'property:' prefix is stripped from the matching fragment.
    """
    needle = property + ':'
    for fragment in style.split(';'):
        fragment = str(fragment)
        if fragment.strip(' ').startswith(needle):
            return fragment.replace(needle, '')
    return ''
def get_rgb(style_string):
    """Parse 'rgb(r, g, b)' from *style_string* into an (r, g, b) int tuple.

    Raises:
        ValueError: if *style_string* does not match the rgb(...) pattern.
    """
    # Fixed: raw strings — the original non-raw pattern relied on invalid
    # escape sequences (\(, \d), which emit warnings on modern Python.
    rgbregex = re.compile(
        r" *rgb\( {0,2}(?P<r>\d{1,3}), {0,2}(?P<g>\d{1,3}), {0,2}(?P<b>\d{"
        r"1,3})\) *")
    match = rgbregex.match(style_string)
    if match:
        groups = match.groupdict()
        return tuple([int(groups['r']), int(groups['g']), int(groups['b'])])
    else:
        raise ValueError(
            'No rgb identification possible from "%s"' % style_string)
|
16,626 | 6d1875443f6d3907f5193e25ce1b78e88bba6f0c | #!/usr/bin/python
from server.Emolytics import emolytics
from wsgiref.simple_server import make_server
from werkzeug.debug import DebuggedApplication
if __name__ == '__main__':
    # Wrap the WSGI app in Werkzeug's interactive debugger (evalex enabled).
    # NOTE(review): the debugger allows arbitrary code execution in the
    # browser — never expose this on 0.0.0.0 in production.
    application = DebuggedApplication(emolytics, True)
    # Serve on all interfaces, port 8051, until interrupted.
    httpd = make_server('0.0.0.0', 8051, application)
    httpd.serve_forever()
|
16,627 | 3010d6f049611dc04d217bf37e0e6b14862ac71e | from unittest import TestCase
import games.cards.Class_Player
from games.cards.Class_Card_Game import Card_Game
from games.cards.Class_Player import Player
from games.cards.Class_DeckOfCard import DeckOfCards
class TestCard_Game(TestCase):
    """Tests for Card_Game; shared fixtures are built in setUp."""

    def setUp(self):
        # Fixed: unittest calls setUp (capital U); the original 'setup'
        # was never invoked, so self.game1/player1/player2 were undefined.
        self.game1 = Card_Game("ori", "snir", 15)
        self.player1 = Player("ori", DeckOfCards, 15)
        self.player2 = Player("snir", DeckOfCards, 15)

    def test_new_game(self):
        pass

    def test_get_winner(self):
        # NOTE(review): Player is given the DeckOfCards class itself, not an
        # instance — confirm the Player constructor expects a class.
        self.player1 = Player("ori", DeckOfCards, 15)
        self.player2 = Player("snir", DeckOfCards, 15)
        self.assertTrue(len(self.player1.hand) == len(self.player2.hand))
|
16,628 | d7d6e0bc6f9019a3c1c14525a9691e0122f7e809 | rows = int(input("Enter the no. of rows in A"))
# Print a hollow letter-'A' star pattern, one row per iteration.
for i in range(rows):
    # Leading spaces centre the figure.
    space = rows - i - 1
    while space>0:
        space = space -1
        print(" ",end='')
    for j in range(i):
        if j == 0 or j == i-1:
            # The two slanted edges of the 'A'.
            print("*",end=' ')
        elif i==rows/2:
            # The horizontal crossbar at half height. NOTE(review): i is an
            # int and rows/2 a float, so this branch only fires for even rows.
            print("*",end=' ')
        else:
            print(" ",end=' ')
    print()
|
16,629 | 4c8832e53699ab950cf3cf8707e6872757048f3f | import sqlite3
from track import Track
from playlist import Playlist
def create_database():
    """(Re)create FinalProjectDatabase.db with empty Spotify_Data and Playlists tables."""
    sqliteConnection = None  # so the finally block is safe if connect() fails
    try:
        sqliteConnection = sqlite3.connect('FinalProjectDatabase.db')
        cursor = sqliteConnection.cursor()
        # Drop and recreate so repeated runs start from a clean schema.
        cursor.execute("DROP TABLE IF EXISTS Spotify_Data")
        create_spotify_table_sql = '''
        CREATE TABLE "Spotify_Data" (
            "SongName" TEXT,
            "Artist" TEXT,
            "SentimentAnalysisScore" INTEGER,
            "Playlist_ID" TEXT
        );
        '''
        cursor.execute(create_spotify_table_sql)
        cursor.execute("DROP TABLE IF EXISTS Playlists")
        create_playlist_table_sql = '''
        CREATE TABLE "Playlists" (
            "PlaylistName" TEXT,
            "Playlist_ID" TEXT,
            "Playlist_URL" TEXT
        );'''
        cursor.execute(create_playlist_table_sql)
        sqliteConnection.commit()
        print("Record Updated successfully ")
        cursor.close()
    except sqlite3.Error as error:
        print("Failed to update sqlite table", error)
    finally:
        # Fixed: previously a NameError if sqlite3.connect() itself raised,
        # because sqliteConnection was never assigned.
        if sqliteConnection:
            sqliteConnection.close()
            print("The SQLite connection is closed")
def updateSqliteSpotifyTable(tracks, playlist):
    """Insert each track's (name, artist, sentiment, playlist id) into Spotify_Data.

    Args:
        tracks: iterable of objects with .name, .artist and .sentiment_score.
        playlist: object with a .playlist_id attribute.
    """
    sqliteConnection = None  # so the finally block is safe if connect() fails
    try:
        sqliteConnection = sqlite3.connect('FinalProjectDatabase.db')
        cursor = sqliteConnection.cursor()
        print("Connected to SQLite")
        playlist_id = playlist.playlist_id
        # Parameterised insert — values are bound, never formatted into SQL.
        # Hoisted out of the loop: the statement text is loop-invariant.
        sql_song_query = """INSERT INTO Spotify_Data (SongName, Artist, SentimentAnalysisScore, Playlist_ID)
                            VALUES (?, ?, ?, ?);"""
        for track in tracks:
            cursor.execute(sql_song_query,
                           [track.name, track.artist, track.sentiment_score, playlist_id])
        sqliteConnection.commit()
        print("Record Updated successfully ")
        cursor.close()
    except sqlite3.Error as error:
        print("Failed to update sqlite table", error)
    finally:
        # Fixed: previously a NameError if sqlite3.connect() itself raised.
        if sqliteConnection:
            sqliteConnection.close()
            print("The SQLite connection is closed")
def updateSqlitePlaylistTable(playlist):
    """Insert one playlist row (name, id, url) into the Playlists table.

    Args:
        playlist: object with .name, .playlist_id and .playlist_url attributes.
    """
    sqliteConnection = None  # so the finally block is safe if connect() fails
    try:
        sqliteConnection = sqlite3.connect('FinalProjectDatabase.db')
        cursor = sqliteConnection.cursor()
        print("Connected to SQLite")
        # Parameterised insert — values are bound, never formatted into SQL.
        sql_playlist_query = """INSERT INTO Playlists (PlaylistName, Playlist_ID, Playlist_URL)
                                VALUES (?,?,?);"""
        cursor.execute(sql_playlist_query,
                       [playlist.name, playlist.playlist_id, playlist.playlist_url])
        sqliteConnection.commit()
        print("Record Updated successfully ")
        cursor.close()
    except sqlite3.Error as error:
        print("Failed to update sqlite table", error)
    finally:
        # Fixed: previously a NameError if sqlite3.connect() itself raised.
        if sqliteConnection:
            sqliteConnection.close()
            print("The SQLite connection is closed")
#playlist id: each song belongs to a playlist
#tweet table not needed
#create playlist table (each one has a unique id auto incrementing) add songs to the song table. foreign key = playlist id
#
#songs have a foreign key for playlist table, 1st add a row for playlist table (null id)
#use select statment to find row and primary key associated with song
# SELECT song id from playlist where == to playlist url
#playlist id is the foreign key to the song table
#select_country_id_sql = '''
# SELECT Id FROM Countries
# WHERE playlist = ?
#fetch 1
#cur.execute(select_country_id_sql, eng_name
#Result = cur.fetchone()
#for song in song list curr.execute
# Add result to playlist table
# find id of the playlist (last item)
# for each song add to the song table
# one column is playlist id
#
#query: insert into playlist
#query: select playlist id (to have a value to insert into songs table identifying the playlist)
#query: insert into songs
#homework 5 syntax
#select query where url is == to the playlist url
|
16,630 | 1ba4740aea169759dd854db348dff1fc9063cd12 | # Generated by Django 2.2.6 on 2020-01-16 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Organisation model to the 'tribal' app.

    dependencies = [
        ('tribal', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Organisation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=50)),
                # NOTE(review): storing passwords in a plain CharField is
                # unsafe — confirm hashing happens before save.
                ('password', models.CharField(max_length=25)),
                ('name', models.CharField(max_length=80)),
                # NOTE(review): max_length is not a valid IntegerField option
                # (Django's system checks flag it); a CharField is the usual
                # choice for phone numbers — needs a follow-up migration.
                ('mobile', models.IntegerField(max_length=13)),
                ('organisation_name', models.CharField(max_length=100)),
            ],
        ),
    ]
|
16,631 | 37e8d7694c8385c1f3c5502b279755a31c5e78c2 | """
The source code in this file is adapted by reusing
some part of the source code from the official
RabbitMQ documentation.
"""
from typing import Any, Optional, Union
from pika import (
BasicProperties,
BlockingConnection,
ConnectionParameters,
PlainCredentials
)
from pika.adapters.blocking_connection import BlockingChannel
from .constants import (
DEFAULT_EXCHANGER_NAME,
DEFAULT_EXCHANGER_TYPE,
DEFAULT_QUEUE,
DEFAULT_ROUTER,
RABBIT_MQ_HOST as HOST,
RABBIT_MQ_PORT as PORT,
RABBIT_MQ_VHOST as VHOST,
PREFETCH_COUNT
)
class RMQConnector:
    """Thin wrapper around pika's blocking connection/channel API.

    The instance tracks one connection + channel pair; the static methods
    are guarded pass-throughs that silently no-op unless the given
    connection/channel exists and is open.
    """

    def __init__(self, logger, host: str = HOST, port: int = PORT, vhost: str = VHOST) -> None:
        """Remember broker coordinates; no connection is opened here."""
        self._logger = logger
        self._host = host
        self._port = port
        self._vhost = vhost
        # According to the documentation, Pika blocking
        # connections are not thread-safe!
        self._connection = None
        self._channel = None
        # Should try reconnecting again
        self._try_reconnecting = False
        # If the module has been stopped with a
        # keyboard interruption, i.e., CTRL + C
        self._gracefully_stopped = False

    @staticmethod
    def declare_and_bind_defaults(connection: BlockingConnection, channel: BlockingChannel) -> None:
        """Declare the default exchange and queue and bind them via the default router."""
        if connection and connection.is_open:
            if channel and channel.is_open:
                # Declare the default exchange agent
                RMQConnector.exchange_declare(
                    channel=channel,
                    exchange_name=DEFAULT_EXCHANGER_NAME,
                    exchange_type=DEFAULT_EXCHANGER_TYPE,
                )
                # Declare the default queue
                RMQConnector.queue_declare(
                    channel,
                    queue_name=DEFAULT_QUEUE
                )
                # Bind the default queue to the default exchange
                RMQConnector.queue_bind(
                    channel,
                    queue_name=DEFAULT_QUEUE,
                    exchange_name=DEFAULT_EXCHANGER_NAME,
                    routing_key=DEFAULT_ROUTER
                )

    # Connection related methods
    @staticmethod
    def open_blocking_connection(
        credentials: PlainCredentials,
        host: str = HOST,
        port: int = PORT,
        vhost: str = VHOST
    ) -> BlockingConnection:
        """Open and return a new blocking connection to the broker."""
        blocking_connection = BlockingConnection(
            parameters=ConnectionParameters(
                host=host,
                port=port,
                virtual_host=vhost,
                credentials=credentials,
                # TODO: The heartbeat should not be disabled (0)!
                heartbeat=0
            ),
        )
        return blocking_connection

    @staticmethod
    def open_blocking_channel(connection: BlockingConnection) -> Union[BlockingChannel, None]:
        """Open a channel on *connection*, or return None if it is not open."""
        if connection and connection.is_open:
            channel = connection.channel()
            return channel
        return None

    @staticmethod
    def exchange_bind(
        channel: BlockingChannel,
        destination_exchange: str,
        source_exchange: str,
        routing_key: str,
        arguments: Optional[Any] = None
    ) -> None:
        """Bind *destination_exchange* to *source_exchange* under *routing_key*."""
        if arguments is None:
            arguments = {}
        if channel and channel.is_open:
            channel.exchange_bind(
                destination=destination_exchange,
                source=source_exchange,
                routing_key=routing_key,
                arguments=arguments
            )

    @staticmethod
    def exchange_declare(
        channel: BlockingChannel,
        exchange_name: str,
        exchange_type: str,
        passive: bool = False,
        durable: bool = False,
        auto_delete: bool = False,
        internal: bool = False,
        arguments: Optional[Any] = None
    ) -> Any:
        """Declare an exchange and return the broker's response frame.

        Returns None (implicitly) when the channel is absent or closed.
        Annotation fixed: this method does return a value, not None-only.
        """
        if arguments is None:
            arguments = {}
        if channel and channel.is_open:
            exchange = channel.exchange_declare(
                exchange=exchange_name,
                exchange_type=exchange_type,
                # Only check to see if the exchange exists
                passive=passive,
                # Survive a reboot of RabbitMQ
                durable=durable,
                # Remove when no more queues are bound to it
                auto_delete=auto_delete,
                # Can only be published to by other exchanges
                internal=internal,
                # Custom key/value pair arguments for the exchange
                arguments=arguments
            )
            return exchange

    @staticmethod
    def exchange_delete(channel: BlockingChannel, exchange_name: str,
                        if_unused: bool = False) -> None:
        """Delete an exchange (optionally only when unused)."""
        # Deletes queue only if unused
        if channel and channel.is_open:
            channel.exchange_delete(exchange=exchange_name, if_unused=if_unused)

    @staticmethod
    def exchange_unbind(
        channel: BlockingChannel,
        destination_exchange: str,
        source_exchange: str,
        routing_key: str,
        arguments: Optional[Any] = None
    ) -> None:
        """Remove a previously created exchange-to-exchange binding."""
        if arguments is None:
            arguments = {}
        if channel and channel.is_open:
            channel.exchange_unbind(
                destination=destination_exchange,
                source=source_exchange,
                routing_key=routing_key,
                arguments=arguments
            )

    @staticmethod
    def queue_bind(channel: BlockingChannel, queue_name: str, exchange_name: str, routing_key: str,
                   arguments: Optional[Any] = None) -> None:
        """Bind *queue_name* to *exchange_name* under *routing_key*."""
        if arguments is None:
            arguments = {}
        if channel and channel.is_open:
            channel.queue_bind(queue=queue_name, exchange=exchange_name, routing_key=routing_key, arguments=arguments)

    @staticmethod
    def queue_declare(
        channel: BlockingChannel,
        queue_name: str,
        passive: bool = False,
        durable: bool = False,
        exclusive: bool = False,
        auto_delete: bool = False,
        arguments: Optional[Any] = None
    ) -> Any:
        """Declare a queue and return the broker's response frame.

        Returns None (implicitly) when the channel is absent or closed.
        Annotation fixed: this method does return a value, not None-only.
        """
        if arguments is None:
            arguments = {}
        if channel and channel.is_open:
            queue = channel.queue_declare(
                queue=queue_name,
                # Only check to see if the queue exists and
                # raise ChannelClosed exception if it does not
                passive=passive,
                # Survive reboots of the server
                durable=durable,
                # Only allow access by the current connection
                exclusive=exclusive,
                # Delete after consumer cancels or disconnects
                auto_delete=auto_delete,
                # Custom key/value pair arguments for the queue
                arguments=arguments
            )
            return queue

    @staticmethod
    def queue_delete(channel: BlockingChannel, queue_name: str, if_unused: bool = False,
                     if_empty: bool = False) -> None:
        """Delete a queue, optionally only when unused and/or empty."""
        if channel and channel.is_open:
            channel.queue_delete(
                queue=queue_name,
                # Only delete if the queue is unused
                if_unused=if_unused,
                # Only delete if the queue is empty
                if_empty=if_empty
            )

    @staticmethod
    def queue_purge(channel: BlockingChannel, queue_name: str) -> None:
        """Drop all messages currently held by *queue_name*."""
        if channel and channel.is_open:
            channel.queue_purge(queue=queue_name)

    @staticmethod
    def queue_unbind(channel: BlockingChannel, queue_name: str, exchange_name: str,
                     routing_key: str, arguments: Optional[Any] = None) -> None:
        """Remove a queue-to-exchange binding."""
        if arguments is None:
            arguments = {}
        if channel and channel.is_open:
            channel.queue_unbind(
                queue=queue_name,
                exchange=exchange_name,
                routing_key=routing_key,
                arguments=arguments
            )

    @staticmethod
    def set_qos(channel: BlockingChannel, prefetch_size: int = 0,
                prefetch_count: int = PREFETCH_COUNT, global_qos: bool = False) -> None:
        """Configure consumer prefetch limits on *channel*."""
        if channel and channel.is_open:
            channel.basic_qos(
                # No specific limit if set to 0
                prefetch_size=prefetch_size,
                prefetch_count=prefetch_count,
                # Should the qos apply to all channels of the connection
                global_qos=global_qos
            )

    @staticmethod
    def confirm_delivery(channel: BlockingChannel) -> None:
        """Put the channel into publisher-confirm mode."""
        if channel and channel.is_open:
            channel.confirm_delivery()

    @staticmethod
    def basic_publish(channel: BlockingChannel, exchange_name: str, routing_key: str,
                      message_body: bytes, properties: BasicProperties) -> None:
        """Publish *message_body* to *exchange_name* under *routing_key*."""
        if channel and channel.is_open:
            channel.basic_publish(
                exchange=exchange_name,
                routing_key=routing_key,
                body=message_body,
                properties=properties
            )
|
16,632 | 92f1e0a66fde2bd81a9cc5aed3f26bbd61bffa6a | from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from quest.models import Action, Record, Quest
# Register your models here.
@admin.register(Action)
class ActionAdmin(ImportExportModelAdmin):#admin.ModelAdmin):
    # Admin for Action with CSV/XLSX import-export; filterable by category.
    list_display = ('id', 'category', 'name', 'xp', )
    list_display_links = ('id', 'name',)
    list_filter = ('category',)
@admin.register(Quest)
class QuestAdmin(ImportExportModelAdmin):#admin.ModelAdmin):
    # Admin for Quest; mirrors ActionAdmin's layout.
    list_display = ('id', 'category', 'name', 'xp',)
    list_display_links = ('id', 'name',)
    list_filter = ('category',)
@admin.register(Record)
class RecordAdmin(ImportExportModelAdmin):#admin.ModelAdmin):
    # Admin for Record; 'checked' is editable directly from the change list.
    list_display = ('id', 'user', 'action', 'memo', 'repeat', 'xp', 'date', 'time', 'checked',)
    list_display_links = ('id', 'action',)
    list_filter = ('user', 'action', 'date', )
    list_editable = ['checked']
|
16,633 | 769e223662b2564b1a5562e0caeb4b2ad0ceb897 | from django.conf.urls import url
from django.views.generic.base import TemplateView
from rest_framework.compat import pygments_css
from rest_framework.exceptions import NotFound
from rest_framework.views import APIView
from tg_apicore.schemas import generate_api_docs
class APIDocumentationView(TemplateView):
    """ API documentation view

    Subclass it, set title and description attributes and implement the three get_*() methods.
    """
    template_name = 'tg_apicore/docs/index.html'

    # Pygments code style to use. Go to http://pygments.org/demo/ , select an example an
    # you'll have a dropdown of style options on the right.
    code_style = 'emacs'

    title = "API"
    description = ""

    def generate_docs(self):
        """Build the schema/docs structure from the subclass-provided hooks."""
        return generate_api_docs(
            title=self.title, description=self.get_description(),
            site_url=self.get_site_url(), base_path=self.get_base_path(), patterns=self.urlpatterns(),
        )

    def get_context_data(self, **kwargs):
        """Add the generated docs and the pygments CSS to the template context."""
        context = super().get_context_data(**kwargs)
        docs = self.generate_docs()
        context.update({
            'api': docs,
            'code_style': pygments_css(self.code_style),
        })
        return context

    def get_description(self) -> str:
        """Return the API description text (override or set ``description``)."""
        return self.description

    def get_site_url(self) -> str:
        """ Should return your site's url without path, e.g. https://example.com/ """
        raise NotImplementedError()

    def get_base_path(self) -> str:
        """ Should return your API's base path (path prefix), e.g. /api/v1/ """
        raise NotImplementedError()

    def urlpatterns(self) -> list:
        """ Should return urlpatterns of your API """
        raise NotImplementedError()
class PageNotFoundView(APIView):
    """ 404 view for API urls.

    Django's standard 404 page returns HTML. We want everything under API url prefix to return 404 as JSON.
    """

    # No auth needed just to say "not found".
    authentication_classes = ()
    permission_classes = ()

    @classmethod
    def urlpatterns(cls):
        """Return catch-all patterns; mount these last under the API prefix."""
        return [
            # This one is for when the version is valid
            url(r'^(?P<version>(\d{4}-\d{2}-\d{2}))/', cls.as_view()),
            # This one is catch-all for everything else, including invalid versions
            url(r'^', cls.as_view()),
        ]

    def initial(self, request, *args, **kwargs):
        """Always raise NotFound so DRF renders a JSON 404."""
        # Overriding initial() seems to be like the easiest way that still keeps most of DRF's logic ala renderers.
        super().initial(request, *args, **kwargs)
        raise NotFound()
|
16,634 | 8662890cd5c0dcdcbb92748694bb1bbad78baf58 | import numpy as np
from utils import split_treatment_control
def ite_best(train_df, test_df, features, outcome, treatment):
    """
    Best possible ITE learnt model (i.e. without exploiting the observable interference).
    Only available for synthetic datasets.
    """
    # Split training rows into treated / control groups.
    train_t_df, train_c_df = split_treatment_control(train_df, treatment)
    # Mean outcome per feature combination within each arm.
    by_feat_t = train_t_df.groupby(features)[outcome].mean()
    by_feat_c = train_c_df.groupby(features)[outcome].mean()
    # Per-cell treatment effect: E[Y|T=1,X] - E[Y|T=0,X].
    by_feat = by_feat_t - by_feat_c
    # Look up each test row's effect by joining on its feature values.
    return test_df[features].join(by_feat, on=features)[outcome].values
def ite_2m(train_df, test_df, features, outcome, treatment, clf_t, clf_c):
    """
    Individual Treatment Effects with Two Models (2T)
    """
    np.random.seed(0)
    treated, control = split_treatment_control(train_df, treatment)
    # Fit one outcome classifier per arm.
    model_t = clf_t.fit(treated[features], treated[outcome])
    model_c = clf_c.fit(control[features], control[outcome])
    test_features = test_df[features]
    # ITE estimate: P(Y=1 | treated) - P(Y=1 | control).
    p_treated = model_t.predict_proba(test_features)[:, 1]
    p_control = model_c.predict_proba(test_features)[:, 1]
    return p_treated - p_control
def mite_2m(train_df, test_df, features, outcome, treatment, exposure, clf_t, clf_c, clf_er):
    """
    Post-Mediation Individual Treatment Effects with Two Models (2T)
    """
    np.random.seed(0)
    exposed, unexposed = split_treatment_control(train_df, exposure)
    treated, _ = split_treatment_control(train_df, treatment)
    # Outcome models conditioned on exposure; exposure-rate model on treated units.
    model_t = clf_t.fit(exposed[features], exposed[outcome])
    model_c = clf_c.fit(unexposed[features], unexposed[outcome])
    model_er = clf_er.fit(treated[features], treated[exposure])
    test_features = test_df[features]
    p_exposed = model_t.predict_proba(test_features)[:, 1]
    p_unexposed = model_c.predict_proba(test_features)[:, 1]
    exposure_rate = model_er.predict_proba(test_features)[:, 1]
    # Scale the exposure-conditioned effect by the predicted exposure rate.
    return exposure_rate * (p_exposed - p_unexposed)
def _make_sdr(train_df, features, outcome, treatment):
    """Build the Shared Data Representation design matrix and target vector.

    Treated rows become [X, X, 0] and control rows [X, 0, X], so a single
    model can learn arm-specific effects alongside shared structure.
    """
    train_t_df, train_c_df = split_treatment_control(train_df, treatment)
    train_t_f_df = train_t_df[features]
    train_c_f_df = train_c_df[features]
    data_t = np.hstack((train_t_f_df, train_t_f_df, np.zeros(train_t_f_df.shape)))
    data_c = np.hstack((train_c_f_df, np.zeros(train_c_f_df.shape), train_c_f_df))
    data_full = np.concatenate((data_t, data_c))
    data_full_y = np.concatenate((train_t_df[outcome], train_c_df[outcome]))
    return data_full, data_full_y
def ite_sdr(train_df, test_df, features, outcome, treatment, clf):
    """
    Individual Treatment Effects with Shared Data Representation (SDR)
    """
    np.random.seed(0)
    sdr_features, sdr_targets = _make_sdr(train_df, features, outcome, treatment)
    model = clf.fit(sdr_features, sdr_targets)
    test_features = test_df[features]
    zeros = np.zeros(test_features.shape)
    # Score each test row once as 'treated' ([X, X, 0]) and once as 'control' ([X, 0, X]).
    p_treated = model.predict_proba(np.hstack((test_features, test_features, zeros)))[:, 1]
    p_control = model.predict_proba(np.hstack((test_features, zeros, test_features)))[:, 1]
    return p_treated - p_control
def mite_sdr(train_df, test_df, features, outcome, treatment, exposure, clf, clf_er):
    """
    Post-Mediation Individual Treatment Effects with Shared Data Representation (SDR)
    """
    np.random.seed(0)
    # SDR design built on exposure (not treatment) as the split variable.
    train_f, train_y = _make_sdr(train_df, features, outcome, exposure)
    train_t_df, _ = split_treatment_control(train_df, treatment)
    clf_trained = clf.fit(train_f, train_y)
    # Exposure-rate model fitted on the treated units only.
    clf_er_trained = clf_er.fit(train_t_df[features], train_t_df[exposure])
    test_f_df = test_df[features]
    # Score as exposed ([X, X, 0]) and not exposed ([X, 0, X]).
    y_pred_t = clf_trained.predict_proba(np.hstack((test_f_df, test_f_df, np.zeros(test_f_df.shape))))[:, 1]
    y_pred_c = clf_trained.predict_proba(np.hstack((test_f_df, np.zeros(test_f_df.shape), test_f_df)))[:, 1]
    return clf_er_trained.predict_proba(test_f_df)[:, 1] * (y_pred_t - y_pred_c)
|
16,635 | 5e2ffa201dcecdcd7eb935c5206a0c25b05336d4 | # addition and count
def add(n):
    """Print the sum of the integers 1..n as 'count= <sum>'."""
    total = sum(range(1, n + 1))
    print("count=", total)
# Read the target number from the user and report the 1..n sum.
num = int(input("enter the number:"))
add (num)
|
16,636 | 3cda1ef15d044168edbc0efe932260de7b75762a |
import sys
import os
import time
import tkinter
# Entry banner: "Student Information Management System V1.0".
print('学生信息管理系统 V1.0')
|
16,637 | 29e8ebebcd0df37c109e5d0281880a24c7119a8f | import requests
from pprint import pprint
from data import db_session
from data.films import Films
"""Api кинопоиска для парсинга фильмов"""
# Open (or create) the project database and a session for inserts.
db_session.global_init("db/blogs.sqlite")
db_sess = db_session.create_session()
# data = {
#     'actors': data_film['actors'],
#     'age': data_film['age'],
#     'collapse': data_film['collapse'],
#     'countries': data_film['countries'],
#     'description': data_film['description'],
#     'directors': data_film['directors'],
#     'frames': data_film['frames'],
#     'genres': data_film['genres'],
#     'id': data_film['id'],
#     'id_kinopoisk': data_film['id_kinopoisk'],
#     'poster': data_film['poster'],
#     'premiere_world': data_film['premiere_world'],
#     'rating_imdb': data_film['rating_imdb'],
#     'rating_kinopoisk': data_film['rating_kinopoisk'],
#     'title': data_film['title'],
#     'year': data_film['year'],
#     'time': data_film['collapse']['duration'],
#     'producers': data_film['producers']
#
# }
# Kinopoisk ids of the films to import.
#  "1249198", "1109271", "8062", "1262160", "1239328", "1236795",
film = ["1249198", "1109271", "8062", "1262160", "1239328", "1236795", "1445243", "1309596", "688609"]
for i in film:
    # NOTE(review): API token is hard-coded in the URL — move it to config.
    response = requests.get(f"https://api.kinopoisk.cloud/movies/{i}/token/0d12aad940f6c3a4cdd54cfce1d9e1b9")
    # pprint(response.json())
    data_film = response.json()
    print(data_film)
    # print(data_film['age'])
    # NOTE(review): 'film' rebinds the loop's source-list name; harmless here
    # because the iterator already captured the list, but confusing to read.
    film = Films()
    # NOTE(review): if these API fields are plain strings rather than lists,
    # ", ".join() interleaves every CHARACTER with ", " — verify the schema.
    film.link_img = ", ".join(data_film['poster'])
    film.title = ", ".join(data_film['title'])
    film.actors = ", ".join(data_film['actors'])
    film.age = ", ".join(data_film['age'])
    film.genres = ", ".join(data_film['genres'])
    film.id_kinopoisk = data_film['id_kinopoisk']
    film.premiere_world = ", ".join(data_film['premiere_world'])
    film.rating_imdb = data_film['rating_imdb']
    film.rating_kinopoisk = data_film['rating_kinopoisk']
    film.time = ", ".join(data_film['collapse']['duration'])
    db_sess.add(film)
    db_sess.commit()
|
16,638 | e096fc0fe38b1c9b82ebabb31bccdc7ae558cc8c | import BattleReplay
import BigWorld
import gui.Scaleform.daapi.view.battle.shared.markers2d.plugins as plug
from Avatar import PlayerAvatar
from AvatarInputHandler import AvatarInputHandler
from Vehicle import Vehicle
from aih_constants import CTRL_MODE_NAME
from constants import AIMING_MODE
from vehicle_systems.tankStructure import TankPartIndexes
import xvm_battle.python.battle as battle
import xvm_main.python.config as config
from xfw.events import registerEvent
from xfw_actionscript.python import *
from xvm_main.python.logger import *
# Cached info about the currently auto-aim-locked vehicle.
targetName = None
targetVehicle = None
targetHealth = None
targetID = None
playerVehicleID = None
# Optional 3D marker instance (Arrow or Cylinder), built on battle start.
marker = None
visible = True
# Control modes in which the auto-aim info is displayed.
DISPLAY_IN_MODES = [CTRL_MODE_NAME.ARCADE,
                    CTRL_MODE_NAME.ARTY,
                    CTRL_MODE_NAME.DUAL_GUN,
                    CTRL_MODE_NAME.SNIPER,
                    CTRL_MODE_NAME.STRATEGIC]
class Arrow(object):
    """3D arrow marker attached to the auto-aim target vehicle."""

    def __init__(self):
        """Try to load the arrow model; fall back to None if loading fails."""
        try:
            self.__model = BigWorld.Model('../mods/shared_resources/xvm/res/markers/Arrow/arrow.model')
        except Exception:
            # Fixed: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt.
            self.__model = None
        self.__motor = None

    def hideMarker(self):
        """Detach and remove the arrow model if it is currently shown."""
        global marker
        if self.__model in BigWorld.models():
            self.__model.delMotor(self.__motor)
            BigWorld.delModel(self.__model)

    def showMarker(self, target):
        """Attach the arrow to *target*'s matrix; with target=None just hide."""
        if self.__model is not None:
            self.hideMarker()
            if target is not None:
                self.__motor = BigWorld.Servo(target.matrix)
                self.__model.addMotor(self.__motor)
                BigWorld.addModel(self.__model)
class Cylinder(object):
    """Size-matched red cylinder marker drawn around the auto-aim target."""

    def __init__(self):
        """Load the three cylinder sizes; a failed load disables that size only."""
        try:
            self.__modelBig = BigWorld.Model(
                '../mods/shared_resources/xvm/res/markers/cylinder/cylinder_red_big.model')
        except Exception:
            # Fixed: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt (same fix in the two below).
            self.__modelBig = None
        try:
            self.__modelMedium = BigWorld.Model(
                '../mods/shared_resources/xvm/res/markers/cylinder/cylinder_red_medium.model')
        except Exception:
            self.__modelMedium = None
        try:
            self.__modelSmall = BigWorld.Model(
                '../mods/shared_resources/xvm/res/markers/cylinder/cylinder_red_small.model')
        except Exception:
            self.__modelSmall = None
        self.__motor = None

    def hideMarker(self):
        """Detach and remove whichever cylinder model is currently shown."""
        if self.__modelSmall in BigWorld.models():
            self.__modelSmall.delMotor(self.__motor)
            BigWorld.delModel(self.__modelSmall)
        if self.__modelMedium in BigWorld.models():
            self.__modelMedium.delMotor(self.__motor)
            BigWorld.delModel(self.__modelMedium)
        if self.__modelBig in BigWorld.models():
            self.__modelBig.delMotor(self.__motor)
            BigWorld.delModel(self.__modelBig)

    def showMarker(self, target):
        """Show the small/medium/big cylinder around *target* based on hull length."""
        if target is None:
            return
        vehicleLength = 0.0
        self.hideMarker()
        self.__motor = BigWorld.Servo(target.matrix)
        if target.appearance.collisions is not None:
            hullBB = target.appearance.collisions.getBoundingBox(
                TankPartIndexes.HULL)
            # Hull length along the Z axis of the bounding box.
            vehicleLength = abs(hullBB[1][2] - hullBB[0][2])
        if vehicleLength < 4.0:
            if self.__modelSmall is not None:
                self.__modelSmall.addMotor(self.__motor)
                BigWorld.addModel(self.__modelSmall)
        elif vehicleLength < 6.0:
            if self.__modelMedium is not None:
                self.__modelMedium.addMotor(self.__motor)
                BigWorld.addModel(self.__modelMedium)
        else:
            if self.__modelBig is not None:
                self.__modelBig.addMotor(self.__motor)
                BigWorld.addModel(self.__modelBig)
def resetTarget():
    """Clear all cached target info and hide the 3D marker (if any)."""
    global targetName, targetVehicle, targetHealth, targetID, marker
    targetName = None
    targetVehicle = None
    targetHealth = None
    targetID = None
    if marker is not None:
        marker.hideMarker()
def setTarget(vehicleID):
    """Cache name/vehicle/health/id of the locked vehicle and show the marker."""
    global targetName, targetVehicle, targetHealth, targetID, marker
    target = BigWorld.entity(vehicleID)
    targetVehicle = target.typeDescriptor.type.shortUserString
    targetName = target.publicInfo.name
    targetHealth = target.health
    targetID = target.id
    if marker is not None:
        marker.showMarker(target)
@registerEvent(AvatarInputHandler, 'onControlModeChanged')
def AvatarInputHandler_onControlModeChanged(self, eMode, **args):
    # Toggle marker visibility when the camera/control mode changes.
    global visible
    if config.get('sight/enabled', True) and battle.isBattleTypeSupported:
        newVisible = eMode in DISPLAY_IN_MODES
        if newVisible != visible:
            visible = newVisible
            as_event('ON_AUTO_AIM')
@registerEvent(PlayerAvatar, 'onEnterWorld')
def Vehicle_onEnterWorld(self, prereqs):
    # Reset per-battle state and build the configured 3D marker (if enabled).
    global targetName, targetVehicle, targetHealth, playerVehicleID, targetID, marker, visible
    if config.get('sight/enabled', True) and battle.isBattleTypeSupported:
        marker = None
        if config.get('sight/autoAim/enabled', False):
            markerType = config.get('sight/autoAim/markerType', '')
            if markerType.strip().lower() == 'cylinder':
                marker = Cylinder()
            elif markerType.strip().lower() == 'arrow':
                marker = Arrow()
        targetName = None
        targetVehicle = None
        targetHealth = None
        targetID = None
        visible = True
        playerVehicleID = self.playerVehicleID
@registerEvent(Vehicle, 'onHealthChanged')
def onHealthChanged(self, newHealth, oldHealth, attackerID, attackReasonID):
    # Keep the cached HP of the locked target current; drop it when destroyed.
    global targetHealth
    if config.get('sight/enabled', True) and battle.isBattleTypeSupported:
        if targetID is not None and targetID == self.id:
            targetHealth = self.health
            if not self.isAlive():
                resetTarget()
            as_event('ON_AUTO_AIM')
@registerEvent(plug.VehicleMarkerTargetPlugin, '_VehicleMarkerTargetPlugin__addAutoAimMarker')
def VehicleMarkerTargetPlugin__addAutoAimMarker(self, event):
    # Live battle: an auto-aim marker was added -> lock on that vehicle.
    if self._vehicleID is not None:
        setTarget(self._vehicleID)
        as_event('ON_AUTO_AIM')
@registerEvent(plug.VehicleMarkerTargetPlugin, '_addMarker')
def _addMarker(self, vehicleID):
    # Replay playback path: mirror the live lock-on behaviour.
    if BattleReplay.g_replayCtrl.isPlaying and vehicleID is not None:
        setTarget(vehicleID)
        as_event('ON_AUTO_AIM')
@registerEvent(AvatarInputHandler, 'setAimingMode')
def _setAimingMode(self, enable, mode):
    # Leaving target-lock mode releases the current target.
    if mode == AIMING_MODE.TARGET_LOCK and not enable:
        resetTarget()
        as_event('ON_AUTO_AIM')
#
# @registerEvent(PlayerAvatar, 'onLockTarget')
# def onLockTarget(self, state, playVoiceNotifications):
# global targetName, targetVehicle, targetHealth, targetID, marker
# if config.get('sight/enabled', True) and battle.isBattleTypeSupported:
# log('PlayerAvatar state = %s' % state)
# # target = BigWorld.target()
# target = self.autoAimVehicle
# if target is not None:
# log('onLockTarget state = %s target = %s' % (state, target.id))
# else:
# log('onLockTarget state = %s target = %s' % (state, None))
# if (state == 1) and target is not None:
# targetVehicle = target.typeDescriptor.type.shortUserString
# targetName = target.publicInfo.name
# targetHealth = target.health
# targetID = target.id
# if marker is not None:
# marker.showMarker(target)
# else:
# resetTarget()
# as_event('ON_AUTO_AIM')
# @registerEvent(FragsCollectableStats, 'addVehicleStatusUpdate')
# def FragsCollectableStats_addVehicleStatusUpdate(self, vInfoVO):
# if config.get('sight/enabled', True) and (not vInfoVO.isAlive()) and (playerVehicleID == vInfoVO.vehicleID):
# resetTarget()
# as_event('ON_AUTO_AIM')
@xvm.export('sight.autoAimName', deterministic=False)
def sight_autoAimName():
    # Flash macro: locked target's player name (None while hidden).
    return targetName if visible else None
@xvm.export('sight.autoAimVehicle', deterministic=False)
def sight_autoAimVehicle():
    # Flash macro: locked target's vehicle short name (None while hidden).
    return targetVehicle if visible else None
@xvm.export('sight.autoAimHealth', deterministic=False)
def sight_autoAimHealth():
    # Flash macro: locked target's current HP (None while hidden).
    return targetHealth if visible else None
|
16,639 | dbe5020f84aa2996e023cc8d1e26a1faf0e253e9 | alpha_dict = {
'a': 0.0575,
'b': 0.0128,
'c': 0.0263,
'd': 0.0285,
'e': 0.0913,
'f': 0.0173,
'g': 0.0133,
'h': 0.0313,
'i': 0.0599,
'j': 0.0006,
'k': 0.0084,
'l': 0.0335,
'm': 0.0235,
'n': 0.0596,
'o': 0.0689,
'p': 0.0192,
'q': 0.0008,
'r': 0.0508,
's': 0.0567,
't': 0.0706,
'u': 0.0334,
'v': 0.0069,
'w': 0.0119,
'x': 0.0073,
'y': 0.0164,
'z': 0.0007,
' ': 0.1928,
}
# Cumulative (prefix-sum) probabilities of alpha_dict, in key order.
# NOTE(review): values look precomputed by hand — confirm they stay in sync
# with alpha_dict if the letter frequencies ever change.
accu_dict = {
    'a': 0,
    'b': 0.0575,
    'c': 0.0703,
    'd': 0.0966,
    'e': 0.12510000000000002,
    'f': 0.21640000000000004,
    'g': 0.23370000000000005,
    'h': 0.24700000000000005,
    'i': 0.27830000000000005,
    'j': 0.33820000000000006,
    'k': 0.33880000000000005,
    'l': 0.34720000000000006,
    'm': 0.38070000000000004,
    'n': 0.40420000000000006,
    'o': 0.46380000000000005,
    'p': 0.5327000000000001,
    'q': 0.5519000000000001,
    'r': 0.5527000000000001,
    's': 0.6035,
    't': 0.6602,
    'u': 0.7308,
    'v': 0.7642,
    'w': 0.7711,
    'x': 0.783,
    'y': 0.7903,
    'z': 0.8067,
    ' ': 0.8074,
}
def dec2bin(x, max_bits=None):
    """Return the fractional part of *x* as a list of binary digits.

    Args:
        x: number whose fractional part is converted (the integer part is
            discarded).
        max_bits: optional cap on the number of digits produced. The default
            (None) keeps the original unbounded behaviour for floats, whose
            expansions always terminate; pass a cap to make exact decimal
            types with repeating binary expansions (e.g. Decimal('0.1'))
            terminate instead of looping forever.
    """
    x -= int(x)
    bins = []
    while x and (max_bits is None or len(bins) < max_bits):
        x *= 2
        bins.append(1 if x >= 1. else 0)
        x -= int(x)
    return bins
def calc_xulie_pro(xulie):
    """Return the product of per-character probabilities from alpha_dict.

    Characters missing from alpha_dict contribute a factor of 1.
    """
    prob = 1
    for ch in xulie:
        prob *= alpha_dict.get(ch, 1)
    return prob
def bin2dec(b):
    """Inverse of dec2bin: fold a bit list back into a fraction in [0, 1)."""
    return sum(bit * 2 ** (-(pos + 1)) for pos, bit in enumerate(b))
if __name__ == "__main__":
    # Demo: compute the sample string's joint probability and print its
    # binary (bit-list) expansion.
    daima = 'asf'
    result = calc_xulie_pro(daima)
    result = dec2bin(result)
    print(result)
|
# List-method tutorial script (comments translated from Turkish).
numbers =[1,5,2,8,6,10,3,4,2,10]
latters =['a','n','z','b','g','k']
value=min(numbers) #smallest number in the list.
value=max(latters) #largest letter (overwrites the previous value).
value=numbers[3:6] #elements from index 3 up to (excluding) index 6.
print(value)
latters[3]='AYŞE' #replace the element at index 3.
latters[6:]='W','EKİNCİ','C' #extend the end of the list with these three items.
latters.append('alfaba') #append 'alfaba' at the end.
print(latters)
numbers.append(138) #append 138 at the end.
numbers.insert(4,101) #insert 101 at index 4.
numbers.pop(7) #remove the element at index 7.
numbers.remove(8) #remove the first occurrence of the value 8.
numbers.sort() #sort the list in place.
print(numbers)
latters.reverse() #reverse the list in place.
print(latters)
value=len(latters) #number of elements.
print(value)
value=numbers.count(10) #how many times 10 occurs.
print(value)
latters.clear() #remove every element of latters.
print(latters)
#Convert the string "Python,Programlama,Java" into a list.
#NOTE(review): the name 'str' shadows the builtin str type from here on.
str="Python,Programlama,Java".split(',')
print(str)
#Store three car brand names read from the user in a list.
markalar=[]
marka=input("1.Marka : ")
markalar.append(marka)
marka=input("2.Marka : ")
markalar.append(marka)
marka=input("3.Marka : ")
markalar.append(marka)
print(markalar)
|
16,641 | 6fc2b1531f47f6b33fd40e9858a77f1506f44e42 | from typing import List
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of nums by iterative doubling.

        Each element extends every subset collected so far, appending the
        extended copies after the originals.
        """
        if not nums:
            return [[]]
        collected = [[]]
        for value in nums:
            extended = [subset + [value] for subset in collected]
            collected += extended
        return collected
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of nums via backtracking.

        Bug fix: the original guarded each append with
        ``len(temp) <= len(nums)``, but ``nums`` here is the *shrinking*
        recursive slice, so deeper subsets were silently dropped (e.g.
        subsets([1]) returned only [[]]).  Every partial ``temp`` is a valid
        subset, so it is always recorded.
        """
        if not nums:
            return [[]]
        ans = []
        def backTrack(rest, temp):
            # Record the current partial subset, then branch on each
            # remaining element (suffix slice keeps elements ordered and
            # prevents duplicates).
            ans.append(temp[:])
            for i in range(len(rest)):
                backTrack(rest[i + 1:], temp + [rest[i]])
        backTrack(nums, [])
        return ans
16,642 | 4c2f2578bd3a33354c4e5c83ac93d07fabd6c825 | from dms.models import Location, DisasterType, Disaster
from dms.services.stats_summary import StatsSummaryService
from dms.tests.base import MongoTestCase
class DisasterSummaryStatsTest(MongoTestCase):
    """Tests for StatsSummaryService disaster aggregation at country,
    district and subcounty scope.

    The assertions below suggest ``count`` = disasters in scope,
    ``affected`` = number of distinct affected locations, and ``types`` =
    per-DisasterType counts -- verify against StatsSummaryService.
    """
    def setUp(self):
        # Baseline fixtures: a Flood type and a Kampala > Bukoto location tree.
        self.disaster_type = DisasterType(**dict(name='Flood', description="Some flood")).save()
        self.kampala = Location(**dict(name='Kampala', type='district', parent=None)).save()
        self.bukoto = Location(**dict(name='Bukoto', type='subcounty', parent=self.kampala)).save()
        # Template kwargs for a Bukoto flood; note that Disaster.name holds
        # the DisasterType reference, not a string.
        self.disaster_attr = dict(name=self.disaster_type, locations=[self.bukoto], description="Big Flood",
                                  date="2014-12-01", status="Assessment")
    def test_should_retrieve_message_count_affected_types_countrywide(self):
        # Three floods across two locations (Wakiso + Bukoto x2).
        wakiso = Location(**dict(name='Wakiso', type='district', parent=None)).save()
        wakiso_disaster_attr = self.disaster_attr.copy()
        wakiso_disaster_attr['locations'] = [wakiso]
        Disaster(**wakiso_disaster_attr).save()
        Disaster(**self.disaster_attr).save()
        attr2 = self.disaster_attr.copy()
        attr2["status"] = "Closed"
        Disaster(**attr2).save()
        # location=None means country-wide aggregation.
        location_stats_service = StatsSummaryService(location=None)
        stats = location_stats_service.aggregate_stats()
        disasters_stats = stats.disasters
        self.assertEqual(3, disasters_stats.count)
        self.assertEqual(2, disasters_stats.affected)
        self.assertEqual({'Flood': 3}, disasters_stats.types)
    def test_should_retrieve_message_count_affected_types_of_a_district(self):
        # Two floods, both in Bukoto (a subcounty of Kampala).
        Disaster(**self.disaster_attr).save()
        attr2 = self.disaster_attr.copy()
        attr2["status"] = "Closed"
        Disaster(**attr2).save()
        location_stats_service = StatsSummaryService(location=self.kampala)
        stats = location_stats_service.aggregate_stats()
        disasters_stats = stats.disasters
        self.assertEqual(2, disasters_stats.count)
        self.assertEqual(1, disasters_stats.affected)
        self.assertEqual({'Flood': 2}, disasters_stats.types)
    def test_should_retrieve_message_count_affected_types_of_a_subcounty(self):
        # A fire in an unrelated district must not leak into Bukoto's stats.
        Disaster(**self.disaster_attr).save()
        fire_type = DisasterType(**dict(name='Fire', description="whatever")).save()
        attr2 = self.disaster_attr.copy()
        attr2["locations"] = [Location(**dict(name='Location that is not Kampala', type='district')).save()]
        attr2["name"] = fire_type
        Disaster(**attr2).save()
        location_stats_service = StatsSummaryService(location=self.bukoto)
        stats = location_stats_service.aggregate_stats()
        disasters_stats = stats.disasters
        self.assertEqual(1, disasters_stats.count)
        self.assertEqual(1, disasters_stats.affected)
        self.assertEqual({'Flood': 1}, disasters_stats.types)
    def test_types_of_a_subcounty(self):
        # Two disaster types in the same subcounty are counted separately.
        Disaster(**self.disaster_attr).save()
        fire_type = DisasterType(**dict(name='Fire', description="whatever")).save()
        attr2 = self.disaster_attr.copy()
        attr2["name"] = fire_type
        Disaster(**attr2).save()
        location_stats_service = StatsSummaryService(location=self.bukoto)
        stats = location_stats_service.aggregate_stats()
        disasters_stats = stats.disasters
        self.assertEqual(2, disasters_stats.count)
        self.assertEqual(1, disasters_stats.affected)
        self.assertEqual({'Flood': 1, 'Fire': 1}, disasters_stats.types)
    def test_should_return_0_if_no_disaster_everywhere(self):
        # Empty database: all aggregates are zero/empty.
        location_stats_service = StatsSummaryService(location=self.bukoto)
        stats = location_stats_service.aggregate_stats()
        disasters_stats = stats.disasters
        self.assertEqual(0, disasters_stats.count)
        self.assertEqual(0, disasters_stats.affected)
        self.assertEqual({}, disasters_stats.types)
|
16,643 | 268c88e8fce20db46f88f5586fe38ddb703de157 | from .problems_facade import get_available_days, get_day_calculator
|
16,644 | eecc5f3e6b4b0e539ab1d6e8d8eab5cb1195273a | import pickle as p
# Inspect a single entry of the pickled feature dictionary {img_id: features}.
filename = 'features.pkl'
with open(filename, 'rb') as f:
    x = p.load(f)  # NOTE: only unpickle trusted files -- pickle can run code
for a,b in x.items():
    print("img_id: "+ a)
    print("img_desc: "+ str(b))
    break  # show only the first item
16,645 | c144a64e2e1fd0016533dfbd629783893d22feac | # -*- coding: utf-8 -*-
import time
import asyncio
import threading
"""
协程
相比于多线程,协程的特点在于它使用一个线程来完成并发任务,多任务切换由程序自己控制。在IO密集型任务中,可以把耗时长的IO操作做成异步处理。
根本区别:多进程与多线程性能更多取决于机器的性能,但协程则更多依赖程序员自身的能力。
协程的优势:
1. 协程中程序的切换不是依靠线程切换,而是通过程序自己控制。因此没有线程切换的开销,效率更高。
2. 协程不涉及锁机制。因为协程是单线程,不存在同时写变量的冲突,在协程中控制共享资源不加锁,只需要判断状态就好了。所以效率进一步提高。
如果想要利用多核CPU,那么可以采用多进程+协程的方式,充分提升性能。
"""
# --------------------------------------------------------------------------
# 使用yield和send()实现协程
# --------------------------------------------------------------------------
def consumer():
    """Consumer coroutine: yields a hand-off message, then slowly processes
    each value the producer sends in."""
    print("start task")
    while True:
        received = yield "now change to producer"
        time.sleep(2)  # simulate slow consumption
        print("consumer is working....{} times".format(received))
def producer(t):
    """Drive consumer generator *t*: prime it, then send it three work items."""
    next(t)  # prime the consumer: run it up to its first yield
    n = 0
    while n < 3:
        print("-------------------")
        print("producer is working....{} times".format(n))  # producer's turn
        time.sleep(2)
        print("now change to consumer")
        ret = t.send(n)  # resume the consumer; returns its next yielded value
        print(ret)
        n += 1
c = consumer()  # calling a generator function only builds the generator
producer(c)     # run the producer, which co-routines with the consumer
# The code above first activates c via next(), running it up to the yield
# and pausing there.  Then producer executes.
# send() resumes the code after the yield and simultaneously binds its
# argument to the variable on the yield's left-hand side.
# --------------------------------------------------------------------------
# 使用asyncio库实现Python协程
# asyncio 是python3.4引入的标准库,直接内置了对异步IO的支持。其本质是一个消息循环
# 从asyncio获取一个EventLoop引用,然后把需要执行的协程扔给它里面去执行即可。
# event_loop: 程序会开启一个无限的循环,程序员将一些函数注册到事件循环上
# coroutine:一个协程对象,也就是被装饰器(或者async关键字)定义的函数。调用时并不会立即执行,而是返回一个协程对象。
# task:任务是对协程进行进一步封装。
# future:代表将来执行或者没有执行的任务的结果(同task类似)
# --------------------------------------------------------------------------
async def walk(n, steps):
    """Coroutine: student *n* takes *steps* steps, pausing 2s per step.

    Rewritten as a native ``async def`` coroutine: the generator-based
    ``@asyncio.coroutine`` / ``yield from`` style was deprecated in
    Python 3.8 and removed in 3.11, while ``async``/``await`` (already
    used later in this file) is the supported equivalent.
    """
    start = 0
    while start < steps:
        # The thread printout shows that all coroutines share one thread.
        print("学生{}往前走".format(n), threading.current_thread())
        ret = await asyncio.sleep(2)  # simulated blocking IO; yields control to the loop
        print("学生{}摔了一跤".format(n), threading.current_thread())
        start += 1
loop = asyncio.get_event_loop()  # fetch the event loop (legacy pattern; deprecated in 3.10+)
tasks = [walk(1, 3), walk(2, 3)]  # two coroutine objects, not yet running
loop.run_until_complete(asyncio.gather(*tasks))  # gather wraps them as tasks; blocks until all finish
loop.close()
# When the first task reaches the await/yield-from point it gives up control:
# instead of blocking through the sleep, the loop suspends that task and runs
# the next one, resuming each task once its sleep elapses.
print("----------------------------------------------------------------------------------------------------------------")
# --------------------------------------------------------------------------
# 通过task.result()的方式可以直接获取已经完成了的task的返回值(通过return返回)
# run_until_complete内置了回调函数
# --------------------------------------------------------------------------
async def run():  # "async def"/"await" are the modern spelling of @asyncio.coroutine/"yield from"
    await asyncio.sleep(1)
    return "run!!"
loop2 = asyncio.new_event_loop()  # fresh loop; the previous one was closed above
c2 = run()
task2 = loop2.create_task(c2)  # wrap the coroutine in a Task
print(task2)  # pending
loop2.run_until_complete(task2)  # run_until_complete has a built-in completion callback
print(task2)  # finished
print(task2.result())  # result() returns the value the coroutine returned
loop2.close()
# 也可以手动绑定回调函数,获取自己想要的结果(不推荐)
async def climb():
    """Sleep one second, then return a marker string."""
    await asyncio.sleep(1)
    return "climb!!"
def callback(future):
    """Done-callback: announce completion, then print and forward the result."""
    print("call back!")
    result = future.result()
    print(result)
    return result
loop3 = asyncio.new_event_loop()
c3 = climb()
task3 = loop3.create_task(c3)
task3.add_done_callback(callback)  # fires once the task completes
loop3.run_until_complete(task3)
loop3.close()
|
16,646 | 675c2913602fe10787864d97ffcec15d9a0c4f45 | import tensorflow as tf
import common
class TRANSITION(object):
    """Two-hidden-layer MLP that predicts a residual next-state update.

    NOTE(review): written against a pre-1.0 TensorFlow API
    (tf.concat(concat_dim=...), tf.scalar_summary) and Python 2
    (dict.values() concatenation below) -- confirm the pinned versions.
    """
    def __init__(self, in_dim, out_dim, size, lr, do_keep_prob):
        # Architecture hyper-parameters.
        self.arch_params = {
            'in_dim': in_dim,
            'out_dim': out_dim,
            'n_hidden_0': size[0], #800,
            'n_hidden_1': size[1], #400,
            'do_keep_prob': do_keep_prob
        }
        # Optimizer hyper-parameters.
        self.solver_params = {
            'lr': lr, #0.0001,
            'weight_decay': 0.000001,
            'weights_stddev': 0.01,
        }
        self._init_layers()
    def forward(self, state_, action, autoencoder):
        '''
        :param _input: N_batch x np.concatenate([[x_h, x_ct, x_h_, x_ct_, x_h__, x_ct__], v_t, x_t, a_t, self.is_aggressive, [ct_ind]])
        :param _input: N_batch x action
        :return: prediction: {x_h,x_ct}_t
        '''
        # Split the state: columns 0..5 feed the network; of those, the
        # first 2 are the coordinates being advanced; everything from
        # column 6 on is passed through untouched.
        x_H_ = tf.slice(state_, [0, 0], [-1, 6])
        x_ = tf.slice(state_, [0, 0], [-1, 2])
        rest = tf.slice(state_, [0, 6], [-1, -1])
        _input = tf.concat(concat_dim=1, values=[x_H_, action], name='input')
        h0 = tf.nn.xw_plus_b(_input, self.weights['0'], self.biases['0'], name='h0')
        relu0 = tf.nn.relu(h0)
        h1 = tf.nn.xw_plus_b(relu0, self.weights['1'], self.biases['1'], name='h1')
        relu1 = tf.nn.relu(h1)
        delta = tf.nn.xw_plus_b(relu1, self.weights['c'], self.biases['c'], name='delta')
        delta_do = tf.nn.dropout(delta, self.arch_params['do_keep_prob'])
        delta_do = tf.slice(delta_do, [0,0], [-1, 2])
        # Residual update: only the first two coordinates receive the
        # predicted delta; the remaining 4 history columns are copied.
        x = x_ + delta_do
        x_H = tf.concat(concat_dim=1, values=[x, tf.slice(x_H_, [0, 0], [-1, 4])])
        state = tf.concat(concat_dim=1, values=[x_H, rest], name='state')
        return state
    def backward(self, loss):
        """Build the Adam update op for *loss*; returns (apply_op, mean |grad|, mean |w|)."""
        # create an optimizer
        opt = tf.train.AdamOptimizer(learning_rate=self.solver_params['lr'])
        # weight decay
        if self.solver_params['weight_decay']:
            loss += self.solver_params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in self.trainable_variables])
        # compute the gradients for a list of variables
        # NOTE(review): dict.values() + dict.values() only works on Python 2;
        # Python 3 would need list(...) around each operand.
        grads_and_vars = opt.compute_gradients(loss=loss, var_list=self.weights.values() + self.biases.values())
        mean_abs_grad, mean_abs_w = common.compute_mean_abs_norm(grads_and_vars)
        # apply the gradient
        apply_grads = opt.apply_gradients(grads_and_vars)
        return apply_grads, mean_abs_grad, mean_abs_w
    def train(self, objective):
        """Wire *objective* into the loss/minimize/summary attributes."""
        self.loss = objective
        self.minimize, self.mean_abs_grad, self.mean_abs_w = self.backward(self.loss)
        self.loss_summary = tf.scalar_summary('loss_t', objective)
    def _init_layers(self):
        """Create weight/bias Variables for the two hidden layers and output."""
        weights = {
            '0': tf.Variable(tf.random_normal([self.arch_params['in_dim'] , self.arch_params['n_hidden_0']], stddev=self.solver_params['weights_stddev'])),
            '1': tf.Variable(tf.random_normal([self.arch_params['n_hidden_0'], self.arch_params['n_hidden_1']], stddev=self.solver_params['weights_stddev'])),
            'c': tf.Variable(tf.random_normal([self.arch_params['n_hidden_1'], self.arch_params['out_dim']] , stddev=self.solver_params['weights_stddev'])),
        }
        biases = {
            '0': tf.Variable(tf.random_normal([self.arch_params['n_hidden_0']], stddev=self.solver_params['weights_stddev'])),
            '1': tf.Variable(tf.random_normal([self.arch_params['n_hidden_1']], stddev=self.solver_params['weights_stddev'])),
            'c': tf.Variable(tf.random_normal([self.arch_params['out_dim']], stddev=self.solver_params['weights_stddev']))
        }
        self.weights = weights
        self.biases = biases
        # Python 2 idiom (see backward()); kept as-is.
        self.trainable_variables = weights.values() + biases.values()
|
16,647 | 0a5c0784ace7d4b3b7be81b6071897f5b962a560 | # -*- coding:utf-8 -*-
__author__ = 'lynn'
__date__ = '2021/5/12 18:12'
import numpy as np
import cv2
# 自适应阈值出来了很多噪点,感觉更适合自然场景多些
def prepare_gray_(img_color):
    """Invert the grayscale of an RGB image and adaptively binarize it."""
    img_gray = cv2.bitwise_not(cv2.cvtColor(img_color, cv2.COLOR_RGB2GRAY))
    return cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)
def prepare_gray(img_color):
    """Invert grayscale and adaptively threshold (mean-C) for line extraction."""
    gray_img = cv2.bitwise_not(cv2.cvtColor(img_color, cv2.COLOR_RGB2GRAY))  # invert pixels
    # cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
    # return cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 15, -2)
    return cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)
    # _ret, thresh_img = cv2.threshold(gray_img, 180, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # return thresh_img
# def outline_frame(img_gray, border_thickness, horizontal_scale=20.0, vertical_scale=20.0):
def outline_frame(img_gray, subject, border_thickness=4):
    """Extract horizontal and vertical line maps from a binarized page image
    using erode/dilate morphology; returns (horizontal, vertical)."""
    # Use a smaller (more sensitive) scale for Chinese-language ('语文')
    # papers because essay sheets contain many ruled lines; for other
    # subjects a larger scale finds more candidate points to prune later.
    if subject == '语文':
        horizontal_scale = 30
        vertical_scale = 30
    else:
        horizontal_scale = 50
        vertical_scale = 50
    (height, width) = img_gray.shape
    dilate_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (border_thickness, border_thickness))
    erode_structure = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # Trace horizontal lines: erode with a wide 1px-tall kernel, re-dilate,
    # then thicken and clean up.
    horizontal = img_gray.copy()
    horizontal_size = int(width / horizontal_scale)
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv2.erode(horizontal, horizontal_structure)
    horizontal = cv2.dilate(horizontal, horizontal_structure)
    horizontal = cv2.dilate(horizontal, dilate_structure)
    horizontal = cv2.erode(horizontal, erode_structure)
    # Trace vertical lines with the transposed kernel.
    vertical = img_gray.copy()
    vertical_size = int(height / vertical_scale)
    vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
    vertical = cv2.erode(vertical, vertical_structure)
    vertical = cv2.dilate(vertical, vertical_structure)
    vertical = cv2.dilate(vertical, dilate_structure)
    vertical = cv2.erode(vertical, erode_structure)
    return horizontal, vertical
def center_point(points):
    """Centroid of a list of same-dimension points; None for an empty list."""
    if not points:
        return None
    count = len(points)
    dims = len(points[0])
    return tuple(sum(p[d] for p in points) / count for d in range(dims))
def clustering_points(points, max_gap,
                      norm=np.linalg.norm,
                      center_trans=lambda x: int(round(x))):
    """Greedily cluster *points*: each point joins the nearest existing
    cluster if within *max_gap*, otherwise starts a new one.  Returns
    {rounded-center: [members]}.

    Bug fix: the original deleted and inserted keys while iterating
    ``cluster.items()``, which raises ``RuntimeError: dictionary changed
    size during iteration`` on Python 3.  The re-centering pass now builds
    a fresh dict instead of mutating the one being iterated.
    """
    cluster = {}
    for point in points:
        if len(cluster) == 0:
            cluster[point] = [point]
        else:
            # Distance from the point to the closest member of each cluster.
            temp = [(i, min(norm(np.array(point) - np.array(p)) for p in group)) for i, group in cluster.items()]
            temp.sort(key=lambda d: d[1])
            i, dist = temp[0]
            if dist <= max_gap:
                cluster[i].append(point)
            else:
                cluster[point] = [point]
    # Re-key every cluster by the (transformed) centroid of its members.
    recentred = {}
    for _seed, members in cluster.items():
        c = center_point(members)
        recentred[tuple(center_trans(v) for v in c)] = members
    return recentred
def dict_get(d, key, default):
    """Return d[key]; when absent, insert and return default() (lazy factory)."""
    if key in d:
        return d[key]
    value = default()
    d[key] = value
    return value
def dist_point_line(point, line,
                    is_line_segment=True,
                    norm=np.linalg.norm):
    """Distance from *point* to the line (or, by default, the segment)
    through the two endpoints in *line*."""
    p = np.array(point)
    start, end = (np.array(e) for e in line)
    seg = end - start
    to_p = p - start
    length = norm(seg)
    # Normalized projection of the point onto the segment direction;
    # a degenerate (zero-length) segment projects to its start point.
    ratio = 0 if length == 0 else seg.dot(to_p) / (length ** 2)
    if is_line_segment and ratio <= 0:
        return norm(to_p)          # closest to the start endpoint
    if is_line_segment and ratio >= 1:
        return norm(p - end)       # closest to the end endpoint
    return norm(to_p - ratio * seg)  # perpendicular distance
def flatten(coll):
    """Concatenate the members of *coll* into one list (one level deep)."""
    out = []
    for sub in coll:
        out += sub
    return out
def groupby(coll, key):
    """Group the elements of *coll* by key(e) into a dict of lists."""
    grouped = {}
    for item in coll:
        grouped.setdefault(key(item), []).append(item)
    return grouped
def group_reverse_map(group_res,
                      value=lambda v: v, key=lambda k: k):
    """Invert a grouping dict.

    Converts ``{k1: [u1, u2, ...], k2: [v1, ...], ...}`` into
    ``{u1: k1, u2: k1, ..., v1: k2, ...}``, passing each member through
    *value* and each group key through *key* first.
    """
    inverted = {}
    for group_key, members in group_res.items():
        for member in members:
            inverted[value(member)] = key(group_key)
    return inverted
def imshow(img, name=''):
    """Show *img* in a window, block until a key press, then close it."""
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyWindow(name)
def maxindex(coll):
    """Index of the largest value in the collection, or None when empty."""
    if not coll:
        return None
    return coll.index(max(coll))
def minindex(coll):
    """Index of the smallest value in the collection, or None when empty."""
    if not coll:
        return None
    return coll.index(min(coll))
def polygon_to_box(polygon):
    """Collapse an 8-value polygon [x0,y0,x1,y1,x2,y2,x3,y3] into a 4-tuple
    of its 0th, 3rd, 4th and 7th coordinates."""
    print(polygon)  # debug trace, kept to match original behavior
    corners = (polygon[0], polygon[3], polygon[4], polygon[7])
    return corners
def sort(coll, key=lambda x: x, reverse=False):
    """Sort *coll* in place and return it, so calls can be chained/inlined.

    NOTE: shadows the (unused here) builtin-like name; list.sort itself
    returns None, which is why this wrapper exists.
    """
    coll.sort(key=key, reverse=reverse)
    return coll
def merge_lines(img_lines, threshold,
                min_line_length=30, max_line_gap=10):
    """
    Merge lines by ends clustering
    """
    # NOTE(review): the threshold/min_line_length/max_line_gap parameters are
    # currently ignored -- the HoughLinesP call below hard-codes 160/500/65.
    # raw_lines = cv2.HoughLinesP(img_lines, 1, np.pi / 180, threshold,
    #                             minLineLength=min_line_length, maxLineGap=max_line_gap)
    raw_lines = cv2.HoughLinesP(img_lines, 1, np.pi / 180, 160, minLineLength=500, maxLineGap=65)
    # Canonicalize each segment as a sorted endpoint pair, cluster all
    # endpoints within 5px, snap segments to cluster centers, deduplicate.
    lines = [sort([(line[0][0], line[0][1]), (line[0][2], line[0][3])]) for line in raw_lines]
    ends = set(flatten(lines))
    ends_map = group_reverse_map(clustering_points(ends, 5))
    merged_set = set([tuple(sort([ends_map[line[0]], ends_map[line[1]]])) for line in lines])
    return [(line[0], line[1]) for line in merged_set]
def merge_lines1(lines):
    """Merge near-duplicate segments (given as flat [x1, y1, x2, y2] rows)
    by clustering their endpoints within 3px and deduplicating."""
    segments = [sort([(l[0], l[1]), (l[2], l[3])]) for l in lines]
    endpoint_map = group_reverse_map(clustering_points(set(flatten(segments)), 3))
    merged = set(tuple(sort([endpoint_map[a], endpoint_map[b]])) for a, b in segments)
    return [(a, b) for a, b in merged]
def detect_lines(image):
    """Detect and merge line segments in *image*.

    Two strategies sit side by side: a morphology+Hough pipeline (partially
    commented out) and FastLineDetector on an Otsu-binarized grayscale.
    NOTE(review): the function currently returns None and its result
    ``frame_lines`` is never used.
    """
    img_gray = prepare_gray(image)
    # NOTE(review): 6 is passed as ``subject`` (a string is expected), so the
    # non-'语文' scale branch is always taken and border_thickness stays 4.
    frame_h, frame_v = outline_frame(img_gray, 6)
    img_frames = cv2.bitwise_or(frame_h, frame_v)
    # frame_lines = merge_lines(img_frames, 20)
    img_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    ret, binary = cv2.threshold(img_gray, 230, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    fld = cv2.ximgproc.createFastLineDetector()  # requires opencv-contrib
    dlines = fld.detect(binary)
    lines = [line[0] for line in dlines.tolist()]
    lines_arr = np.array(lines)
    # Keep only segments spanning at least 20px horizontally or vertically.
    width_ = lines_arr[:, 2] - lines_arr[:, 0]
    height_ = lines_arr[:, 3] - lines_arr[:, 1]
    lines_index1 = np.where(width_ >= 20)[0]
    lines_index2 = np.where(height_ >= 20)[0]
    lines_index = np.hstack([lines_index1, lines_index2])
    new_lines = [lines[ele] for ele in lines_index]
    frame_lines = merge_lines1(new_lines)
    # img_lines = np.zeros(image.shape)
    # for line in frame_lines:
    #     cv2.line(img_lines, tuple(map(int, line[0])), tuple(map(int, line[1])), (255, 255, 255), thickness=1)
    # cv2.imwrite(r'E:\111_4_26_test_img\\' + str(0) + '_' + '2.jpg', img_lines)
if __name__ == '__main__':
    # Ad-hoc manual test: run line detection on a local sample image.
    img_path = r'E:\111_4_26_test_img\1.jpg'
    image = cv2.imread(img_path)
    detect_lines(image)
16,648 | 446f42faedb7cbc7d5712c9d8b0d008d69ca8f5b | #!/usr/bin/env python
"""
Module "init_catalog" create catalog.db
with example content to test the application.
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from model_catalog import Category, Base, Item, User, engine
# Create database session ( engine is created in model_catalog )
DBSession = sessionmaker(bind=engine)
session = DBSession()  # module-level session shared by everything below
def print_categories():
    """Dump every category (with its owner) and its items to stdout."""
    for category in session.query(Category).all():
        owner = category.user
        line = '{} : {} : {} : {} : {}'.format(
            category.id, category.name, owner.id, owner.username, owner.email)
        print(line)
        for item in session.query(Item).filter_by(category_id=category.id):
            print(' -- {} :  Title = {} Description = {}'.format(
                item.id, item.title, item.description))
# Create 2 users
User1 = User(
username="naci",
email="naci@gmail.com",
picture='',
provider='local'
)
User1.hash_password("Udacity")
session.add(User1)
session.commit()
User2 = User(
username="reviewer",
email="",
picture="""
https://pbs.twimg.com/profile_images/2671170543/
18debd694829ed78203a5a36dd364160_400x400.png""",
provider='local'
)
User2.hash_password("Udacity")
session.add(User2)
session.commit()
##########################################################################
# 1. Category for Baseball
category = Category(user=User1, name="Baseball")
session.add(category)
session.commit()
# 1. Item for Category
item1 = Item(
user=User1,
title="Smal ball",
category=category,
description="very small")
session.add(item1)
session.commit()
##########################################################################
# 2. Category for Frisbee
category = Category(user=User2, name="Frisbee")
session.add(category)
session.commit()
##########################################################################
# 3. Category for Snowboarding
category = Category(user=User1, name="Snowboarding")
session.add(category)
session.commit()
# 1. Item for Category for Snowboarding
item1 = Item(
user=User1,
title="Snowboard",
category=category,
description="The hat"
)
session.add(item1)
session.commit()
##########################################################################
# 4. Category for Rock Climbing
category = Category(user=User2, name="Rock Climbing")
session.add(category)
session.commit()
##########################################################################
# 5. Category for Foosball
category = Category(user=User1, name="Foosball")
session.add(category)
session.commit()
##########################################################################
# 6. Category for Skating
category = Category(user=User2, name="Skating")
session.add(category)
session.commit()
##########################################################################
# 7. Category for Hockey
category = Category(user=User1, name="Hockey")
session.add(category)
session.commit()
##########################################################################
# 8. Category for Basketball
category = Category(user=User2, name="Basketball")
session.add(category)
session.commit()
# 1. Item for Category for Basketball
item1 = Item(
user=User2,
title="Ball",
category=category,
description="Big ball"
)
session.add(item1)
session.commit()
##########################################################################
# 8. Category for Soccer
category = Category(user=User2, name="Soccer")
session.add(category)
session.commit()
# 1. Item for Category for Soccer
item1 = Item(
user=User2,
title="Soccer Cleats",
category=category,
description="The shoes"
)
session.add(item1)
session.commit()
# 2. Item for Category for Soccer
item2 = Item(
user=User1,
title="Jersey",
category=category,
description="The shirt"
)
session.add(item2)
session.commit()
##########################################################################
print ("added Categories and its Items!")
print_categories()
|
16,649 | c5c9e2a08c8a1293761e017a4ab4c1e3af744831 | import socket
def CheckThereIsConnection(host="8.8.8.8", port=53, timeout=3):
    """Return True when a TCP connection to host:port succeeds within *timeout*.

    Fixes over the original: the socket is now always closed (the old code
    leaked it), and the timeout is applied per-connection instead of
    mutating the process-wide default via socket.setdefaulttimeout().
    """
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
    except socket.error:
        return False
    sock.close()
    return True
|
16,650 | e0b7cfdef680f0a899320a909a4e65f4c3c4ee8d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.db import models
from django import forms
from .models import Post, Category, Tag
class CommentAdmin(admin.ModelAdmin):
    """Admin list columns for blog comments.

    NOTE(review): no Comment model is imported or registered in this module,
    so this admin class is currently unused -- confirm the wiring.
    """
    list_display = ('user_id', 'post_id', 'pub_date', 'content')
class PostAdmin(admin.ModelAdmin):
    """Post admin: widen the body editor and show title/date columns."""
    # Render every TextField with a large textarea (41 rows x 100 cols).
    formfield_overrides = {
        models.TextField: {'widget': forms.Textarea(
            attrs={'rows': 41,
                   'cols': 100
                   })},
    }
    list_display = ('title', 'pub_date')
class CategoryAdmin(admin.ModelAdmin):
    """Category admin: show name and intro columns in the change list."""
    list_display = ('name', 'intro')
class TagAdmin(admin.ModelAdmin):
    """Tag admin: show only the tag name in the change list."""
    list_display = ['name']
# Register your models here.
# (CommentAdmin above is intentionally or accidentally left unregistered.)
admin.site.register(Post, PostAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Tag, TagAdmin)
|
16,651 | a4a2910537e3a7a5307cd463eb5232abcba2eb74 |
def assignment(date):
    """Validate a 'YYYY/MM[/...]'-style date string.

    Returns True when the first '/'-separated field is exactly 4 characters
    and the second is a numeric month of at least two digits below 13.

    Robustness fix: the original raised IndexError for strings without a
    second field and ValueError for non-numeric months; those malformed
    inputs now simply return False.
    """
    parts = date.split('/')
    if len(parts[0]) != 4:
        return False
    if len(parts) < 2:
        return False  # no month field at all
    month = parts[1]
    if len(month) < 2 or not month.isdigit():
        return False
    return int(month) < 13
|
16,652 | de485502b96efbc021317aebff64545f23423ccd | import librosa
import torch_audiomentations
from torch import Tensor
import torch
from hw_asr.augmentations.base import AugmentationBase
class Stretch(AugmentationBase):
    """Time-stretch augmentation with a rate drawn uniformly from [min_, max_]."""
    def __init__(self, min_=0.75, max_=1.25, *args, **kwargs):
        # Bounds of the uniform distribution the stretch rate is sampled from;
        # extra *args/**kwargs are accepted but ignored.
        self.min_ = min_
        self.max_ = max_
    def __call__(self, data: Tensor, **kwargs):
        # Uniform sample in [min_, max_): rate < 1 slows down, > 1 speeds up.
        rate = (self.min_ + (self.max_ - self.min_) * torch.rand(1)).item()
        # NOTE(review): librosa.effects.time_stretch expects a numpy array
        # (and, in librosa >= 0.10, a keyword ``rate=``); passing a torch
        # Tensor positionally only works with older librosa -- verify the
        # pinned version.
        return torch.from_numpy(
            librosa.effects.time_stretch(
                data,
                rate
            )
        )
|
16,653 | 494dde8ad26f3a0064ed368913115648e78c4d8b | # Copyright (c) 2009-2014 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from .adapter import GaugeAdapter, OutputNotParseable,\
ResultsIndicatedAsInvalid
from ..model.data_point import DataPoint
from ..model.measurement import Measurement
class CaliperAdapter(GaugeAdapter):
"""CaliperPerformance parses the output of Caliper with
the ReBenchConsoleResultProcessor.
"""
re_log_line = re.compile(r"Measurement \(runtime\) for (.*?) in (.*?): (.*?)ns")
def check_for_error(self, line):
## for the moment we will simply not check for errors, because
## there are to many simple Error strings referring to class names
## TODO: find better solution
pass
def parse_data(self, data, run_id):
data_points = []
for line in data.split("\n"):
if self.check_for_error(line):
raise ResultsIndicatedAsInvalid(
"Output of bench program indicates errors.")
m = self.re_log_line.match(line)
if m:
time = float(m.group(3)) / 1000000
current = DataPoint(run_id)
current.add_measurement(Measurement(time, 'ms', run_id,
criterion = m.group(1)))
current.add_measurement(Measurement(time, 'ms', run_id,
criterion = 'total'))
data_points.append(current)
if len(data_points) == 0:
raise OutputNotParseable(data)
return data_points
|
def multiplier(n):
    """Return the prime factorization of n as an ascending list of factors."""
    factors = []
    divisor = 2
    # Trial division up to sqrt(n); divisors found this way are prime.
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 1
        else:
            factors.append(divisor)
            n //= divisor
    if n > 1:
        factors.append(n)  # the leftover cofactor is itself prime
    return factors
|
16,655 | a885b111b399dbafaaf1bd39574835221ad82105 | import numpy as np
from .record_inserter import inserter
from .numeric_inserter import ArrayInserter as NumericArrayInserter
from .utils import set_encoded
@inserter
class FileInserter(NumericArrayInserter):
    """ Inserter for file-like objects.
    File groups register as 'file' type, whereas the data registers as
    'numeric'.
    """
    record_type = 'file'
    def record_dataset_attributes(self, dict_like):
        """Write the dataset-level attributes: contents are stored as a
        plain (non-complex, non-sparse) numeric byte array."""
        attrs = dict(
            RecordType='numeric',
            Empty=self.empty,
            Complex='no',
            Sparse='no',
        )
        set_encoded(dict_like, **attrs)
    @staticmethod
    def can_insert(data):
        """ Can insert file-like objects. """
        return hasattr(data, 'read')
    def prepare_data(self):
        """Read the file's contents and expose them as a uint8 numpy array,
        then delegate to the numeric-array preparation."""
        contents = self.data.read()
        if not isinstance(contents, bytes):
            # Text-mode files: encode to bytes first.  NOTE(review): ascii
            # encoding will raise for non-ASCII text -- confirm that is
            # intended for this storage format.
            contents = contents.encode('ascii')
        self.data = np.frombuffer(contents, dtype=np.uint8)
        NumericArrayInserter.prepare_data(self)
|
16,656 | 25e99ed5ed165357611d11826c5c6747f9a2c1c7 | """
Django settings for busshaming project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before deploying.
SECRET_KEY = '#$sxqt-6%d*d95@7*=j%bg*-32(ic@lst#396=0f$54_4*++r3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Origins allowed to make cross-origin requests (local dev servers).
CORS_ORIGIN_WHITELIST = (
    '127.0.0.1:8000',
    'localhost:8000',
    '127.0.0.1:8080',
    'localhost:8080',
)
ROOT_URLCONF = 'busshaming.urls'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',  # GeoDjango + PostGIS
        'NAME': 'busshaming-local',
        'HOST': 'localhost',
        'PORT': '',  # empty string = backend's default port
    }
}
# Echo django.db.backends (SQL) log records to the console at INFO level.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'INFO',
        },
    }
}
|
# List-method tutorial script.
fruits=['apple',"banana","mango","apple",'guava','orange','jack-fruits']
print(fruits.count('apple'))  # how many times 'apple' occurs
number=[2,9,4,7,0,1]
print(sorted(number)) # prints a sorted copy; the list itself is not sorted
print(number)
number.sort() # now the list itself is sorted
print(number)
number.clear() # remove all elements of the list
print(number)
fruits_copy=fruits.copy() # copy the list into another list
print(fruits_copy)
16,658 | fa92b286cc19ba45a8b53abedb92c5133e4e2d59 | from django.shortcuts import render
from post.models import Post
from django.core.paginator import Paginator
from django.db import connection
# Create your views here.
def query_post(request, num=1):
    """Render the paginated post index (2 posts per page), newest first.

    ``page_list`` is a sliding window of up to 10 page-number links centred
    a little behind the requested page.  Cleanup: removed the leftover
    debug ``print(num)`` and dead commented-out code.
    """
    post_list = Post.objects.all().order_by('-create_time')
    paginator = Paginator(post_list, 2)
    num = int(num)
    current_page = paginator.page(num)
    # Page-link window: start 5 pages back (min 1), span at most 10 pages.
    begin = num - 5 if num > 5 else 1
    end = begin + 9 if begin + 9 < paginator.num_pages else paginator.num_pages
    page_list = range(begin, end + 1)
    return render(request, 'index.html', {'current_page': current_page, 'page_list': page_list, 'current_num': num})
def read(request, num):
    """Render a single post page by primary key.

    NOTE(review): filter(id=num)[0] raises IndexError for an unknown id;
    get_object_or_404 would produce a proper 404 instead.
    """
    num = int(num)
    post = Post.objects.filter(id=num)[0]
    return render(request, 'post.html', {'post': post})
def category_post(request, cid, num=1):
    """Render the paginated post list for category *cid*, newest first.

    Same pagination-window logic as query_post.  Cleanup: removed the
    leftover debug ``print(num)`` and dead commented-out code.
    """
    post_list = Post.objects.filter(category_id=cid).order_by('-create_time')
    paginator = Paginator(post_list, 2)
    num = int(num)
    current_page = paginator.page(num)
    # Page-link window: start 5 pages back (min 1), span at most 10 pages.
    begin = num - 5 if num > 5 else 1
    end = begin + 9 if begin + 9 < paginator.num_pages else paginator.num_pages
    page_list = range(begin, end + 1)
    return render(request, 'category_post.html', {'current_page': current_page,
                                                  'page_list': page_list,
                                                  'current_num': num,
                                                  'cid': cid})
def archive_post(request, year, month, num=1):
    """Render the paginated post list for a year/month archive, newest first.

    Same pagination-window logic as query_post.  Cleanup: removed the
    leftover debug ``print(num)`` and dead commented-out code.
    """
    post_list = Post.objects.filter(create_time__year=year).filter(create_time__month=month).order_by('-create_time')
    tim = year + '-' + month  # display label, e.g. "2018-05"
    paginator = Paginator(post_list, 2)
    num = int(num)
    current_page = paginator.page(num)
    # Page-link window: start 5 pages back (min 1), span at most 10 pages.
    begin = num - 5 if num > 5 else 1
    end = begin + 9 if begin + 9 < paginator.num_pages else paginator.num_pages
    page_list = range(begin, end + 1)
    return render(request, 'archive_post.html', {'current_page': current_page,
                                                 'page_list': page_list,
                                                 'tim': tim,
                                                 'current_num': num})
|
16,659 | 52f060b9f55d2986dae4c3f3e1e4cf1b68d6cc99 | import os
from unittest import TestCase
import subprocess
class LocustTestCase(TestCase):
    """End-to-end smoke test: reset the demo sqlite DB, boot the Django dev
    server, run a headless locust load test against it for ~20s, then tear
    everything down and assert locust exited cleanly.

    Fix: use ``assertEqual`` -- the ``assertEquals`` alias was deprecated
    for years and removed in Python 3.12.
    """
    def test(self):
        # Reset the demo project's sqlite database to an empty file.
        path = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                'django/project/db.sqlite3'
            )
        )
        subprocess.call(['cp', os.devnull, path])
        # Start migrations + the Django dev server in the background.
        subprocess.Popen(
            ['python tests/django/project/manage.py migrate && '
             'python tests/django/project/manage.py runserver'],
            shell=True
        )
        # Launch locust headless against the dev server.
        popen = subprocess.Popen([
            'locust', '-f', 'tests/_locust.py', '--clients=2',
            '--no-web', '--host=http://127.0.0.1:8000'
        ])
        pid = popen.pid
        # Let the load test run for 20 seconds, then stop locust.
        subprocess.call([
            '''
            sleep 20 && kill {pid}
            '''.format(
                pid=pid
            )
        ], shell=True)
        popen.wait()
        # Kill the dev server started above.
        subprocess.call([
            '''
            ps aux |grep manage | grep runserver |
            awk {{'print $2'}} | xargs kill
            '''
        ], shell=True)
        self.assertEqual(popen.returncode, 0)
|
# A first Python program: print a banner, ask the user's name, and greet them.
print("我的第一隻Python程式")
user_name = input("what is your name\n")
print("hi,%s." % user_name)
|
16,661 | 198510006be3bf84a38b9a6dd9ae533c96edc0dc | # Main DRNN Restoration Script
from brahms_restore_ml.drnn.drnn import *
from brahms_restore_ml.audio_data_processing import PIANO_WDW_SIZE
import sys
import random
from scipy.io import wavfile
import numpy as np
import json
import math
import multiprocessing
def run_top_gs_result(num_str, best_config,
                      # train_mean, train_std,
                      x_train_files, y1_train_files, y2_train_files,
                      x_val_files, y1_val_files, y2_val_files, num_train, num_val, train_feat, train_seq,
                      patience, epsilon, recent_model_path, pc_run, dmged_piano_artificial_noise_mix,
                      infer_output_path,
                      # wdw_size,
                      brahms_path, combos_str, data_path=None, min_sig_len=None,
                      tuned_a430hz=False, use_basis_vectors=False,
                      loop_bare_noise=False, low_time_steps=False,
                      artificial_noise=False, ignore_noise_loss=False,
                      name_suffix=''):
    """Train a model from one top grid-search result, then restore Brahms with it.

    Pulls hyperparameters and architecture settings out of ``best_config``
    (a dict parsed from a grid-search result file: keys include 'batch_size',
    'gamma', 'epochs', 'optimizer', 'clip value', 'learning rate', 'layers',
    'scale', 'rnn_res_cntn', 'bias_rnn', 'bias_dense', 'bidir',
    'rnn_dropout', 'bn'), applies several hard-coded evaluation overrides,
    trains via ``evaluate_source_sep`` in a child process, saves a
    train/validation loss chart PNG, and finally runs ``restore_with_drnn``
    inference in a second child process.

    Args:
        num_str: grid-search result number (string) — used in output names.
        best_config: winning-config dict as described above.
        x_train_files, y1_train_files, y2_train_files: training mixture /
            piano-label / noise-label file paths.
        x_val_files, y1_val_files, y2_val_files: validation counterparts.
        num_train, num_val: sample counts for the two splits.
        train_feat, train_seq: feature and sequence dimensions of the input.
        patience: early-stop patience (may be overridden below).
        epsilon: numerical epsilon forwarded to training.
        recent_model_path: where the trained model is written and later read.
        pc_run: True when running on the lower-GPU-memory PC.
        dmged_piano_artificial_noise_mix: dataset-2 flag passed to training.
        infer_output_path: directory for restored-audio output.
        brahms_path: path of the Brahms recording to restore.
        combos_str: total number of grid-search combos (string), for names.
        data_path, min_sig_len: dataset location / minimum signal length.
        tuned_a430hz, use_basis_vectors, loop_bare_noise, low_time_steps,
        artificial_noise, ignore_noise_loss: dataset/feature toggles
            forwarded to training and inference.
        name_suffix: extra tag appended to output file names.

    Side effects: spawns two child processes, writes a PNG loss chart and
    restored audio files, and calls exit(1) if the training child dies
    without reporting losses.
    """
    train_batch_size = best_config['batch_size']
    # # Temp test for LSTM -> until can grid search
    # train_batch_size = 3 if train_batch_size < 3 else train_batch_size
    # # TEMP - until F35 back up, make managable for PC, for bv_s grid search results, no dimred & no lowtsteps
    # train_batch_size = 4
    # # TEMP - make what PC can actually handle (3072 run, but definitely 2048)
    # train_batch_size = 6
    # for smaller
    # NOTE(review): the grid-search batch size read above is unconditionally
    # overridden here — presumably a deliberate eval-time override; confirm.
    if low_time_steps:
        train_batch_size = 50  # 50 # 50 sometimes caused OOM
    else:
        train_batch_size = 4  # no dimred & no lowtsteps
    train_loss_const = best_config['gamma']
    # # EVAL CHANGE - for BVS dense layers gs - 0.3 meaningless
    # if low_time_steps:
    #     train_loss_const = 0.1 # 0.15 # 0.3 # bad for looped noise & normal piano data
    train_epochs = best_config['epochs']
    # # EVAL CHANGE - change back
    if low_time_steps:
        train_epochs = 40  # 150 # 10 # 100
    # train_epochs = 15 # TEMP - optimize learning
    # TEMP - exploit high epochs
    if train_epochs > 10:
        patience = 10  # 20
    train_opt_name = best_config['optimizer']
    # A clip value of -1 in the result file encodes "no gradient clipping".
    train_opt_clipval = None if (best_config['clip value'] == -1) else best_config['clip value']
    train_opt_lr = best_config['learning rate']
    training_arch_config = {}
    training_arch_config['layers'] = best_config['layers']
    # # Temp test for LSTM -> until can grid search
    # for i in range(len(best_config['layers'])):
    #     if best_config['layers'][i]['type'] == 'RNN':
    #         training_arch_config['layers'][i]['type'] = 'LSTM'
    # TEMP - Don't allow dimred in RNNS/LSTMS, b/c not in lit
    # NOTE(review): training_arch_config['layers'] aliases
    # best_config['layers'] (no copy), so the nrn_div writes below also
    # mutate best_config — harmless if best_config isn't reused afterwards,
    # but worth confirming.
    for i in range(len(best_config['layers'])):
        if (best_config['layers'][i]['nrn_div'] != 1) and (best_config['layers'][i]['type'] == 'RNN' or
                                                           best_config['layers'][i]['type'] == 'LSTM'):
            training_arch_config['layers'][i]['nrn_div'] = 1
    training_arch_config['scale'] = best_config['scale']
    training_arch_config['rnn_res_cntn'] = best_config['rnn_res_cntn']
    training_arch_config['bias_rnn'] = best_config['bias_rnn']
    training_arch_config['bias_dense'] = best_config['bias_dense']
    training_arch_config['bidir'] = best_config['bidir']
    training_arch_config['rnn_dropout'] = best_config['rnn_dropout']
    training_arch_config['bn'] = best_config['bn']
    # EVAL CHANGES
    # if use_basis_vectors:
    # NOTE(review): these two overrides discard the grid-search values of
    # 'bidir' and 'rnn_res_cntn' unconditionally (the guard above is
    # commented out) — confirm this is intended for all runs.
    training_arch_config['bidir'] = False
    training_arch_config['rnn_res_cntn'] = False
    l1_reg = None
    # # EVAL CHANGE
    # l1_reg = 0.1 # 0.001
    print('#', num_str, 'TOP TRAIN ARCH FOR USE:')
    print(training_arch_config)
    print('#', num_str, 'TOP TRAIN HPs FOR USE:')
    print('Batch size:', train_batch_size, 'Epochs:', train_epochs,
          'Loss constant:', train_loss_const, 'Optimizer:', best_config['optimizer'],
          'Clip value:', best_config['clip value'], 'Learning rate:', best_config['learning rate'])
    name = '_' + num_str + 'of' + combos_str + '_' + name_suffix
    # Shouldn't need multiproccessing, limited number
    # Temp test for LSTM -> until can grid search
    # Train in a child process; the child reports (losses, val_losses)
    # back through this pipe.
    send_end, recv_end = multiprocessing.Pipe()
    process_train = multiprocessing.Process(target=evaluate_source_sep, args=(
        x_train_files, y1_train_files, y2_train_files,
        x_val_files, y1_val_files, y2_val_files,
        num_train, num_val,
        train_feat, train_seq,
        train_batch_size,
        train_loss_const, train_epochs,
        train_opt_name, train_opt_clipval, train_opt_lr,
        patience, epsilon, training_arch_config,
        recent_model_path, pc_run,
        # train_mean, train_std,
        int(num_str), None, int(combos_str), '', send_end,
        dmged_piano_artificial_noise_mix, data_path, min_sig_len, True,
        tuned_a430hz, use_basis_vectors, None, None,
        loop_bare_noise, low_time_steps, l1_reg, artificial_noise,
        ignore_noise_loss))
    process_train.start()
    # Keep polling until child errors or child success (either one guaranteed to happen)
    losses, val_losses = None, None
    # NOTE(review): polls only once per 60s while the child is alive; if the
    # child sends its result and exits between polls, the loop exits without
    # reading the pipe and the error branch below fires — verify the child
    # always outlives its final send long enough to be polled.
    while process_train.is_alive():
        time.sleep(60)
        if recv_end.poll():
            losses, val_losses = recv_end.recv()
            break
    # NOTE(review): `is None` is the idiomatic comparison here; `== None`
    # behaves the same for these values.
    if losses == None or val_losses == None:
        print('\nERROR happened in child and it died')
        exit(1)
    process_train.join()
    recv_end.close()  # new, hopefully stops data persistence between eval_src_sep calls
    # Plot training vs. validation loss per epoch and save to PNG.
    epoch_r = range(1, len(losses)+1)
    plt.plot(epoch_r, val_losses, 'b', label = 'Validation Loss')
    plt.plot(epoch_r, losses, 'bo', label = 'Training Loss')
    plt.title('Training & Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('brahms_restore_ml/drnn/train_val_loss_chart' + name + '.png')
    # evaluate_source_sep(x_train_files, y1_train_files, y2_train_files,
    #                     x_val_files, y1_val_files, y2_val_files,
    #                     num_train, num_val,
    #                     n_feat=train_feat, n_seq=train_seq,
    #                     batch_size=train_batch_size,
    #                     loss_const=train_loss_const, epochs=train_epochs,
    #                     opt_name=train_opt_name, opt_clip_val=train_opt_clipval, opt_lr=train_opt_lr,
    #                     patience=patience, epsilon=epsilon,
    #                     recent_model_path=recent_model_path, pc_run=pc_run,
    #                     config=training_arch_config, t_mean=train_mean, t_std=train_std,
    #                     dataset2=dmged_piano_artificial_noise_mix)
    # Temp test for LSTM -> until can grid search
    # Run inference in its own child process so TF/GPU state from training
    # doesn't leak into it (presumably why multiprocessing is used here).
    process_infer = multiprocessing.Process(target=restore_with_drnn, args=(infer_output_path, recent_model_path,  # wdw_size, epsilon,
        # train_loss_const,
        train_opt_name, train_opt_clipval, train_opt_lr, min_sig_len, brahms_path, None, None,
        # training_arch_config,
        # train_mean, train_std,
        PIANO_WDW_SIZE, EPSILON,
        pc_run, name, tuned_a430hz, use_basis_vectors, low_time_steps))
    # TEMP - old
    # process_infer = multiprocessing.Process(target=restore_with_drnn, args=(infer_output_path, recent_model_path, wdw_size, epsilon,
    #     train_loss_const, train_opt_name, train_opt_clipval, train_opt_lr, brahms_path, None, None,
    #     training_arch_config, train_mean, train_std, pc_run, '_'+num+'of'+combos_str))
    process_infer.start()
    process_infer.join()
    # restore_with_drnn(infer_output_path, recent_model_path, wdw_size, epsilon,
    #                   train_loss_const, train_opt_name, train_opt_clipval, train_opt_lr,
    #                   test_filepath=brahms_path,
    #                   config=training_arch_config, t_mean=train_mean, t_std=train_std, pc_run=pc_run,
    #                   name_addon='_'+num+'of3072_lstm')
def main():
# PROGRAM ARGUMENTS #
if len(sys.argv) < 3 or len(sys.argv) > 5:
print('\nUsage: restore_with_drnn.py <mode> <PC> [-f] [gs_id]')
print('Parameter Options:')
print('Mode t - Train model, then restore brahms with model')
print(' g - Perform grid search (default: starts where last left off)')
print(' r - Restore brahms with last-trained model')
print('PC true - Uses HPs for lower GPU-memory consumption (< 4GB)')
print(' false - Uses HPs for higher GPU-memory limit (PC HPs + nonPC HPs = total for now)')
print('-f - (Optional) Force restart grid search (grid search mode) OR force random HPs (train mode)')
print('gs_id <single digit> - (Optional) grid search unique ID for running concurrently')
print('\nTIP: Keep IDs different for PC/non-PC runs on same machine')
sys.exit(1)
mode = sys.argv[1]
pc_run = True if (sys.argv[2].lower() == 'true') else False
# TRAIN DATA PIANO PARAMS
# Differentiate PC GS from F35 GS
# dmged_piano_artificial_noise_mix = True if pc_run else False
dmged_piano_artificial_noise_mix = False # TEMP while F35 down
dmged_piano_only = True # Promising w/ BVs
# TRAIN DATA NOISE PARAMS
loop_bare_noise = False # to control bare_noise in nn_data_gen, needs curr for low_time_steps
artificial_noise = True
test_on_synthetic = False
# wdw_size = PIANO_WDW_SIZE
data_path = 'brahms_restore_ml/drnn/drnn_data/'
arch_config_path = 'brahms_restore_ml/drnn/config/'
# gs_output_path = 'brahms_restore_ml/drnn/output_grid_search/' # for use w/ grid search mode
# gs_output_path = 'brahms_restore_ml/drnn/output_grid_search_pc_wb/' # PC
# gs_output_path = 'brahms_restore_ml/drnn/output_grid_search_lstm/' # F35
gs_output_path = 'brahms_restore_ml/drnn/output_grid_search_wb/' # best results
# gs_output_path = 'brahms_restore_ml/drnn/output_grid_search_low_tsteps_two/' # low tsteps 2
# gs_output_path = 'brahms_restore_ml/drnn/output_grid_search_low_tsteps_big/' # low tsteps 3
# gs_output_path = 'brahms_restore_ml/drnn/output_grid_search_dmgpiano_bvs/' # dmgpiano (bvs optional)
recent_model_path = 'brahms_restore_ml/drnn/recent_model'
# recent_model_path = 'brahms_restore_ml/drnn/recent_model_dmgedp_artn_151of3072'
# recent_model_path = 'brahms_restore_ml/drnn/recent_model_149of3072' # restore from curr best
# recent_model_path = 'brahms_restore_ml/drnn/recent_model_111of144_earlystop' # restore from curr best
# recent_model_path = 'brahms_restore_ml/drnn/recent_model_3of4' # restore from best in small gs
infer_output_path = 'brahms_restore_ml/drnn/output_restore/'
# infer_output_path = 'brahms_restore_ml/drnn/output_restore_gs3072_loopnoise/' # eval, do_curr_best, 3072 combos, looped noise
# infer_output_path = 'brahms_restore_ml/drnn/output_restore_151of3072_eval/' # eval, tweaks curr_best
# infer_output_path = 'brahms_restore_ml/drnn/output_restore_pbv_eval/' # eval, tweaks curr_best
# infer_output_path = 'brahms_restore_ml/drnn/output_restore_pbv_eval_nomask/' # eval, tweaks curr_best
brahms_path = 'brahms.wav'
# To run best model configs, data_from_numpy == True & mode == train
do_curr_best, curr_best_combos, curr_best_done_on_pc = False, '3072', False
# # F35 LSTM
# top_result_nums = [72, 128, 24, 176, 8, 192, 88, 112]
# F35 WB
top_result_nums = [151] # [151, 151, 151, 151, 151] # temp - do 1 run # [1488, 1568, 149, 1496, 1680, 86, 151, 152]
# # # top_result_nums = [1488, 1568, 149, 1496, 1680, 86, 151, 152]
# # PC WB
# top_result_nums = [997, 1184, 1312, 1310, 1311, 1736]
# # BVS Architectures
# top_result_nums = [6] # 13, 20, 6, 10, 23]
# # BVS Architectures #2
# top_result_nums = [10]
# # low timesteps 2
# top_result_nums = [34, 23] # [26, # gamma order: 0.05, 0.15, 0.3
# # low timesteps 3 (big)
# top_result_nums = [103, 111, 5] # gamma order: 0.2, 0.3, 0.4 # [111] # [111, 76, 142]
# # Dmg piano data
# top_result_nums = [1,2,3]
# Dmg piano w/ BVs data
# top_result_nums = [3,6,9]
# top_result_nums = [1] # try ignore noise loss
# top_result_nums = [1,3,1,3] # for larger help, bs constant
# top_result_nums = [1,3,1,3,1,3,1,3] # for larger help
top_result_paths = [gs_output_path + 'result_' + str(x) + '_of_' + curr_best_combos +
('.txt' if curr_best_done_on_pc else '_noPC.txt') for x in top_result_nums]
# NEW
output_file_addon = ''
data_from_numpy = True
tuned_a430hz = False # may not be helpful, as of now does A=436Hz by default
basis_vector_features = False # bust
if tuned_a430hz:
recent_model_path += '_a436hz' # tune_temp '_a430hz'
output_file_addon += '_a436hz' # tune_temp '_a430hz'
if basis_vector_features:
recent_model_path += '_bvs'
output_file_addon += '_bvs'
if dmged_piano_only:
recent_model_path += '_dmgedp'
output_file_addon += '_dmgedp'
if artificial_noise:
recent_model_path += '_artn'
output_file_addon += '_artn'
elif not loop_bare_noise:
recent_model_path += '_stretchn'
output_file_addon += '_stretchn'
if not do_curr_best:
recent_model_path += '_151of3072'
output_file_addon += '_151of3072'
if do_curr_best and (len(top_result_nums) == 1) and (mode == 't'):
recent_model_path += ('_' + str(top_result_nums[0]) + 'of' + str(curr_best_combos))
if do_curr_best and (len(top_result_nums) == 1) and (mode == 'r'):
# recent_model_path must have result in name
output_file_addon += ('_' + [x for x in recent_model_path.split('_')][-1])
gs_write_model = False # for small grid searches only, and for running ALL epochs - no early stop
low_time_steps = True # now default
# experiment
ignore_noise_loss = False
# print('RECENT MODEL PATH:', recent_model_path)
# print('OUTPUT FILE PATH ADD-ON:', output_file_addon)
# EMPERICALLY DERIVED HPs
# Note: FROM PO-SEN PAPER - about loss_const
# Empirically, the value γ is in the range of 0.05∼0.2 in order
# to achieve SIR improvements and maintain SAR and SDR.
train_batch_size = 50 if low_time_steps else (6 if pc_run else 12)
# train_batch_size = 3 if pc_run else 12 # TEMP - for no dimreduc
train_loss_const = 0.1
train_epochs = 10
train_opt_name, train_opt_clipval, train_opt_lr = 'Adam', 0.5, 0.001
training_arch_config = None
epsilon, patience, val_split = 10 ** (-10), train_epochs, 0.25
# INFER ONLY
if mode == 'r':
restore_with_drnn(infer_output_path, recent_model_path,
train_opt_name, train_opt_clipval, train_opt_lr,
MIN_SIG_LEN, test_filepath=brahms_path, pc_run=pc_run,
name_addon=output_file_addon, tuned_a430hz=tuned_a430hz,
use_basis_vectors=basis_vector_features,
low_tsteps=low_time_steps)
else:
train_configs, arch_config_optns = get_hp_configs(arch_config_path, pc_run=pc_run,
use_bv=basis_vector_features,
small_gs=gs_write_model,
low_tsteps=low_time_steps,
dmged_piano=dmged_piano_only)
# print('First arch config optn after return:', arch_config_optns[0])
if data_from_numpy:
# Load in train/validation numpy data
noise_piano_filepath_prefix = (data_path + 'dmgedp_artn_mix_numpy/mixed'
if dmged_piano_artificial_noise_mix else
(data_path + 'dmged_mix_a436hz_numpy/mixed' if (dmged_piano_only and tuned_a430hz) else # tune_temp
(data_path + 'dmged_mix_looped_noise_small_numpy/mixed' if (dmged_piano_only and loop_bare_noise and low_time_steps) else
(data_path + 'dmged_mix_art_noise_small_numpy/mixed' if (dmged_piano_only and artificial_noise and low_time_steps) else
(data_path + 'dmged_mix_looped_noise_numpy/mixed' if (dmged_piano_only and loop_bare_noise) else
(data_path + 'piano_noise_looped_small_numpy/mixed' if (loop_bare_noise and low_time_steps) else
(data_path + 'dmged_mix_art_noise_numpy/mixed' if (dmged_piano_only and artificial_noise) else
(data_path + 'piano_noise_art_small_numpy/mixed' if (artificial_noise and low_time_steps) else
(data_path + 'dmged_mix_small_numpy/mixed' if (dmged_piano_only and low_time_steps) else
(data_path + 'piano_noise_small_numpy/mixed' if low_time_steps else
(data_path + 'piano_noise_looped_numpy/mixed' if loop_bare_noise else
(data_path + 'piano_noise_art_numpy/mixed' if artificial_noise else
(data_path + 'dmged_mix_numpy/mixed' if dmged_piano_only else
(data_path + 'piano_noise_a436hz_numpy/mixed' if tuned_a430hz else # tune_temp
(data_path + 'piano_noise_numpy/mixed')))))))))))))))
piano_label_filepath_prefix = (data_path + 'piano_source_numpy/piano'
if dmged_piano_artificial_noise_mix else
(data_path + 'piano_source_a436hz_numpy/piano' if tuned_a430hz else # tune_temp
(data_path + 'piano_source_small_numpy/piano' if low_time_steps else
(data_path + 'piano_source_numpy/piano'))))
noise_label_filepath_prefix = (data_path + 'dmged_noise_numpy/noise'
if dmged_piano_artificial_noise_mix else
(data_path + 'noise_source_looped_small_numpy/noise' if (loop_bare_noise and low_time_steps) else
(data_path + 'noise_source_art_small_numpy/noise' if (artificial_noise and low_time_steps) else
(data_path + 'noise_source_looped_numpy/noise' if loop_bare_noise else
(data_path + 'noise_source_art_numpy/noise' if artificial_noise else
(data_path + 'noise_source_small_numpy/noise' if low_time_steps else
(data_path + 'noise_source_numpy/noise')))))))
else:
# Load in train/validation WAV file data
noise_piano_filepath_prefix = (data_path + 'dmged_mix_wav/features'
if dmged_piano_artificial_noise_mix else data_path + 'dmged_mix_wav/features')
piano_label_filepath_prefix = (data_path + 'final_piano_wav_a=430hz/psource'
if dmged_piano_artificial_noise_mix else
(data_path + 'final_piano_wav_a=436hz/psource' if tuned_a430hz else # tune_temp
(data_path + 'small_piano_wav/psource' if low_time_steps else
(data_path + 'final_piano_wav/psource'))))
noise_label_filepath_prefix = (data_path + 'dmged_noise_wav/nsource'
if dmged_piano_artificial_noise_mix else
(data_path + 'small_art_noise_wav/nsource' if (low_time_steps and artificial_noise) else
(data_path + 'small_noise_wav/nsource' if low_time_steps else
(data_path + 'artificial_noise_wav/nsource' if artificial_noise else
(data_path + 'final_noise_wav/nsource')))))
if dmged_piano_only: # new
dmged_piano_filepath_prefix = (data_path + 'dmged_piano_wav_a=436hz/psource' # tune_temp
if tuned_a430hz else
(data_path + 'small_dmged_piano_wav/psource' if low_time_steps else
(data_path + 'dmged_piano_wav_a=430hz/psource')))
# print('Damaged piano filepath prefix:', dmged_piano_filepath_prefix)
if loop_bare_noise and dmged_piano_only:
print('\nTRAINING WITH DATASET 4 (DMGED PIANO, NORMAL NOISE)')
elif artificial_noise and dmged_piano_only:
print('\nTRAINING WITH DATASET 6 (DMGED PIANO, ARTIFICIAL NOISE)')
elif dmged_piano_only:
print('\nTRAINING WITH DATASET 5 (DMGED PIANO, TIME SHRINK/STRETCH NOISE)')
elif artificial_noise:
print('\nTRAINING WITH DATASET 3 (NORMAL PIANO, ARTIFICIAL NOISE)')
elif loop_bare_noise:
print('\nTRAINING WITH DATASET 1 (NORMAL PIANO, NORMAL NOISE)')
else:
print('\nTRAINING WITH DATASET', '2 (ARTIFICIAL DMG)' if dmged_piano_artificial_noise_mix else '2 (NORMAL PIANO, TIME SHRINK/STRETCH NOISE)')
# if data_from_numpy:
# print('Mix filepath prefix:', noise_piano_filepath_prefix)
# print('Piano filepath prefix:', piano_label_filepath_prefix)
# print('Noise filepath prefix:', noise_label_filepath_prefix)
# TRAIN & INFER
if mode == 't':
random_hps = False
for arg_i in range(3, 6):
if arg_i < len(sys.argv):
if sys.argv[arg_i] == '-f':
random_hps = True
print('\nTRAINING TO USE RANDOM (NON-EMPIRICALLY-OPTIMAL) HP\'S\n')
# Define which files to grab for training. Shuffle regardless.
# (Currently sample is to test on 1 synthetic sample (not Brahms))
sample = test_on_synthetic
# sample = False # If taking less than total samples
if sample: # Used now for testing on synthetic data
# TOTAL_SMPLS += 1
# actual_samples = TOTAL_SMPLS - 1 # How many to leave out (1)
# sample_indices = list(range(TOTAL_SMPLS))
actual_samples = TOTAL_SHORT_SMPLS if low_time_steps else TOTAL_SMPLS # How many to leave out (1)
sample_indices = list(range(actual_samples + 1))
# FIX DMGED/ART DATA
random.shuffle(sample_indices)
test_index = sample_indices[actual_samples]
sample_indices = sample_indices[:actual_samples]
test_piano = piano_label_filepath_prefix + str(test_index) + '.wav'
test_noise = noise_label_filepath_prefix + str(test_index) + '.wav'
test_sr, test_piano_sig = wavfile.read(test_piano)
_, test_noise_sig = wavfile.read(test_noise)
test_sig = test_piano_sig + test_noise_sig
else:
actual_samples = TOTAL_SHORT_SMPLS if low_time_steps else TOTAL_SMPLS
sample_indices = list(range(actual_samples))
# comment out if debugging
random.shuffle(sample_indices)
x_files = np.array([(noise_piano_filepath_prefix + str(i) + ('.npy' if data_from_numpy else '.wav'))
for i in sample_indices])
y1_files = np.array([(piano_label_filepath_prefix + str(i) + ('.npy' if data_from_numpy else '.wav'))
for i in sample_indices])
y2_files = np.array([(noise_label_filepath_prefix + str(i) + ('.npy' if data_from_numpy else '.wav'))
for i in sample_indices])
if dmged_piano_only and (not data_from_numpy):
dmged_y1_files = np.array([(dmged_piano_filepath_prefix + str(i) + '.wav')
for i in sample_indices])
# print('ORDER CHECK: dmged_y1_files:', dmged_y1_files[:10])
# print('ORDER CHECK: x_files:', x_files[:10])
# print('ORDER CHECK: y1_files:', y1_files[:10])
# print('ORDER CHECK: y2_files:', y2_files[:10])
# OLD
# # # # Temp - do to calc max len for padding - it's 3081621 (for youtube src data)
# # # # it's 3784581 (for Spotify/Youtube Final Data)
# # # # it's 3784581 (for damaged Spotify/YouTube Final Data)
# # # max_sig_len = None
# # # for x_file in x_files:
# # # _, sig = wavfile.read(x_file)
# # # if max_sig_len is None or len(sig) >max_sig_len:
# # # max_sig_len = len(sig)
# # # print('NOTICE: MAX SIG LEN', max_sig_len)
# max_sig_len = MAX_SIG_LEN
# # # Temp - get training data dim (from dummy) (for model & data making)
# # max_len_sig = np.ones((max_sig_len))
# # dummy_train_spgm, _ = make_spectrogram(max_len_sig, wdw_size, epsilon,
# # ova=True, debug=False)
# # train_seq, train_feat = dummy_train_spgm.shape
# # print('NOTICE: TRAIN SEQ LEN', train_seq, 'TRAIN FEAT LEN', train_feat)
# train_seq, train_feat = TRAIN_SEQ_LEN, TRAIN_FEAT_LEN
# # NEW
# train_seq, train_feat, min_sig_len = get_raw_data_stats(y1_files,
# brahms_filename=brahms_path)
# print('NOTICE: TRAIN SEQ LEN', train_seq, 'TRAIN FEAT LEN', train_feat, 'MIN SIG LEN',
# min_sig_len)
if low_time_steps:
train_seq, train_feat, min_sig_len = TRAIN_SEQ_LEN_SMALL, TRAIN_FEAT_LEN, MIN_SIG_LEN_SMALL
else:
train_seq, train_feat, min_sig_len = TRAIN_SEQ_LEN, TRAIN_FEAT_LEN, MIN_SIG_LEN
# broken
# if basis_vector_features:
# train_seq += NUM_SCORE_NOTES
# Validation & Training Split
indices = list(range(actual_samples))
val_indices = indices[:math.ceil(actual_samples * val_split)]
x_train_files = np.delete(x_files, val_indices)
y1_train_files = np.delete(y1_files, val_indices)
y2_train_files = np.delete(y2_files, val_indices)
x_val_files = x_files[val_indices]
y1_val_files = y1_files[val_indices]
y2_val_files = y2_files[val_indices]
num_train, num_val = len(y1_train_files), len(y1_val_files)
dmged_y1_train_files = np.delete(dmged_y1_files, val_indices) if (dmged_piano_only and (not data_from_numpy)) else None
dmged_y1_val_files = dmged_y1_files[val_indices] if (dmged_piano_only and (not data_from_numpy)) else None
# # DEBUG PRINT
# print('Indices ( len =', len(indices), '):', indices[:10])
# print('Val Indices ( len =', len(val_indices), '):', val_indices[:10])
# print('Num train:', num_train, 'num val:', num_val)
# print('x_train_files:', x_train_files[:10])
# print('x_val_files:', x_val_files[:10])
# print('y1_train_files:', y1_train_files[:10])
# print('y1_val_files:', y1_val_files[:10])
# print('y2_train_files:', y2_train_files[:10])
# print('y2_val_files:', y2_val_files[:10])
# if (dmged_y1_train_files is not None) and (dmged_y1_val_files is not None):
# print('dmged_y1_train_files:', dmged_y1_train_files[:10])
# print('dmged_y1_val_files:', dmged_y1_val_files[:10])
# CUSTOM TRAINING Dist training needs a "global_batch_size"
# if not pc_run:
# batch_size_per_replica = train_batch_size // 2
# train_batch_size = batch_size_per_replica * mirrored_strategy.num_replicas_in_sync
print('Train Input Stats:')
if do_curr_best:
print('N Feat:', train_feat, 'Seq Len:', train_seq)
else:
print('N Feat:', train_feat, 'Seq Len:', train_seq, 'Batch Size:', train_batch_size)
# print('ORDER CHECK: y1_train_files:', y1_train_files[:10])
if do_curr_best:
for i, top_result_path in enumerate(top_result_paths):
# TEMP - For damaged data - b/c "_noPC" is missing in txt file
# num = top_result_path.split('_')[-3] if dmged_piano_artificial_noise_mix else top_result_path.split('_')[-4]
num = top_result_path.split('_')[-3] if curr_best_done_on_pc else top_result_path.split('_')[-4]
gs_result_file = open(top_result_path, 'r')
for _ in range(4):
_ = gs_result_file.readline()
best_config = json.loads(gs_result_file.readline())
# EVAL EDITS
name_suffix = 'g=0.1_stretchartn_dmgp'
# name_suffix = 'g=0.1_dmgp_pbv'
# if i == 0:
# name_suffix += '_e150'
# name_suffix += '_1Dense'
# best_config['epochs'] = 150
# elif i == 1:
# name_suffix += '_e150'
# name_suffix += '_3Dense'
# best_config['epochs'] = 150
# elif i == 2:
# name_suffix += '_e300'
# name_suffix += '_1Dense'
# best_config['epochs'] = 300
# else:
# name_suffix += '_e300'
# name_suffix += '_3Dense'
# best_config['epochs'] = 300
# if i == 0:
# name_suffix += '_e150'
# name_suffix += '_b10'
# name_suffix += '_1Dense'
# best_config['epochs'] = 150
# best_config['batch_size'] = 10
# elif i == 1:
# name_suffix += '_e150'
# name_suffix += '_b10'
# name_suffix += '_3Dense'
# best_config['epochs'] = 150
# best_config['batch_size'] = 10
# elif i == 2:
# name_suffix += '_e150'
# name_suffix += '_b40'
# name_suffix += '_1Dense'
# best_config['epochs'] = 150
# best_config['batch_size'] = 40
# elif i == 3:
# name_suffix += '_e150'
# name_suffix += '_b40'
# name_suffix += '_3Dense'
# best_config['epochs'] = 150
# best_config['batch_size'] = 40
# elif i == 4:
# name_suffix += '_e300'
# name_suffix += '_b10'
# name_suffix += '_1Dense'
# best_config['epochs'] = 300
# best_config['batch_size'] = 10
# elif i == 5:
# name_suffix += '_e300'
# name_suffix += '_b10'
# name_suffix += '_3Dense'
# best_config['epochs'] = 300
# best_config['batch_size'] = 10
# elif i == 6:
# name_suffix += '_e300'
# name_suffix += '_b40'
# name_suffix += '_1Dense'
# best_config['epochs'] = 300
# best_config['batch_size'] = 40
# else:
# name_suffix += '_e300'
# name_suffix += '_b40'
# name_suffix += '_3Dense'
# best_config['epochs'] = 300
# best_config['batch_size'] = 40
# name_suffix += str(best_config['gamma'])
# Temp test for LSTM -> until can grid search
# # TEMP - until F35 back up, make managable for PC
# if (len(best_config['layers']) < 4) or (len(best_config['layers']) == 4 and best_config['layers'][0]['type'] == 'Dense'):
run_top_gs_result(num, best_config,
# TRAIN_MEAN, TRAIN_STD,
x_train_files, y1_train_files, y2_train_files,
x_val_files, y1_val_files, y2_val_files, num_train, num_val, train_feat, train_seq,
patience, epsilon, recent_model_path, pc_run, dmged_piano_artificial_noise_mix,
infer_output_path,
# wdw_size,
brahms_path, curr_best_combos, data_path=data_path,
min_sig_len=min_sig_len, tuned_a430hz=tuned_a430hz,
use_basis_vectors=basis_vector_features,
loop_bare_noise=loop_bare_noise,
low_time_steps=low_time_steps,
artificial_noise=artificial_noise,
ignore_noise_loss=ignore_noise_loss,
name_suffix=name_suffix)
else:
# REPL TEST - arch config, all config, optiizer config
if random_hps:
# MEM BOUND TEST
# arch_rand_index = 0
# Index into random arch config, and other random HPs
arch_rand_index = random.randint(0, len(arch_config_optns)-1)
# arch_rand_index = 0
# print('ARCH RAND INDEX:', arch_rand_index)
training_arch_config = arch_config_optns[arch_rand_index]
# print('ARCH CONFIGS AT PREV & NEXT INDICES:\n', arch_config_optns[arch_rand_index-1],
# '---\n', arch_config_optns[arch_rand_index+1])
# print('In random HPs section, rand_index:', arch_rand_index)
# print('FIRST ARCH CONFIG OPTION SHOULD HAVE RNN:\n', arch_config_optns[0])
for hp, optns in train_configs.items():
# print('HP:', hp, 'OPTNS:', optns)
# MEM BOUND TEST
# hp_rand_index = 0
hp_rand_index = random.randint(0, len(optns)-1)
if hp == 'batch_size':
# print('BATCH SIZE RAND INDEX:', hp_rand_index)
train_batch_size = optns[hp_rand_index]
elif hp == 'epochs':
# print('EPOCHS RAND INDEX:', hp_rand_index)
train_epochs = optns[hp_rand_index]
elif hp == 'loss_const':
# print('LOSS CONST RAND INDEX:', hp_rand_index)
train_loss_const = optns[hp_rand_index]
elif hp == 'optimizer':
# hp_rand_index = 2
# print('OPT RAND INDEX:', hp_rand_index)
train_opt_clipval, train_opt_lr, train_opt_name = (
optns[hp_rand_index]
)
# train_optimizer, clip_val, lr, opt_name = (
# optns[hp_rand_index]
# )
# Early stop for random HPs
# TIME TEST
patience = 4
# training_arch_config = arch_config_optns[0]
print('RANDOM TRAIN ARCH FOR USE:')
print(training_arch_config)
print('RANDOM TRAIN HPs FOR USE:')
print('Batch size:', train_batch_size, 'Epochs:', train_epochs,
'Loss constant:', train_loss_const, 'Optimizer:', train_opt_name,
'Clip value:', train_opt_clipval, 'Learning rate:', train_opt_lr)
# elif do_curr_best:
# gs_result_file = open(top_result_path, 'r')
# for _ in range(4):
# _ = gs_result_file.readline()
# best_config = json.loads(gs_result_file.readline())
# train_batch_size = best_config['batch_size']
# # # Avoid OOM
# # if pc_run and train_batch_size > 10:
# # train_batch_size = 10
# train_loss_const = best_config['gamma']
# train_epochs = best_config['epochs']
# train_opt_name = best_config['optimizer']
# train_opt_clipval = None if (best_config['clip value'] == -1) else best_config['clip value']
# train_opt_lr = best_config['learning rate']
# training_arch_config = {}
# training_arch_config['layers'] = best_config['layers']
# training_arch_config['scale'] = best_config['scale']
# training_arch_config['rnn_res_cntn'] = best_config['rnn_res_cntn']
# training_arch_config['bias_rnn'] = best_config['bias_rnn']
# training_arch_config['bias_dense'] = best_config['bias_dense']
# training_arch_config['bidir'] = best_config['bidir']
# training_arch_config['rnn_dropout'] = best_config['rnn_dropout']
# training_arch_config['bn'] = best_config['bn']
# # # Avoid OOM
# # for i, layer in enumerate(best_config['layers']):
# # if layer['type'] == 'LSTM':
# # training_arch_config['layers'][i]['type'] = 'RNN'
# print('TOP TRAIN ARCH FOR USE:')
# print(training_arch_config)
# print('TOP TRAIN HPs FOR USE:')
# print('Batch size:', train_batch_size, 'Epochs:', train_epochs,
# 'Loss constant:', train_loss_const, 'Optimizer:', best_config['optimizer'],
# 'Clip value:', best_config['clip value'], 'Learning rate:', best_config['learning rate'])
# else:
# print('CONFIG:', training_arch_config)
# OLD
# # # TEMP - update for each unique dataset
# # Note - If not numpy, consider if dataset2. If numpy, supply x files.
# # train_mean, train_std = get_data_stats(y1_train_files, y2_train_files, num_train,
# # train_seq=train_seq, train_feat=train_feat,
# # wdw_size=wdw_size, epsilon=epsilon,
# # # pad_len=max_sig_len)
# # pad_len=max_sig_len, x_filenames=x_train_files)
# # print('REMEMBER Train Mean:', train_mean, 'Train Std:', train_std, '\n')
# # # Train Mean: 1728.2116672701493 Train Std: 6450.4985228518635 - 10/18/20 - preprocess & mix final data
# # # Train Mean: 3788.6515897900226 Train Std: 17932.36734269604 - 11/09/20 - damged piano artificial noise data
# # FIX DMGED/ART DATA
# if dmged_piano_artificial_noise_mix:
# train_mean, train_std = TRAIN_MEAN_DMGED, TRAIN_STD_DMGED
# else:
# train_mean, train_std = TRAIN_MEAN, TRAIN_STD
model = evaluate_source_sep(x_train_files, y1_train_files, y2_train_files,
x_val_files, y1_val_files, y2_val_files,
num_train, num_val,
n_feat=train_feat, n_seq=train_seq,
batch_size=train_batch_size,
loss_const=train_loss_const, epochs=train_epochs,
opt_name=train_opt_name, opt_clip_val=train_opt_clipval,
opt_lr=train_opt_lr,
patience=patience, epsilon=epsilon,
recent_model_path=recent_model_path, pc_run=pc_run,
config=training_arch_config, # t_mean=train_mean, t_std=train_std,
dataset2=dmged_piano_artificial_noise_mix,
data_path=data_path, min_sig_len=min_sig_len,
data_from_numpy=data_from_numpy,
tuned_a430hz=tuned_a430hz,
use_basis_vectors=basis_vector_features,
dmged_y1_train_files=dmged_y1_train_files,
dmged_y1_val_files=dmged_y1_val_files,
loop_bare_noise=loop_bare_noise,
low_time_steps=low_time_steps,
artificial_noise=artificial_noise,
ignore_noise_loss=ignore_noise_loss)
if sample:
restore_with_drnn(infer_output_path, recent_model_path, # wdw_size, epsilon,
# train_loss_const,
train_opt_name, train_opt_clipval, train_opt_lr,
min_sig_len,
# test_filepath=None,
test_sig=test_sig, test_sr=test_sr,
# config=training_arch_config, t_mean=train_mean, t_std=train_std,
pc_run=pc_run, name_addon=output_file_addon,
tuned_a430hz=tuned_a430hz,
use_basis_vectors=basis_vector_features,
low_tsteps=low_time_steps)
else:
restore_with_drnn(infer_output_path, recent_model_path, # wdw_size, epsilon,
# train_loss_const,
train_opt_name, train_opt_clipval, train_opt_lr,
min_sig_len,
test_filepath=brahms_path,
# config=training_arch_config, t_mean=train_mean, t_std=train_std,
pc_run=pc_run, name_addon=output_file_addon,
tuned_a430hz=tuned_a430hz,
use_basis_vectors=basis_vector_features,
low_tsteps=low_time_steps)
# GRID SEARCH
elif mode == 'g':
# Dennis - think of good metrics (my loss is obvious first start)
restart, gs_id = False, ''
for arg_i in range(3, 7):
if arg_i < len(sys.argv):
if sys.argv[arg_i] == '-f':
restart = True
print('\nGRID SEARCH TO FORCE RESTART\n')
elif sys.argv[arg_i].isdigit() and len(sys.argv[arg_i]) == 1:
gs_id = sys.argv[arg_i]
print('GRID SEARCH ID:', gs_id, '\n')
early_stop_pat = 10 if low_time_steps else 5
# Define which files to grab for training. Shuffle regardless.
actual_samples = TOTAL_SHORT_SMPLS if low_time_steps else TOTAL_SMPLS
sample_indices = list(range(actual_samples))
random.shuffle(sample_indices)
x_files = np.array([(noise_piano_filepath_prefix + str(i) + '.npy')
for i in sample_indices])
y1_files = np.array([(piano_label_filepath_prefix + str(i) + '.npy')
for i in sample_indices])
y2_files = np.array([(noise_label_filepath_prefix + str(i) + '.npy')
for i in sample_indices])
# Validation & Training Split
indices = list(range(actual_samples))
val_indices = indices[:math.ceil(actual_samples * val_split)]
x_train_files = np.delete(x_files, val_indices)
y1_train_files = np.delete(y1_files, val_indices)
y2_train_files = np.delete(y2_files, val_indices)
x_val_files = x_files[val_indices]
y1_val_files = y1_files[val_indices]
y2_val_files = y2_files[val_indices]
# OLD
# max_sig_len = MAX_SIG_LEN
# Temp - get training data dim (from dummy) (for model & data making)
# max_len_sig = np.ones((MAX_SIG_LEN))
# dummy_train_spgm = make_spectrogram(max_len_sig, wdw_size,
# ova=True, debug=False)[0].astype('float32').T
# TRAIN_SEQ_LEN, TRAIN_FEAT_LEN = dummy_train_spgm.shape
# train_seq, train_feat = TRAIN_SEQ_LEN, TRAIN_FEAT_LEN
# NEW
# train_seq, train_feat, min_sig_len = get_raw_data_stats(y1_files, y2_files, x_files,
# brahms_filename=brahms_path)
if low_time_steps:
train_seq, train_feat, min_sig_len = TRAIN_SEQ_LEN_SMALL, TRAIN_FEAT_LEN, MIN_SIG_LEN_SMALL
else:
train_seq, train_feat, min_sig_len = TRAIN_SEQ_LEN, TRAIN_FEAT_LEN, MIN_SIG_LEN
# broken
# if basis_vector_features:
# train_seq += NUM_SCORE_NOTES
print('Grid Search Input Stats:')
print('N Feat:', train_feat, 'Seq Len:', train_seq)
# OLD
# # TEMP - update for each unique dataset
# # num_train, num_val = len(y1_train_files), len(y1_val_files)
# # Note - If not numpy, consider if dataset2. If numpy, supply x files.
# # train_mean, train_std = get_data_stats(y1_train_files, y2_train_files, num_train,
# # train_seq=train_seq, train_feat=train_feat,
# # wdw_size=wdw_size, epsilon=epsilon,
# # pad_len=max_sig_len)
# # print('REMEMBER Train Mean:', train_mean, 'Train Std:', train_std, '\n')
# # Train Mean: 1728.2116672701493 Train Std: 6450.4985228518635 - 10/18/20
# train_mean, train_std = TRAIN_MEAN, TRAIN_STD
grid_search(x_train_files, y1_train_files, y2_train_files,
x_val_files, y1_val_files, y2_val_files,
n_feat=train_feat, n_seq=train_seq,
epsilon=epsilon,
# t_mean=train_mean, t_std=train_std,
train_configs=train_configs,
arch_config_optns=arch_config_optns,
gsres_path=gs_output_path,
early_stop_pat=early_stop_pat,
pc_run=pc_run, gs_id=gs_id,
restart=restart,
dataset2=dmged_piano_artificial_noise_mix,
tuned_a430hz=tuned_a430hz,
use_basis_vectors=basis_vector_features,
save_model_path=recent_model_path if gs_write_model else None, # NEW - for small grid searchse
low_time_steps=low_time_steps)
if __name__ == '__main__':
main() |
16,662 | 52d1c31e9329dbbaa4dde6dd455c4af90cbb1be8 | from flask import Flask, render_template
from config import Config
from forms import *
from models import *
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy()
db.init_app(app)
app.config['SECRET_KEY'] = 'you-will-never-guess'
@app.route('/home')
def home():
    """Render the static home page template (no dynamic context)."""
    return render_template('home.html')
@app.route('/add_car', methods=['GET', 'POST'])
def add_car():
    """Show the add-car form; on a valid POST, persist a new Car row.

    Renders ``add_car.html`` with the (possibly re-displayed) form in all
    cases; flashes a confirmation message after a successful insert.
    """
    # `flash` was used but never imported at module top; a function-local
    # import keeps this fix self-contained.
    from flask import flash

    form = AddCarForm()
    # BUG FIX: the original tested `form.submit()`. `submit` is the form's
    # SubmitField; calling it renders the button's HTML markup, which is
    # always truthy -- so a Car row was inserted on every request, including
    # plain GETs. `validate_on_submit()` is True only for a POST whose data
    # passes validation.
    if form.validate_on_submit():
        # NOTE(review): creating tables on every submission is wasteful but
        # kept for backward compatibility; normally done once at startup.
        db.create_all()
        car = Car(user_id=form.user_id.data, car_vin=form.car_vin.data)
        db.session.add(car)
        db.session.commit()
        flash('Congratulations, you registered a car!')
    return render_template('add_car.html', title='Add car', form=form)
if __name__ == '__main__':
app.run(debug=True)
|
16,663 | 370078580aa2502aebc40a50aac4dc34d503e5af | from django.apps import AppConfig
class NessieConfig(AppConfig):
    """Django AppConfig for the 'nessie' application; only sets the app name."""
    name = 'nessie'
|
16,664 | 5234f0163d0a3dc0511ca2096c48fa46fbf5c56b | # f2 3位水仙花数计算B
def flowercal():
    """Print every three-digit narcissistic (Armstrong) number.

    A number qualifies when it equals the sum of the cubes of its digits;
    the matches are printed on one line, comma-separated.
    """
    matches = [
        n for n in range(100, 1000)
        if n == sum(int(digit) ** 3 for digit in str(n))
    ]
    print(",".join(map(str, matches)))
flowercal()
|
16,665 | e4ebac790764e331e5973bdc353da8c1fd5fee02 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/17 下午9:21
# @Author : tudoudou
# @File : googLeNet.py
# @Software: PyCharm
import warnings
from keras.models import Model
from keras import layers
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
def conv2D_bn(inputs, filters, kernel_size, padding='same', axis=1):
    """Apply Conv2D (channels_first) -> BatchNormalization -> ReLU.

    `axis` is the feature axis handed to BatchNormalization; the
    normalization runs without a learned scale (``scale=False``).
    Returns the activated tensor.
    """
    conv = Conv2D(filters=filters, kernel_size=kernel_size,
                  padding=padding, data_format='channels_first')
    normalized = BatchNormalization(axis=axis, scale=False)(conv(inputs))
    return Activation('relu')(normalized)
def my_InceptionV3():
    """Build a small Inception-style Keras model for (1, 40, 50) inputs.

    The model has one inception ("mixed0") block with four parallel
    branches, global average pooling, and four independent 11-way softmax
    heads (the returned Model has 4 outputs).
    """
    inputs = Input(shape=(1, 40, 50))
    # Stem: two valid convs shrink the spatial dims, then a same-padded conv.
    t = conv2D_bn(inputs=inputs, filters=8, kernel_size=(5, 5),padding='valid') #
    t = conv2D_bn(t, 8, (5, 5),padding='valid')
    t = conv2D_bn(t, 16, (2, 2))
    t = MaxPooling2D((2, 2), (1, 1))(t)
    # Inception block: 1x1, 1x1->5x5, 1x1->3x3->3x3, and avg-pool->1x1 branches.
    branch1x1 = conv2D_bn(t, 16, (1, 1))
    branch5x5 = conv2D_bn(t, 12, (1, 1))
    branch5x5 = conv2D_bn(branch5x5, 16, (5, 5))
    branch3x3dbl = conv2D_bn(t, 16, (1, 1))
    branch3x3dbl = conv2D_bn(branch3x3dbl, 16, (3, 3))
    branch3x3dbl = conv2D_bn(branch3x3dbl, 16, (3, 3))
    branch_pool = AveragePooling2D((2, 2), strides=(1, 1),padding='same')(t)
    branch_pool = conv2D_bn(branch_pool, 8, (1, 1))
    # Concatenate along the channel axis (axis=1, channels_first).
    t = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=1,
        name='mixed0')
    t = GlobalAveragePooling2D(name='avg_pool')(t)
    # Four parallel classification heads.
    # NOTE(review): 'sotfmax11' looks like a typo for 'softmax11'; it only
    # affects layer names, so it is kept as-is for compatibility.
    t = [Dense(11, activation='softmax', name='sotfmax11_%d' % (i + 1))(t) for i in range(4)]
    model = Model(inputs, t, name='my_InceptionV3')
    return model
# model = my_InceptionV3()
# model.compile(optimizer='rmsprop',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# from keras.utils import plot_model
#
# plot_model(model, to_file='model.png', show_shapes=True)
|
16,666 | bc7438cd91b9085c47395811636ceef6ddf84588 | import unittest
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src")
from GUI.Chat.User import UserListEntry
class User_Test(unittest.TestCase):
    """Comparison semantics of GUI.Chat.User.UserListEntry."""

    def test_inequality(self):
        """Entries built from different names compare unequal."""
        self.assertNotEqual(UserListEntry("a"), UserListEntry("b"))

    def test_less(self):
        """'@a' orders before 'b'."""
        self.assertTrue(UserListEntry("@a") < UserListEntry("b"))

    def test_greater(self):
        """'~a' orders after '%a'."""
        self.assertTrue(UserListEntry("~a") > UserListEntry("%a"))

    def test_equal(self):
        """Entries built from the identical string compare equal."""
        self.assertTrue(UserListEntry("%a") == UserListEntry("%a"))
if __name__ == '__main__':
unittest.main() |
16,667 | 58caa8fcc4e5873080f7daa29b59a477e9bb8934 | #!/usr/bin/env python
#
#
# Software License Agreement (Apache License)
# Copyright (c) 2017, <Advanced Remanufacturing Technology Centre/Mingli Han>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import smach
import smach_ros
import config
# define state Idle
class Idle(smach.State):
    """SMACH state that waits for a PackML start command via OPC UA.

    Outcomes:
        outcome1 -- idle_in is True and the start tag reads True
        outcome2 -- otherwise
    """
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['outcome1','outcome2'],
                             input_keys=['idle_in'],
                             output_keys=['idle_out'])

    def execute(self, userdata):
        rospy.loginfo('Executing state Idle')
        # Receive Start Tag
        var = config.client.get_node("ns=3;s=\"PackML_Status\".\"UN\".\"Cmd_Start\"")
        var.set_value(True)
        # BUG FIX: the original read `var3.get_value()`, but `var3` is never
        # defined here (NameError at runtime). The node just fetched above is
        # `var`, so read it back from there.
        startTag = var.get_value()
        print("Received start command: ", startTag)
        # Comparisons against True kept as-is: inputs may be non-bool values
        # and strict equality is the original (observable) behavior.
        if userdata.idle_in == True and startTag == True:
            userdata.idle_out = True
            return 'outcome1'
        else:
            return 'outcome2'
|
16,668 | cf9f73d2e56821c58b77628d195a9a231a3219df | #!/usr/bin/env python
#
# Copyright 2019 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import mayaUtils
import testUtils
import ufeUtils
import usdUtils
from maya import cmds
from maya import standalone
from maya.internal.ufeSupport import ufeCmdWrapper as ufeCmd
from pxr import Sdf
import mayaUsd.ufe
import ufe
import unittest
import os
def firstSubLayer(context, routingData):
    """Edit-router callback: route edits to the root layer's first sublayer.

    Leaves `routingData` untouched (and returns None) when the context has
    no 'prim' entry or the root layer has no sublayers.
    """
    prim = context.get('prim')
    if prim is None:
        print('Prim not in context')
        return
    subLayerPaths = prim.GetStage().GetRootLayer().subLayerPaths
    if len(subLayerPaths) == 0:
        return
    routingData['layer'] = subLayerPaths[0]
class DuplicateCmdTestCase(unittest.TestCase):
'''Verify the Maya delete command, for multiple runtimes.
UFE Feature : SceneItemOps
Maya Feature : duplicate
Action : Duplicate objects in the scene.
Applied On Selection :
- Multiple Selection [Mixed, Non-Maya]. Maya-only selection tested by
Maya.
Undo/Redo Test : Yes
Expect Results To Test :
- Duplicate objects in the scene.
Edge Cases :
- None.
'''
pluginsLoaded = False
@classmethod
def setUpClass(cls):
fixturesUtils.readOnlySetUpClass(__file__, loadPlugin=False)
if not cls.pluginsLoaded:
cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def setUp(self):
''' Called initially to set up the Maya test environment '''
# Load plugins
self.assertTrue(self.pluginsLoaded)
# Open top_layer.ma scene in testSamples
mayaUtils.openTopLayerScene()
# Create some extra Maya nodes
cmds.polySphere()
# Clear selection to start off
cmds.select(clear=True)
def testDuplicate(self):
'''Duplicate Maya and USD objects.'''
# Select two objects, one Maya, one USD.
spherePath = ufe.Path(mayaUtils.createUfePathSegment("|pSphere1"))
sphereItem = ufe.Hierarchy.createItem(spherePath)
sphereHierarchy = ufe.Hierarchy.hierarchy(sphereItem)
worldItem = sphereHierarchy.parent()
ball35Path = ufe.Path([
mayaUtils.createUfePathSegment(
"|transform1|proxyShape1"),
usdUtils.createUfePathSegment("/Room_set/Props/Ball_35")])
ball35Item = ufe.Hierarchy.createItem(ball35Path)
ball35Hierarchy = ufe.Hierarchy.hierarchy(ball35Item)
propsItem = ball35Hierarchy.parent()
worldHierarchy = ufe.Hierarchy.hierarchy(worldItem)
worldChildrenPre = worldHierarchy.children()
propsHierarchy = ufe.Hierarchy.hierarchy(propsItem)
propsChildrenPre = propsHierarchy.children()
ufe.GlobalSelection.get().append(sphereItem)
ufe.GlobalSelection.get().append(ball35Item)
# Set the edit target to the layer in which Ball_35 is defined (has a
# primSpec, in USD terminology). Otherwise, duplication will not find
# a source primSpec to copy. Layers are the (anonymous) session layer,
# the root layer, then the Assembly_room_set sublayer. Trying to find
# the layer by name is not practical, as it requires the full path
# name, which potentially differs per run-time environment.
ball35Prim = usdUtils.getPrimFromSceneItem(ball35Item)
stage = ball35Prim.GetStage()
layer = stage.GetLayerStack()[2]
stage.SetEditTarget(layer)
cmds.duplicate()
# The duplicate command doesn't return duplicated non-Maya UFE objects.
# They are in the selection, in the same order as the sources.
snIter = iter(ufe.GlobalSelection.get())
sphereDupItem = next(snIter)
sphereDupName = str(sphereDupItem.path().back())
ball35DupItem = next(snIter)
ball35DupName = str(ball35DupItem.path().back())
worldChildren = worldHierarchy.children()
propsChildren = propsHierarchy.children()
self.assertEqual(len(worldChildren)-len(worldChildrenPre), 1)
self.assertEqual(len(propsChildren)-len(propsChildrenPre), 1)
self.assertIn(sphereDupItem, worldChildren)
self.assertIn(ball35DupItem, propsChildren)
cmds.undo()
# The duplicated items should no longer appear in the child list of
# their parents.
def childrenNames(children):
return [str(child.path().back()) for child in children]
worldHierarchy = ufe.Hierarchy.hierarchy(worldItem)
worldChildren = worldHierarchy.children()
propsHierarchy = ufe.Hierarchy.hierarchy(propsItem)
propsChildren = propsHierarchy.children()
worldChildrenNames = childrenNames(worldChildren)
propsChildrenNames = childrenNames(propsChildren)
self.assertNotIn(sphereDupName, worldChildrenNames)
self.assertNotIn(ball35DupName, propsChildrenNames)
# The duplicated items shoudl reappear after a redo
cmds.redo()
snIter = iter(ufe.GlobalSelection.get())
sphereDupItem = next(snIter)
ball35DupItem = next(snIter)
worldChildren = worldHierarchy.children()
propsChildren = propsHierarchy.children()
self.assertEqual(len(worldChildren)-len(worldChildrenPre), 1)
self.assertEqual(len(propsChildren)-len(propsChildrenPre), 1)
self.assertIn(sphereDupItem, worldChildren)
self.assertIn(ball35DupItem, propsChildren)
cmds.undo()
# The duplicated items should not be assigned to the name of a
# deactivated USD item.
cmds.select(clear=True)
# Deactivate the even numbered props:
evenPropsChildrenPre = propsChildrenPre[0:35:2]
for propChild in evenPropsChildrenPre:
ufe.GlobalSelection.get().append(propChild)
sceneItem = usdUtils.getPrimFromSceneItem(propChild)
sceneItem.SetActive(False)
worldHierarchy = ufe.Hierarchy.hierarchy(worldItem)
worldChildren = worldHierarchy.children()
propsHierarchy = ufe.Hierarchy.hierarchy(propsItem)
propsChildren = propsHierarchy.children()
propsChildrenPostDel = propsHierarchy.children()
# Duplicate Ball_1
ufe.GlobalSelection.get().append(propsChildrenPostDel[0])
cmds.duplicate()
snIter = iter(ufe.GlobalSelection.get())
ballDupItem = next(snIter)
ballDupName = str(ballDupItem.path().back())
self.assertNotIn(ballDupItem, propsChildrenPostDel)
self.assertNotIn(ballDupName, propsChildrenNames)
self.assertEqual(ballDupName, "Ball_36")
cmds.undo() # undo duplication
def testDuplicateLoadedAndUnloaded(self):
'''Duplicate a USD object when the object payload is loaded or unloaded under an unloaded ancestor.'''
# Test helpers
def getItem(path):
'''Get the UFE scene item and USD prim for an item under a USD path'''
fullPath = ufe.Path([
mayaUtils.createUfePathSegment(
"|transform1|proxyShape1"),
usdUtils.createUfePathSegment(path)])
item = ufe.Hierarchy.createItem(fullPath)
prim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(fullPath))
return item, prim
def executeContextCmd(ufeItem, subCmd):
'''Execute a context-menu command, supports among other things Load and Unload.'''
contextOps = ufe.ContextOps.contextOps(ufeItem)
cmd = contextOps.doOpCmd([subCmd])
self.assertIsNotNone(cmd)
ufeCmd.execute(cmd)
def loadItem(ufeItem):
'''Load the payload of a scene item.'''
executeContextCmd(ufeItem, 'Load')
def loadItemWithDescendants(ufeItem):
'''Load the payload of a scene item and its descendants.'''
executeContextCmd(ufeItem, 'Load with Descendants')
def unloadItem(ufeItem):
'''Unload the payload of a scene item.'''
executeContextCmd(ufeItem, 'Unload')
def duplicate(ufeItem):
'''Duplicate a scene item and return the UFE scene item of the new item.'''
# Set the edit target to the layer in which Ball_35 is defined (has a
# primSpec, in USD terminology). Otherwise, duplication will not find
# a source primSpec to copy. Layers are the (anonymous) session layer,
# the root layer, then the Assembly_room_set sublayer. Trying to find
# the layer by name is not practical, as it requires the full path
# name, which potentially differs per run-time environment.
prim = usdUtils.getPrimFromSceneItem(ufeItem)
stage = prim.GetStage()
layer = stage.GetLayerStack()[2]
stage.SetEditTarget(layer)
ufe.GlobalSelection.get().clear()
ufe.GlobalSelection.get().append(ufeItem)
cmds.duplicate()
# The duplicate command doesn't return duplicated non-Maya UFE objects.
# They are in the selection, in the same order as the sources.
sel = ufe.GlobalSelection.get()
self.assertEqual(1, len(sel))
return sel.front()
# Retrieve the ancestor props and one child ball item.
propsItem, propsPrim = getItem("/Room_set/Props")
ball35Item, ball35Prim = getItem("/Room_set/Props/Ball_35")
ball7Item, ball7Prim = getItem("/Room_set/Props/Ball_7")
# Unload the Props and verify everything is unloaded.
# Note: items without payload are considered loaded, so only check balls.
unloadItem(propsItem)
self.assertFalse(ball35Prim.IsLoaded())
self.assertFalse(ball7Prim.IsLoaded())
# Duplicate the ball 35 and verify the new ball is unloaded
# because the original was unloaded due to the ancestor being unloaded.
ball35DupItem = duplicate(ball35Item)
ball35DupPath = ball35DupItem.path()
ball35DupPrim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(ball35DupPath))
self.assertFalse(ball35Prim.IsLoaded())
self.assertFalse(ball7Prim.IsLoaded())
self.assertFalse(ball35DupPrim.IsLoaded())
# Explicitly load the ball 35 and verify its load status.
loadItem(ball35Item)
self.assertTrue(ball35Prim.IsLoaded())
self.assertFalse(ball7Prim.IsLoaded())
# Duplicate the ball 35 and verify the new ball is loaded even though
# the props ancestor unloaded rule would normally make in unloaded.
ball35DupItem = duplicate(ball35Item)
ball35DupPath = ball35DupItem.path()
ball35DupPrim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(ball35DupPath))
self.assertTrue(ball35Prim.IsLoaded())
self.assertFalse(ball7Prim.IsLoaded())
self.assertTrue(ball35DupPrim.IsLoaded())
# Load the props items and its descendants and verify the status of the balls.
loadItemWithDescendants(propsItem)
self.assertTrue(ball35Prim.IsLoaded())
self.assertTrue(ball7Prim.IsLoaded())
# Duplicate the ball 35 and verify the new ball is loaded since
# everything is marked as loaded.
ball35DupItem = duplicate(ball35Item)
ball35DupPath = ball35DupItem.path()
ball35DupPrim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(ball35DupPath))
self.assertTrue(ball35Prim.IsLoaded())
self.assertTrue(ball7Prim.IsLoaded())
self.assertTrue(ball35DupPrim.IsLoaded())
# Unload the ball 35 items and verify the status of the balls.
unloadItem(ball35Item)
self.assertFalse(ball35Prim.IsLoaded())
self.assertTrue(ball7Prim.IsLoaded())
# Duplicate the ball 35 and verify the new ball is unloaded even though
# normally it would be loaded since the ancestor is loaded.
ball35DupItem = duplicate(ball35Item)
ball35DupPath = ball35DupItem.path()
ball35DupPrim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(ball35DupPath))
self.assertFalse(ball35Prim.IsLoaded())
self.assertTrue(ball7Prim.IsLoaded())
self.assertFalse(ball35DupPrim.IsLoaded())
    @unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2022, 'Requires Maya fixes only available in Maya 2022 or greater.')
    def testSmartTransformDuplicate(self):
        '''Test smart transform option of duplicate command.

        Duplicate a torus, move the copy +10 in X, then duplicate again with
        smartTransform=True: the smart duplicate should re-apply the recorded
        move, landing the third torus (/pTorus3) at X == 20.
        '''
        torusFile = testUtils.getTestScene("groupCmd", "torus.usda")
        torusDagPath, torusStage = mayaUtils.createProxyFromFile(torusFile)
        usdTorusPathString = torusDagPath + ",/pTorus1"
        cmds.duplicate(usdTorusPathString)
        cmds.move(10, 0, 0, r=True)
        smartDup = cmds.duplicate(smartTransform=True)
        usdTorusItem = ufeUtils.createUfeSceneItem(torusDagPath, '/pTorus3')
        torusT3d = ufe.Transform3d.transform3d(usdTorusItem)
        # The last row of the 4x4 inclusive matrix holds the translation.
        transVector = torusT3d.inclusiveMatrix().matrix[-1]
        correctResult = [20, 0, 0, 1]
        self.assertEqual(correctResult, transVector)
def testEditRouter(self):
'''Test edit router functionality.'''
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
# Create the following hierarchy:
#
# ps
# |_ A
# |_ B
#
# We A and duplicate it.
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
stage = mayaUsd.lib.GetPrim(psPathStr).GetStage()
stage.DefinePrim('/A', 'Xform')
stage.DefinePrim('/A/B', 'Xform')
psPath = ufe.PathString.path(psPathStr)
psPathSegment = psPath.segments[0]
aPath = ufe.Path([psPathSegment, usdUtils.createUfePathSegment('/A')])
a = ufe.Hierarchy.createItem(aPath)
bPath = aPath + ufe.PathComponent('B')
b = ufe.Hierarchy.createItem(bPath)
# Add a sub-layer, where the parent edit should write to.
subLayerId = cmds.mayaUsdLayerEditor(stage.GetRootLayer().identifier, edit=True, addAnonymous="aSubLayer")[0]
mayaUsd.lib.registerEditRouter('duplicate', firstSubLayer)
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(a)
cmds.duplicate()
sublayer01 = Sdf.Find(subLayerId)
self.assertIsNotNone(sublayer01)
self.assertIsNotNone(sublayer01.GetPrimAtPath('/A1/B'))
@unittest.skipUnless(ufeUtils.ufeFeatureSetVersion() >= 4, 'Test only available in UFE v4 or greater')
def testUfeDuplicateCommandAPI(self):
'''Test that the duplicate command can be invoked using the 3 known APIs.'''
testFile = testUtils.getTestScene('MaterialX', 'BatchOpsTestScene.usda')
shapeNode,shapeStage = mayaUtils.createProxyFromFile(testFile)
geomItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane1')
self.assertIsNotNone(geomItem)
# Test NoExecute API:
duplicateCmd = ufe.SceneItemOps.sceneItemOps(geomItem).duplicateItemCmdNoExecute()
self.assertIsNotNone(duplicateCmd)
duplicateCmd.execute()
duplicateItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNotNone(duplicateItem)
self.assertEqual(duplicateItem, duplicateCmd.sceneItem)
duplicateCmd.undo()
nonExistentItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNone(nonExistentItem)
duplicateCmd.redo()
duplicateItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNotNone(duplicateItem)
self.assertEqual(duplicateItem, duplicateCmd.sceneItem)
duplicateCmd.undo()
# Test Exec but undoable API:
duplicateCmd = ufe.SceneItemOps.sceneItemOps(geomItem).duplicateItemCmd()
self.assertIsNotNone(duplicateCmd)
duplicateItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNotNone(duplicateItem)
self.assertEqual(duplicateItem, duplicateCmd.item)
duplicateCmd.undoableCommand.undo()
nonExistentItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNone(nonExistentItem)
duplicateCmd.undoableCommand.redo()
duplicateItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNotNone(duplicateItem)
self.assertEqual(duplicateItem, duplicateCmd.item)
duplicateCmd.undoableCommand.undo()
# Test non-undoable API:
geomItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane1')
self.assertIsNotNone(geomItem)
duplicatedItem = ufe.SceneItemOps.sceneItemOps(geomItem).duplicateItem()
self.assertIsNotNone(duplicateCmd)
plane7Item = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane7')
self.assertIsNotNone(plane7Item)
self.assertEqual(plane7Item, duplicatedItem)
@unittest.skipUnless(ufeUtils.ufeFeatureSetVersion() >= 4, 'Test only available in UFE v4 or greater')
def testUfeDuplicateHomonyms(self):
'''Test that duplicating two items with similar names end up in two new duplicates.'''
testFile = testUtils.getTestScene('MaterialX', 'BatchOpsTestScene.usda')
shapeNode,shapeStage = mayaUtils.createProxyFromFile(testFile)
geomItem1 = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane1')
self.assertIsNotNone(geomItem1)
geomItem2 = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane2')
self.assertIsNotNone(geomItem2)
batchOpsHandler = ufe.RunTimeMgr.instance().batchOpsHandler(geomItem1.runTimeId())
self.assertIsNotNone(batchOpsHandler)
sel = ufe.Selection()
sel.append(geomItem1)
sel.append(geomItem2)
cmd = batchOpsHandler.duplicateSelectionCmd(sel, {"inputConnections": False})
cmd.execute()
self.assertNotEqual(cmd.targetItem(geomItem1.path()).path(), cmd.targetItem(geomItem2.path()).path())
@unittest.skipUnless(ufeUtils.ufeFeatureSetVersion() >= 4, 'Test only available in UFE v4 or greater')
def testUfeDuplicateDescendants(self):
'''MAYA-125854: Test that duplicating a descendant of a selected ancestor results in the
duplicate from the ancestor.'''
testFile = testUtils.getTestScene('MaterialX', 'BatchOpsTestScene.usda')
shapeNode,shapeStage = mayaUtils.createProxyFromFile(testFile)
# Take 3 items that are in a hierarchical relationship.
shaderItem1 = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane2/mtl/ss2SG')
self.assertIsNotNone(shaderItem1)
geomItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane2')
self.assertIsNotNone(geomItem)
shaderItem2 = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane2/mtl/ss2SG/MayaNG_ss2SG/MayaConvert_file2_MayafileTexture')
self.assertIsNotNone(shaderItem2)
batchOpsHandler = ufe.RunTimeMgr.instance().batchOpsHandler(geomItem.runTimeId())
self.assertIsNotNone(batchOpsHandler)
# Put them in a selection, making sure one child item is first, and that another child item is last.
sel = ufe.Selection()
sel.append(shaderItem1)
sel.append(geomItem)
sel.append(shaderItem2)
cmd = batchOpsHandler.duplicateSelectionCmd(sel, {"inputConnections": False})
cmd.execute()
duplicatedGeomItem = cmd.targetItem(geomItem.path())
self.assertEqual(ufe.PathString.string(duplicatedGeomItem.path()), "|stage|stageShape,/pPlane7" )
# Make sure the duplicated shader items are descendants of the duplicated geom pPlane7.
sel.clear()
sel.append(duplicatedGeomItem)
duplicatedShaderItem1 = cmd.targetItem(shaderItem1.path())
self.assertEqual(ufe.PathString.string(duplicatedShaderItem1.path()),
"|stage|stageShape,/pPlane7/mtl/ss2SG" )
self.assertTrue(sel.containsAncestor(duplicatedShaderItem1.path()))
duplicatedShaderItem2 = cmd.targetItem(shaderItem2.path())
self.assertEqual(ufe.PathString.string(duplicatedShaderItem2.path()),
"|stage|stageShape,/pPlane7/mtl/ss2SG/MayaNG_ss2SG/MayaConvert_file2_MayafileTexture" )
self.assertTrue(sel.containsAncestor(duplicatedShaderItem2.path()))
# Test that the ancestor search terminates correctly:
nonDuplicatedGeomItem = ufeUtils.createUfeSceneItem(shapeNode, '/pPlane1')
self.assertIsNotNone(nonDuplicatedGeomItem)
self.assertIsNone(cmd.targetItem(nonDuplicatedGeomItem.path()))
def testMultiLayerOpinions(self):
'''
Test duplicating a prim that has opinion on multile layers.
'''
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
# Create a stage with two layer
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
stage = mayaUsd.lib.GetPrim(psPathStr).GetStage()
subLayerId = cmds.mayaUsdLayerEditor(stage.GetRootLayer().identifier, edit=True, addAnonymous="aSubLayer")[0]
self.assertIsNotNone(subLayerId)
topLayer, bottomLayer = stage.GetLayerStack()[1:]
self.assertIsNotNone(topLayer)
self.assertIsNotNone(bottomLayer)
# Create an xform with opinions in both layers.
stage.SetEditTarget(bottomLayer)
stage.DefinePrim('/A', 'Xform')
psPath = ufe.PathString.path(psPathStr)
psPathSegment = psPath.segments[0]
aPath = ufe.Path([psPathSegment, usdUtils.createUfePathSegment('/A')])
aItem = ufe.Hierarchy.createItem(aPath)
stage.SetEditTarget(topLayer)
aTrf = ufe.Transform3d.transform3d(aItem)
aTrf.translate(1., 2., 3.)
self.assertEqual(ufe.Vector3d(1., 2., 3.), aTrf.translation())
# Select the item and duplicate it.
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(aItem)
cmds.duplicate()
a1Prim = stage.GetPrimAtPath('/A1')
self.assertIsNotNone(a1Prim)
a1Path = ufe.Path([psPathSegment, usdUtils.createUfePathSegment('/A1')])
a1Item = ufe.Hierarchy.createItem(a1Path)
a1Trf = ufe.Transform3d.transform3d(a1Item)
self.assertEqual(ufe.Vector3d(1., 2., 3.), a1Trf.translation())
    def testReferencedPrim(self):
        '''
        Test duplicating a prim that is in a referenced file.
        '''
        cmds.file(new=True, force=True)
        cubeRefFile = testUtils.getTestScene("cubeRef", "cube-root.usda")
        cubeRefDagPath, cubeRefStage = mayaUtils.createProxyFromFile(cubeRefFile)
        # Duplicate the cube mesh that is brought in through a reference arc.
        cubeUfePathString = ','.join([cubeRefDagPath, "/RootPrim/PrimWithRef/CubeMesh"])
        cmds.duplicate(cubeUfePathString)
        # The duplicate appears as a sibling with a numeric suffix and keeps
        # the source prim's translation.
        dupItem = ufeUtils.createUfeSceneItem(cubeRefDagPath, '/RootPrim/PrimWithRef/CubeMesh1')
        self.assertIsNotNone(dupItem)
        dupTrf = ufe.Transform3d.transform3d(dupItem)
        self.assertEqual(ufe.Vector3d(2., 0., -2.), dupTrf.translation())
def testPrimWithReference(self):
'''
Test duplicating a prim that contains a reference.
The content of the reference should not become part of the destination prim.
The destination should still simply contain the reference arc.
'''
cmds.file(new=True, force=True)
cubeRefFile = testUtils.getTestScene("cubeRef", "cube-root.usda")
cubeRefDagPath, cubeRefStage = mayaUtils.createProxyFromFile(cubeRefFile)
cubeUfePathString = ','.join([cubeRefDagPath, "/RootPrim/PrimWithRef"])
cmds.duplicate(cubeUfePathString)
# Verify that the duplicated item still references the cube.
cubeRefByDupItem = ufeUtils.createUfeSceneItem(cubeRefDagPath, '/RootPrim/PrimWithRef1/CubeMesh')
self.assertIsNotNone(cubeRefByDupItem)
dupTrf = ufe.Transform3d.transform3d(cubeRefByDupItem)
self.assertEqual(ufe.Vector3d(2., 0., -2.), dupTrf.translation())
# Make sure the geometry did not get flattened into the duplicate
rootLayer = cubeRefStage.GetRootLayer()
rootLayerText = rootLayer.ExportToString()
self.assertNotIn('"Geom"', rootLayerText)
self.assertNotIn('Mesh', rootLayerText)
def testPrimWithPayload(self):
'''
Test duplicating a prim that has a payload.
The content of the payload should not become part of the destination prim.
The destination should still simply contain the payload arc.
'''
cmds.file(new=True, force=True)
withPayloadFile = testUtils.getTestScene("payload", "FlowerPot.usda")
withPayloadDagPath, withPayloadStage = mayaUtils.createProxyFromFile(withPayloadFile)
withPayloadUfePathString = ','.join([withPayloadDagPath, "/FlowerPot"])
cmds.duplicate(withPayloadUfePathString)
dupItem = ufeUtils.createUfeSceneItem(withPayloadDagPath, '/FlowerPot1')
self.assertIsNotNone(dupItem)
# Make sure the geometry did not get flattened into the duplicate
rootLayer = withPayloadStage.GetRootLayer()
rootLayerText = rootLayer.ExportToString()
self.assertNotIn('"Geom"', rootLayerText)
self.assertNotIn('Mesh', rootLayerText)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
16,669 | e9fcf027227dc2c76d58b56afc8a0ae3827d8785 | import time
from PyQt4 import QtCore
import sys
from PyQt4 import QtGui
import os
import Kmean
import Region_grow
import graph_based
import thread
import otsu
import Err
import fuzzy
import GlobalThresholding
from PIL import Image
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
# PyQt4 compatibility shims: QString.fromUtf8 / UnicodeUTF8 exist only under
# the old (API v1) bindings; fall back to no-ops on newer bindings.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: strings are already unicode, pass through unchanged.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
# Module-level state shared between the UI (Ui_Form) and the worker thread
# (TaskThread).  The radio buttons set algo_index; the line edits fill the
# per-algorithm parameters below.
img=""          # path of the currently loaded input image
grd_img=""      # path of the ground-truth image used for the similarity index
K=10            # cluster count (K-means / watershed / fuzzy C-means)
delta=0         # delta parameter for global thresholding
iterNum=5       # iteration count (K-means / watershed)
Num_par=2       # number of partitions for global thresholding
sigma=0         # sigma (graph-based smoothing / fuzzy C-means)
mean_size=1     # minimum component size for graph-based segmentation
K_graph=1000    # K threshold for graph-based segmentation
row=1           # seed row for region growing
col=23          # seed column for region growing
thre=12         # threshold for region growing
algo_index=1    # 1..7, selects which algorithm TaskThread.run executes
file1=""        # basename of the input image
file2=""        # basename of the ground-truth image
def rgb2gray(rgb):
    """Collapse an RGB(A) array to grayscale luma (ITU-R BT.601 weights)."""
    luma_weights = [0.299, 0.587, 0.114]
    # Drop any alpha channel, then take the weighted sum over the last axis.
    return np.dot(rgb[..., :3], luma_weights)
class Ui_Form(QtGui.QWidget):
    """Main window of the image-segmentation toolkit (PyQt4, Python 2).

    Layout: a top frame with algorithm radio buttons and the file picker, a
    bottom-left pane with per-algorithm parameters, histograms and the
    ground-truth similarity readout, and a bottom-right pane showing the
    input and output images.  The selected algorithm runs on a background
    TaskThread; state is exchanged through module-level globals and the
    "test.jpg" result file written to the working directory.
    """
    def __init__(self):
        super(Ui_Form, self).__init__()
        self.setupUi()
    def setupUi(self):
        """Build all widgets, wire their signals, and show the window."""
        hbox = QtGui.QHBoxLayout(self)
        # Three panes arranged with nested splitters:
        # top (controls) over [topleft (parameters) | topright (images)].
        self.topleft = QtGui.QFrame()
        self.topleft.setFrameShape(QtGui.QFrame.StyledPanel)
        self.topright = QtGui.QFrame()
        self.topright.setFrameShape(QtGui.QFrame.StyledPanel)
        self.top = QtGui.QFrame()
        self.top.setFrameShape(QtGui.QFrame.StyledPanel)
        self.splitter1 = QtGui.QSplitter(QtCore.Qt.Horizontal)
        self.splitter1.addWidget(self.topleft)
        self.splitter1.addWidget(self.topright)
        self.splitter1.setSizes([41,59])
        self.splitter2 = QtGui.QSplitter(QtCore.Qt.Vertical)
        self.splitter2.addWidget(self.top)
        self.splitter2.addWidget(self.splitter1)
        self.splitter2.setSizes([30,80])
        self.header_top_left = QtGui.QLabel(self.topleft)
        self.header_top_left.setGeometry(3,3,180,40)
        self.header_top_left.setText("<font color='#0000c6' > <h3>VARIABLES :-</h3></font>")
        # Three generic parameter inputs; their labels (var1..var3) are
        # relabeled per algorithm in btnstate().  The typed text is mirrored
        # into hidden answer labels so returnAnswer*() can read it back.
        self.answer1 = QtGui.QLabel()
        q1Edit = QtGui.QLineEdit(self.topleft)
        q1Edit.textChanged.connect(self.q1Changed)
        q1Edit.setGeometry(115,40,50,20)
        self.var1 = QtGui.QLabel(self.topleft)
        self.var1.setGeometry(5,30,105,40)
        self.var1.setText("<font color='#97bf0d' > <h4>Iteration Value :</h4></font>")
        self.answer2 = QtGui.QLabel()
        q2Edit = QtGui.QLineEdit(self.topleft)
        q2Edit.textChanged.connect(self.q2Changed)
        q2Edit.setGeometry(115,80,50,20)
        self.var2 = QtGui.QLabel(self.topleft)
        self.var2.setGeometry(5,70,100,40)
        self.var2.setText("<font color='#97bf0d' > <h4>K Value :</h4></font>")
        self.answer3 = QtGui.QLabel()
        q3Edit = QtGui.QLineEdit(self.topleft)
        q3Edit.textChanged.connect(self.q3Changed)
        q3Edit.setGeometry(115,120,50,20)
        self.var3 = QtGui.QLabel(self.topleft)
        self.var3.setGeometry(5,110,100,40)
        self.var3.setText("<font color='#97bf0d' > <h4>Variable3 :</h4></font>")
        self.Img_inf = QtGui.QLabel(self.topleft)
        self.Img_inf.setGeometry(5,150,180,200)
        # One radio button per segmentation algorithm; each sets algo_index
        # via btnstate() when toggled on.
        self.b1 = QtGui.QRadioButton(self.top)
        self.b1.setStyleSheet('QRadioButton {background-color: #feb0b1; color: blue;}')
        self.b1.setText("K-mean Algorithm")
        self.b1.setChecked(True)
        self.b1.toggled.connect(lambda:self.btnstate(self.b1))
        self.b1.setGeometry(10,50,200,30)
        self.b2 = QtGui.QRadioButton(self.top)
        self.b2.setStyleSheet('QRadioButton {background-color: #fec0b1; color: blue;}')
        self.b2.setText("Global Thresholding")
        self.b2.toggled.connect(lambda:self.btnstate(self.b2))
        self.b2.setGeometry(10,80,200,30)
        self.b3 = QtGui.QRadioButton(self.top)
        self.b3.setStyleSheet('QRadioButton {background-color: #feb0b1; color: blue;}')
        self.b3.setText("Regin Grow Method")
        self.b3.toggled.connect(lambda:self.btnstate(self.b3))
        self.b3.setGeometry(10,110,200,30)
        self.b4 = QtGui.QRadioButton(self.top)
        self.b4.setStyleSheet('QRadioButton {background-color: #fec0b1; color: blue;}')
        self.b4.setText("Graph Based Method")
        self.b4.toggled.connect(lambda:self.btnstate(self.b4))
        self.b4.setGeometry(250,50,200,30)
        self.b5 = QtGui.QRadioButton(self.top)
        self.b5.setStyleSheet('QRadioButton {background-color: #feb0b1; color: blue;}')
        self.b5.setText("Otsu Method")
        self.b5.toggled.connect(lambda:self.btnstate(self.b5))
        self.b5.setGeometry(250,80,200,30)
        self.b6 = QtGui.QRadioButton(self.top)
        self.b6.setStyleSheet('QRadioButton {background-color: #fec0b1; color: blue;}')
        self.b6.setText("Watershed Method")
        self.b6.toggled.connect(lambda:self.btnstate(self.b6))
        self.b6.setGeometry(250,110,200,30)
        self.b7 = QtGui.QRadioButton(self.top)
        self.b7.setStyleSheet('QRadioButton {background-color: #fec0b1; color: blue;}')
        self.b7.setText("Fuzzy C-Mean")
        self.b7.toggled.connect(lambda:self.btnstate(self.b7))
        self.b7.setGeometry(10,140,200,30)
        # Description panel; its text is swapped per algorithm in btnstate().
        self.DisText = QtGui.QLabel(self.top)
        self.DisText.setGeometry(500,10,480,200)
        self.DisText.setText("<font color='blue' > <h1> K-mean Algorithm </h1></font><font color='#97bf0d' > <h3>This algorithm uses to find K cluster in image. For which <br/> you have to select K value and Iteration value .If you <br> have no idea than select K = 5 and iterarion no more then<br> 10.<br></h3></font>")
        # Indeterminate progress bar pulses while the worker thread runs.
        self.progressBar = QtGui.QProgressBar(self.topright)
        self.progressBar.setGeometry(QtCore.QRect(100, 13, 508, 23))
        self.progressBar.setProperty("value", 10)
        self.progressBar.setTextVisible(False)
        self.progressBar.setObjectName(_fromUtf8("progressBar"))
        # pic/pic2: input and output previews; hist/hist2: their histograms.
        self.pic = QtGui.QLabel(self.topright)
        self.pic.setGeometry(10, 20, 300, 400)
        self.pic2 = QtGui.QLabel(self.topright)
        self.pic2.setGeometry(320, 20, 300, 400)
        self.hist = QtGui.QLabel(self.topleft)
        self.hist.setGeometry(200, 20, 300, 230)
        self.hist2 = QtGui.QLabel(self.topleft)
        self.hist2.setGeometry(200, 260, 300, 230)
        self.runButton = QtGui.QPushButton("run",self.topright)
        self.runButton.setStyleSheet('QPushButton {background-color: #A3C1DA; color: red;}')
        self.runButton.setGeometry(QtCore.QRect(20, 10, 70, 30))
        self.runButton.setObjectName(_fromUtf8("runButton"))
        self.runButton.clicked.connect(self.onStart)
        open_btn = QtGui.QPushButton('Open', self.top)
        open_btn.setStyleSheet('QPushButton {background-color: #A3C1DA; color: red;}')
        open_btn.resize(80,30)
        open_btn.move(25, 10)
        open_btn.clicked.connect(self.showDialog)
        self.fileText = QtGui.QLabel(self.top)
        self.fileText.setGeometry(108,10,500,40)
        self.inputfile = QtGui.QLabel(self.topright)
        self.inputfile.setGeometry(20,425,200,40)
        self.runingAlgo = QtGui.QLabel(self.topright)
        self.runingAlgo.setGeometry(620,7,200,40)
        self.runingAlgo.setText("Choose file")
        self.inputfile.setText("<font color='#0000c3' > <h3>INPUT IMAGE </h3></font>")
        self.outputfile = QtGui.QLabel(self.topright)
        self.outputfile.setGeometry(390,425,200,40)
        self.outputfile.setText("<font color='#0000c3' > <h3>OUTPUT IMAGE </h3></font>")
        openFile = QtGui.QAction(QtGui.QIcon('open.png'), 'Open', self.top)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.showDialog)
        # Ground-truth image picker, used for the similarity-index readout.
        open2_btn = QtGui.QPushButton('Select GT Image', self.topleft)
        open2_btn.setStyleSheet('QPushButton {background-color: #A3C1DA; color: red;}')
        open2_btn.resize(180,30)
        open2_btn.move(10, 340)
        open2_btn.clicked.connect(self.showDialog2)
        self.errorText = QtGui.QLabel(self.topleft)
        self.errorText.setGeometry(10,380,200,100)
        open2File = QtGui.QAction(QtGui.QIcon('open.png'), 'Select GT Image', self.topleft)
        open2File.setShortcut('Ctrl+O')
        open2File.setStatusTip('Open new File')
        open2File.triggered.connect(self.showDialog2)
        """menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)"""
        # Background worker; onFinished() is invoked via the taskFinished signal.
        self.myLongTask = TaskThread()
        self.myLongTask.taskFinished.connect(self.onFinished)
        hbox.addWidget(self.splitter2)
        self.setLayout(hbox)
        QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Cleanlooks'))
        self.setGeometry(50, 50, 1200, 800)
        self.setWindowTitle('Image Segmentation tool kit')
        self.show()
    def onStart(self):
        """Parse the parameter fields for the selected algorithm into the
        module globals and launch the worker thread."""
        global img,algo_index
        # Non-numeric input raises ValueError inside int()/float(); the
        # broad except turns that into an on-screen error message.
        try:
            if algo_index==1:
                global K,iterNum
                K=int(self.returnAnswer1())
                iterNum=int(self.returnAnswer2())
            elif algo_index==2:
                global Num_par,delta
                Num_par=int(self.returnAnswer1())
                delta=int(self.returnAnswer2())
            elif algo_index==3:
                global row,col,thre
                row=int(self.returnAnswer1())
                col=int(self.returnAnswer2())
                thre=int(self.returnAnswer3())
            elif algo_index==4:
                global sigma,K_graph,mean_size
                sigma=int(self.returnAnswer1())
                K_graph=int(self.returnAnswer2())
                mean_size=int(self.returnAnswer3())
            elif algo_index==5:
                # Otsu needs no parameters.
                pass
            elif algo_index==6:
                global K,iterNum
                K=int(self.returnAnswer1())
                iterNum=int(self.returnAnswer2())
            elif algo_index==7 :
                global K,sigma
                K=int(self.returnAnswer1())
                sigma=float(self.returnAnswer2())
        except:
            self.runingAlgo.setText("<font color='red' > <h3>Error:Wrong Option </h3></font>")
            return
        if img!="" :
            # Range (0,0) puts the progress bar into indeterminate "pulse" mode.
            self.progressBar.setRange(0,0)
            self.runingAlgo.setText("<font color='#97bf0d' > <h4> Running....</h4> </font>")
            self.myLongTask.start()
        else:
            self.runingAlgo.setText("<font color='#0000c6' > <h4>No file choosen... </h4></font>")
    def onFinished(self):
        """Slot for TaskThread.taskFinished: show the result image and its
        histogram, and the GT similarity index if a GT image was chosen."""
        # Stop the pulsation
        self.runingAlgo.setText("<font color='#00ccff' > <h2> Done !</h2></font>")
        self.progressBar.setRange(0,1)
        # The worker wrote its result to test.jpg in the working directory.
        pixmap = QtGui.QPixmap(os.getcwd()+"/test.jpg")
        pixmap3 = pixmap.scaled(300,300, QtCore.Qt.KeepAspectRatio)
        self.pic2.setPixmap(pixmap3)
        #histo_gram_start
        da=io.imread('test.jpg')
        data2=rgb2gray( da)
        hist, bins = np.histogram(data2, bins=50)
        width = 0.7 * (bins[1] - bins[0])
        center = (bins[:-1] + bins[1:]) / 2
        plt.bar(center, hist, align='center', width=width)
        plt.savefig("hist2.png")
        plt.close()
        del(data2)
        pixma = QtGui.QPixmap(os.getcwd()+"/hist2.png")
        pixma3 = pixma.scaled(300,200, QtCore.Qt.KeepAspectRatio)
        self.hist2.setPixmap(pixma3)
        global img,grd_img,file1,file2
        if grd_img!="":
            # Compare the segmentation result against the ground-truth image.
            data1=io.imread(os.getcwd()+"/test.jpg")
            data2=io.imread(str(grd_img))
            val=Err.err(data1,data2)
            self.errorText.setText("<font color='#97bf0d' > <h4>GT Image :"+str(file2)+"<br><br> Output Image :"+str(file1)+"<br><br> Similarity Index :"+str(val)+"%</h4> </font>")
    def showDialog(self):
        """Pick the input image; show its analysis info, histogram and preview."""
        self.fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd())
        global img
        img=self.fname
        data=io.imread(str(img))
        # NOTE(review): this 'file1' is a local, not the module-level global
        # (no 'global file1' here) -- confirm that is intended.
        file1=img.split('/')[-1]
        layer=len(data.shape)
        # NOTE(review): a 2-D grayscale image has len(shape) == 2, so the
        # 'layer==1' branch appears unreachable for real images -- verify.
        if layer==1:
            row=data.shape[0]
            col=data.shape[1]
            self.Img_inf.setText("<font color='#0000c3' ><h3> ANALYSIS </h3></font><font color='#97bf0d' ><h4>Image Name :"+str(file1)+"<br><br> Size : "+str(row)+" X "+str(col)+"<br><br> Type : One layer</h4></font>")
        if layer==3:
            row=data.shape[0]
            col=data.shape[1]
            self.Img_inf.setText("<font color='#0000c3' ><h3> ANALYSIS </h3></font><font color='#97bf0d' ><<h4>Image Name :"+str(file1)+"<br><br> Size : "+str(row)+" X "+str(col)+"<br><br> Type : RGB Coloured</h4></font>")
            #histogram_start
            d=rgb2gray(data)
            hist, bins = np.histogram(d, bins=50)
            width = 0.7 * (bins[1] - bins[0])
            center = (bins[:-1] + bins[1:]) / 2
            plt.bar(center, hist, align='center', width=width)
            plt.savefig("hist.png")
            plt.close()
            pixma = QtGui.QPixmap("hist.png")
            pixma3 = pixma.scaled(300,200, QtCore.Qt.KeepAspectRatio)
            self.hist.setPixmap(pixma3)
        del(data)
        self.fileText.setText("<font color='#97bf0d' > <h5>"+str(img)+"</h5></font>")
        pixmap = QtGui.QPixmap(self.fname)
        pixmap3 = pixmap.scaled(300,300, QtCore.Qt.KeepAspectRatio)
        self.pic.setPixmap(pixmap3)
    def showDialog2(self):
        """Pick the ground-truth image used for the similarity index."""
        self.fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd())
        global grd_img,img,file1,file2
        grd_img=self.fname
        print grd_img,img,file1,file2
        if img !="":
            file1=img.split('/')[-1]
            file2=grd_img.split('/')[-1]
            self.errorText.setText("<font color='#97bf0d' > <h4>GT Image :"+str(file2)+"<br><br> Output Image :"+str(file1)+"</h4> </font>")
    def q1Changed(self, text):
        # Mirror line-edit text into the hidden label read by returnAnswer1().
        self.answer1.setText(text)
    def q2Changed(self, text):
        self.answer2.setText(text)
    def q3Changed(self, text):
        self.answer3.setText(text)
    def returnAnswer1(self):
        """Return the text typed into the first parameter field."""
        return self.answer1.text()
    def returnAnswer2(self):
        """Return the text typed into the second parameter field."""
        return self.answer2.text()
    def returnAnswer3(self):
        """Return the text typed into the third parameter field."""
        return self.answer3.text()
    def btnstate(self,b):
        """Radio-button handler: select the algorithm (algo_index), relabel
        the parameter fields and update the description panel."""
        if b.text() == "K-mean Algorithm":
            if b.isChecked() == True:
                global algo_index
                algo_index=1
                self.var1.setText("<font color='#97bf0d' > <h4>Iteration Value :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>K Value :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>variable3 :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> K-mean Algorithm </h1></font><font color='#97bf0d' > <h3>This algorithm uses to find K cluster in image. For which <br/> you have to select K value and Iteration value .If you <br> have no idea than select K = 5 and iterarion no more then<br> 10.<br></h3></font>")
        elif b.text() == "Global Thresholding":
            if b.isChecked() == True:
                global algo_index
                algo_index=2
                self.var1.setText("<font color='#97bf0d' > <h4>N value :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>Delta Value :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>variable3 :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> Global Thresholding </h1></font><font color='#97bf0d' > <h3>This algorithm uses to find the threshold values so that<br> we can divide the image in N part ( Generally in Two part<br> Foreground / Background ).Here you have to select N <br> and delta, If you have no idea select N=2 and delta = 30.<br> </h3></font>")
        elif b.text() == "Regin Grow Method":
            if b.isChecked() == True:
                global algo_index
                algo_index=3
                self.var1.setText("<font color='#97bf0d' > <h4>Row Value :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>Col Value :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>Threshold :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> Regin Grow Method </h1></font><font color='#97bf0d' > <h3>This algorithm generally uses to find associated region <br>for given seeds ( points ). For this you have to select <br>seeds point (row ,col ) and threshold. If you have no idea,<br> Select threshold = 40. <br></h3></font>")
        elif b.text() == "Graph Based Method":
            if b.isChecked() == True:
                global algo_index
                algo_index=4
                self.var1.setText("<font color='#97bf0d' > <h4>Sigma Value :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>K Value :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>MinSize :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> Graph Based Method </h1></font><font color='#97bf0d' > <h3>This algorithm use the value of a threshold to merge <br>different components to produce a segmented image.<br>It takes a minsize variable for merging very small size <br>components.If you have no idea, Select sigma = 0 and <br>minsize = 20 and K = 500.<br></h3></font>")
        elif b.text() == "Otsu Method":
            if b.isChecked() == True:
                global algo_index
                algo_index=5
                self.var1.setText("<font color='#97bf0d' > <h4>Variable1 :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>Variable2 :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>Variable3 :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> Otsu Method </h1></font><font color='#97bf0d' > <h3>This algorithm uses to find the threshold values so that<br> we can divide the image in two part Foreground and <br> Background .<br><br><br> </h3></font>")
        elif b.text() == "Watershed Method":
            if b.isChecked() == True:
                global algo_index
                algo_index=6
                self.var1.setText("<font color='#97bf0d' > <h4>Variable1 :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>Variable2 :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>Variable3 :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> Watershed Method </h1></font><font color='#97bf0d' > <h3>There are some information about this algorithm like<br/> K value needed or something else</h3></font>")
        elif b.text() == "Fuzzy C-Mean":
            if b.isChecked() == True:
                global algo_index
                algo_index=7
                self.var1.setText("<font color='#97bf0d' > <h4>C value :</h4></font>")
                self.var2.setText("<font color='#97bf0d' > <h4>sigma :</h4></font>")
                self.var3.setText("<font color='#97bf0d' > <h4>Variable3 :</h4></font>")
                self.DisText.setText("<font color='blue' > <h1> Fuzzy C-Mean </h1></font><font color='#97bf0d' > <h3>This algorithm uses to find C cluster in image on basis <br>of probability. For which you have to select C value <br>and sigma. If you have no idea than select C = 5 and<br> sigma = 0.3<br>.</font>")
class TaskThread(QtCore.QThread):
    """Worker thread that runs the selected segmentation algorithm.

    Reads the input path and parameters from the module globals, writes the
    segmented image to test.jpg in the working directory, then emits
    taskFinished so the UI can refresh.
    """
    taskFinished = QtCore.pyqtSignal()
    def run(self):
        """Dispatch on algo_index, run the algorithm, save the result."""
        global img,algo_index
        li=img.split('/')
        file1=li[len(li)-1]
        data=io.imread(str(img))
        if algo_index==1:
            # K-means clustering.
            global K,iterNum
            data=Kmean.kmean(data,iterNum,K)
        elif algo_index==2:
            # Global thresholding into Num_par partitions.
            global Num_par,delta
            data=GlobalThresholding.global_threshold(data,Num_par,delta)
        elif algo_index==3:
            # Region growing from the (row, col) seed.
            global row,col,thre
            data=Region_grow.region_grow_priorityQ(data,[row,col],thre)
        elif algo_index==4:
            # Graph-based segmentation.
            global sigma,mean_size,K_graph
            print K_graph
            data=graph_based.graph_based_seg(data,mean_size,K_graph,sigma)
        elif algo_index==5:
            # Otsu thresholding (no parameters).
            data=otsu.otsu(data)
        elif algo_index==6:
            # Watershed option currently reuses the K-means implementation.
            global K,iterNum
            data=Kmean.kmean(data,iterNum,K)
        elif algo_index==7:
            # Fuzzy C-means clustering.
            global K,sigma
            data=fuzzy.fuzzy(data,K,sigma)
        im = Image.fromarray(data)
        im.save(os.getcwd()+"/test.jpg")
        self.taskFinished.emit()
if __name__ == "__main__":
    # Launch the Qt application and hand control to the event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    ui = Ui_Form()
    ui.show()
    sys.exit(app.exec_())
|
16,670 | 69256a5ab8d5931a799cd6c871fc5b6ad8cf0f4c | # -*- coding: utf-8 -*-
# @Time : 2018/11/24 5:23 PM
# @Author : zeali
# @Email : zealiemai@gmail.com
from core.template import gen_token
from dueros.card.ImageCard import ImageCard
from dueros.card.ListCard import ListCard
from dueros.card.ListCardItem import ListCardItem
def gen_image_card(items=None, hints=None):
    """Build a DuerOS ImageCard from a list of image descriptors.

    :param items: iterable of dicts with 'src' and 'thumbnail' keys
        (missing keys become None); defaults to no items.
    :param hints: cue words attached to the card; defaults to none.
    :return: the populated ImageCard.
    """
    # None sentinels instead of mutable default arguments ([] defaults are
    # shared across calls and a classic Python pitfall).
    items = items or []
    hints = hints or []
    card = ImageCard()
    card.set_token(gen_token())
    for item in items:
        card.add_item(item.get('src'), item.get('thumbnail'))
    card.add_cue_words(hints)
    return card
def gen_list_card(items=None, hints=None):
    """Build a DuerOS ListCard from a list of item descriptors.

    :param items: iterable of dicts with 'title', 'url', 'image' and
        'content' keys (missing keys become None); defaults to no items.
    :param hints: cue words attached to the card; defaults to none.
    :return: the populated ListCard.
    """
    # None sentinels instead of mutable default arguments.
    items = items or []
    hints = hints or []
    card = ListCard()
    # NOTE(review): unlike gen_image_card, no token is set here -- confirm
    # whether ListCard should also call set_token(gen_token()).
    for item in items:
        card_item = ListCardItem()
        card_item.set_title(item.get('title'))
        card_item.set_url(item.get('url'))
        card_item.set_image(item.get('image'))
        card_item.set_content(item.get('content'))
        card.add_item(card_item)
    card.add_cue_words(hints)
    return card
|
16,671 | ffad62e3e12dfcab7e6e3982a01fa9789f9b9e18 | from src.pairs.TriangularDeck import TriangularDeck
from src.pairs.util import intMapper
from src.pairs.GameState import GameState
from src.pairs.DPSolver import DPSolver
from src.pairs.Deck import Deck
import numpy as np
def main():
    """Solve a 4-rank triangular-deck Pairs game with the DP solver and
    print, for two heuristic strategies, how many states disagree with the
    DP-optimal action table."""
    deck = TriangularDeck(4)
    game = GameState(deck, 7, 3)
    solver = DPSolver(game)
    solver.solve(game)
    # States where the 50%-rule policy differs from the optimal policy.
    agrees_with_fifty = solver.get_action_table() == solver.get_fifty_percent_table()
    print(np.count_nonzero(agrees_with_fifty == 0))
    # States where the expected-points policy differs from the optimal policy.
    agrees_with_expected = solver.get_action_table() == solver.get_expected_point_action_table()
    print(np.count_nonzero(agrees_with_expected == 0))
if __name__ == "__main__":
    # Script entry point.
    main()
|
16,672 | 0af9c7571673f2a6c61316987ce3696b357b7cfb | from owe.utils.stats import *
from owe.utils.utils import *
from owe.utils.embedding_utils import *
|
16,673 | 9a2bb2528c4f0393b85b7dd3091da8f515c8b307 | #!/usr/bin/env python
# coding:utf-8
from PoboAPI import *
import datetime
import numpy as np
# Start-up hook, used to initialize parameters.
def OnStart(context) :
    """Strategy start-up: log in to the backtest futures account.

    The account named "回测期货" must exist under the platform's user
    management; context.myacc stays None if it is absent or login fails.
    """
    print("I\'m starting...")
    # Log in to the trading account configured in user management; replace
    # "回测期货" ("backtest futures") with your own account name if needed.
    context.myacc = None
    if "回测期货" in context.accounts :
        print("登录交易账号[回测期货]")
        if context.accounts["回测期货"].Login() :
            context.myacc = context.accounts["回测期货"]
def OnMarketQuotationInitialEx(context, exchange,daynight):
    """Quotation-initialized hook: subscribe to the SHFE rebar main contract.

    Ignores every exchange except SHFE.  Stores the main-contract code in
    the platform-provided global ``g`` and subscribes to daily bars so the
    OnBar event fires.
    """
    if exchange != 'SHFE':
        return
    # Resolve the dominant (main) rebar contract on SHFE.
    g.code = GetMainContract('SHFE', 'rb',20)
    # Subscribe to daily K-line data to drive the OnBar event.
    SubscribeBar(g.code, BarType.Day)
    # Example: look up the 6th trading day of December 2018.
    tday=GetNthTradingDay(2018,12,6,"SHFE")
    print("tday "+str(tday))
def GetNthTradingDay(year,month,nth,exchange="SHFE"):
    """Return the nth (1-based) trading day of the given month on *exchange*."""
    first_day = datetime.date(year=year, month=month, day=1)
    # Last calendar day of the month: one day before the first of next month.
    if month == 12:
        next_month_first = datetime.date(year + 1, 1, 1)
    else:
        next_month_first = datetime.date(year, month + 1, 1)
    last_day = next_month_first - datetime.timedelta(days=1)
    trading_days = GetTradingDates(exchange, first_day, last_day)
    return trading_days[nth - 1]
# Real-time quote event, called whenever a new bar arrives.
def OnBar(context,code,bartype):
    """Daily-bar handler implementing a 5/10-day MA crossover strategy."""
    # Filter out quote notifications for other contracts.
    if code != g.code:
        return
    dyndata = GetQuote(g.code)
    # Compute the 5- and 10-day moving averages.
    MA = GetIndicator("MA",code,params=(5,10),bar_type = BarType.Day)
    MA1 = MA["MA(5)"]
    MA2 = MA["MA(10)"]
    if len(MA2)<2:
        return
    # When MA5 crosses above MA10, open a long position (quantity 10).
    elif MA1[-1] >= MA2[-1] and MA1[-2]<MA2[-2]:
        QuickInsertOrder(context.myacc,g.code,'buy','open',dyndata.now,10)
    # When MA5 crosses below MA10, sell to close (quantity 10).
    elif MA1[-1] <= MA2[-1] and MA1[-2]>MA2[-2]:
        QuickInsertOrder(context.myacc,g.code,'sell','close',dyndata.now,10)
|
16,674 | 8ced2bfd3549e136b499862ae203bf77b4b67bee | import numpy as np
import random
# Interactive greedy tower-placement script (prompts are in Portuguese):
# reads house positions and a tower range n, then places towers so every
# house lies within distance n of some tower.
vetor = []
vetortorre=[]
tam = int(input("insira o tamanho do vetor \n"))
# Read the house positions one by one.
for i in range(tam):
    vetor.append(
        int(input("insira o valor do vetor na posicao " + str(i) + "\n")))
ordenavetor = np.sort(vetor)
print(ordenavetor)
n=int(input("alcance da torre\n"))
# Greedy sweep over the sorted positions: a tower at p+n covers [p, p+2n].
for i in range(tam):
    if(i==0):
        # First house always needs a tower ("precisa de outra torre").
        print("precisa de outra torre")
        vetortorre.append(ordenavetor[i]+n)
        print(str(vetortorre[i]))
        i2=0
    # NOTE(review): i2 always points at the most recently placed tower;
    # if a house is farther than n from it, place a new tower at house+n.
    if(ordenavetor[i]-vetortorre[i2]>n):
        vetortorre.append(ordenavetor[i]+n)
        print(str(ordenavetor[i]))
        i2=i2+1
# Report tower positions and each tower's coverage interval endpoints.
x=[]
y=[]
x=ordenavetor
print(vetortorre)
for n2 in vetortorre:
    y.append(n2-n)
    y.append(n2+n)
print(y)
print(x)
|
16,675 | 651519db3d8cc207808d5f3dc1ea96f56452d723 | import glob
import pandas as pd
import reddit_utils
import pushshift
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.dates as mdates
import datetime
from datetime import timedelta
def get_relative_freq(word, subreddit, rolling=False):
    """Daily relative frequency of *word* among all comments in *subreddit*.

    Returns a DataFrame with a 'freq' column (word count / total count per
    day) and, when *rolling* is set, a 7-day rolling mean in 'rolling_freq'.
    """
    matches = reddit_utils.utc_to_datetime(pd.DataFrame(pushshift.daycounts(word=word, subreddit=subreddit)), 'key')
    totals = reddit_utils.utc_to_datetime(pd.DataFrame(pushshift.daycounts(subreddit=subreddit)), 'key')
    merged = matches.merge(totals, on='day', how='outer')
    merged['freq'] = merged['doc_count_x'] / merged['doc_count_y']
    if rolling:
        merged['rolling_freq'] = merged['freq'].rolling(window=7).mean()
    return merged
def get_comment_timeseries(subreddit, rolling=False):
    """Daily comment counts for *subreddit*, optionally with a 7-day
    rolling mean added as 'rolling_freq'."""
    counts = reddit_utils.utc_to_datetime(pd.DataFrame(pushshift.daycounts(subreddit=subreddit)), 'key')
    if rolling:
        counts['rolling_freq'] = counts['doc_count'].rolling(window=7).mean()
    return counts
def states_word_freq_tiled(states, counter, word):
    """Tiled grid of per-state relative-frequency plots for *word*.

    One subplot per state subreddit; saves the figure to
    ../figures/<word>_states<counter>_tiled.png.

    :param states: list of state subreddit names (up to 18 per figure).
    :param counter: string suffix distinguishing successive figures.
    :param word: the search term whose relative frequency is plotted.
    """
    rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 14})
    rc('text', usetex=True)
    years_fmt = mdates.DateFormatter('%b\n%Y')
    NUM_ROWS = 6
    if len(states)%3 != 0:
        NUM_ROWS = 5
    NUM_COLS = 3
    fig, axs = plt.subplots(NUM_ROWS, NUM_COLS, sharex='col', sharey='row', figsize=(16,11))
    fig.subplots_adjust(hspace=0.2,wspace=0.15)
    c = -1
    r = 0
    d0 = '2019-12-31'
    d1 = '2020-04-19'
    d1 = datetime.date(int(d1.split('-')[0]), int(d1.split('-')[1]), int(d1.split('-')[2]))
    d0 = datetime.date(int(d0.split('-')[0]), int(d0.split('-')[1]), int(d0.split('-')[2]))
    delta = d1 - d0
    date_list = [d0 + timedelta(days=x) for x in range(0, delta.days, delta.days // 4)]
    # Snap every tick except the first to the first of its month.
    for date in date_list[1:]:
        newdate = date.replace(day=1)
        date_list[date_list.index(date)] = newdate
    for state in states:
        print(state)
        c += 1
        if c == NUM_COLS:
            c = 0
            r += 1
        print(str(r)+', '+str(c))
        # BUG FIX: query the requested *word* instead of the previously
        # hard-coded 'coronavirus' (the title already used *word*).
        word_df = get_relative_freq(word, state, rolling=True)
        print(min(word_df['day']))
        print(max(word_df['day']))
        axs[r, c].plot(word_df.day, word_df.freq, lw=0.6, c = 'deepskyblue', alpha=0.8)
        axs[r, c].plot(word_df.day, word_df.rolling_freq, lw=1.5, c = 'deepskyblue', alpha=1)
        axs[r, c].set_title(state)
        # axs[r, c].set_yscale('log')
        axs[r, c].set_xlim([min(date_list),max(date_list)])
        axs[r, c].set_xticks(date_list)
        axs[r, c].set_ylim([0,0.4])
        axs[r, c].set_yticks([0, 0.1, 0.2, 0.3, 0.4])
        # axs[r, c].set_yticklabels(['-4','-3','-2','-1','0'])
        axs[r, c].tick_params(axis='both', which='major', labelsize=14)
        axs[r, c].grid()
    # Hide the unused last tile when the grid is not completely filled.
    if len(states)%3 != 0:
        axs[-1, -1].axis('off')
    for ax in axs.flat:
        ax.xaxis.set_major_formatter(years_fmt)
    fig.suptitle('Rel. Frequency of \"'+word+'\" on State Subreddits')
    plt.savefig('../figures/'+word+'_states'+counter+'_tiled.png')
    # plt.show()
    plt.close()
def cities_word_freq_tiled(cities, word, tag):
    """Tiled grid of per-city relative-frequency plots for *word*.

    One subplot per city subreddit; saves the figure to
    ../figures/[<tag>_]<word>_cities_tiled.png.

    :param cities: list of city subreddit names.
    :param word: the search term whose relative frequency is plotted.
    :param tag: optional filename prefix (falsy to omit).
    """
    rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 14})
    rc('text', usetex=True)
    years_fmt = mdates.DateFormatter('%b\n%Y')
    NUM_ROWS = 6
    NUM_COLS = 3
    fig, axs = plt.subplots(NUM_ROWS, NUM_COLS, sharex='col', sharey='row', figsize=(16,11))
    fig.subplots_adjust(hspace=0.2,wspace=0.15)
    c = -1
    r = 0
    d0 = '2019-12-31'
    d1 = '2020-04-19'
    d1 = datetime.date(int(d1.split('-')[0]), int(d1.split('-')[1]), int(d1.split('-')[2]))
    d0 = datetime.date(int(d0.split('-')[0]), int(d0.split('-')[1]), int(d0.split('-')[2]))
    delta = d1 - d0
    date_list = [d0 + timedelta(days=x) for x in range(0, delta.days, delta.days // 4)]
    # Snap every tick except the first to the first of its month.
    for date in date_list[1:]:
        newdate = date.replace(day=1)
        date_list[date_list.index(date)] = newdate
    for city in cities:
        print(city)
        c += 1
        if c == NUM_COLS:
            c = 0
            r += 1
        print(str(r)+', '+str(c))
        # BUG FIX: query the requested *word* instead of the previously
        # hard-coded 'coronavirus' (the title already used *word*).
        word_df = get_relative_freq(word, city, rolling=True)
        print(min(word_df['day']))
        print(max(word_df['day']))
        axs[r, c].plot(word_df.day, word_df.freq, lw=0.6, c = 'deepskyblue', alpha=0.8)
        axs[r, c].plot(word_df.day, word_df.rolling_freq, lw=1.5, c = 'deepskyblue', alpha=1)
        axs[r, c].set_title(city)
        # axs[r, c].set_yscale('log')
        axs[r, c].set_xlim([min(date_list),max(date_list)])
        axs[r, c].set_xticks(date_list)
        axs[r, c].set_ylim([0,0.4])
        axs[r, c].set_yticks([0, 0.1, 0.2, 0.3, 0.4])
        # axs[r, c].set_yticklabels(['-4','-3','-2','-1','0'])
        axs[r, c].tick_params(axis='both', which='major', labelsize=14)
        axs[r, c].grid()
    for ax in axs.flat:
        ax.xaxis.set_major_formatter(years_fmt)
    fig.suptitle('Rel. Frequency of \"'+word+'\" on City Subreddits')
    if tag:
        plt.savefig('../figures/'+tag+'_'+word+'_cities_tiled.png')
    else:
        plt.savefig('../figures/'+word+'_cities_tiled.png')
    # plt.show()
    plt.close()
def states_word_freq(states, word):
    """Overlay the 7-day rolling relative frequency of *word* for every
    state subreddit on a single axis; saves to
    ../figures/<word>_states_rolling.png."""
    rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 18})
    rc('text', usetex=True)
    years_fmt = mdates.DateFormatter('%b\n%Y')
    # colors = plt.cm.tab20(np.linspace(0,1,len(states)))
    # i = 0
    d0 = '2019-12-30'
    d1 = '2020-04-19'
    d1 = datetime.date(int(d1.split('-')[0]), int(d1.split('-')[1]), int(d1.split('-')[2]))
    d0 = datetime.date(int(d0.split('-')[0]), int(d0.split('-')[1]), int(d0.split('-')[2]))
    delta = d1 - d0
    date_list = [d0 + timedelta(days=x) for x in range(0, delta.days, delta.days // 6)]
    # Snap every tick except the first to the first of its month.
    for date in date_list[1:]:
        newdate = date.replace(day=1)
        date_list[date_list.index(date)] = newdate
    fig = plt.figure(figsize=(8, 6))
    fig.subplots_adjust(bottom=0.2)
    ax = fig.add_subplot(111)
    for state in states:
        # BUG FIX: query the requested *word* instead of the previously
        # hard-coded 'coronavirus' (the title already used *word*).
        word_df = get_relative_freq(word, state, rolling=True)
        ax.plot(word_df['day'], word_df['rolling_freq'], c = 'blue', alpha = 0.4, lw=1.2, label=state)
        # i += 1
    ax.xaxis.set_major_formatter(years_fmt)
    # ax.legend(loc='upper left', ncol=3, fontsize=10, frameon=False)
    ax.set_xticks(date_list)
    plt.grid()
    plt.title('Rel. Frequency of \"'+word+'\" on State Subreddits')
    plt.savefig('../figures/'+word+'_states_rolling.png')
    # plt.show()
    plt.close()
def cities_word_freq(cities, word, tag=None):
    """Overlay the 7-day rolling relative frequency of *word* for every
    city subreddit on a single axis; saves to
    ../figures/[<tag>_]<word>_cities_rolling.png."""
    rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 18})
    rc('text', usetex=True)
    years_fmt = mdates.DateFormatter('%b\n%Y')
    colors = plt.cm.tab20(np.linspace(0,1,len(cities)))
    i = 0
    d0 = '2019-12-30'
    d1 = '2020-04-19'
    d1 = datetime.date(int(d1.split('-')[0]), int(d1.split('-')[1]), int(d1.split('-')[2]))
    d0 = datetime.date(int(d0.split('-')[0]), int(d0.split('-')[1]), int(d0.split('-')[2]))
    delta = d1 - d0
    date_list = [d0 + timedelta(days=x) for x in range(0, delta.days, delta.days // 6)]
    # Snap every tick except the first to the first of its month.
    for date in date_list[1:]:
        newdate = date.replace(day=1)
        date_list[date_list.index(date)] = newdate
    fig = plt.figure(figsize=(8, 6))
    fig.subplots_adjust(bottom=0.2)
    ax = fig.add_subplot(111)
    for city in cities:
        # BUG FIX: query the requested *word* instead of the previously
        # hard-coded 'coronavirus' (the title already used *word*).
        word_df = get_relative_freq(word, city, rolling=True)
        ax.plot(word_df['day'], word_df['rolling_freq'], c = colors[i], alpha = 0.7, lw=1.2, label=city)
        i += 1
    ax.xaxis.set_major_formatter(years_fmt)
    ax.set_xticks(date_list)
    ax.legend(loc='upper left', ncol=2, fontsize=14, frameon=False)
    plt.grid()
    plt.title('Rel. Frequency of \"'+word+'\" on City Subreddits')
    if tag:
        plt.savefig('../figures/'+tag+'_'+word+'_cities_rolling.png')
    else:
        plt.savefig('../figures/'+word+'_cities_rolling.png')
    # plt.show()
    plt.close()
def states_timeseries(states):
    """Overlay the 7-day rolling daily comment volume for every state
    subreddit on one axis; saves to ../figures/states_rolling.png."""
    rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 18})
    rc('text', usetex=True)
    month_fmt = mdates.DateFormatter('%b\n%Y')
    start = datetime.date(2019, 12, 30)
    end = datetime.date(2020, 4, 19)
    span_days = (end - start).days
    tick_dates = [start + timedelta(days=x) for x in range(0, span_days, span_days // 6)]
    # Snap every tick except the first to the first of its month.
    tick_dates = [tick_dates[0]] + [d.replace(day=1) for d in tick_dates[1:]]
    fig = plt.figure(figsize=(8, 6))
    fig.subplots_adjust(bottom=0.2)
    ax = fig.add_subplot(111)
    for state in states:
        counts = get_comment_timeseries(state, rolling=True)
        counts = counts[counts['day'] >= start]
        ax.plot(counts['day'], counts['rolling_freq'], c='blue', alpha=0.4, lw=1.2, label=state)
    ax.xaxis.set_major_formatter(month_fmt)
    ax.set_xticks(tick_dates)
    plt.grid()
    plt.title('Comment Frequency on State Subreddits')
    plt.savefig('../figures/states_rolling.png')
    plt.close()
def cities_timeseries(cities):
    """Overlay the 7-day rolling daily comment volume for every city
    subreddit on one axis; saves to ../figures/cities_rolling.png."""
    rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 18})
    rc('text', usetex=True)
    month_fmt = mdates.DateFormatter('%b\n%Y')
    palette = plt.cm.tab20(np.linspace(0, 1, len(cities)))
    start = datetime.date(2019, 12, 30)
    end = datetime.date(2020, 4, 19)
    span_days = (end - start).days
    tick_dates = [start + timedelta(days=x) for x in range(0, span_days, span_days // 6)]
    # Snap every tick except the first to the first of its month.
    tick_dates = [tick_dates[0]] + [d.replace(day=1) for d in tick_dates[1:]]
    fig = plt.figure(figsize=(8, 6))
    fig.subplots_adjust(bottom=0.2)
    ax = fig.add_subplot(111)
    for idx, city in enumerate(cities):
        counts = get_comment_timeseries(city, rolling=True)
        counts = counts[counts['day'] >= start]
        ax.plot(counts['day'], counts['rolling_freq'], c=palette[idx], alpha=0.7, lw=1.2, label=city)
    ax.xaxis.set_major_formatter(month_fmt)
    ax.set_xticks(tick_dates)
    ax.legend(loc='upper left', ncol=2, fontsize=14, frameon=False)
    plt.grid()
    plt.title('Comment Frequency on City Subreddits')
    plt.savefig('../figures/cities_rolling.png')
    plt.close()
def main():
    """Driver: generate the coronavirus-frequency figures.

    Most invocations are commented out so a single figure set can be
    regenerated at a time; currently only the top-5-cities plots run.
    """
    cities = reddit_utils.get_list_of_cities()
    states = reddit_utils.get_list_of_states()
    top5 = reddit_utils.get_top5_cities()
    # The 50 states are split into three tiled figures of up to 18 each.
    states1 = states[:18]
    states2 = states[18:36]
    states3 = states[36:]
    word = "coronavirus"
    # States
    # states_word_freq(states, word)
    # states_word_freq_tiled(states1, '1', word)
    # states_word_freq_tiled(states2, '2', word)
    # states_word_freq_tiled(states3, '3', word)
    # states_timeseries(states)
    # Cities
    # cities_word_freq(cities, word)
    # cities_word_freq_tiled(cities, word)
    # cities_timeseries(cities)
    # Top 5 Cities
    cities_word_freq(top5, word, 'top5')
    cities_word_freq_tiled(top5, word, 'top5')
if __name__ == "__main__":
main() |
16,676 | 22ba5ff934518e4a07d6049bac171312e2c3c623 | import click
import os
from rover.rover.tools import n_pages
from .rover.main import show_missions, show_downlink_status, show_stats, get_cameras, downloader
@click.group()
def cli():
    """Root command group for the rover command-line interface."""
    pass
@cli.command("missions", help="show list of active missions.")
def missions():
    """Print the list of active missions."""
    show_missions()
@cli.group(help="show mission status.")
def status():
    """Command group for downlink-status queries."""
    pass
@status.command("current", help="info of current/latest downlink.")
@click.argument("mission")
def current(mission):
    """Show the current/latest downlink status for MISSION."""
    show_downlink_status(mission_id=mission, timeline="current")
@status.command("past", help="info of past downlink(s).")
@click.argument("mission")
def past(mission):
    """Show past downlink status for MISSION."""
    show_downlink_status(mission_id=mission, timeline="previous")
@status.command("future", help="info of upcoming downlinks.")
@click.argument("mission")
def upcoming(mission):
    """Show upcoming downlink status for MISSION."""
    show_downlink_status(mission_id=mission, timeline="upcoming")
@cli.command("stats", help="Show mission statistics.")
@click.argument("mission")
def stats(mission):
    """Print statistics for MISSION."""
    show_stats(mission_id=mission)
@cli.group(help="show camera instrument info.")
def camera():
    """Command group for camera-instrument queries."""
    pass
@camera.command("names", help="show camera instrument names.")
@click.argument("mission")
def camnames(mission):
    """Print the camera instrument names for MISSION."""
    get_cameras(mission_id=mission, what="names")
# Typo fix in user-facing help text: "instrment" -> "instrument".
@camera.command("ids", help="show camera instrument ids.")
@click.argument("mission")
def camids(mission):
    """Print the camera instrument ids for MISSION."""
    get_cameras(mission_id=mission, what="ids")
@cli.group(help="download mission raw images/metadata.")
def download():
    # Sub-group for the image/metadata download commands.
    pass
@download.command("images", help="download images.")
@click.argument("mission")
@click.option('-r', '--resolution', type=str,
              default="full", show_default=True,
              help="resolution of the images to be downloaded. "
                   "Available options: small, medium, large, full")
@click.option('-p', '--path',
              default="./", show_default=True,
              help="path to store the downloaded images.")
@click.option('-pn', '--pagenum', type=int, show_default=True,
              help="value of the page to download images from.")
@click.option('-np', '--npages', type=int, show_default=True,
              help="number of pages to download the images from.")
def imgs(mission, resolution, path, pagenum, npages):
    # Thin wrapper: forward all CLI options to the shared downloader helper.
    # Fix: help text previously read "to be download" and, due to a
    # backslash continuation, embedded a run of raw indentation spaces.
    downloader(mission_id=mission,
               what="images",
               path=path,
               resolution=resolution,
               pagenum=pagenum,
               npages=npages
               )
@download.command("metadata", help="download metadata.")
@click.argument("mission")
@click.option('-p', '--path',
              default="./", show_default=True,
              help="path to store the downloaded metadata.")
@click.option('-pn', '--pagenum', type=int, show_default=True,
              help="value of the page to download metadata from.")
@click.option('-np', '--npages', type=int, show_default=True,
              help="number of pages to download the metadata from.")
def meta(mission, path, pagenum, npages):
    # Thin wrapper: forward all CLI options to the shared downloader helper.
    # Fix: help strings were copy-pasted from the images command and
    # wrongly said "images" for the metadata command.
    downloader(mission_id=mission,
               what="metadata",
               path=path,
               pagenum=pagenum,
               npages=npages
               )
|
16,677 | 569cdb1eec0958d49d3b615018262496c80bb20f | #!/usr/bin/env python
# coding: utf-8
import pyglet.canvas
import numpy as np
# Input display information
inch = 23
aspect_width = 16
aspect_height = 9
# Input a variety
# size variation = [1, 2]
# eccentricity = [1, 2]
# Input stereogram size in cm unit
size = 5
# Input line size in cm unit
line_length = 0.7  # 30pix is 42 min of arc on 57cm distance
# Input a number you like to initiate
s = 1
# Input luminance of background
lb = 85  # 215, 84%
# Get display information (use the last attached screen).
display = pyglet.canvas.get_display()
screens = display.get_screens()
resolution = screens[len(screens) - 1].height
# Physical display height in cm from the diagonal size and aspect ratio.
c = (aspect_width ** 2 + aspect_height ** 2) ** 0.5
d_height = 2.54 * (aspect_height / c) * inch
deg1 = round(resolution * (1 / d_height))  # vertical pixels per cm
# Fix: the original recomputed resolution/c/d_height a second time with
# byte-identical expressions; the redundant block was removed.
sz = round(resolution * (size / d_height))
ll = round(resolution * line_length / d_height)
f = round(sz * 0.023 / 2)  # 3.6 min of arc in 5 deg presentation area, actually 0.6 mm
eccentricity = round(1 / np.sqrt(2.0) / d_height * resolution)
16,678 | e8676d3dc0428892d484b3ba2cdaa3472a0fde04 | a = int(input("Введите число a: "))
# Read the two remaining integer operands (a is read just above).
b = int(input("Введите число b: "))
c = int(input("Введите число c: "))
# Report whether a divided by b equals c.  True division compares a float
# with the int c, and raises ZeroDivisionError when b == 0.
if a / b == c:
    print(a, "разделить на", b, "равно", c)
else:
    print(a, "разделить на", b, "не равно", c)
# Report whether a raised to the power b equals c.
if a ** b == c:
    print(a, "в степени", b, "равно" , c)
else:
    print(a, "в степени", b, "не равно", c)
|
16,679 | 5a17ebc48d315b9cbbc31857ad2012f58e49ad11 | #!/usr/bin/env python
from pdb import set_trace as br
from operator import itemgetter
from numpy.polynomial.polynomial import Polynomial
from modules.utils import OUT_CONFIG
from modules.geometry.hit import HitManager
from modules.geometry.sl import SL
from modules.geometry.segment import Segment
from modules.geometry import Geometry, COOR_ID
from modules.reco import config, plot
from modules.analysis import config as CONFIGURATION
import os
import itertools
import bokeh
import numpy as np
############################################# INPUT ARGUMENTS
import argparse
# Command-line interface: input files plus event/format/plotting options.
parser = argparse.ArgumentParser(description='Track reconstruction from input hits.')
parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+')
parser.add_argument('-f', '--format', help='Input hits format', default='time_wire')
parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False)
parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15)
parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html')
parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False)
parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+')
args = parser.parse_args()
# Checking validity of the input format
if args.format not in OUT_CONFIG:
    raise ValueError('Wrong input format (-f) specified')
# Checking existence of input files (abort on the first missing one)
for file_path in args.inputs:
    if not os.path.exists(os.path.expandvars(file_path)):
        print('--- ERROR ---')
        print(' \''+file_path+'\' file not found')
        print(' please provide the correct path to the file containing raw hits' )
        print()
        exit()
def process(input_files):
    """Reconstruct tracks from hits in all events from the provided input files.

    Each input file holds one event per line: event header words followed by
    `nhits` fixed-width hit records (format chosen by --format).  Hits are
    fitted per superlayer (SL) with straight lines, then promoted to global
    3D segments; figures are written to HTML when --plot is given.
    """
    n_words_event = len(OUT_CONFIG['event']['fields'])
    n_words_hit = len(OUT_CONFIG[args.format]['fields'])
    # Initialising event
    event = -1
    G = Geometry(CONFIGURATION)
    H = HitManager()
    SLs = {}
    for iSL in config.SL_SHIFT.keys():
        SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])
    # Defining which SLs should be plotted in which global view
    GLOBAL_VIEW_SLs = {
        'xz': [SLs[0], SLs[2]],
        'yz': [SLs[1], SLs[3]]
    }
    # Analyzing the hits in each event
    for file_path in input_files:
        # Reading input file line by line
        with open(file_path, 'r') as file_in:
            file_line_nr = 0
            for line in file_in:
                file_line_nr += 1
                if file_line_nr <= 1:
                    continue
                hits_lst = []
                H.reset()
                words = line.strip().split()
                event = int(words[0])
                # Skipping event if it was not specified in command line
                if args.events is not None and event not in args.events:
                    continue
                nhits = int(words[1])
                print('Event {0:<5d} # hits: {1:d}'.format(event, nhits))
                if args.glance:
                    continue
                # Skipping event with too many hits (most likely a spark event that will take forever to process)
                if nhits > args.max_hits:
                    continue
                # Extracting hit information
                for iHit in range(nhits):
                    start = n_words_event + iHit*n_words_hit
                    ww = words[start:start+n_words_hit]
                    hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])
                H.add_hits(hits_lst)
                # Removing hits with time outside the timebox region
                H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)
                # Calculating local+global hit positions
                H.calc_pos(SLs)
                # Creating figures of the chambers
                figs = {}
                figs['sl'] = plot.book_chambers_figure(G)
                figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)
                # Analyzing hits in each SL
                sl_fit_results = {}
                for iSL, sl in SLs.items():
                    # print('- SL', iSL)
                    hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')
                    if args.plot:
                        # Drawing the left and right hits in local frame
                        figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,
                                               fill_color='red', fill_alpha=0.7, line_width=0)
                        figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,
                                               fill_color='blue', fill_alpha=0.7, line_width=0)
                    # Performing track reconstruction in the local frame
                    sl_fit_results[iSL] = []
                    layer_groups = hits_sl.groupby('layer').groups
                    n_layers = len(layer_groups)
                    # Stopping if less than the configured minimum of layers have hits
                    if n_layers < config.NHITS_MIN_LOCAL:
                        continue
                    hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]
                    # Building the list of all possible hit combinations with 1 hit from each layer
                    hits_layered = list(itertools.product(*hitid_layers))
                    # Building more combinations using only either left or right position of each hit
                    for hit_ids in hits_layered:
                        # print('- -', hit_ids)
                        posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values
                        posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values
                        posx_combs = list(itertools.product(*posx))
                        # Fitting each combination
                        fit_results_lr = []
                        fit_range = (min(posz), max(posz))
                        for iC, posx_comb in enumerate(posx_combs):
                            # NOTE(review): for a numerically exact fit the residual array
                            # stats[0] can be empty, making stats[0][0] raise IndexError —
                            # confirm real data always leaves a nonzero residual.
                            pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)
                            chi2 = stats[0][0] / n_layers
                            if chi2 < config.FIT_CHI2_MAX:
                                a0, a1 = pfit
                                fit_results_lr.append((chi2, hit_ids, pfit))
                        # Keeping only the best fit result from the given set of physical hits
                        fit_results_lr.sort(key=itemgetter(0))
                        if fit_results_lr:
                            sl_fit_results[iSL].append(fit_results_lr[0])
                    # Sorting the fit results of a SL by Chi2
                    sl_fit_results[iSL].sort(key=itemgetter(0))
                    if sl_fit_results[iSL]:
                        # Drawing fitted tracks
                        posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)
                        for iR, res in enumerate(sl_fit_results[iSL][:5]):
                            col = config.TRACK_COLORS[iR]
                            posx = res[2](posz)
                            figs['sl'][iSL].line(x=posx, y=posz,
                                                 line_color=col, line_alpha=0.7, line_width=3)
                if args.plot:
                    # Drawing the left and right hits in global frame
                    for view, sls in GLOBAL_VIEW_SLs.items():
                        sl_ids = [sl.id for sl in sls]
                        hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]
                        figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],
                                                    fill_color='red', fill_alpha=0.7, line_width=0)
                        figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],
                                                    fill_color='blue', fill_alpha=0.7, line_width=0)
                        # Building 3D segments from the fit results in each SL
                        posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)
                        for sl in sls:
                            for iR, res in enumerate(sl_fit_results[sl.id][:5]):
                                posx = res[2](posz)
                                start = (posx[0], 0, posz[0])
                                end = (posx[1], 0, posz[1])
                                segL = Segment(start, end)
                                segG = segL.fromSL(sl)
                                segG.calc_vector()
                                # Extending the global segment to the full height of the view
                                start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])
                                end = segG.pointAtZ(plot.PLOT_RANGE['y'][1])
                                # Getting XY coordinates of the global segment for the current view
                                iX = COOR_ID[view[0]]
                                posx = [start[iX], end[iX]]
                                posy = [start[2], end[2]]
                                # Drawing the segment
                                col = config.TRACK_COLORS[sl.id]
                                figs['global'][view].line(x=posx, y=posy,
                                                          line_color=col, line_alpha=0.7, line_width=3)
                                print(sl.id, iR, posx, posy)
                # Storing the figures to an HTML file
                if args.plot:
                    plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]]
                    plots.append([figs['global'][v] for v in ['xz', 'yz']])
                    bokeh.io.output_file(args.output.format(event), mode='cdn')
                    bokeh.io.save(bokeh.layouts.layout(plots))
# Entry point: run reconstruction over all files given on the command line.
process(args.inputs)
|
16,680 | 4a200f763691aac47cb4b361dbac5f9dd18fc7d4 | """segmentation_dataset_generator_controller controller."""
from controller import Robot
from controller import Connector
from controller import RangeFinder
from controller import Camera
from controller import Supervisor
import random
import socket
import struct
import pickle
import numpy as np
import math
import cv2
import os
#robot = Robot()
# Supervisor gives scene-graph access (node lookup, visibility, physics reset).
supervisor = Supervisor()
connector = None
timestep = 100
# Socket is created but never connected in this script; presumably left over
# from an earlier controller version — TODO confirm before removing.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
motors = [supervisor.getMotor("shoulder_pan_joint"), supervisor.getMotor("shoulder_lift_joint"), supervisor.getMotor("elbow_joint"),
supervisor.getMotor("wrist_1_joint"), supervisor.getMotor("wrist_2_joint"), supervisor.getMotor("wrist_3_joint")]
#motor_sensors = [robot.getPositionSensor("shoulder_pan_joint_sensor"), robot.getPositionSensor("shoulder_lift_joint_sensor"), robot.getPositionSensor("elbow_joint_sensor"),
#robot.getPositionSensor("wrist_1_joint_sensor"), robot.getPositionSensor("wrist_2_joint_sensor"), robot.getPositionSensor("wrist_3_joint_sensor")]
camera = supervisor.getCamera('cameraRGB')
depth_camera = supervisor.getRangeFinder('cameraDepth')
# Camera node is the second child of the first transform under the UR5 robot.
ur_node = supervisor.getFromDef('UR5')
camera_transform_node = ur_node.getField('children').getMFNode(0)
camera_node = camera_transform_node.getField('children').getMFNode(1)
#depth_camera.enable(35)
# Scene nodes of the ten phone parts (two of each kind).
phone_part_objects = [
    supervisor.getFromDef('Bottom_Cover'),
    supervisor.getFromDef('Bottom_Cover_2'),
    supervisor.getFromDef('White_Cover'),
    supervisor.getFromDef('White_Cover_2'),
    supervisor.getFromDef('Black_Cover'),
    supervisor.getFromDef('Black_Cover_2'),
    supervisor.getFromDef('Blue_Cover'),
    supervisor.getFromDef('Blue_Cover_2'),
    supervisor.getFromDef('PCB'),
    supervisor.getFromDef('PCB_2')
]
# Class label used in each part's mask filename (parallel to phone_part_objects).
index_to_class_name = [
    'BottomCover',
    'BottomCover',
    'WhiteCover',
    'WhiteCover',
    'BlackCover',
    'BlackCover',
    'BlueCover',
    'BlueCover',
    'PCB',
    'PCB',
]
# Cache each part's PBR appearance node and its original baseColor so the
# colors can be swapped for mask capture and restored afterwards.
pbr_apperance_nodes = []
original_colors = []
for part in phone_part_objects:
    children = part.getField('children')
    shape = children.getMFNode(1)
    pbr_apperance_node = shape.getField('appearance')
    color = pbr_apperance_node.getSFNode().getField('baseColor').getSFColor()
    pbr_apperance_nodes.append(pbr_apperance_node)
    original_colors.append(color)
translation_fields = [node.getField('translation') for node in phone_part_objects]
rotation_fields = [node.getField('rotation') for node in phone_part_objects]
# Drop positions (x, z) per part kind, and the max random offset in each axis.
front_cover_initial_pos = [-0.17, -0.16]
back_cover_initial_pos = [-0.16, -0.16]
pcb_initial_pos = [-0.14, -0.13]
default_rotation = [1, 0, 0, 1.57]
max_movement = [0.08, 0.08]
def randomize_phone_parts():
    """Drop every phone part at a randomized pose above the work surface.

    Each part gets a small random tilt around x and a random x/z offset from
    its kind's drop position; parts are stacked at increasing heights so they
    do not spawn intersecting.  Physics is reset afterwards so they fall.
    """
    height = 1.1
    height_step = 0.05
    for index, (translation_field, rotation_field) in enumerate(zip(translation_fields, rotation_fields)):
        current_position = translation_field.getSFVec3f()
        random_rotation = random.random() * 0.2 + 1.57
        rotation = [1, 0, 0, random_rotation]
        rotation_field.setSFRotation(rotation)
        random_x_shift = random.random() * (max_movement[0] * 2) - max_movement[0]
        random_z_shift = random.random() * (max_movement[1] * 2) - max_movement[1]
        if index < 2:
            # back cover
            translation_field.setSFVec3f(
                [back_cover_initial_pos[0] + random_x_shift, height, back_cover_initial_pos[1] + random_z_shift])
        elif 2 <= index < 8:
            # front cover
            translation_field.setSFVec3f(
                [front_cover_initial_pos[0] + random_x_shift, height, front_cover_initial_pos[1] + random_z_shift])
        else:
            # PCB
            translation_field.setSFVec3f(
                [pcb_initial_pos[0] + random_x_shift, height, pcb_initial_pos[1] + random_z_shift])
        height += height_step
    for part in phone_part_objects:
        part.resetPhysics()
def set_color_for_all_except_index(ii):
    """Paint part *ii* yellow and all other parts black (for mask capture)."""
    for idx, appearance in enumerate(pbr_apperance_nodes):
        target = [1, 1, 0] if idx == ii else [0, 0, 0]
        appearance.getSFNode().getField('baseColor').setSFColor(target)
def restore_colors():
    """Reset every part's baseColor to the value recorded at startup."""
    for appearance, colour in zip(pbr_apperance_nodes, original_colors):
        appearance.getSFNode().getField('baseColor').setSFColor(colour)
def toggle_visibility_for_all_parts(visible):
    # Show or hide every phone part for the RGB camera (used to grab a
    # clean background frame at startup).
    for part in phone_part_objects:
        part.setVisibility(camera_node, visible)
def transform_image(img_array):
    """Turn Webots' nested RGB image list into a BGR ndarray for OpenCV.

    The first two axes are swapped before conversion — Webots appears to
    deliver the pixels column-major (width-first); confirm against the API.
    """
    rgb = np.array(img_array, dtype=np.uint8).transpose((1, 0, 2))
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
# --- Dataset capture: one background frame, then `images_to_take` scenes. ---
randomize_phone_parts()
wait_time = 3.5 # seconds
last_run_time = supervisor.getTime()
take_image = True
is_first_run = True
images_to_take = 200
image_index = 0
# Capture the background with all parts hidden, for later subtraction.
camera.enable(1)
supervisor.step(1)
toggle_visibility_for_all_parts(False)
supervisor.step(1)
background_img = transform_image(camera.getImageArray())
cv2.imwrite('background.png', background_img)
toggle_visibility_for_all_parts(True)
restore_colors()
camera.disable()
dataset_path = 'dataset'
if not os.path.isdir(dataset_path):
    os.mkdir(dataset_path)
# Main loop: every `wait_time` simulated seconds, save the full RGB frame
# plus one binary segmentation mask per part, then re-randomize the scene.
while supervisor.step(timestep) != -1:
    if supervisor.getTime() - last_run_time >= wait_time and image_index < images_to_take:
        camera.enable(1)
        supervisor.step(1)
        print(f'Taking image {image_index+1}/{images_to_take}')
        save_path = os.path.join(dataset_path, f'img{image_index}')
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        full_img = transform_image(camera.getImageArray())
        cv2.imwrite(os.path.join(save_path, 'full_image.png'), full_img)
        for index in range(0, len(phone_part_objects)):
            # Highlight one part, subtract the background, and threshold to a mask.
            set_color_for_all_except_index(index)
            supervisor.step(1)
            image = transform_image(camera.getImageArray())
            image_subtracted = cv2.subtract(image, background_img)
            image_grayscale = cv2.cvtColor(image_subtracted, cv2.COLOR_BGR2GRAY)
            _, image_binary = cv2.threshold(image_grayscale, 5, 255, cv2.THRESH_BINARY)
            kernel = np.ones((5, 5), np.uint8)
            image_binary = cv2.morphologyEx(image_binary, cv2.MORPH_CLOSE, kernel)
            image_binary = cv2.morphologyEx(image_binary, cv2.MORPH_OPEN, kernel)
            save_name = f"mask{index}_{index_to_class_name[index]}.png"
            cv2.imwrite(os.path.join(save_path, save_name), image_binary)
        restore_colors()
        camera.disable()
        randomize_phone_parts()
        last_run_time = supervisor.getTime()
        image_index += 1
    elif image_index >= images_to_take:
        break
|
16,681 | 678ed3247125fdd17e2fd121713823b5e74bafc3 | from django.db import models
from django.contrib.auth.models import User
#Querying User model with use_natural_foreign_keys=True returns username instead of key
class UserManager(models.Manager):
    def unatural_key(self):
        """Return the username so serialization shows it instead of the pk."""
        return self.username

# Fix: the bare name `unatural_key` only existed inside the UserManager class
# namespace, so the original `User.natural_key = unatural_key` raised
# NameError at import time.  Reference it through the class instead.
# NOTE(review): Django's serializers expect natural_key() to return a tuple;
# this returns a plain string as the original intended — confirm callers.
User.natural_key = UserManager.unatural_key
class ForumPost(models.Model):
    """A top-level forum post authored by a user."""
    post_id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # author
    post_title = models.CharField(max_length=512)
    post_body = models.CharField(max_length=2048, blank=True)
    post_image = models.URLField(blank=True)  # optional image URL
    post_datetime = models.DateTimeField(auto_now_add=True)
    # Denormalized count of ForumConnector rows for this post.
    connect_count = models.PositiveSmallIntegerField(default=0)
    def __str__(self):
        return str(self.post_id)
class ReplyPost(models.Model):
    """A reply to a ForumPost; parent_id allows threaded replies-to-replies."""
    reply_id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # author
    post_id = models.ForeignKey(ForumPost, on_delete=models.CASCADE)
    # Self-reference: NULL for a direct reply to the post.
    parent_id = models.ForeignKey('self', default=None, blank=True, null=True, on_delete=models.CASCADE)
    reply_body = models.CharField(max_length=2048)
    reply_datetime = models.DateTimeField(auto_now_add=True)
    # Denormalized count of ReplyConnector rows for this reply.
    connect_count = models.PositiveSmallIntegerField(default=0)
    def __str__(self):
        return str(self.reply_id)
class ForumConnector(models.Model):
    """Join table recording which users 'connected' with a post."""
    post_id = models.ForeignKey(ForumPost, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
class ReplyConnector(models.Model):
    """Join table recording which users 'connected' with a reply."""
    reply_id = models.ForeignKey(ReplyPost, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
16,682 | c7f70cd4c28882d8114a79c919e820d8c271fb97 | import requests
import time
# POST a fixed text payload to a local server and report the HTTP status.
url = 'http://localhost:8000/'
payload = "welcome world"
now = int(time.time())  # send-time, for the log line only
r = requests.post(url, data=payload)
print("timestamp: " + str(now) + " | message sent! - status: " + str(r.status_code))
|
16,683 | 105457c0de9c599e05ba8cf0467594dcb85bbc09 | import pandas as pd
import csv
import plotly.figure_factory as ff
import statistics as st
import random
import plotly.graph_objects as go
# Load the population of math scores and report its mean/stdev.
df=pd.read_csv("School1.csv")
data=df["Math_score"].tolist()
#fig=ff.create_distplot([data],["Math_score"],show_hist=False)
#fig.show()
popMean=st.mean(data)
popStd=st.stdev(data)
print("Std of pop dist: ",popStd)
print("mean of pop dist: ",popMean)
def randSetOfMean(counter, source=None):
    """Return the mean of `counter` values sampled with replacement.

    Args:
        counter: number of values to draw.
        source: sequence to sample from; defaults to the module-level
            `data` list, preserving the original call signature.
    """
    if source is None:
        source = data
    # random.choice replaces the manual randint-indexing of the original.
    samples = [random.choice(source) for _ in range(counter)]
    return st.mean(samples)
# Build the sampling distribution of the mean (100 samples of size 30).
meanList=[]
for i in range(0,100):
    meansets=randSetOfMean(30)
    meanList.append(meansets)
stddev=st.stdev(meanList)
meanLmean=st.mean(meanList)
print("Std of sampling dist: ",stddev)
print("mean of sampling dist: ",meanLmean)
# 1/2/3 standard-deviation intervals around the sampling mean.
firStdStart, firStdEnd=meanLmean-stddev,meanLmean+stddev
secStdStart, secStdEnd=meanLmean-(stddev*2),meanLmean+(stddev*2)
thrStdStart, thrStdEnd=meanLmean-(stddev*3),meanLmean+(stddev*3)
#intervention 1 mean calc (plotting disabled below)
df1=pd.read_csv("school_1_Sample.csv")
data1=df1["Math_score"].tolist()
mean1=st.mean(data1)
""" zscore=(meanLmean-mean1)/stddev
print("z score is ",zscore)
fig=ff.create_distplot([meanList],["sampling mean"],show_hist=False)
fig.add_trace(go.Scatter(x=[meanLmean,meanLmean],y=[0,0.2],mode="lines",name="mean"))
fig.add_trace(go.Scatter(x=[mean1,mean1],y=[0,0.2],mode="lines",name="mean sample"))
fig.add_trace(go.Scatter(x=[firStdEnd,firStdEnd],y=[0,0.2],mode="lines",name="stdev 1"))
fig.add_trace(go.Scatter(x=[secStdEnd,secStdEnd],y=[0,0.2],mode="lines",name="stdev 2"))
fig.add_trace(go.Scatter(x=[thrStdEnd,thrStdEnd],y=[0,0.2],mode="lines",name="stdev 3"))
fig.show() """
#intervention 2 mean calc
df2=pd.read_csv("data2.csv")
data2=df2["Math_score"].tolist()
mean2=st.mean(data2)
zscore2=(meanLmean-mean2)/stddev
print("z score is ",zscore2)
fig=ff.create_distplot([meanList],["sampling mean"],show_hist=False)
fig.add_trace(go.Scatter(x=[meanLmean,meanLmean],y=[0,0.2],mode="lines",name="mean"))
fig.add_trace(go.Scatter(x=[mean2,mean2],y=[0,0.2],mode="lines",name="mean sample2"))
fig.add_trace(go.Scatter(x=[firStdEnd,firStdEnd],y=[0,0.2],mode="lines",name="stdev 1"))
fig.add_trace(go.Scatter(x=[secStdEnd,secStdEnd],y=[0,0.2],mode="lines",name="stdev 2"))
fig.add_trace(go.Scatter(x=[thrStdEnd,thrStdEnd],y=[0,0.2],mode="lines",name="stdev 3"))
fig.show()
#intervention 3 mean calc
df3=pd.read_csv("data3.csv")
data3=df3["Math_score"].tolist()
mean3=st.mean(data3)
zscore=(meanLmean-mean3)/stddev
print("z score is ",zscore)
fig=ff.create_distplot([meanList],["sampling mean"],show_hist=False)
fig.add_trace(go.Scatter(x=[meanLmean,meanLmean],y=[0,0.2],mode="lines",name="mean"))
fig.add_trace(go.Scatter(x=[mean3,mean3],y=[0,0.2],mode="lines",name="mean sample"))
fig.add_trace(go.Scatter(x=[firStdEnd,firStdEnd],y=[0,0.2],mode="lines",name="stdev 1"))
fig.add_trace(go.Scatter(x=[secStdEnd,secStdEnd],y=[0,0.2],mode="lines",name="stdev 2"))
fig.add_trace(go.Scatter(x=[thrStdEnd,thrStdEnd],y=[0,0.2],mode="lines",name="stdev 3"))
fig.show()
|
def colorindo(string, cores):
    """Wrap *string* in the ANSI escape code named by *cores*.

    Supported names (Portuguese): 'vermelho' (red), 'azul' (blue),
    'ciano' (cyan), 'amarelo' (yellow), 'verde' (green), 'negrito' (bold).
    Raises KeyError for any other name (unchanged behaviour).
    """
    RESET = "\033[0;0m"
    codes = {
        'vermelho': "\033[1;31m",  # red
        'azul': "\033[1;34m",      # blue
        'ciano': "\033[1;36m",     # cyan
        'amarelo': "\033[1;33m",   # yellow
        'verde': "\033[0;32m",     # green
        'negrito': "\033[;1m",     # bold
    }
    # Fix: the original also defined an unused REVERSE constant; removed.
    return codes[cores] + string + RESET
|
16,685 | c551ac8ed6081253269872ac6a472f2b73c9b474 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on March 01 9:47 PM 2020
Created in PyCharm
Created as Misc/bh_binary_power
@author: Dylan Neff, Dylan
"""
import matplotlib.pyplot as plt
import numpy as np
# Constants (SI units)
g = 6.67e-11 # gravitational constant, m^3 kg^-1 s^-2
c = 2.9927e8 # speed of light, m/s -- NOTE(review): standard value is 2.9979e8 and the original unit comment said m s^-2; confirm this value is intentional
m_solar = 1.989e30 # solar mass, kg
def main():
    """Plot radiated power vs. time-to-coalescence for a binary black hole.

    Shows the same curve twice: once with a log x-axis, once linear.
    Vertical lines mark the tau values quoted for 10 Hz and 1000 Hz.
    """
    m_chirp = 30 * m_solar / 2**(1.0/5) # chirp mass, kg
    taus = np.linspace(0.00002, 7, 1000000)
    powers = power(taus, m_chirp)
    plt.plot(taus, powers, color='blue', label='Power')
    plt.axvline(6.04746, linestyle='--', color='green', label='10Hz')
    plt.axvline(0.000028, linestyle='--', color='red', label='1000Hz')
    plt.xlabel(r'$\tau = t_{coal} - t$ (seconds)')
    plt.ylabel('Power (watts)')
    plt.semilogx()
    plt.legend()
    plt.show()
    # Same plot again on a linear x-axis.
    plt.plot(taus, powers, color='blue', label='Power')
    plt.axvline(6.04746, linestyle='--', color='green', label='10Hz')
    plt.axvline(0.000028, linestyle='--', color='red', label='1000Hz')
    plt.xlabel(r'$\tau = t_{coal} - t$ (seconds)')
    plt.ylabel('Power (watts)')
    plt.legend()
    plt.show()
    print('donzo')
def power(tau, m_chirp):
    """Gravitational-wave power (W) at time-to-coalescence `tau` (s).

    Uses the module constants g, c, m_solar.  The literals 134.0, 1.21 and
    the exponents encode a specific quadrupole-formula parameterization —
    presumably from the course/paper this script follows; confirm before
    changing any of them.
    """
    p = 32.0 / 5 * c**5 / g * \
        (g * m_chirp * np.pi * 134.0 / c**3)**(10.0/3) * (1.21 * m_solar / m_chirp)**(20.0/18) * (1.0 / tau)**(5.0/4)
    return p
# Standard script entry point.
if __name__ == '__main__':
    main()
|
16,686 | 56f5c60a19b43f34ff724d9b0ae5f4851f4de5e0 | # Generated by Django 2.0.13 on 2021-01-26 17:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the BuyingTicket model.

    Generated by Django; avoid hand-editing beyond comments.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shop', '0005_product_bidding_date'),
    ]
    operations = [
        migrations.CreateModel(
            name='BuyingTicket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.IntegerField(default=0, help_text='수량')),
                ('buying_number', models.CharField(help_text='상품명', max_length=200)),
                ('user', models.ForeignKey(help_text='티켓구매자', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
16,687 | 745ec82b38ca3de1b6375cd37f826dd894a2c710 | '''
Saravanan Ramanathan
January 2016
'''
# Function to generate multi-core task set
from taskparam import ts
import mrand as MR
from math import *
from random import *
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import pylab
import random
import numpy
# Function to generate all task set
def run(): # Generate tasksets
    """Sweep all (cores, HI/LO utilization, probability) combinations and generate task sets for each."""
    for m in ts.m: # no. of cores
        for hhu in range(ts.hhb_l,ts.hhb_h+1,ts.hhb_step): # Hi-Hi Utilization
            hubound=m*float(hhu)/100 # Total bound = m * hhu
            for hlu in range(ts.hlb_l,hhu,ts.hlb_step): # Hi-Lo Utilization
                lubound=m*float(hlu)/100 # Total bound = m * hlu
                for lu in range(ts.lb_l,100-hlu+1,ts.lb_step): # Lo task utilization
                    lbound=m*float(lu)/100 # Total bound = m * lu
                    for p in ts.t_prob: # Prob that task is Hi-task
                        NHmin = max(ceil(hubound/ts.u_h),m+1) # Calculate number of hi-tasks
                        NLmin = ceil(lbound/ts.u_h) # Find lower bound on low criticality tasks
                        if(100>=max(hhu,hlu+lu)>=30): # condition to configure utilization range
                            loop(m,hhu,hlu,lu,p,hubound,lubound,lbound,NHmin,NLmin)# Generate now
# Generate task with given parameters
def loop(m,hhu,hlu,lu,p,hubound,lubound,lbound,NHmin,NLmin):
    """Generate ts.numTask task sets for one parameter combination and write them to one file."""
    print m,hhu,hlu,lu,p # Print the parameters of task set
    prefix=str(m)+"_"+str(hhu)+"_"+str(hlu)+"_"+str(lu)+"_"+str(p) # Prefix to write down tasks
    f = open(ts.path+"_"+prefix+".in","w") # Open file in write mode
    cond = 1 # Loop condition variable
    # Comment the next three lines if you want to generate exact number of task sets for each combination
    numSet = int(ceil(ts.numSet/len(ts.t_prob))) # Divide by number of percentage values
    comb = 2*(max(hhu,hlu+lu)/10)-1 # Determine the number of iterations given an utilization bound range - here 0.1 divided by 10
    t_comb = comb**2 # Total combinations including nested interations
    ts.numTask = int(ceil(numSet/float(t_comb))) # Determine the number of task sets for each combination - rounded to an integer
    # Generate for total number of task sets
    for j in range(ts.numTask): # For total number of tasksets
        ai = max(m+1,ceil(NHmin/p),ceil(NLmin/(1-p))) # Determine the number of tasks in a task set
        #print ai,10*m
        if(ai >= 10*m): # Restrict total tasks in a task set to 10*m - enough for all p values
            Ntotal = 10*m
        else:
            Ntotal = randint (ai, 10*m)
        # If any restrictions on the number of HC tasks - e.g. 3*m HC tasks.
        #NH = int(min(max((p*Ntotal),NHmin),3*m))
        #if(NH<3*m):
        #	NL = int(max((Ntotal-NH),NLmin))
        #else:
        #	NL = int(max((((1-p)/p)*NH),NLmin))
        NH = int(min((p*Ntotal),NHmin)) # Number of HC tasks in a task set
        NL = int(max((Ntotal-NH),NLmin)) # Number of LC tasks in a task set
        #print Ntotal,NH,NL,NHmin,hubound,lubound,lbound
        # Retry until generate() returns a set satisfying all utilization bounds.
        while cond:
            tasks=generate(m,hubound,lubound,lbound,p,NH,NL) # Call generate task function
            if tasks!=None: # If taskset not empty
                writeTasks(f,tasks) # Write task set into file
                break
        f.write("---%d\n"%(j)) # Write taskset count
    f.close() # Close file
# Function to generate task
def generate(m,hubound,lubound,lbound,p,NH,NL):
    """Generate one mixed-criticality task set: NH HI tasks then NL LO tasks.

    Each task is a tuple (T, C_lo, C_hi, crit, D).  Returns None when the
    accumulated utilizations fall short of their bounds; interactive
    raw_input() pauses flag internal consistency violations.
    """
    Ull = 0 # Lo - lo U vector
    Uhh = 0 # Hi - hi U vector
    Uhl = 0 # Hi - lo U vector
    num = 0 # Count tasks
    temp_task = [] # Temporary task set
    qh,q1= MR.randfixedsum(NH,1,hubound,ts.u_l,ts.u_h) # randfixessum to calculate uiH of Hi-tasks
    ql,q1 = pick_low_util(qh,NH,lubound,hubound,ts.u_l) # Calculate uiL of Hi-tasks
    for j in range(NH): # For all Hi-tasks
        uh = qh[j] # Pick Hi utilization from list
        ul = ql[j] # Pick Lo utilization from list
        if(ts.t_log_u==0):
            #T = int(uniform(ts.t_l,ts.t_h)) # Pick uniform period in range (pl, ph)
            T = int(uniform(max(ceil(2/float(ul)),ts.t_l),ts.t_h))
        else:
            T = int(floor((exp(uniform(log(max(ceil(2/float(ul))+ts.t_l,ts.t_l)),log(ts.t_h+ts.t_l))))/ts.t_l)*ts.t_l) # Pick log-uniform period in range (pl, ph)
        crit = 1 # Define criticality
        c_lo = ul*T # Compute execution time
        c_hi = uh*T
        if (c_lo < 2 or c_hi < 2):
            print c_lo,c_hi,ceil(2/float(ul))
            print "Errrrr"
            c = raw_input("Enter")
            #return None
        if (c_lo > c_hi+0.000001): # Return error if CiL > CiH
            print c_lo,c_hi
            print "Errrrr"
            c = raw_input("Enter")
        Uhl+=float(c_lo)/T # Add utilization in U vector
        Uhh+=float(c_hi)/T
        if (ceil(c_hi) < T):
            if(ts.d_log_u==0):
                d=randint(ceil(c_hi),T) # Compute uniform constrained deadline
            else:
                d=int(floor((exp(uniform(log(ceil(c_hi)),log(T))))/ceil(c_hi))*ceil(c_hi)) # Compute log-uniform deadline
        else:
            d=T
        if ((Uhh>hubound+0.000001) or (Uhl>lubound+0.000001)): # If Util greater than bound then break
            print "BErrrrr"
            c = raw_input("Enter")
        if(c_hi<=0 or c_lo<=0): # Append task if execut. requirement is > 0
            print "CErrrrr"
            c = raw_input("Enter")
        temp_task.append((T,c_lo,c_hi,crit,d)) # Append period, exe.time, crit, deadline
        num = num + 1 # Total no. of tasks
    x,xl= MR.randfixedsum(NL,1,lbound,ts.u_l,ts.u_h) # Use randfixedsum - uiL of Lo-tasks
    #print x
    for j in range(NL):
        uh = x[j] # Pick Lo utilization from list
        ul = uh # Assign Lo utilization = Hi utilization
        if(ts.t_log_u==0):
            #T = int(uniform(ts.t_l,ts.t_h)) # Pick uniform period in range (pl, ph)
            T = int(uniform(max(ceil(2/float(ul))+ts.t_l,ts.t_l),ts.t_h))
        else:
            T = int(floor((exp(uniform(log(max(ceil(2/float(ul)),ts.t_l)),log(ts.t_h+ts.t_l))))/ts.t_l)*ts.t_l) # Pick log-uniform period in range (pl, ph)
        crit = 0 # Define criticality
        c_lo = ul*T # Compute execution time
        c_hi = uh*T
        if (c_lo < 2 or c_hi < 2):
            print c_lo,c_hi,ceil(2/float(ul))
            print "Errrrr"
            c = raw_input("Enter")
            #return None
        Ull+=float(c_lo)/T # Add utilization in U vector
        if (ceil(c_hi) < T):
            if(ts.d_log_u==0):
                d=randint(ceil(c_hi),T) # Compute uniform constrained deadline
            else:
                d=int(floor((exp(uniform(log(ceil(c_hi)),log(T))))/ceil(c_hi))*ceil(c_hi)) # Compute log-uniform deadline
        else:
            d=T
        if (Ull>lbound+0.000001): # If Util greater than bound then break
            print "BErrrrr"
            c = raw_input("Enter")
        if(c_lo<=0):
            print "CErrrrr"
            c = raw_input("Enter")
        temp_task.append((T,c_lo,c_hi,crit,d)) # Append period, exe.time, crit, deadline
        num = num + 1 # Total no. of tasks
    if ((Uhh<hubound-m*float(ts.hhb_step)/100) or (Uhl<lubound-m*float(ts.hlb_step)/100) or (Ull<lbound-m*float(ts.lb_step)/100)): # Max uilization error
        #print temp_task
        return None # Return none
    return temp_task # Return task set
def pick_low_util(qh,NH,lubound,hubound,u_l):
    """Draw LO-mode utilizations for the NH HI tasks so they sum to lubound.

    Each ql[i] is uniform within [max(u_l, B-Hrem), min(B - remaining*u_l, qh[i])].
    NOTE: ql aliases lh, so qh's values are effectively consumed in place.
    Returns (ql, 1); the second value is an unused status flag.
    """
    #print "qh",qh
    B = lubound
    Basign = 0
    Nrem = NH-1
    Hrem = hubound
    #lh = sorted(qh,reverse=True) # Sort list in decreasing order
    lh=list(numpy.array(qh).reshape(-1,))
    ql=lh
    for i in range(NH):
        Hrem -= lh[i]
        ql[i] = uniform(max(u_l,B-Hrem),min((B-(Nrem*u_l)),lh[i]))
        Nrem -= 1
        B -= ql[i]
    #ql[NH-1] = B
    for i in range(NH):
        Basign += ql[i]
    #print "B,lubound",Basign,lubound
    #print "qh",qh
    #print "ql",ql
    # Interactive sanity checks: the assigned total must match lubound.
    if(Basign > lubound+0.000001):
        c = raw_input("Enter +")
    if(Basign < lubound-0.000001 ):
        c = raw_input("Enter -")
    return ql,1
# Function to write taskset into file
def writeTasks(f,tasks):
    """Write one task per line as: period C_lo C_hi criticality deadline."""
    for period, c_lo, c_hi, crit, deadline in tasks:
        f.write("%d %f %f %d %d\n" % (period, c_lo, c_hi, crit, deadline))
def main(): # Main function
    """Entry point: generate all task sets."""
    run()
    #tset = generate(2,1.4,0.5,0.7,0.5,3,2)
    #print tset
    #r = test.ECDF(tset)
    #print r
main() # Call main()
|
16,688 | 50cc5a08455dc81fd8224fce2ec0aa21047f416b | # -*- coding: utf-8 -*-
import xlrd
import IPy
import time
import json
from datetime import datetime
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from django.shortcuts import render, get_object_or_404, get_list_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.admin import widgets
from django.urls import reverse, reverse_lazy
from django.views.generic.base import View
from django.views.generic.edit import UpdateView, DeleteView
from django.forms import ModelForm, inlineformset_factory
from django import forms
from django.utils import timezone
from django.db.models import F, Q
from django.db import transaction
from django.utils.encoding import escape_uri_path
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from captcha.models import CaptchaStore
from .models import Info, Equitstaff
from .form import LoginForm, InfoForm
# AJAX captcha validation
def ajax_captcha(request):
    """Validate a captcha (response, hashkey) pair submitted via AJAX.

    Expects `response` and `hashkey` in the query string.  Returns JSON
    {'status': 1} when the pair matches a stored captcha, {'status': 0}
    otherwise (including for non-AJAX requests).

    NOTE(review): request.is_ajax() was removed in Django 4.0; replace
    with an explicit X-Requested-With header check when upgrading.
    """
    # Removed the debug print() calls; the two failure paths were
    # identical, so they are collapsed into a single fallthrough.
    if request.is_ajax() and CaptchaStore.objects.filter(
            response=request.GET['response'],
            hashkey=request.GET['hashkey']).exists():
        return JsonResponse({'status': 1})
    return JsonResponse({'status': 0})
# 登录 v0.2
def login_view(request):
    """Login view (v0.2).

    POST: validate the LoginForm (which includes a captcha field),
    authenticate the user, and verify the user is linked to an
    Equitstaff record before logging them in.  GET: render an empty
    login form.
    """
    if request.method == 'POST':
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            username = request.POST.get('username', '')
            password = request.POST.get('password', '')
            user = authenticate(request, username=username, password=password)  # check the user exists
            if user and user.is_active:
                try:
                    equit_staff_id = user.equitstaff.id  # check the user has equipment-system access
                except (KeyError, Equitstaff.DoesNotExist):
                    return HttpResponse('无设备管理系统权限')
                else:
                    login(request, user)
                    print(request.user.id)
                    request.session.set_expiry(0)  # session expires as soon as the browser closes
                    # return HttpResponseRedirect('/equit/index/') # login v0.1
                    return HttpResponseRedirect('/equit/index/')
                    # return HttpResponseRedirect(reverse('equit:index', args=(equit_staff_id, ))) # login v0.2
            else:
                login_form = LoginForm()
                return render(request, 'equit/login.html', {'login_form': login_form, 'loginError': json.dumps(u'用户名或密码错误')})
        else:
            # Form invalid — the only server-side form check is the captcha.
            login_form = LoginForm()
            return render(request, 'equit/login.html', {'login_form': login_form, 'loginError': json.dumps(u'验证码错误')})
    elif request.method == 'GET':
        login_form = LoginForm()
        return render(request, 'equit/login.html', {'login_form': login_form})
# Login v0.3 (class-based)
class LoginView(View):
    """Login page (v0.3) — class-based variant; currently GET-only."""

    def get(self, request):
        """Render the login template with an unbound LoginForm."""
        form = LoginForm()
        context = {'login_form': form}
        return render(request, 'equit/login.html', context)
# Logout
def logout_view(request):
    """Terminate the current session and send the user to the login page."""
    logout(request)
    return HttpResponseRedirect(reverse('equit:login'))
# Index page v3
@login_required(login_url='equit:login')
def index_view(request):
    """Equipment index page (v3).

    GET: list all Info records, optionally fuzzy-filtered by ?keywords=,
    paginated 13 per page.
    POST 'upload': bulk-import records from an uploaded .xlsx file,
    validated row by row inside a single transaction.
    POST 'create': create one record from the inline InfoForm after
    uniqueness checks on IP, device name and switch port.
    """
    if request.method == 'GET':
        form = InfoForm()
        keywords = request.GET.get('keywords', '')
        # error_msg = ''
        # Fuzzy search across several fields
        if keywords:
            equitinfo_list = Info.objects.filter(
                Q(ip_addr__icontains=keywords) |
                Q(sw_ip_addr__icontains=keywords) |
                Q(oper_sys__icontains=keywords) |
                Q(sys_prog__icontains=keywords) |
                Q(cab_id__icontains=keywords) |
                Q(staff_name__icontains=keywords) |
                Q(equit_name__icontains=keywords)
            )
            paginator = Paginator(equitinfo_list, 13)
            page = request.GET.get('page')
            equitinfo = paginator.get_page(page)
            return render(request, 'equit/index.html', {'form': form, 'equitinfo': equitinfo, 'keywords': keywords})
        else:
            equitinfo_list = Info.objects.all()
            # Pagination
            paginator = Paginator(equitinfo_list, 13)
            page = request.GET.get('page')
            equitinfo = paginator.get_page(page)
            return render(request, 'equit/index.html', {'form': form, 'equitinfo': equitinfo})
    elif request.method == 'POST':
        equitinfo_list = Info.objects.all()
        if 'upload' in request.POST:
            form = InfoForm()
            # print(request.user.equitstaff.id)
            # form = UploadFileForm(request.POST, request.FILES)
            # print(form)
            # print(forms.is_valid())
            # if form.is_valid():
            equitstaff = get_object_or_404(Equitstaff, pk=request.user.equitstaff.id)
            if request.FILES.get('excel_file', ''):
                f = request.FILES.get('excel_file', '')
                type_excel = f.name.split('.')[-1]  # only the text after the last '.', i.e. the extension
                if type_excel == 'xlsx':
                    # Parse the excel workbook (first sheet only)
                    wb = xlrd.open_workbook(filename=None, file_contents=f.read())
                    table = wb.sheets()[0]
                    nrows = table.nrows  # number of rows
                    # ncole = table.ncols #
                    if nrows > 1:
                        try:
                            # All rows are imported atomically: any bad row
                            # rolls back the whole upload.
                            with transaction.atomic():
                                excelError = ""
                                for i in range(1, nrows):
                                    row_values = table.row_values(i)
                                    # print(rowvalues)
                                    # Validate the IP address and reject rows whose IP
                                    # is already used by an active record
                                    print(row_values)
                                    if IPy.IP(row_values[0]) and Info.objects.filter(ip_addr__exact=row_values[0], status=True).exists():
                                        raise ValidationError(_(u'第 %(value)d 行IP地址已存在'), params={'value': i})
                                    if Info.objects.filter(equit_name__exact=row_values[5], status=True).exists():
                                        raise ValidationError(_(u'第 %(value)d 行该设备名已存在'), params={'value': i})
                                    else:
                                        # Fill optional columns with placeholders.
                                        if row_values[1]:
                                            oper_sys = row_values[1]
                                        else:
                                            oper_sys = u'未填写'
                                        if row_values[2]:
                                            sys_prog = row_values[2]
                                        else:
                                            sys_prog = u'未填写'
                                        if row_values[4]:
                                            act_date = row_values[4]
                                        else:
                                            act_date = datetime.strptime('2000-1-1', '%Y-%m-%d')
                                        equitinfo = Info(
                                            ip_addr=row_values[0],
                                            oper_sys=oper_sys,
                                            sys_prog=sys_prog,
                                            sw_ip_addr=row_values[3],
                                            act_date=act_date,
                                            equit_name=row_values[5],
                                            cab_id=row_values[6],
                                            staff=equitstaff,
                                            staff_name=equitstaff.staff_name,
                                            staff_phone=equitstaff.phone,
                                            status=True
                                        )
                                        equitinfo.full_clean()
                                        equitinfo.save()
                        except ValidationError as e:
                            print(json.dumps(e.message_dict))
                            # if not isinstance(e, dict):
                            # e = json.dumps(e)
                            equitinfo_list = Info.objects.all()
                            paginator = Paginator(equitinfo_list, 13)
                            page = request.GET.get('page')
                            equitinfo = paginator.get_page(page)
                            return render(request, 'equit/index.html', {
                                'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(u'数据错误,请调整后重试')
                            })
                        else:
                            return HttpResponseRedirect(reverse('equit:index', args=()))
                    else:
                        excelError = u"excel文件不能为空"
                        paginator = Paginator(equitinfo_list, 13)
                        page = request.GET.get('page')
                        equitinfo = paginator.get_page(page)
                        return render(request, 'equit/index.html', {
                            'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
                        })
                else:
                    excelError = u"上传文件格式不是xlsx"
                    paginator = Paginator(equitinfo_list, 13)
                    page = request.GET.get('page')
                    equitinfo = paginator.get_page(page)
                    return render(request, 'equit/index.html', {
                        'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
                    })
            else:
                excelError = u"文件不能为空."
                paginator = Paginator(equitinfo_list, 13)
                page = request.GET.get('page')
                equitinfo = paginator.get_page(page)
                return render(request, 'equit/index.html', {
                    'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
                })
        elif 'create' in request.POST:
            # equitstaff = get_object_or_404(Equitstaff, pk=request.user.equitstaff.id)
            form = InfoForm(request.POST)
            print(form)
            print(request)
            createError = ''
            if form.is_valid():
                if request.POST.get('ip_addr', ''):
                    if Info.objects.filter(ip_addr__exact=request.POST.get('ip_addr', ''), status=True).exists():
                        createError = u'IP地址已存在'
                if Info.objects.filter(equit_name__exact=request.POST.get('equit_name', ''), status=True).exists():
                    createError = u'该设备名已存在'
                # Check whether the uplink switch port is already occupied
                if request.POST.get('sw_port', '') and Info.objects.filter(
                        sw_ip_addr__exact=request.POST.get('sw_ip_addr', ''),
                        sw_port__exact=request.POST.get('sw_port', ''),
                        status=True).exists():
                    createError = u'该端口已被占用'
                if createError:
                    paginator = Paginator(equitinfo_list, 13)
                    page = request.GET.get('page')
                    equitinfo = paginator.get_page(page)
                    return render(request, 'equit/index.html',
                                  {'form': form, 'equitinfo': equitinfo, 'createError': json.dumps(createError)})
                else:
                    info = form.save(commit=False)
                    equitstaff = get_object_or_404(Equitstaff, pk=request.POST.get('staff', ''))
                    # info.staff = equitstaff
                    # Denormalized maintainer contact fields on the record.
                    info.staff_phone = equitstaff.phone
                    info.staff_name = equitstaff.staff_name
                    info.save()
                    form = InfoForm()
                    paginator = Paginator(equitinfo_list, 13)
                    page = request.GET.get('page')
                    equitinfo = paginator.get_page(page)
                    return render(request, 'equit/index.html', {'form': form, 'equitinfo': equitinfo})
            else:
                createError = u"设备信息不正确或输入的信息不规范"
                paginator = Paginator(equitinfo_list, 13)
                page = request.GET.get('page')
                equitinfo = paginator.get_page(page)
                return render(request, 'equit/index.html', {
                    'form': form,
                    'equitinfo': equitinfo,
                    'createError': json.dumps(createError)})
# 查询
# @login_required(login_url='equit:login')
# def search_view(request):
# equitstaff = get_object_or_404(Equitstaff, pk=request.user.equitstaff.id)
# if request.method == 'GET':
# keywords = request.GET.get('keywords', '')
# # error_msg = ''
# if keywords:
# equitinfo = Info.objects.get(Q(ip_addr__exact=keywords) | Q(sw_ip_addr__exact=keywords))
# return render(request, 'equit/index.html', {'equitinfo': equitinfo})
# def equitinfo_create_view(request, equit_staff_id):
# equit_staff = get_object_or_404(Equitstaff, pk=equit_staff_id)
# if request.method == 'POST':
# form = InfoForm(request.POST)
# if form.is_valid():
# info = form.save(commit=False)
# info.staff = equit_staff
# info.staff_phone = equit_staff.phone
# info.staff_name = equit_staff.staff_name
# info.save()
# return HttpResponseRedirect(reverse('equit:index', args=(equit_staff_id,)))
#
# else:
# form = InfoForm()
# return render(request, 'equit/equitinfocreate.html', {'form': form, 'equit_staff_id': equit_staff_id})
# 首页 V0.2
# class IndexView(LoginRequiredMixin, generic.DetailView):
# model = Equitstaff
# template_name = 'equit/index.html'
# @login_required(login_url='equit:login')
# def index_view(request, equitstaff_id):
# equitStaff = get_object_or_404(Equitstaff, pk=equitstaff_id)
# Staff profile page
class EquitstaffUpdate(LoginRequiredMixin, UpdateView):
    """Edit a maintenance staff member's phone number."""
    model = Equitstaff
    fields = ['phone']
    template_name = 'equit/equitstaff.html'
    def get_success_url(self):
        """Sync the denormalized phone number, then return to the index."""
        # print(self.object.id)
        # print(self.object.phone)
        # After a successful phone update, propagate the new number to
        # every Info record maintained by this staff member.
        Info.objects.filter(staff__exact=self.object.id).update(staff_phone=self.object.phone)
        return reverse('equit:index', args=())
        # return reverse('equit:index', args=(self.object.id, ))
# Device detail
# class Equitinfo(LoginRequiredMixin, generic.DetailView):
# model = Info
# template_name = 'equit/equitinfo.html'
@login_required(login_url='equit:login')
def equitinfo_view(request, equit_info_id):
    """Device detail page.

    GET: render the device with a pre-filled InfoForm.
    POST (only for the device's own maintainer or permission level 9):
      'update'  — validate and save edited fields,
      'disable' — mark the device inactive and stamp deact_date,
      'delete'  — hard-delete (permission level 9 only).
    """
    if request.method == 'GET':
        equitinfo = get_object_or_404(Info, pk=equit_info_id)
        form = InfoForm(instance=equitinfo)
        return render(request, 'equit/equitinfo.html', {'info': equitinfo, 'form': form})
    elif request.method == 'POST':
        equitinfo = get_object_or_404(Info, pk=equit_info_id)
        equitstaff = get_object_or_404(Equitstaff, pk=request.user.equitstaff.id)
        # Only the assigned maintainer or an admin (permission 9) may modify.
        if (equitinfo.staff_id == request.user.equitstaff.id) or (equitstaff.equit_permission == 9):
            if 'update' in request.POST:
                updateError = ''
                form = InfoForm(request.POST)
                if form.is_valid():
                    # Uniqueness checks only fire when the field actually changed.
                    if equitinfo.ip_addr != request.POST.get('ip_addr', ''):
                        if request.POST.get('ip_addr', ''):
                            if Info.objects.filter(
                                    ip_addr__exact=request.POST.get('ip_addr', ''),
                                    status=True
                            ) and equitinfo.status:
                                updateError = u'IP地址已存在'
                    if equitinfo.equit_name != request.POST.get('equit_name', ''):
                        if Info.objects.filter(
                                equit_name__exact=request.POST.get('equit_name', ''),
                                status=True
                        ) and equitinfo.status:
                            updateError = u'该设备名已存在'
                    print(request.POST.get('sw_port', ''))
                    print(Info.objects.filter(
                        sw_ip_addr__exact=request.POST.get('sw_ip_addr', ''),
                        sw_port__exact=request.POST.get('sw_port', ''),
                        status=True
                    ))
                    print(request.POST.get('status', ''))
                    if request.POST.get('sw_port', '') and Info.objects.filter(
                            sw_ip_addr__exact=request.POST.get('sw_ip_addr', ''),
                            sw_port__exact=request.POST.get('sw_port', ''),
                            status=True
                    ) and equitinfo.status:
                        updateError = u'该端口已占用'
                    if updateError:
                        return render(request, 'equit/equitinfo.html', {
                            'info': equitinfo, 'form': form, 'updateError': json.dumps(updateError)
                        })
                    else:
                        # Copy each cleaned field onto the existing record.
                        data = form.cleaned_data
                        equitinfo.ip_addr = data['ip_addr']
                        equitinfo.oper_sys = data['oper_sys']
                        equitinfo.sys_prog = data['sys_prog']
                        equitinfo.sw_ip_addr = data['sw_ip_addr']
                        equitinfo.act_date = data['act_date']
                        equitinfo.equit_name = data['equit_name']
                        equitinfo.cab_id = data['cab_id']
                        equitinfo.sw_port = data['sw_port']
                        equitinfo.staff_id = data['staff']
                        equitstaff = get_object_or_404(Equitstaff, pk=request.POST.get('staff', ''))
                        # info.staff = equitstaff
                        equitinfo.staff_phone = equitstaff.phone
                        equitinfo.staff_name = equitstaff.staff_name
                        equitinfo.save()
                        equitinfo = get_object_or_404(Info, pk=equit_info_id)
                        form = InfoForm(instance=equitinfo)
                        return render(request, 'equit/equitinfo.html', {
                            'info': equitinfo, 'form': form
                        })
                else:
                    updateError = u"设备信息不正确或输入的信息不规范"
                    return render(request, 'equit/equitinfo.html', {
                        'info': equitinfo, 'form': form, 'updateError': json.dumps(updateError)
                    })
            elif 'disable' in request.POST:  # deactivate the device
                if equitinfo.status is True:
                    equitinfo.status = False
                    equitinfo.deact_date = datetime.now().strftime('%Y-%m-%d')
                    equitinfo.save()
                equitinfo = get_object_or_404(Info, pk=equit_info_id)
                form = InfoForm(instance=equitinfo)
                return render(request, 'equit/equitinfo.html', {
                    'info': equitinfo, 'form': form
                })
            elif 'delete' in request.POST:  # hard delete
                if equitstaff.equit_permission == 9:
                    equitinfo.delete()
                    return HttpResponseRedirect(reverse('equit:index', args=()))
                else:
                    deleteError = u'无删除设备权限'
                    form = InfoForm(instance=equitinfo)
                    return render(request, 'equit/equitinfo.html', {
                        'info': equitinfo, 'form': form, 'permissionError': json.dumps(deleteError)
                    })
        else:
            permissionError = u"无修改此设备权限"
            form = InfoForm(instance=equitinfo)
            return render(request, 'equit/equitinfo.html', {
                'info': equitinfo, 'form': form, 'permissionError': json.dumps(permissionError)
            })
# elif request.method == 'POST':
# equitInfo = get_object_or_404(Info, pk=equitInfoId)
# if 'delete' in request.POST:
# try:
# equitStaff = get_object_or_404(Equitstaff, pk=request.equitstaff.id)
# if equitStaff.equit_permission == 9:
#
# with transaction.atomic():
# equitInfo.delete()
# return reverse('equit:index', args=())
# except Exception as e:
# print(e)
# deleteError = u"删除失败"
# return render(request, 'equit/equitinfo.html', {
# 'equitinfo': equitInfo, 'deleteError': json.dumps(deleteError)
# })
# 修改
# class Equitinfoupdate(LoginRequiredMixin, UpdateView):
# model = Info
# fields = [
# 'ip_addr',
# 'oper_sys',
# 'sys_prog',
# 'sw_ip_addr',
# 'act_date',
# 'equit_name',
# 'cab_id'
# ]
# template_name = 'equit/equitinfoupdate.html'
# @login_required(login_url='equit:login')
# def equitinfo_update_view(request, equitInfoId):
# if request.method == 'POST':
# try:
# equitStaff = get_object_or_404(Equitstaff, pk=request.user.equitstaff)
# equitInfo = get_object_or_404(Info, pk=equitInfoId)
# except Exception as e:
# 新增视图
# @login_required(login_url='equit:login')
# def equitinfo_create_view(request, equit_staff_id):
# equit_staff = get_object_or_404(Equitstaff, pk=equit_staff_id)
# if request.method == 'POST':
# form = InfoForm(request.POST)
# if form.is_valid():
# info = form.save(commit=False)
# info.staff = equit_staff
# info.staff_phone = equit_staff.phone
# info.staff_name = equit_staff.staff_name
# info.save()
# return HttpResponseRedirect(reverse('equit:index', args=(equit_staff_id,)))
# else:
# form = InfoForm()
# return render(request, 'equit/equitinfocreate.html', {'form': form, 'equit_staff_id': equit_staff_id})
#
#
# # 删除
# class Equitinfodelete(LoginRequiredMixin, DeleteView):
# model = Info
# template_name = 'equit/equitinfodelete.html'
#
# def get_success_url(self):
# # print(self.object.id)
# # print(self.object.staff_id)
#
# return reverse('equit:index', args=(self.object.staff_id, ))
# 批量导入
# @login_required(login_url='equit:login')
# def excel_upload(request):
# equitinfo = Info.objects.all()
# form = InfoForm()
# # print(request.user.equitstaff.id)
# # form = UploadFileForm(request.POST, request.FILES)
# # print(form)
# # print(forms.is_valid())
# # if form.is_valid():
# equit_staff = get_object_or_404(Equitstaff, pk=request.user.equitstaff.id)
# if request.FILES.get('excel_file', ''):
# f = request.FILES.get('excel_file', '')
# type_excel = f.name.split('.')[-1] # 只判断最后一个.即文件后缀
# if type_excel == 'xlsx':
# # 解析excel
# wb = xlrd.open_workbook(filename=None, file_contents=f.read())
# table = wb.sheets()[0]
#
# nrows = table.nrows # 行数
# # ncole = table.ncols #
# if nrows > 1:
# try:
# with transaction.atomic():
# for i in range(1, nrows):
# row_values = table.row_values(i)
# # print(rowvalues)
# # 校验IP地址及唯一ip地址在用状态校验
# if IPy.IP(row_values[0]) and not (
# Info.objects.filter(ip_addr__exact=row_values[0], status=True)):
# equitinfo = Info.objects.create(
# ip_addr=row_values[0],
# oper_sys=row_values[1],
# sys_prog=row_values[2],
# sw_ip_addr=row_values[3],
# act_date=row_values[4],
# equit_name=row_values[5],
# cab_id=row_values[6],
# staff=equit_staff,
# staff_name=equit_staff.staff_name,
# staff_phone=equit_staff.phone,
# status=True
# )
# # equitinfo.clean_fields(exclude=None) 官方文档上有提到在save前要进行数据校验,如果没有成功应该是提交到数据库时数据库报错后返回
# equitinfo.save()
# return HttpResponseRedirect(reverse('equit:index', args=()))
# except Exception as e:
# print(e)
# excelError = u"导入失败,请检查导入的数据是否正确"
# return render(request, 'equit/index.html', {
# 'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
# })
# else:
# excelError = u"excel文件不能为空"
# return render(request, 'equit/index.html', {
# 'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
# })
# else:
# excelError = u"上传文件格式不是xlsx"
# return render(request, 'equit/index.html', {
# 'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
# })
# else:
# excelError = u"文件不能为空."
# return render(request, 'equit/index.html', {
# 'equitinfo': equitinfo, 'form': form, 'excelError': json.dumps(excelError)
# })
#
@login_required(login_url='equit:login')
def excel_export(request):
    """Export every Info record as an .xlsx download.

    Builds one header row plus one row per record and returns the
    workbook as an attachment named '设备信息_<YYYY-MM-DD>.xlsx', with a
    browser-appropriate Content-Disposition encoding.
    """
    # Removed the four debug print() calls from the original.
    header = request.META.get('HTTP_USER_AGENT', '')
    # equit_staff = get_object_or_404(Equitstaff, pk=equit_staff_id)
    equit_info_list = Info.objects.all()
    wb = Workbook()
    sheet = wb.worksheets[0]
    # Column captions: IP, OS, system program, uplink IP, uplink port,
    # activation date, device name, rack id, maintainer name, phone.
    row0 = [u'IP地址', u'操作系统', u'系统程序', u'上级IP地址', u'上级IP端口', u'启用时间', u'设备名称', u'机架号', u'维护人员', u'联系方式']
    sheet.append(row0)
    for equit_info in equit_info_list:
        row = [
            equit_info.ip_addr,
            equit_info.oper_sys,
            equit_info.sys_prog,
            equit_info.sw_ip_addr,
            equit_info.sw_port,
            equit_info.act_date,
            equit_info.equit_name,
            equit_info.cab_id,
            equit_info.staff_name,
            equit_info.staff_phone
        ]
        sheet.append(row)
    dest_filename = '设备信息_' + time.strftime('%Y-%m-%d', time.localtime()) + '.xlsx'
    # NOTE(review): save_virtual_workbook is deprecated in recent
    # openpyxl releases; switch to wb.save(io.BytesIO()) when upgrading.
    response = HttpResponse(content=save_virtual_workbook(wb), content_type='application/vnd.ms-excel')
    # Browser adaptation: Chrome/Firefox accept the RFC 5987 filename* form.
    if (header.find('Chrome') != -1) or (header.find('Firefox') != -1):
        response['Content-Disposition'] = "attachment; filename*=utf-8''{}".format(escape_uri_path(dest_filename))
    else:
        response['Content-Disposition'] = 'attachment; filename=' + escape_uri_path(dest_filename)
    return response
|
16,689 | d8fac4b6271f92906ee4fe4885996d0fbc088899 | import picamera
import picamera.array
import io
from config import IMAGE_WIDTH, IMAGE_HEIGHT
from PIL import Image
import numpy as np
import logging
from logging import INFO, DEBUG
# Module-level logging configuration: INFO level, message shows the
# level, source file and line number.
logging.basicConfig(level=INFO, format="%(levelname)s [%(filename)s line %(lineno)d]: %(message)s")
logger = logging.getLogger()  # root logger
logger.disabled = False  # explicitly left enabled (flip to True to silence)
class MyPiCamera:
    """
    This class will be in CarController.py, created at __init__.
    To be used in gym_env.

    Thin wrapper around picamera.PiCamera that captures single RGB
    frames as numpy arrays.
    """
    def __init__(self):
        """Open the Pi camera at 640x480 @ 30 fps and record target sizes."""
        logging.info("Creating camera wrapper in gym env")
        self.camera = picamera.PiCamera(resolution=(640, 480), framerate=30)
        # self.stream = io.BytesIO()
        # variables starting with _ are like private vars
        # Target model-input dimensions from config (capture itself stays
        # 640x480; no resize is performed here).
        self._input_width = IMAGE_WIDTH
        self._input_height = IMAGE_HEIGHT
        logging.info("input width: {0}, input height: {1}".format(self._input_width, self._input_height))
    def get_image_array(self):
        """
        Captures the current image as seen on camera, converts it to a numpy array and returns it.

        Returns a (480, 640, 3) RGB array from PiRGBArray.
        """
        with picamera.array.PiRGBArray(self.camera) as output:
            self.camera.resolution = (640, 480)
            self.camera.capture(output, 'rgb')
            logging.info("Captured image of size {0}x{1}x{2}".format(
                output.array.shape[0], output.array.shape[1], output.array.shape[2]))
            # NOTE(review): truncate(0) resets the stream before reuse;
            # output.array remains valid after it — confirm against the
            # picamera docs if this ordering is ever changed.
            output.truncate(0)
            return output.array
        # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)
        # self.stream.seek(0)
        # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)
        # self.stream.seek(0)
        # self.stream.truncate()
        # self.camera.close()
    def quit(self):
        """Release the camera hardware."""
        self.camera.close()
16,690 | b0c67ce4b40c15457d0d87be169509a5e709db3c | a=int(input())
def test(a1,result):
if a1/10>0:
result+=1
a1=int(a1/10)
return test(a1,result)
else:
return result
# Print the digit count of the integer read from stdin above.
print(test(a,0))
|
16,691 | 89c3e61b701509a5e22fd7add915e9d5f6ee5979 | # valid 를 통해서 유효한 범위 내인지를 확인
# check 함수를 통해 거리두기를 지키고 있는 지 여부확인
def solution(places):
    """Check social distancing on each 5x5 waiting room.

    A room passes (1) when no two occupied seats 'P' are within Manhattan
    distance 2 of each other unless every cell on a shortest path between
    them is a partition 'X'; otherwise it fails (0).  Returns one 0/1
    flag per room, in input order.
    """
    SIZE = 5

    def in_bounds(row, col):
        return 0 <= row < SIZE and 0 <= col < SIZE

    def keeps_distance(row, col, grid):
        # Directly adjacent occupied seat: always a violation.
        for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            r2, c2 = row + dr, col + dc
            if in_bounds(r2, c2) and grid[r2][c2] == 'P':
                return False
        # Diagonal neighbour: violation unless BOTH shared corners are 'X'.
        for dr, dc in ((-1, -1), (-1, 1), (1, -1), (1, 1)):
            r2, c2 = row + dr, col + dc
            if in_bounds(r2, c2) and grid[r2][c2] == 'P':
                if grid[row][c2] != 'X' or grid[r2][col] != 'X':
                    return False
        # Two apart in a straight line: violation unless the middle cell is 'X'.
        for dr, dc in ((2, 0), (0, 2), (-2, 0), (0, -2)):
            r2, c2 = row + dr, col + dc
            if in_bounds(r2, c2) and grid[r2][c2] == 'P':
                if grid[row + dr // 2][col + dc // 2] != 'X':
                    return False
        return True

    results = []
    for grid in places:
        verdict = 1
        for row in range(SIZE):
            for col in range(SIZE):
                if grid[row][col] == 'P' and not keeps_distance(row, col, grid):
                    verdict = 0
                    break
            if verdict == 0:
                break
        results.append(verdict)
    return results
def parse(data):
    """Interpret a tiny command string and return the collected outputs.

    Commands (one character each), acting on an accumulator that starts at 0:
      'i' -> increment
      'd' -> decrement
      's' -> square
      'o' -> append the current accumulator value to the output list
    Any other character is ignored.
    """
    acc = 0
    output = []  # renamed from `list`, which shadowed the builtin
    for command in data:
        if command == "i":
            acc += 1
        elif command == "d":
            acc -= 1
        elif command == "s":
            acc *= acc
        elif command == "o":
            output.append(acc)
    return output
|
16,693 | 7d48b81d49d3c5b88467e25c3df31c095beafe8b | import os
import numpy as np
def create_dawa(class_names_to_load, path_to_dawa):
    """Load per-class semantic attribute vectors ("DAWA" embeddings).

    Args:
        class_names_to_load: iterable of class names whose attribute
            vectors should be returned, in order.
        path_to_dawa: directory containing attributes.txt, classes.txt
            and attribute_matrix.csv (one matrix row per class, one
            column per attribute).

    Returns:
        List of 1-D numpy arrays, one per requested class, ordered like
        class_names_to_load.

    Raises:
        ValueError: if a requested class name is not in classes.txt.
    """
    # Set the paths to the attribute information
    path_to_attribute_names = os.path.join(path_to_dawa, 'attributes.txt')
    path_to_class_names = os.path.join(path_to_dawa, 'classes.txt')
    path_to_attribute_matrix = os.path.join(path_to_dawa, 'attribute_matrix.csv')
    # np.loadtxt accepts a path directly; the original opened the file
    # itself and leaked the handle.
    attribute_matrix = np.loadtxt(path_to_attribute_matrix, delimiter=",")
    # Load the attribute names into a list
    with open(path_to_attribute_names) as f:
        attribute_names = [line.strip() for line in f]
    # Load the class names into a list
    with open(path_to_class_names) as f:
        class_names = [line.strip() for line in f]
    # Make sure the matrix dimensions match the lists
    assert len(attribute_names) == attribute_matrix.shape[1] and len(class_names) == \
        attribute_matrix.shape[0]
    # Gather the embeddings specified in class_names_to_load, in order.
    return [attribute_matrix[class_names.index(current_class)]
            for current_class in class_names_to_load]
if __name__ == '__main__':
    # Example code for loading the attributes.
    # Create the list of activity names whose embeddings should be loaded.
    # In this case, we load all activities.
    with open('./attributes/classes.txt') as f:
        class_names_to_load = f.readlines()
    class_names_to_load = [x.strip() for x in class_names_to_load]
    # Set up the semantic space: driver activities with attributes
    dawa = create_dawa(class_names_to_load=class_names_to_load, path_to_dawa='./attributes/')
    # You are ready to go; the ordering of the attributes corresponds to the
    # ordering in the supplied class_names_to_load
    print("There are {} classes with {} attributes respectively.\n".format(len(class_names_to_load),
                                                                           dawa[0].shape[0]))
    print("The activity \"{}\" has the following attribute representation:".format(
        class_names_to_load[0]))
    print(dawa[0])
16,694 | 1d93a198a9877fc3bd0e23c8f6c5dd7d36bc379e | #!/usr/bin/env python
import sys
from distmesh_dyn import DistMesh
from imgproc import findObjectThreshold
from synth import test_data, test_data_texture, test_data_image
from kalman import IteratedMSKalmanFilter, stats, IteratedKalmanFilter
from renderer import VideoStream
import pdb
import time
import cv2
import numpy as np
import matplotlib.pyplot as plot
import seaborn as sns
import pandas as pd
# NOTE: Python 2 script (print statements below).
cuda = True
threshold = 9
name = 'test_data'
# Synthetic 680x680 test sequence plus its ground-truth optic flow.
video, flow = test_data(680, 680)
nx = 680
start = nx//3
end = 2*nx//3
nI = 5
# Mesh spacings to benchmark (smaller h0 => more mesh points).
gridsizes = [80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20]
#gridsizes = [80, 60]
nF = video.shape[2]
#In data, for each grid size, we store:
#- mesh pts
#- ave update time,
#- ave pred time,
#- no. partitions jacobian
#- no. partitions hessian
#- ave per update iteration renders
#- ave per update iteration jacobain renders
#- ave per update iteration hessian renders
#- ave per update iteration renders theoretical (without multi pert rendering)
#- ave per update jacobain time
#- ave per update hessian time
data = np.zeros((len(gridsizes), 11))
for idx, gridsize in enumerate(gridsizes):
    #idx = 1; gridsize = 40
    print 'Running KF for gridsize =', gridsize
    flowframe = flow[:,:,:,0]
    frame = video[:,:,0]
    # Build the initial mesh over the thresholded object in frame 0.
    distmesh = DistMesh(frame, h0 = gridsize)
    mask, ctrs, h = findObjectThreshold(frame, threshold = threshold)
    distmesh.createMesh(ctrs, h, frame, plot=False)
    kf = IteratedMSKalmanFilter(distmesh, frame, flowframe, cuda, multi = True)
    kf.state.render()
    count = 0
    # Track three frames to accumulate timing statistics.
    for i in range(3):
        count += 1
        print 'Frame %d' % count
        frame = video[:,:,i]
        mask = (frame > 0).astype(np.uint8)
        flowframe = flow[:,:,:,i]
        time.sleep(1)
        kf.compute(frame, flowframe, mask)
    #Extract stats
    meshpts = stats.meshpts
    ave_update_time = stats.stateupdatetc[0]/stats.stateupdatetc[1]
    ave_pred_time = stats.statepredtime[0]/stats.stateupdatetc[1]
    jacpartitions = stats.jacobianpartitions
    hesspartitions = stats.hessianpartitions
    ave_nrenders = stats.renders[0]/stats.stateupdatetc[1]
    ave_jacrenders = stats.jacobianrenderstc[1]/stats.stateupdatetc[1]
    ave_hessrenders = stats.hessianrenderstc[1]/stats.stateupdatetc[1]
    ave_theoryrenders = ave_jacrenders + ave_hessrenders
    ave_jac_time = stats.jacobianrenderstc[0]/stats.stateupdatetc[1]
    ave_hess_time = stats.hessianrenderstc[0]/stats.stateupdatetc[1]
    data[idx, :] = [meshpts, ave_update_time, ave_pred_time, jacpartitions,\
        hesspartitions, ave_nrenders, ave_jacrenders, ave_hessrenders,\
        ave_theoryrenders, ave_jac_time, ave_hess_time]
    # Tear down GPU buffers before the next grid size.
    kf.state.renderer.cudagl._destroy_PBOs()
    kf.__del__()
    stats.reset()
#Save data
np.savez('./timing_synthetic1.npz', data, gridsizes)
#Convert to pandas dataframe
#Reload data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plot
r = np.load('./timing_synthetic1.npz')
data = r['arr_0']
gridsizes = r['arr_1']
names = ['meshpts','ave_update_time','ave_pred_time','jacpartitions','hesspartitions','ave_nrenders','ave_jacrenders','ave_hessrenders','ave_theoryrenders','ave_jac_time','ave_hess_time']
df = pd.DataFrame(data, columns = names, index = gridsizes)
#Plot data
#Plot total renders per number of nodes
#f,(ax1, ax2, ax3, ax4, ax5, ax6) = plot.subplots(6)
plot.clf()
plot.plot(df.meshpts, df.ave_nrenders, label = 'per update iteration multipert renders')
plot.plot(df.meshpts, df.ave_theoryrenders, label = 'per update iteration singlepert renders')
plot.xlabel('Mesh points')
plot.ylabel('number of render operations')
plot.legend(loc = 'upper left')
plot.savefig('./analysis/timing_synthetic_renders.eps')
plot.clf()
plot.plot(df.meshpts, df.ave_update_time, label = 'Ave. per update iteration time')
plot.plot(df.meshpts, df.ave_pred_time, label = 'Ave. per prediction iteration time')
plot.xlabel('Mesh points')
plot.ylabel('time (s)')
plot.legend(loc = 'upper left')
plot.savefig('./analysis/timing_synthetic_timing.eps')
plot.clf()
plot.plot(df.meshpts, df.jacpartitions, label = 'Multi-perturbation jacobian partitions')
plot.plot(df.meshpts, df.hesspartitions, label = 'Multi-perturbation hessian partitions')
plot.legend(loc = 'upper left')
plot.xlabel('Mesh points')
plot.ylabel('number of paritions')
plot.savefig('./analysis/timing_synthetic_partitions.eps')
#savefig(fname, dpi=None, facecolor='w', edgecolor='w',
#        orientation='portrait', papertype=None, format=None,
#        transparent=False, bbox_inches=None, pad_inches=0.1,
#        frameon=None)
16,695 | fbae963ec6c60958677cf78c31b94d52be7b2032 | import sklearn
from sklearn import datasets
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')  # ggplot-like styling for all subsequent plots
iris = datasets.load_iris()  # bundled iris dataset (sklearn Bunch)
|
16,696 | 1e847078c419a51fada0b15c29c2909c76da2359 | import numpy as np
import pandas as pd
import sys
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import Conv2D, MaxPooling2D, Flatten, BatchNormalization
from keras.models import Sequential,load_model
from keras.optimizers import SGD, Adam
from keras.utils import np_utils
from keras.initializers import he_normal, he_uniform
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
# Training data: CSV whose first column is the label and second column a
# space-separated string of 48x48 grayscale pixels.
raw_data = pd.read_csv(sys.argv[1], sep = ',' ,encoding = 'UTF-8')
raw_data = raw_data.as_matrix()
y_data = raw_data[:,0]
x_data = list()
num = len(y_data)
for i in range(num):
    temp = raw_data[i][1].split()
    x_data.append(temp)
# Normalize pixels to [0, 1]; labels to 7-way one-hot.
x_data = np.array(x_data).astype(float).reshape(num,48,48,1)/255.0
y_data = np_utils.to_categorical(y_data,7)
"""
np.save('x_data.npy',x_data)
np.save('y_data.npy',y_data)
x_data = np.load('x_data.npy')
y_data = np.load('y_data.npy')
"""
num = len(y_data)
# 90/10 train/validation split (no shuffling before the split).
x_train = x_data[:int(0.9*num),:,:,:]
y_train = y_data[:int(0.9*num),:]
num_train = x_train.shape[0]
x_val = x_data[int(0.9*num):,:,:,:]
y_val = y_data[int(0.9*num):,:]
num_val = x_val.shape[0]
# CNN: three Conv-Conv-Pool stages (32/64/128 filters) + two dense layers.
model = Sequential() # kernel_constraint
model.add(Conv2D(32,kernel_size = (3,3),input_shape=(48,48,1),padding='same',kernel_initializer = 'he_normal'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32,kernel_size = (3,3),padding='same',kernel_initializer = 'he_normal'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64,kernel_size = (3,3),padding='same',kernel_initializer = 'he_normal'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64,kernel_size = (3,3),padding='same',kernel_initializer = 'he_normal'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128,kernel_size = (3,3),padding='same',kernel_initializer = 'he_normal'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128,kernel_size = (3,3),padding='same',kernel_initializer = 'he_normal'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(7,activation='softmax'))
model.summary();
model.compile(loss='categorical_crossentropy', optimizer = Adam(lr=0.001), metrics=['accuracy'])
# Data augmentation for training batches.
datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
datagen.fit(x_train)
# 40 rounds: 1 plain epoch + 5 augmented epochs each; best val_acc
# checkpointed to 0.68.h5.
for i in range(40):
    checkpointer = ModelCheckpoint(filepath='0.68.h5',monitor = 'val_acc' ,save_best_only=True,save_weights_only=False, mode='max')
    model.fit(x_train,y_train,batch_size=128,epochs=1,validation_data=(x_val,y_val))
    model.fit_generator(datagen.flow(x_train,y_train,batch_size=32),num_train/32,epochs=5,validation_data=(x_val,y_val),validation_steps=num_val/32, callbacks=[checkpointer])
################################################################################################
"""
model = load_model('0.68.h5')
test_data = pd.read_csv(sys.argv[2], sep = ',', encoding = 'UTF-8')
test_data = test_data.as_matrix()
id_data = test_data[:,0]
test_num = len(id_data)
test = list()
for i in range(test_num):
    temp = test_data[i][1].split()
    test.append(temp)
test = np.array(test).astype(float).reshape(test_num,48,48,1)/255.0
result = model.predict(test)
result = result.argmax(axis=-1)
output=pd.DataFrame()
output['id'] = [str(i) for i in range(test_num)]
output['label'] = pd.Series(result, index = output.index)
output.to_csv('firstCNN.csv',index=False)
"""
|
16,697 | ab5823891ba450affe47b20081b6a8517f8a6124 | #!/usr/bin/python
# encoding:utf-8
"""
@author: yuanxin
contact:
@file: 2017/9/14-muti_proc_demo01.py
@time: 2017/9/14
"""
from multiprocessing import Process,current_process
import time
def worker():
    """Announce this process's identity twice per second, forever.

    Intended to run inside a child multiprocessing.Process; it never
    returns and must be terminated externally.
    """
    message = 'this is worker NO. %s '
    while True:
        print(message % current_process())
        time.sleep(0.5)
if __name__ == '__main__':
    # Spawn three worker processes, start them all, then block on each.
    # The workers loop forever, so join() only returns if they are killed.
    workers = [Process(target=worker) for _ in range(3)]
    for proc in workers:
        proc.start()
    for proc in workers:
        proc.join()
16,698 | 9e87e86783fe207b5c5c76b63797b5c12d3597e9 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import json
import os
import re
import sys
from pprint import pprint
import argparse
import deepdiff
def get_suspect_packages():
    """Return package names suspected of possible hijacking.

    The names are read from 'gistfile1.txt' next to this script, one per
    line. The list was obtained from:
    https://medium.com/@azerbike/i-ve-just-liberated-my-modules-9045c06be67c#.7kh6ics0w

    Returns:
        A list of whitespace-stripped package-name strings.
    """
    listing_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'gistfile1.txt')
    with open(listing_path, 'r') as f:
        # A concrete list behaves identically on Python 2 and 3, unlike the
        # lazy map object py3 would return, and is safe to iterate twice.
        return [line.strip() for line in f]
def parse_manifest(manifest_path):
    """Parse a package.json manifest file.

    Args:
        manifest_path: Path to the manifest file.

    Returns:
        The decoded JSON object, or {} when the file is empty.
    """
    with open(manifest_path, 'r') as fh:
        raw = fh.read()
    return json.loads(raw) if raw else {}
def walk_path(start_path, match_pattern=None, recursive=True):
    """Walk a directory, yielding file paths it visits.

    Args:
        start_path: Directory (or single file) to walk.
        match_pattern: Compiled regex filtering the file paths yielded from
            directories; defaults to match-everything.
        recursive: When False, subdirectory paths themselves are yielded
            instead of being descended into.

    Yields:
        Matching file paths under start_path; if start_path is not a
        directory it is yielded unconditionally.
    """
    if match_pattern is None:
        match_pattern = re.compile(r'.*')
    if not os.path.isdir(start_path):
        # A plain file (or missing path): yield it as-is, no pattern check.
        yield start_path
        return
    for entry in os.listdir(start_path):
        entry_path = os.path.join(start_path, entry)
        if os.path.isdir(entry_path):
            if recursive:
                for descendant in walk_path(entry_path, match_pattern, recursive):
                    yield descendant
            else:
                # Non-recursive mode surfaces the subdirectory path itself.
                yield entry_path
        elif match_pattern.search(entry_path):
            yield entry_path
def build_tree(base_path, match_pattern, leaf_evaluator):
    """Build a directory tree represented as a nested dict.

    Args:
        base_path: Root directory (or file) to index; must exist.
        match_pattern: Compiled regex passed to walk_path to select files.
        leaf_evaluator: Callable mapping a file path to the value stored at
            the corresponding leaf.

    Returns:
        A dict of dicts mirroring the directory structure relative to
        base_path; file names map to leaf_evaluator(path).

    Raises:
        ValueError: If base_path does not exist.
    """
    if not os.path.exists(base_path):
        # Bug fix: the original format string had no '{}' placeholder, so the
        # offending path was silently dropped from the error message.
        raise ValueError('path does not exist: {}'.format(base_path))
    tree = {}
    # Number of leading path segments to strip so keys are relative to base_path.
    start_segment = len(base_path.split(os.path.sep))
    for path in walk_path(base_path, match_pattern):
        segments = path.split(os.path.sep)[start_segment:]
        segments_len = len(segments)
        subtree = tree
        for i, segment in enumerate(segments):
            if segment not in subtree:
                if i == segments_len - 1:
                    # Last segment is the file itself: store the leaf value.
                    subtree[segment] = leaf_evaluator(path)
                else:
                    # Intermediate segment: create the directory node.
                    subtree[segment] = {}
            subtree = subtree[segment]
    return tree
def create_npm_leaf_evaluator(manifest_pattern):
    """Create a leaf evaluator for npm trees.

    The returned callable parses package.json leaves and returns the
    manifest contents when the package name is on the suspect list,
    otherwise an empty dict.

    Args:
        manifest_pattern: Compiled regex identifying manifest file paths.

    Returns:
        A function mapping a file path to a manifest dict (possibly empty).
    """
    suspects = set(get_suspect_packages())

    def leaf_evaluator(path):
        if not manifest_pattern.search(path):
            return {}
        manifest = parse_manifest(path)
        return manifest if manifest.get('name') in suspects else {}

    return leaf_evaluator
def build_npm_tree(path):
    """Build a tree of suspect npm package manifests found under *path*."""
    manifest_re = re.compile(os.path.sep + r'package\.json$')
    evaluator = create_npm_leaf_evaluator(manifest_re)
    return build_tree(path, manifest_re, evaluator)
def main():
    """Compare a baseline node_modules tree against a possibly tainted one
    and pretty-print any differences between the suspect-package manifests."""
    parser = argparse.ArgumentParser(description='Ensures "liberated" node_modules are not compromised')
    parser.add_argument('baseline_path', help='Baseline (untainted) node_modules path')
    parser.add_argument('tainted_path', help='Tainted node_modules path to compare against baseline')
    options = parser.parse_args()
    baseline_tree = build_npm_tree(options.baseline_path)
    suspect_tree = build_npm_tree(options.tainted_path)
    pprint(deepdiff.DeepDiff(baseline_tree, suspect_tree), indent=2)
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == '__main__':
    main()
|
16,699 | 914a29e10e5fda1c620707caf7c56c21d14070c2 | #!/usr/bin/env python
import blaze as bz
import gtabview
# Load the OHLCV CSV into a Blaze data expression.
# NOTE(review): assumes 'data_ohlcv.csv' exists in the working directory, and
# blaze's Data API is long-deprecated — confirm the installed version.
data = bz.Data('data_ohlcv.csv')
# Open the data in gtabview's spreadsheet-style viewer window.
gtabview.view(data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.