blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91b1c10e64a295b822581c8cf1127bbf61ba56b2 | 7cecdae92dc9b32df74d45a1bd529bc544283f94 | /tests/test_config_reader.py | 20ca54e3cbd8839410c09f8f079c3409405dcb82 | [
"MIT"
] | permissive | Josef-Friedrich/jflib | 6d680bebc414a7b56918fdb33694ce666e41d9af | 722f8cd9fa7008fc1db77d7e785f6bfd90b5d7f6 | refs/heads/main | 2023-05-28T23:32:33.381374 | 2023-05-23T21:30:22 | 2023-05-23T21:30:22 | 180,605,227 | 3 | 0 | MIT | 2023-05-23T21:30:23 | 2019-04-10T14:58:43 | Python | UTF-8 | Python | false | false | 16,179 | py | import argparse
import os
import tempfile
import unittest
from jflib import config_reader
from jflib.config_reader import (
ArgparseReader,
ConfigReader,
ConfigValueError,
DictionaryReader,
EnvironReader,
IniReader,
ReaderBase,
ReaderSelector,
load_readers_by_keyword,
validate_key,
)
FILES_DIR = os.path.join(os.path.dirname(__file__), "files")

# Fixture INI file shared by the reader tests; its contents are:
# [Classical]
# name = Mozart
# [Romantic]
# name = Schumann
INI_FILE = os.path.join(FILES_DIR, "config.ini")

# Environment variables consumed by the EnvironReader tests (prefix "XXX").
os.environ["XXX__Classical__name"] = "Mozart"
os.environ["XXX__Baroque__name"] = "Bach"

# Pre-parsed argparse namespace shared by the ArgparseReader tests.
parser = argparse.ArgumentParser()
parser.add_argument("--classical-name")
parser.add_argument("--baroque-name")
ARGPARSER_NAMESPACE = parser.parse_args(
    ["--baroque-name", "Bach", "--classical-name", "Mozart"]
)
class TestFunctionValidateKey(unittest.TestCase):
    """Tests for config_reader.validate_key()."""

    def test_valid(self):
        """Keys consisting of a-z, A-Z, 0-9 and underscore are accepted."""
        self.assertTrue(validate_key("test"))
        self.assertTrue(validate_key("test_1"))
        self.assertTrue(validate_key("1"))
        self.assertTrue(validate_key("a"))
        self.assertTrue(validate_key("ABC_abc_123"))

    def test_invalid(self):
        """Spaces and non-ASCII characters raise ValueError."""
        with self.assertRaises(ValueError) as context:
            validate_key("l o l")
        self.assertEqual(
            str(context.exception),
            "The key “l o l” contains invalid characters " "(allowed: a-zA-Z0-9_).",
        )
        with self.assertRaises(ValueError) as context:
            validate_key("ö")
# Reader classes ##############################################################
class FalseReader(ReaderBase):
    """Deliberately incomplete ReaderBase subclass: it does not implement
    the abstract reader method, so instantiating it must raise TypeError
    (exercised by TestClassReaderBase below)."""

    def not_get(self):
        return "It’s not get"
class TestClassReaderBase(unittest.TestCase):
    """ReaderBase is abstract: a subclass that does not implement the
    abstract method(s) cannot be instantiated."""

    def test_exception(self):
        with self.assertRaises(TypeError):
            FalseReader()  # pylint: disable=abstract-class-instantiated
class TestClassArgparseReader(unittest.TestCase):
    """ArgparseReader: look up (section, key) values on a parsed argparse
    namespace, optionally through an explicit "Section.key" -> dest
    mapping."""

    def test_method_get_without_mapping(self):
        # Without a mapping the reader presumably derives the dest name
        # from section and key ("classical_name") — TODO confirm in jflib.
        argparse = ArgparseReader(args=ARGPARSER_NAMESPACE)
        self.assertEqual(argparse.get("Classical", "name"), "Mozart")
        self.assertEqual(argparse.get("Baroque", "name"), "Bach")

    def test_method_get_with_mapping(self):
        argparse = ArgparseReader(
            args=ARGPARSER_NAMESPACE,
            mapping={
                "Classical.name": "classical_name",
                "Baroque.name": "baroque_name",
            },
        )
        self.assertEqual(argparse.get("Classical", "name"), "Mozart")
        self.assertEqual(argparse.get("Baroque", "name"), "Bach")

    def test_exception(self):
        # A mapped-but-unset value ("Romantic") and an unmapped section
        # ("Modern") both raise ConfigValueError.
        argparse = ArgparseReader(
            args=ARGPARSER_NAMESPACE,
            mapping={
                "Classical.name": "classical_name",
                "Baroque.name": "baroque_name",
                "Romantic.name": "romantic_name",
            },
        )
        with self.assertRaises(ConfigValueError):
            argparse.get("Romantic", "name")
        with self.assertRaises(ConfigValueError):
            argparse.get("Modern", "name")
class TestClassDictionaryReader(unittest.TestCase):
    """DictionaryReader: read (section, key) values from a nested dict."""

    # Read-only class-level fixture shared by both tests.
    dictionary = {"Classical": {"name": "Mozart"}}

    def test_method_get(self):
        dictionary = DictionaryReader(dictionary=self.dictionary)
        self.assertEqual(dictionary.get("Classical", "name"), "Mozart")

    def test_exception(self):
        dictionary = DictionaryReader(dictionary=self.dictionary)
        with self.assertRaises(ConfigValueError):
            dictionary.get("Romantic", "name")
class TestClassEnvironReader(unittest.TestCase):
    """EnvironReader with a prefix: variables are named
    PREFIX__section__key."""

    def test_method_get(self):
        os.environ["AAA__bridge__ip"] = "1.2.3.4"
        os.environ["AAA__bridge__username"] = "test"
        environ = EnvironReader(prefix="AAA")
        self.assertEqual(environ.get("bridge", "ip"), "1.2.3.4")
        self.assertEqual(environ.get("bridge", "username"), "test")

    def test_exception(self):
        """An unset variable raises ConfigValueError with a precise message."""
        environ = EnvironReader(prefix="AAA")
        with self.assertRaises(ConfigValueError) as cm:
            environ.get("lol", "lol")
        self.assertEqual(
            str(cm.exception),
            "Environment variable not found: AAA__lol__lol",
        )
class TestClassEnvironWithoutPrefix(unittest.TestCase):
    """EnvironReader without a prefix: variables are named section__key."""

    def test_method_get(self):
        os.environ["Avantgarde__name"] = "Stockhausen"
        environ = EnvironReader()
        self.assertEqual(environ.get("Avantgarde", "name"), "Stockhausen")
        # Clean up so the variable does not leak into other tests.
        del os.environ["Avantgarde__name"]

    def test_exception(self):
        environ = EnvironReader()
        with self.assertRaises(ConfigValueError) as cm:
            environ.get("xxxAvantgarde", "xxxname")
        self.assertEqual(
            str(cm.exception),
            "Environment variable not found: xxxAvantgarde__xxxname",
        )
class TestClassIniReader(unittest.TestCase):
    """IniReader: read values from an INI file; unusable path arguments
    raise IniReaderError at construction time."""

    def test_method_get(self):
        ini = IniReader(path=INI_FILE)
        self.assertEqual(ini.get("Classical", "name"), "Mozart")
        self.assertEqual(ini.get("Romantic", "name"), "Schumann")

    def test_exception(self):
        ini = IniReader(path=INI_FILE)
        with self.assertRaises(ConfigValueError) as context:
            ini.get("lol", "lol")
        self.assertEqual(
            str(context.exception),
            "Configuration value could not be found (section “lol” key " "“lol”).",
        )

    def test_non_existent_ini_file(self):
        tmp_path = tempfile.mkdtemp()
        non_existent = os.path.join(tmp_path, "xxx")
        with self.assertRaises(config_reader.IniReaderError):
            IniReader(path=non_existent)

    def test_none(self):
        with self.assertRaises(config_reader.IniReaderError):
            IniReader(path=None)

    def test_false(self):
        with self.assertRaises(config_reader.IniReaderError):
            IniReader(path=False)

    def test_emtpy_string(self):
        # NOTE(review): "emtpy" is a typo in the method name; unittest
        # still discovers any test_* method, so it is left unchanged here.
        with self.assertRaises(config_reader.IniReaderError):
            IniReader(path="")
# Common code #################################################################
class TestClassReaderSelector(unittest.TestCase):
    """ReaderSelector: query several readers in order; the first reader
    that can resolve the (section, key) pair wins."""

    def test_ini_first(self):
        reader = ReaderSelector(IniReader(INI_FILE), EnvironReader(prefix="XXX"))
        self.assertEqual(reader.get("Classical", "name"), "Mozart")

    def test_environ_first(self):
        reader = ReaderSelector(EnvironReader("XXX"), IniReader(INI_FILE))
        self.assertEqual(reader.get("Baroque", "name"), "Bach")

    def test_exception(self):
        """When no reader resolves the key, a ValueError is raised."""
        reader = ReaderSelector(EnvironReader("XXX"), IniReader(INI_FILE))
        with self.assertRaises(ValueError) as context:
            reader.get("lol", "lol")
        self.assertEqual(
            str(context.exception),
            "Configuration value could not be found (section “lol” key " "“lol”).",
        )
class TestFunctionLoadReadersByKeyword(unittest.TestCase):
    """load_readers_by_keyword: build reader instances from keyword
    arguments, preserving the keyword order."""

    def test_without_keywords_arguments(self):
        # Positional arguments are rejected (keyword-only signature).
        with self.assertRaises(TypeError):
            load_readers_by_keyword(INI_FILE, "XXX")  # pylint: disable=E1121

    def test_order_ini_environ(self):
        readers = load_readers_by_keyword(ini=INI_FILE, environ="XXX")
        self.assertEqual(readers[0].__class__.__name__, "IniReader")
        self.assertEqual(readers[1].__class__.__name__, "EnvironReader")

    def test_order_environ_ini(self):
        readers = load_readers_by_keyword(
            environ="XXX",
            ini=INI_FILE,
        )
        self.assertEqual(readers[0].__class__.__name__, "EnvironReader")
        self.assertEqual(readers[1].__class__.__name__, "IniReader")

    def test_argparse_single_arguemnt(self):
        # NOTE(review): "arguemnt" is a typo in the method name; left
        # unchanged since unittest discovers any test_* method.
        readers = load_readers_by_keyword(argparse=ARGPARSER_NAMESPACE)
        self.assertEqual(readers[0].__class__.__name__, "ArgparseReader")
# Integration tests ###########################################################
class TestClassConfigReader(unittest.TestCase):
    """Integration tests: ConfigReader combines argparse, dictionary,
    environ and ini sources.  The "common.key" value demonstrates that
    the first keyword argument passed to ConfigReader takes precedence;
    the "specific.*" values show that each source can contribute keys
    the others lack."""

    def setUp(self):
        # argparser: namespace plus a "Section.key" -> dest mapping.
        parser = argparse.ArgumentParser()
        parser.add_argument("--common-key")
        parser.add_argument("--specific-argparse")
        args = parser.parse_args(
            ["--common-key", "argparse", "--specific-argparse", "argparse"]
        )
        self.argparse = (
            args,
            {"common.key": "common_key", "specific.argparse": "specific_argparse"},
        )
        # dictionary
        self.dictionary = {
            "common": {"key": "dictionary"},
            "specific": {"dictionary": "dictionary"},
        }
        # environ (prefix "YYY")
        self.environ = "YYY"
        os.environ["YYY__common__key"] = "environ"
        os.environ["YYY__specific__environ"] = "environ"
        # ini
        self.ini = os.path.join(FILES_DIR, "integration.ini")

    def tearDown(self):
        # Remove the variables set in setUp so they do not leak.
        del os.environ["YYY__common__key"]
        del os.environ["YYY__specific__environ"]

    def test_argparse_first(self):
        config_reader = ConfigReader(
            argparse=self.argparse,
            dictionary=self.dictionary,
            environ=self.environ,
            ini=self.ini,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.common.key, "argparse")

    def test_argparse_empty(self):
        """An unset argparse value falls through to the next source."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--empty-key")
        args = parser.parse_args([])
        config_reader = ConfigReader(
            argparse=(args, {}),
            dictionary={"empty": {"key": "from_dict"}},
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.empty.key, "from_dict")

    def test_dictionary_first(self):
        config_reader = ConfigReader(
            dictionary=self.dictionary,
            argparse=self.argparse,
            environ=self.environ,
            ini=self.ini,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.common.key, "dictionary")

    def test_environ_first(self):
        config_reader = ConfigReader(
            environ=self.environ,
            argparse=self.argparse,
            dictionary=self.dictionary,
            ini=self.ini,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.common.key, "environ")

    def test_ini_first(self):
        config_reader = ConfigReader(
            ini=self.ini,
            argparse=self.argparse,
            dictionary=self.dictionary,
            environ=self.environ,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.common.key, "ini")

    def test_specifiy_values(self):
        """Each source contributes its own "specific" key."""
        config_reader = ConfigReader(
            argparse=self.argparse,
            dictionary=self.dictionary,
            environ=self.environ,
            ini=self.ini,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.specific.argparse, "argparse")
        self.assertEqual(config.specific.dictionary, "dictionary")
        self.assertEqual(config.specific.environ, "environ")
        self.assertEqual(config.specific.ini, "ini")

    def test_method_get_class_interface(self):
        config_reader = ConfigReader(
            argparse=self.argparse,
            dictionary=self.dictionary,
            environ=self.environ,
            ini=self.ini,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.specific.argparse, "argparse")
        self.assertEqual(config.specific.dictionary, "dictionary")
        self.assertEqual(config.specific.environ, "environ")
        self.assertEqual(config.specific.ini, "ini")

    def test_method_get_dictionary_interface(self):
        config_reader = ConfigReader(
            argparse=self.argparse,
            dictionary=self.dictionary,
            environ=self.environ,
            ini=self.ini,
        )
        config = config_reader.get_dictionary_interface()
        self.assertEqual(config["specific"]["argparse"], "argparse")
        self.assertEqual(config["specific"]["dictionary"], "dictionary")
        self.assertEqual(config["specific"]["environ"], "environ")
        self.assertEqual(config["specific"]["ini"], "ini")

    def test_method_check_section(self):
        """check_section: ValueError for spec violations, KeyError for an
        unknown section, True when everything is configured."""
        dictionary = {
            "missing_key": {"key": "value"},
            "all_good": {"key": "value"},
            "empty": {"key": ""},
        }
        spec = {
            "missing_key": {  # section
                "key": {  # key
                    "not_empty": True,
                },
                "not_configured_key": {  # key
                    "not_empty": False,
                },
            },
            "all_good": {  # section
                "key": {  # key
                    "not_empty": True,
                }
            },
            "empty": {  # section
                "key": {  # key
                    "not_empty": True,
                }
            },
        }
        config_reader = ConfigReader(
            spec=spec,
            dictionary=dictionary,
        )
        self.assertTrue(config_reader.check_section("all_good"))
        with self.assertRaises(ValueError):
            config_reader.check_section("missing_key")
        with self.assertRaises(KeyError):
            config_reader.check_section("xxx")
        with self.assertRaises(ValueError):
            config_reader.check_section("empty")

    def test_spec_defaults(self):
        """A spec "default" fills in values absent from every source."""
        dictionary = {
            "no_default": {
                "key": "No default value",
            },
        }
        spec = {
            "default": {
                "key": {
                    "description": "A default value",
                    "default": 123,
                },
            },
            "no_default": {
                "key": {
                    "description": "No default value",
                },
            },
        }
        config_reader = ConfigReader(
            spec=spec,
            dictionary=dictionary,
        )
        config = config_reader.get_class_interface()
        self.assertEqual(config.no_default.key, "No default value")
        self.assertEqual(config.default.key, 123)

    def test_method_spec_to_argparse(self):
        """spec_to_argparse adds --section-key options with spec defaults."""
        spec = {
            "email": {
                "smtp_login": {
                    "description": "The SMTP login name",
                    "default": "user1",
                },
            },
        }
        config_reader = ConfigReader(spec=spec)
        parser = argparse.ArgumentParser()
        config_reader.spec_to_argparse(parser)
        args = parser.parse_args([])
        self.assertEqual(args.email_smtp_login, "user1")
        args = parser.parse_args(["--email-smtp-login", "user2"])
        self.assertEqual(args.email_smtp_login, "user2")
class TestTypes(unittest.TestCase):
    """Values read from types.ini are coerced to native Python types
    (ints, floats, literals such as lists/tuples/dicts, booleans, None);
    strings that are not valid literals stay strings."""

    def setUp(self):
        config_reader = ConfigReader(ini=os.path.join(FILES_DIR, "types.ini"))
        self.config = config_reader.get_class_interface()

    def test_int(self):
        self.assertEqual(self.config.types.int, 1)

    def test_float(self):
        self.assertEqual(self.config.types.float, 1.1)

    def test_str(self):
        self.assertEqual(self.config.types.str, "Some text")

    def test_list(self):
        self.assertEqual(self.config.types.list, [1, 2, 3])

    def test_tuple(self):
        self.assertEqual(self.config.types.tuple, (1, 2, 3))

    def test_dict(self):
        self.assertEqual(self.config.types.dict, {"one": 1, "two": 2})

    def test_code(self):
        # Valid Python source stays a plain string (it is not executed).
        self.assertEqual(self.config.types.code, "print('lol')")

    def test_invalid_code(self):
        # Unparseable input also stays a plain string.
        self.assertEqual(self.config.types.invalid_code, "print('lol)'")

    def test_bool(self):
        self.assertEqual(self.config.types.bool, True)

    def test_empty_string(self):
        self.assertEqual(self.config.types.empty_str, "")

    def test_none(self):
        self.assertEqual(self.config.types.none, None)

    def test_zero(self):
        self.assertEqual(self.config.types.zero, 0)

    def test_false(self):
        self.assertEqual(self.config.types.false, False)

    def test_false_str(self):
        # The quoted string "false" is distinct from the boolean False.
        self.assertEqual(self.config.types.false_str, "false")
| [
"josef@friedrich.rocks"
] | josef@friedrich.rocks |
be9f7d8b3ac111643c48d86d2142203de3228393 | 7b383cab8f9708dd9bc00c939cbab9600c0ca894 | /UP2/NCS1/USBCam/Classes/NCS1.py | 87c91137d99cfd455a3f827b54d10b7e35d6f62c | [
"MIT"
] | permissive | amirunpri2018/TassAI | 3f05b1b848d6d93fe491761589352bc0521496c3 | 3451f34d8973b67a823784e7db8fde03b274a60d | refs/heads/master | 2022-12-22T06:48:46.024068 | 2020-10-01T15:43:25 | 2020-10-01T15:43:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,806 | py | ######################################################################################################
#
# Organization: Asociacion De Investigacion En Inteligencia Artificial Para La Leucemia Peter Moss
# Project: UP2 NCS1 Facial Recognition USB Security System
#
# Author: Adam Milton-Barker (AdamMiltonBarker.com)
#
# Title: NCS1 Class
# Description: NCS1 helper functions.
# License: MIT License
# Last Modified: 2020-09-28
#
######################################################################################################
import os, json, cv2, dlib, imutils
import numpy as np
from datetime import datetime
from imutils import face_utils
from mvnc import mvncapi as mvnc
from Classes.Helpers import Helpers
class NCS1():
    """ NCS1 Class

    Helper around an Intel Neural Compute Stick 1 (mvnc API v1) used for
    facial recognition: loads a graph, encodes the known-user images from
    disk, and matches camera frames against those encodings.
    """

    def __init__(self):
        """ Initializes the class.

        Connects to the NCS1 device, loads the inference graph and
        pre-encodes all known-user images, so construction performs I/O
        and may call quit() if no device is present (see check()).
        """

        self.Known = []

        self.Helpers = Helpers("NCS1")

        # dlib face detector plus a landmark predictor configured from
        # the "Classifier.Dlib" setting.
        self.Detector = dlib.get_frontal_face_detector()
        self.Predictor = dlib.shape_predictor(
            self.Helpers.confs["Classifier"]["Dlib"])

        self.check()
        self.load()
        self.preprocess()

        self.Helpers.logger.info("NCS1 class initialized.")

    def check(self):
        """ Checks for an NCS1 device; exits the process if none found. """

        #mvnc.SetGlobalOption(mvnc.GlobalOption.LOGLEVEL, 2)
        devices = mvnc.EnumerateDevices()
        if len(devices) == 0:
            self.Helpers.logger.info(
                "No Neural Compute Stick 1 devices, exiting")
            # NOTE(review): quit() terminates the whole process; raising
            # an exception would be friendlier to embedding applications.
            quit()
        self.ncs1 = mvnc.Device(devices[0])
        self.ncs1.OpenDevice()
        self.Helpers.logger.info("Connected to Neural Compute Stick 1")

    def load(self):
        """ Loads the NCS1 graph file onto the device. """

        with open(self.Helpers.confs["Classifier"]["Graph"], mode='rb') as f:
            graphFile = f.read()

        self.Helpers.logger.info("Loaded NCS1 graph")

        self.graph = self.ncs1.AllocateGraph(graphFile)

    def preprocess(self):
        """ Encodes the known users' images into (user, encoding) pairs. """

        self.encoded = []

        # Loops through all images in the security folder
        for filename in os.listdir(self.Helpers.confs["Classifier"]["Known"]):
            # Only process the configured image extensions
            if filename.lower().endswith(tuple(self.Helpers.confs["Classifier"]["Allowed"])):
                fpath = os.path.join(
                    self.Helpers.confs["Classifier"]["Known"], filename)
                # The file name (without extension) is the user id
                user = os.path.splitext(filename)[0]
                raw, frame = self.prepareImg(cv2.imread(fpath))
                # Store the user id together with the NCS1 encoding
                self.encoded.append((user, self.infer(frame)))

        self.Helpers.logger.info("Known data preprocessed!")

    def faces(self, image):
        """ Finds faces and their landmark coordinates in an image. """

        # Find faces
        faces = self.Detector(image, 0)
        # Gets landmark coordinates for each detected face
        coords = [self.Predictor(image, face) for face in faces]

        return faces, coords

    def prepareImg(self, frame):
        """ Resizes a frame to 640x480 and returns (raw copy, frame). """

        frame = cv2.resize(frame, (640, 480))
        raw = frame.copy()

        return raw, frame

    def processImg(self, img):
        """ Resizes to the network input size and whitens for inference. """

        dims = 160
        resized = cv2.resize(img, (dims, dims))
        processed = self.whiten(resized)

        return processed

    def whiten(self, grayscaled):
        """ Returns a whitened (zero-mean, unit-variance) image array. """

        mean = np.mean(grayscaled)
        std_dev = np.std(grayscaled)
        # Guard against a near-zero standard deviation
        std_adjusted = np.maximum(std_dev, 1.0 / np.sqrt(grayscaled.size))
        whitened_image = np.multiply(np.subtract(grayscaled, mean), 1 / std_adjusted)

        return whitened_image

    def infer(self, img):
        """ Runs the image through the NCS1 and returns the output tensor. """

        self.graph.LoadTensor(self.processImg(img).astype(np.float16), None)
        output, userobj = self.graph.GetResult()

        return output

    def match(self, frame, coords):
        """ Checks a frame for matches against the known users.

        Returns (person_id, confidence); person_id 0 means intruder.
        """

        msg = ""
        person = 0
        confidence = 0

        # Encode the current frame once: it is invariant across the loop
        # below (the original re-ran device inference per known user).
        encoded = self.infer(frame)

        # Loops through known encodings
        for enc in self.encoded:
            recognize = self.compare(enc[1], encoded)
            # If known
            if recognize[0] == True:
                person = int(enc[0])
                confidence = recognize[1]
                msg = "TassAI identified User #" + str(person)
                break

        if(person == 0):
            msg = "TassAI identified an intruder"

        self.Helpers.logger.info(msg)

        return person, confidence

    def compare(self, face1, face2):
        """ Determines whether two encodings match.

        Always returns a (matched, distance) tuple so callers can index
        the result.
        """

        if (len(face1) != len(face2)):
            self.Helpers.logger.info("Distance Missmatch")
            # Bug fix: the original returned a bare False here, which made
            # match() crash on recognize[0]/recognize[1]; always return a
            # (bool, distance) pair.
            return False, 0

        # Squared Euclidean distance between the two encodings
        tdiff = 0
        for index in range(0, len(face1)):
            diff = np.square(face1[index] - face2[index])
            tdiff += diff

        if (tdiff < 1.3):
            self.Helpers.logger.info("Calculated Match: " + str(tdiff))
            return True, tdiff
        else:
            self.Helpers.logger.info("Calculated Mismatch: " + str(tdiff))
            return False, tdiff
| [
"adammiltonbarker@eu.techbubbletechnologies.com"
] | adammiltonbarker@eu.techbubbletechnologies.com |
7c95681e4f7516d837f24831474d047624855e90 | 7cc312f65eb2d6af491e8b13ef57ff3ba2babf7f | /leetcode/python/palindrome-number.py | b8299c0c1fb21889b6b1e45498653348e650f966 | [] | no_license | tangwz/leetcode | df1c978999f4ed76254eb82be08785957052dca9 | 38e860a2fd103958d12a64a87e32f8c9c20d0a59 | refs/heads/master | 2021-01-22T13:13:04.181337 | 2019-11-26T08:00:39 | 2019-11-26T08:00:39 | 30,180,759 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # I think this problem is meanless
class Solution(object):
    def isPalindrome(self, x):
        """Return True if the integer x reads the same forwards and backwards.

        Negative numbers are never palindromes (the leading '-' has no
        mirror).  The number is reversed arithmetically, digit by digit,
        and compared with the original.

        :type x: int
        :rtype: bool
        """
        if x < 0:
            return False
        original = x
        reversed_digits = 0
        while x:
            reversed_digits = reversed_digits * 10 + x % 10
            # Bug fix: use floor division; plain "/" yields a float under
            # Python 3 and the loop would never terminate correctly.
            x //= 10
        # Covers x == 0 as well: 0 reversed is 0.
        return reversed_digits == original
| [
"tangwz.com@gmail.com"
] | tangwz.com@gmail.com |
739e7233ae6e85a915862e2c952ccf33b1711970 | f776848c59ddfeb045b44b18901df81e75fd11a2 | /mysite/settings.py | 8371ef36b06bdce0a716277040fc96a54222bf81 | [] | no_license | bianak/my-first-blog | 44fc2b84802f83997ebe4fd55a49278c1aebc9c5 | f9c0f9a086a44f4d3a8bab16b7e5d5e036528c45 | refs/heads/master | 2020-12-31T04:56:12.538888 | 2016-05-01T12:21:09 | 2016-05-01T12:21:09 | 57,597,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n3s7o5^ymtnf27$3-4&zimb%a^@n^-*#m_6^av&mva^b%)%9m8'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'

# Fix: 'Asia/Tel_Aviv' is only a deprecated backward-compatibility link
# in the IANA tz database and is missing from minimal tzdata builds;
# 'Asia/Jerusalem' is the canonical (and equivalent) zone name.
TIME_ZONE = 'Asia/Jerusalem'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"bianak@gmail.com"
] | bianak@gmail.com |
1496cc8b9437c5d0ef5c0a07d256fbdbce3fd1b5 | c78ea793abf6c910d92dde56a48d0c880e5ba174 | /python/unionfind2.py | 288cc110cf2387a0e3a5f3bff5a05ba121f96c9a | [] | no_license | bdomokos74/Snippets | 9d10c29f2a99396ecf988cf79cd1d87303221596 | 3c2ce5e8f13802a768738830ca28884ba1975e0c | refs/heads/master | 2021-01-20T07:50:59.805027 | 2013-11-05T22:01:53 | 2013-11-05T22:01:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,875 | py | '''
unionfind.py
A class that implements the Union Find data structure and algorithm. This
data structure allows one to find out which set an object belongs to, as well
as join two sets.
The algorithm's performance, given m union/find operations of any ordering, on
n elements has been shown to take log* time per operation, where log* is
pronounced log-star, and is the INVERSE of what is known as the Ackerman
function, which is given below:
A(0) = 1
A(n) = 2**A(n-1)
I include the functions to be complete. Note that we can be 'inefficient'
when performing the inverse ackerman function, as it will only take a maximum
of 6 iterations to perform; A(5) is 65536 binary digits long (a 1 with 65535
zeroes following). A(6) is 2**65536 binary digits long, and cannot be
represented by the memory of the entire universe.
The Union Find data structure is not a universal set implementation, but can
tell you if two objects are in the same set, in different sets, or you can
combine two sets.
ufset.find(obja) == ufset.find(objb)
ufset.find(obja) != ufset.find(objb)
ufset.union(obja, objb)
This algorithm and data structure are primarily used for Kruskal's Minimum
Spanning Tree algorithm for graphs, but other uses have been found.
August 12, 2003 Josiah Carlson
'''
def Ackerman(inp, memo={0: 1}):
    """Return A(inp) for the tower function A(0)=1, A(n)=2**A(n-1).

    The shared mutable default ``memo`` is deliberate: it memoises
    results across calls.  Values above A(5) are astronomically large
    (A(6) has 2**65536 binary digits) and are refused with an exception
    after printing their symbolic form.
    """
    inp = max(int(inp), 0)
    if inp in memo:
        return memo[inp]
    elif inp <= 5:
        # Bug fix: the recursive call used the misspelled lowercase name
        # "ackerman", which raised NameError for any uncached inp > 0.
        memo[inp] = 2 ** Ackerman(inp - 1)
        return memo[inp]
    else:
        # Too big to compute; show the value symbolically instead.
        # (print(...)/raise Exception(...) work on both Python 2 and 3,
        # unlike the original Python-2-only statement forms.)
        print("Such a number is not representable by all the subatomic\nparticles in the universe.")
        Ackerman(4)
        out = (inp - 4) * "2**" + str(memo[4])
        print(out)
        raise Exception("NumberCannotBeRepresentedByAllSubatomicParticlesInUniverse")
def inverseAckerman(inp):
    """Return the smallest t such that Ackerman(t) >= inp (i.e. log*)."""
    candidate = 0
    while True:
        if Ackerman(candidate) >= inp:
            return candidate
        candidate += 1
class UnionFind:
    # Disjoint-set (union-find) over arbitrary hashable objects with
    # path compression (in find) and union by weight.
    # NOTE(review): __str__ uses xrange/itervalues, so this class is
    # Python 2 only as written.

    def __init__(self, n):
        '''\
Create a union find data structure pre-populated with the integers
0..n-1, each in its own singleton set.'''
        self.num_weights = {}      # root's internal number -> tree size
        self.parent_pointers = {}  # internal number -> parent's number
        self.num_to_objects = {}   # internal number -> original object
        self.objects_to_num = {}   # original object -> internal number
        self.__repr__ = self.__str__
        self.insert_objects([i for i in range(n)])
        self.cnt = n               # number of disjoint sets
    def insert_objects(self, objects):
        '''\
Insert a sequence of objects into the structure. All must be Python hashable.'''
        # find() registers unknown objects as singleton sets.
        for object in objects:
            self.find(object);
    def find(self, object):
        '''\
Find the root of the set that an object is in.
If the object was not known, will make it known, and it becomes its own set.
Object must be Python hashable.'''
        if not object in self.objects_to_num:
            # First sighting: register as its own singleton set.
            obj_num = len(self.objects_to_num)
            self.num_weights[obj_num] = 1
            self.objects_to_num[object] = obj_num
            self.num_to_objects[obj_num] = object
            self.parent_pointers[obj_num] = obj_num
            return object
        # Walk up to the root, recording the path...
        stk = [self.objects_to_num[object]]
        par = self.parent_pointers[stk[-1]]
        while par != stk[-1]:
            stk.append(par)
            par = self.parent_pointers[par]
        # ...then compress: point every visited node directly at the root.
        for i in stk:
            self.parent_pointers[i] = par
        return self.num_to_objects[par]
    def union(self, object1, object2):
        '''\
Combine the sets that contain the two objects given.
Both objects must be Python hashable.
If either or both objects are unknown, will make them known, and combine them.'''
        o1p = self.find(object1)
        o2p = self.find(object2)
        if o1p != o2p:
            on1 = self.objects_to_num[o1p]
            on2 = self.objects_to_num[o2p]
            w1 = self.num_weights[on1]
            w2 = self.num_weights[on2]
            # Union by weight: attach the lighter tree under the heavier.
            if w1 < w2:
                o1p, o2p, on1, on2, w1, w2 = o2p, o1p, on2, on1, w2, w1
            self.num_weights[on1] = w1+w2
            del self.num_weights[on2]
            self.parent_pointers[on2] = on1
            self.cnt -= 1
    def __str__(self):
        '''\
Included for testing purposes only.
All information needed from the union find data structure can be attained
using find.'''
        # Group members by the internal number of their root.
        sets = {}
        for i in xrange(len(self.objects_to_num)):
            sets[i] = []
        for i in self.objects_to_num:
            sets[self.objects_to_num[self.find(i)]].append(i)
        out = []
        for i in sets.itervalues():
            if i:
                out.append(repr(i))
        return ', '.join(out)
    def count(self):
        # Number of disjoint sets among the n pre-inserted elements.
        # NOTE(review): cnt is not updated by insert_objects/find for
        # objects added after construction — confirm before relying on it.
        return (self.cnt)
    def connected(self, a, b):
        # True when a and b share the same set root.
        return(self.find(a)==self.find(b))
if __name__ == '__main__':
print "Testing..."
uf = UnionFind()
az = "abcdefghijklmnopqrstuvwxyz"
az += az.upper()
uf.insert_objects(az)
import random
cnt = 0
while len(uf.num_weights) > 20:
cnt += 1
uf.union(random.choice(az), random.choice(az))
print uf, cnt
print "Testing complete."
| [
"bdomokos@Domokoss-MacBook-Pro.local"
] | bdomokos@Domokoss-MacBook-Pro.local |
fd3ad031be0d00161054e0f826fde716d99ac206 | 52c4ebb5bfa5239093b0199e23b576aa2e54807a | /ecommerce/products/admin.py | a4845c5f3795439bab70b96415e7b13b60cf2e06 | [] | no_license | KUSH23/ecommerce | f216556a82da66df31a0ae8c8ccb2a5692d5efa6 | 7fe9249be88aca72894686bee5c9cd1e74226eba | refs/heads/master | 2020-07-07T16:57:08.641542 | 2019-08-20T16:27:48 | 2019-08-20T16:27:48 | 203,413,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | from django.contrib import admin
from .models import Product
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product: list view shows str() and slug."""

    list_display = ['__str__', 'slug']

    # NOTE(review): ModelAdmin does not read an inner Meta class — this
    # pattern belongs to ModelForm and is presumably ignored here;
    # confirm and consider removing.
    class Meta:
        model = Product


admin.site.register(Product, ProductAdmin)
"kushalsai2007@gmail.com"
] | kushalsai2007@gmail.com |
2de76a36c9302294182913712a4cbdbd7f90c964 | 9e1f60a867f66b1f4e4fc84fa4252c581e5e1a36 | /Chapter09/test_state_1.py | e33eaec766eaab31fbfd5891365def90b52cc707 | [
"MIT"
] | permissive | PacktPublishing/Clean-Code-in-Python | c216e002485b8cd7736f97b59215a3930f35359a | 7348d0f9f42871f499b352e0696e0cef51c4f8c6 | refs/heads/master | 2023-06-10T13:40:33.331115 | 2023-05-30T17:48:09 | 2023-05-30T17:48:09 | 145,072,942 | 523 | 181 | MIT | 2023-05-30T17:48:10 | 2018-08-17T04:48:38 | Python | UTF-8 | Python | false | false | 1,489 | py | """Clean Code in Python - Chapter 9: Common Design Patterns
> Test State
"""
import unittest
from state_1 import Closed, InvalidTransitionError, Merged, MergeRequest, Open
class TestMergeRequestTransitions(unittest.TestCase):
    """State-pattern transitions of MergeRequest (Open/Closed/Merged)."""

    def setUp(self):
        self.mr = MergeRequest("develop", "master")

    def test_reopen(self):
        # Re-opening resets the approval count to zero.
        self.mr.approvals = 3
        self.mr.open()
        self.assertEqual(self.mr.approvals, 0)

    def test_open_to_closed(self):
        # Closing also discards pending approvals.
        self.mr.approvals = 2
        self.assertIsInstance(self.mr.state, Open)
        self.mr.close()
        self.assertEqual(self.mr.approvals, 0)
        self.assertIsInstance(self.mr.state, Closed)

    def test_closed_to_open(self):
        self.mr.close()
        self.assertIsInstance(self.mr.state, Closed)
        self.mr.open()
        self.assertIsInstance(self.mr.state, Open)

    def test_double_close(self):
        # Closing twice is a no-op, not an error.
        self.mr.close()
        self.mr.close()

    def test_open_to_merge(self):
        self.mr.merge()
        self.assertIsInstance(self.mr.state, Merged)

    def test_merge_is_final(self):
        # A merged request can be neither reopened nor closed.
        self.mr.merge()
        regex = "already merged request"
        self.assertRaisesRegex(InvalidTransitionError, regex, self.mr.open)
        self.assertRaisesRegex(InvalidTransitionError, regex, self.mr.close)

    def test_cannot_merge_closed(self):
        self.mr.close()
        self.assertRaises(InvalidTransitionError, self.mr.merge)


if __name__ == "__main__":
    unittest.main()
| [
"35489117+gaurav-packt@users.noreply.github.com"
] | 35489117+gaurav-packt@users.noreply.github.com |
0f964bce45a9a34367e7441978d9d150039184a5 | 9ae33e19ec06a95b537a1e71973f52463c654628 | /vampire-get.py | 478154238b1fcb4fa7daa4c97d4ba0965247587c | [
"BSD-2-Clause"
] | permissive | VerosK/samba-vampire-drivers | e38913aa2fb8aaa7f83219c8af22fe86610386bb | 924c735b247e216967db4834f16aa200e3c631a7 | refs/heads/master | 2020-05-02T13:01:50.634299 | 2013-09-15T19:31:54 | 2013-09-15T19:31:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,122 | py | #!/usr/bin/env python
import pexpect
from ConfigParser import ConfigParser
from optparse import OptionParser
import os
import re
import json
from zipfile import ZipFile
import StringIO
from pprint import pprint
import tempfile
import logging
# Patterns for parsing rpcclient output (enumdrivers / getdriver).
reDRIVER_IGNORE = re.compile(r'Server does not support environment .*$')  # harmless server notice
reDRIVER_ARCH = re.compile(r'\[(?P<arch>Windows .*)\]$')                  # architecture header line
reDRIVER_START = re.compile(r'Printer Driver Info 1:$')                   # record start (info level 1)
reDRIVER_NAME = re.compile(r'Driver Name: \[(?P<name>[-_A-Za-z0-9 ]+)\]$')
reDRIVER_FILE_START3 = re.compile(r'Printer Driver Info 3:$')             # record start (info level 3)
reDRIVER_FILE_VERSION = re.compile(r'Version: \[3\]$')
reDRIVER_FILE_ITEM = re.compile(r'(?P<key>.+): \[(?P<value>.+)\]$')       # generic "key: [value]" line
# UNC path of a driver file: \\server\print$\<relative path>
reDRIVER_PATH = re.compile(r'\\\\(?P<name>[^\\]+)\\(?P<share>print\$)\\'\
                           '(?P<path>.*)$')
# Keys expected at most once per driver record.
DRIVER_KEYS = ['Version','Architecture','Driver Path', 'Datafile',
               'Configfile', 'Helpfile', 'Driver Name', 'Monitorname',
               'Defaultdatatype'
               ]
# Keys that may repeat; their values are collected into lists.
DRIVER_EXTRA = ['Dependentfiles']
# Keys whose values name files that must be downloaded into the archive.
DRIVER_FILES = ['Driver Path', 'Configfile', 'Helpfile', 'Datafile',
                'Dependentfiles']
class SrcDriver(object):
    '''
    Printer driver wrapper
    '''
    # Lazily loads driver metadata (rpcclient getdriver) and the driver
    # files (smbclient), and can package both into an in-memory ZIP.
    # NOTE: this script is Python 2 (StringIO module, print statements).
    def __init__(self, host, printer, driverName):
        self._host = host            # SrcHost used to run remote commands
        self.printer = printer       # owning SrcPrinter
        self.driverName = driverName
        self._driverInfo = None      # lazy cache for driverInfo
        self._driverArchive = None   # lazy cache for driverArchive
    def __repr__(self):
        return "<SrcDriver '%s' id=0x%x>" % (self.driverName, id(self))
    @property
    def driverInfo(self):
        """Parsed `getdriver` output (dict), fetched on first access."""
        if self._driverInfo is not None:
            return self._driverInfo
        self._driverInfo = self._loadDriverInfo()
        return self._driverInfo
    @property
    def driverArchive(self):
        """ZIP archive bytes with all driver files, built on first access."""
        if self._driverArchive is not None:
            return self._driverArchive
        self._driverArchive = self._loadDriverArchive()
        return self._driverArchive
    @property
    def archiveName(self):
        """Suggested file name: '<Driver Name>---<Architecture>.zip'."""
        retv = "%s---%s.zip" % \
               (self.driverInfo['Driver Name'],
                self.driverInfo['Architecture'])
        return retv.replace(' ','_')
    def saveArchive(self, filename=None, directory=None):
        """Write the driver ZIP to directory/filename (both default sensibly)."""
        if directory is None:
            directory = os.getcwd()
        if filename is None:
            filename = self.archiveName
        target = os.path.join(directory, filename)
        # NOTE(review): the handle is not closed explicitly; CPython
        # refcounting closes it right away, which this script relies on.
        open(target, 'wb').write(self.driverArchive)
    @property
    def printerName(self):
        # Name of the printer this driver belongs to.
        return self.printer.name
    def _loadDriverInfo(self):
        """Run `rpcclient -c 'getdriver <printer>'` and parse key/value lines."""
        cmd = self._host._prepareCommandList()
        cmd.append('''-c 'getdriver "%s" ' ''' % self.printerName)
        command = ' '.join(cmd)
        logging.info('Loading driver info for "%s"' % self.printerName)
        logging.debug('Run: %s' % command)
        output = pexpect.run(command)
        #
        driverInfo = {}
        logging.info('Parsing driver info')
        for ln in output.split('\n'):
            if not ln.strip(): continue
            ln = ln.strip()
            # Skip structural header lines.
            if reDRIVER_FILE_START3.match(ln): continue #
            if reDRIVER_ARCH.match(ln): continue #
            if reDRIVER_FILE_ITEM.match(ln):
                m = reDRIVER_FILE_ITEM.match(ln)
                k,v = m.group('key'), m.group('value')
                if k in DRIVER_KEYS:
                    # Scalar keys must not repeat within one record.
                    assert k not in driverInfo, k
                    driverInfo[k] = v
                elif k in DRIVER_EXTRA:
                    # Repeatable keys (Dependentfiles) accumulate in a list.
                    if k not in driverInfo:
                        driverInfo[k] = [v]
                    else:
                        driverInfo[k].append(v)
                else:
                    # Unknown key: fail loudly so the parser gets extended.
                    assert k in DRIVER_KEYS or \
                           k in DRIVER_EXTRA, k
                continue
            # Anything else must be an ignorable server notice.
            assert reDRIVER_IGNORE.match(ln), ln
        return driverInfo
    def _loadDriverArchive(self):
        """Download every driver file and zip them plus a driverinfo.json manifest."""
        def _filePath(unc_path):
            # Strip the '\\server\print$\' prefix from a UNC path.
            m = reDRIVER_PATH.match(unc_path)
            assert m, unc_path
            return m.group('path')
        def _niceName(fname):
            # Lower-cased, forward-slash archive member name.
            return fname.lower().replace('\\', '/')
        logging.info('Getting driver files for "%s"' % self.driverName)
        files = {}
        for k,v in self.driverInfo.items():
            if k not in DRIVER_FILES:
                continue
            if type(v) == type(''):
                v = [v]  # normalise a scalar path to a one-element list
            for fname in v:
                fname = _filePath(fname)
                logging.debug('Downloading file "%s"' % fname)
                filedata = self._host._downloadFile(fname)
                files[fname] = filedata
        # prepare ZIP (in memory; StringIO is the Python 2 module)
        logging.debug('Creating driver archive file')
        archive_file = StringIO.StringIO()
        archive = ZipFile(archive_file, 'a')
        for fname in files:
            target_name = _niceName(fname)
            archive.writestr(target_name, files[fname])
        # append JSON manifest with archive-relative file names
        logging.debug('Creating driver JSON file')
        driver_info = {}
        for k,v in self._driverInfo.items():
            if k not in DRIVER_FILES:
                driver_info[k] = v
                continue
            if type(v) == type(''):
                driver_info[k] = _niceName(_filePath(v))
            else:
                driver_info[k] = \
                    [_niceName(_filePath(l)) for l in v]
        json_info = json.dumps(driver_info, indent=4)
        archive.writestr('driverinfo.json', json_info)
        archive.close()
        logging.debug('Archive file created')
        #
        return archive_file.getvalue()
class SrcPrinter(object):
    '''
    Printer wrapper.
    '''
    def __init__(self, path, name, driverName, comment, host):
        self.path = path              # full UNC printer path
        self.name = name              # short printer name (last path part)
        self.driverName = driverName
        self.comment = comment
        self._driver = None           # lazy cache for `driver`
        self.host = host              # owning SrcHost
    @staticmethod
    def fromDict(a_dict, host):
        """Build a SrcPrinter from one parsed enumprinters record.

        Expects at least 'name', 'description' and 'comment' keys; the
        driver name is the second comma-separated field of 'description'.
        """
        assert 'name' in a_dict, a_dict
        assert 'description' in a_dict, a_dict
        assert host is not None
        name = a_dict['name'].split('\\')[-1]
        driverName = a_dict['description'].split(',')[1]
        return SrcPrinter(
            path=a_dict['name'],
            name=name,
            driverName = driverName,
            comment = a_dict['comment'],
            host=host)
    def __repr__(self):
        return "<SrcPrinter '%s' [%s] host=%s id=0x%x>" % (self.name, self.driverName,
                                                           self.host.name, id(self))
    @property
    def driver(self):
        """SrcDriver for this printer, created on first access and cached."""
        if self._driver is not None:
            return self._driver
        self._driver = SrcDriver(host=self.host, printer=self, driverName=self.driverName)
        return self._driver
class SrcHost(object):
    """A source SMB print server, accessed via rpcclient and smbclient."""

    def __init__(self, host, options):
        self.host = host          # hostname of the source server
        self.options = options    # parsed CLI options (address, credentials)
        self._printers = None     # lazy cache for `printers`
        self._drivers = None      # lazy cache for `drivers()`
    @property
    def name(self): return self.host
    @property
    def printers(self):
        """List of SrcPrinter objects, enumerated once and cached."""
        if self._printers is None:
            self._printers = self._loadPrinterList()
        return self._printers
    def drivers(self):
        """Drivers announced by the host (note: a method, unlike `printers`)."""
        if self._drivers is None:
            self._drivers = self._loadDriverList()
        return self._drivers
    def _loadPrinterList(self):
        """Run `rpcclient -c enumprinters` and parse its output."""
        logging.info('Enumerating printers')
        cmd = self._prepareCommandList()
        cmd.append('-c "enumprinters"')
        command = ' '.join(cmd)
        logging.debug(command)
        output = pexpect.run(command)
        #
        values = {}
        printers = []
        logging.debug('Parsing response')
        for ln in output.split('\n'):
            if not ln.strip(): continue
            # Each line looks like "key:[value]".
            parts = ln.strip().split(':',1)
            key = parts[0]
            assert parts[1][0]=='[',`parts[1]`
            assert parts[1][-1]==']',`parts[1]`
            value = parts[1][1:-1]
            # 'flags' starts a new printer record, 'comment' terminates it.
            if key == 'flags':
                assert len(values) == 0, values
            values[key] = value
            if key == 'comment':
                a_printer = SrcPrinter.fromDict(values,
                                                host=self)
                printers.append(a_printer)
                values = {}
        logging.debug('Printer list created')
        return printers
    def _loadDriverList(self):
        """Run `rpcclient -c enumdrivers` and collect unique driver names."""
        logging.info('Enumerating drivers')
        cmd = self._prepareCommandList()
        cmd.append('-c "enumdrivers"')
        command = ' '.join(cmd)
        logging.debug(command)
        output = pexpect.run(command)
        #
        logging.debug('Parsing response')
        drivers = {}
        for ln in output.split('\n'):
            if not ln.strip(): continue
            ln = ln.strip()
            if reDRIVER_IGNORE.match(ln): continue
            if reDRIVER_START.match(ln): continue
            if reDRIVER_ARCH.match(ln): continue #
            if reDRIVER_NAME.match(ln):
                m = reDRIVER_NAME.match(ln)
                name = m.group('name')
                if name in drivers: continue
                # NOTE(review): SrcDriver.__init__ takes (host, printer,
                # driverName); there is no `name` keyword, so this call
                # would raise TypeError if drivers() were ever exercised.
                a_driver = SrcDriver(host=self, name=name)
                drivers[name] = a_driver
                continue
            assert reDRIVER_IGNORE.match(ln), ln
        return drivers.values()
    def _downloadFile(self, file_name):
        """Fetch one file from the host's print$ share into memory via smbclient."""
        #
        cmd = ['smbclient', '//%s/print$' % self.host]
        if self.options.source_address:
            cmd.append('-I %s' % self.options.source_address)
        if self.options.source_user and self.options.source_password:
            cmd.append('-U "%s"%%"%s"' %
                       (self.options.source_user,
                        self.options.source_password))
        # Download into a temp file, read it back, then delete it.
        _,output_name = tempfile.mkstemp()
        # NOTE(review): the opening single quote of 'get ... is never
        # closed -- confirm smbclient tolerates the unbalanced quote.
        cmd.append("-E -c 'get \"%s\" \"%s\"" % (file_name, output_name))
        #
        command = ' '.join(cmd)
        pexpect.run(command)
        output = open(output_name, 'rb').read()
        os.unlink(output_name)
        return output
    def _prepareCommandList(self):
        """Base rpcclient command with optional address and credentials."""
        cmd = ['rpcclient', self.host]
        if self.options.source_address:
            cmd.append('-I %s' % self.options.source_address)
        if self.options.source_user and self.options.source_password:
            cmd.append('-U "%s"%%"%s"' %
                       (self.options.source_user,
                        self.options.source_password))
        return cmd
    @staticmethod
    def fromOptions(options):
        """Build a SrcHost from parsed CLI options."""
        host = options.source_hostname  # NOTE(review): unused local
        return SrcHost(host=options.source_hostname, options=options)
    def __repr__(self):
        return "<SrcHost '%s' id=0x%x>" % (self.host, id(self))
def parseArguments():
    """Parse CLI options, seeded with defaults from vampire.ini [config].

    Returns the (options, args) pair from optparse and configures the
    root logger level from the -v count.
    """
    config = ConfigParser()
    config.read(['vampire.ini'])
    parser = OptionParser()
    parser.add_option('-s', '--host', '--source-host', dest='source_hostname',
                      metavar='HOSTNAME',
                      help='Host to copy drivers from')
    parser.add_option('-a', '--address', '--source-address', dest='source_address',
                      metavar='IP_ADDRESS',
                      help='IP address of source host')
    parser.add_option('-u', '--user', '--source-user', dest='source_user',
                      metavar='USERNAME',
                      help='User for source host')
    parser.add_option('-p', '--password', '--source-password', dest='source_password',
                      metavar='PASSWORD',
                      help='Password for source host')
    parser.add_option('-v', '--verbose', action='count', dest='verbosity',
                      metavar='LOG_LEVEL',
                      help='Increase verbosity (multiple times allowed)')
    # set defaults
    # NOTE: requires a [config] section in vampire.ini, otherwise
    # ConfigParser raises NoSectionError.
    parser.set_defaults(**dict(config.items('config')))
    # parse
    opts,args = parser.parse_args()
    # set verbosity level
    # NOTE(review): with no -v and no ini default, opts.verbosity is None;
    # `None >= 2` only evaluates (to False) on Python 2, which this
    # script targets.
    log_level = logging.WARNING
    if opts.verbosity == 1:
        log_level = logging.INFO
    elif opts.verbosity >= 2:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level)
    return opts, args
def main():
    """Enumerate printers on the source host and save each driver's ZIP."""
    options,args = parseArguments()
    src = SrcHost.fromOptions(options)
    printers = src.printers
    pprint(printers)
    for pr in printers:
        # Python 2 print statements (the whole script is Python 2).
        print 'Loading driver for %s' % pr.name
        dr = pr.driver
        print 'Loading driver files %s' % dr.driverName
        dr.saveArchive()
if __name__ == '__main__':
    main()
| [
"kaplan.v@czechglobe.cz"
] | kaplan.v@czechglobe.cz |
63fb876f6910592a3479aeab743cdf4899f4e6bc | d4dda5ef5c808afff4b91d1fb2b7a4c2c13c68cf | /sampling.py | efbe8b4d5b5b7120b24d0ac749c6faf363488edc | [] | no_license | Sandeep-AnilKumar/Kaggle-TwoSigma-Rental-Interest | 7474f409db234add035ef9b7b8d0eb449dba903e | f667cfbd1e9fed843c7012b95f08f6a1e6d84876 | refs/heads/master | 2021-01-19T10:15:23.050156 | 2017-04-19T17:47:54 | 2017-04-19T17:47:54 | 87,845,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,275 | py | import pandas as pd
import time
import csv
from sklearn.ensemble import RandomForestClassifier as rfc
import numpy as np
from nltk.stem import PorterStemmer
import re
import xgboost as xgb
# Silence divide-by-zero / NaN warnings from numpy.
np.seterr(divide='ignore', invalid='ignore')
stemmer = PorterStemmer()
# Stemmed keyword vocabulary; each entry becomes a 0/1 indicator column
# on the listings DataFrames.
features = ['garag', 'cinema', 'hardwood', 'air condit', 'modern', 'share', 'dog', 'landlord', 'unassign', '24 hour',
            'fireplac', 'onsit', 'cat', 'storag', 'spaciou', 'huge', 'privat', 'fee', 'yoga', 'fit', 'pet', 'open',
            'dishwash', 'walk', 'attach', 'state', 'includ', 'inunit', 'new', 'washer', 'gym', 'deposit', 'AC', 'queen',
            'private terrac', 'twentyfour', 'attend', 'underpr', 'brand', 'upon', 'valet', 'free', 'courtyard', 'dryer',
            'laundri', 'ok', 'granit', 'overs', 'pool', 'avail', 'convert', 'onli', 'central', 'gift', 'wifi', 'renov',
            'air', 'walkin', 'duplex', 'common', 'elev', 'unknown']
start_time = time.time()
train_df_total = pd.read_json('train.json')
# Initialise every indicator column to 0; add_features() flips matches to 1.
for feature in features:
    train_df_total[feature] = 0
def add_features(fe):
    """Set the one-hot keyword columns of row `fe` in train_df_total.

    For each raw feature string of the listing, normalise its words one
    by one; the first word whose stem occurs in an entry of the global
    `features` list flags that column, then the next feature string is
    processed.
    """
    f_list = train_df_total.loc[fe, "features"]
    if not f_list:
        return
    for f in f_list:
        # Bug fix: `found` was initialised once for the whole row, so
        # after the first match only the first word of every later
        # feature string was ever examined. Reset it per string.
        found = False
        for f_word in f.split(" "):
            f_word = f_word.lower().strip()
            f_word = f_word.replace("\"", "").replace("'", "")
            # Bug fix: strip punctuation with a character class; the old
            # pattern r'(#\*!,\.\-)*' only matched the literal sequence
            # "#*!,.-" and therefore removed nothing.
            f_word = re.sub(r"[#*!,.\-]", "", f_word)
            for fea in features:
                if stemmer.stem(f_word) in fea:
                    train_df_total.loc[fe, fea] = 1
                    found = True
                    break
            if found:
                break
    return
def add_test_features(fe):
    """Set the one-hot keyword columns of row `fe` in test_df.

    Mirror of add_features() for the test DataFrame: normalise each word
    of each raw feature string and flag the first matching entry of the
    global `features` list, then move on to the next feature string.
    """
    f_list = test_df.loc[fe, "features"]
    if not f_list:
        return
    for f in f_list:
        # Bug fix: `found` was initialised once per row, so after the
        # first match only the first word of every later feature string
        # was examined. Reset it per string.
        found = False
        for f_word in f.split(" "):
            f_word = f_word.lower().strip()
            f_word = f_word.replace("\"", "").replace("'", "")
            # Bug fix: character class instead of the literal-sequence
            # pattern r'(#\*!,\.\-)*', which matched nothing useful.
            f_word = re.sub(r"[#*!,.\-]", "", f_word)
            for fea in features:
                if stemmer.stem(f_word) in fea:
                    test_df.loc[fe, fea] = 1
                    found = True
                    break
            if found:
                break
    return
# --- Training data -------------------------------------------------------
# Flag the one-hot keyword columns for every training listing.
fea_list = train_df_total.index.values.tolist()
for fea_l in fea_list:
    add_features(fea_l)
# Simple count-based engineered features.
train_df_total['num_photos'] = train_df_total['photos'].apply(len)
train_df_total['num_features'] = train_df_total['features'].apply(len)
train_df_total['num_description'] = train_df_total['description'].apply(lambda x: len(x.split(" ")))
# DRY: the indicator columns are exactly the global `features` list, in
# the same order the original spelled out inline.
base_cols = ['bathrooms', 'bedrooms', 'num_photos', 'num_features', 'num_description',
             'latitude', 'longitude', 'manager_id', 'price', 'street_address']
train_df_vector = train_df_total.loc[:, base_cols + features + ['display_address']]
# Encode the high-cardinality string columns as integer category codes.
for cat_col in ('manager_id', 'street_address', 'display_address'):
    train_df_vector[cat_col] = train_df_vector[cat_col].astype('category')
categorical_columns = train_df_vector.select_dtypes(['category']).columns
train_df_vector[categorical_columns] = train_df_vector[categorical_columns].apply(lambda x: x.cat.codes)
train_df_target = train_df_total.loc[:, 'interest_level']
print("Done with training data")
# --- Test data -----------------------------------------------------------
test_df = pd.read_json('test.json')
# Initialise the indicator columns, then flag them per listing.
for feature in features:
    test_df[feature] = 0
fea_list = test_df.index.values.tolist()
for fea_l in fea_list:
    add_test_features(fea_l)
# Same engineered count features as the training side.
test_df['num_photos'] = test_df['photos'].apply(len)
test_df['num_features'] = test_df['features'].apply(len)
test_df['num_description'] = test_df['description'].apply(lambda x: len(x.split(" ")))
# DRY: reuse the global `features` list instead of repeating 60 names;
# column order matches the original inline spelling.
base_cols = ['bathrooms', 'bedrooms', 'num_photos', 'num_features', 'num_description',
             'latitude', 'longitude', 'manager_id', 'price', 'street_address']
test_df_vector = test_df.loc[:, base_cols + features + ['display_address']]
for cat_col in ('manager_id', 'street_address', 'display_address'):
    test_df_vector[cat_col] = test_df_vector[cat_col].astype('category')
categorical_columns = test_df_vector.select_dtypes(['category']).columns
test_df_vector[categorical_columns] = test_df_vector[categorical_columns].apply(lambda x: x.cat.codes)
test_df_ids = list(test_df['listing_id'])
print("Done with testing data")
# --- Model and submission ------------------------------------------------
# (A commented-out xgb.train() parameter dict was removed; the equivalent
# settings are passed directly to XGBClassifier below.)
reg = xgb.XGBClassifier(objective='multi:softprob', max_depth=6, silent=False, min_child_weight=1, subsample=0.7,
                        colsample_bylevel=0.7, seed=312, n_estimators=2000)
reg.fit(train_df_vector, train_df_target)
predict = reg.predict_proba(test_df_vector)
print("Time for the XGBoost Classifier to train and predict on the testing data is := %.2f" % (time.time() -
                                                                                               start_time))
# Write the Kaggle submission. Columns are reordered from the classifier's
# class order into (high, medium, low) -- presumably classes sort as
# (high, low, medium); TODO confirm against reg.classes_.
# Fixes: the file is now closed even on error (with-block), and the
# pointless `index += 1` inside the old range() loop is gone.
with open("submissions_new_xgboost.csv", 'w') as csv_file:
    wr = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_NONE)
    wr.writerow(['listing_id', 'high', 'medium', 'low'])
    for listing_id, row in zip(test_df_ids, predict):
        wr.writerow([listing_id, row[0], row[2], row[1]])
print("Done with predicting Interest Levels for the test data")
| [
"sandeepa@yahoo-inc.com"
] | sandeepa@yahoo-inc.com |
506f233829f19442d95d33bf7a1e1cd4473855f1 | 5d54668f3a8c05b0c07b28d56ce80c4261ff2652 | /opencv-master/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py | 502457b471af6d9d51493147bb467b941d1f136e | [
"BSD-3-Clause",
"MIT"
] | permissive | AmitHasanShuvo/Face-Detection-with-Image-Enchantments | 43ff70d0885d0d3dd2909919d873fc907bdd4fba | 468e12c35db047d36cd7815d46576137528da47a | refs/heads/master | 2023-04-05T18:29:25.044797 | 2020-07-09T20:22:03 | 2020-07-09T20:22:03 | 277,646,487 | 3 | 1 | MIT | 2021-04-20T23:54:27 | 2020-07-06T20:52:31 | C++ | UTF-8 | Python | false | false | 2,469 | py | from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
# Trackbar ranges and window titles shared by both callbacks.
erosion_size = 0
max_elem = 2           # element-type trackbar: 0..2 (rect / cross / ellipse)
max_kernel_size = 21   # size trackbar: value n yields a (2n+1)x(2n+1) kernel
title_trackbar_element_type = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
title_trackbar_kernel_size = 'Kernel size:\n 2n +1'
title_erosion_window = 'Erosion Demo'
title_dilatation_window = 'Dilation Demo'
def erosion(val):
    """Trackbar callback: erode the global `src` and refresh its window."""
    size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window)
    shape_idx = cv.getTrackbarPos(title_trackbar_element_type, title_erosion_window)
    # Map the trackbar position to an OpenCV element shape (default 0,
    # exactly as the original if/elif chain left it).
    shape = {0: cv.MORPH_RECT, 1: cv.MORPH_CROSS, 2: cv.MORPH_ELLIPSE}.get(shape_idx, 0)
    kernel = cv.getStructuringElement(shape, (2 * size + 1, 2 * size + 1), (size, size))
    cv.imshow(title_erosion_window, cv.erode(src, kernel))
def dilatation(val):
    """Trackbar callback: dilate the global `src` and refresh its window."""
    size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilatation_window)
    shape_idx = cv.getTrackbarPos(title_trackbar_element_type, title_dilatation_window)
    # Map the trackbar position to an OpenCV element shape (default 0,
    # exactly as the original if/elif chain left it).
    shape = {0: cv.MORPH_RECT, 1: cv.MORPH_CROSS, 2: cv.MORPH_ELLIPSE}.get(shape_idx, 0)
    kernel = cv.getStructuringElement(shape, (2 * size + 1, 2 * size + 1), (size, size))
    cv.imshow(title_dilatation_window, cv.dilate(src, kernel))
# CLI: load the input image, then wire up one window with two trackbars
# per operation; the callbacks re-render on every trackbar change.
parser = argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.')
parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image: ', args.input)
    exit(0)
cv.namedWindow(title_erosion_window)
cv.createTrackbar(title_trackbar_element_type, title_erosion_window , 0, max_elem, erosion)
cv.createTrackbar(title_trackbar_kernel_size, title_erosion_window , 0, max_kernel_size, erosion)
cv.namedWindow(title_dilatation_window)
cv.createTrackbar(title_trackbar_element_type, title_dilatation_window , 0, max_elem, dilatation)
cv.createTrackbar(title_trackbar_kernel_size, title_dilatation_window , 0, max_kernel_size, dilatation)
# Render once with the default settings, then block until a key press.
erosion(0)
dilatation(0)
cv.waitKey()
| [
"kaziamithasan89@gmail.com"
] | kaziamithasan89@gmail.com |
fd4d0a99bfba98753367c2dee4d75fda61fa6e52 | 128c0d2924f0c5a97346b5a05ea3e4c138b00c57 | /opencv/chapter16/HoughLines.py | 8e5ed09bcab15e090b4d4b4406ff19e9a82d3add | [] | no_license | GOD-TEN/MyNote | af0d6f8ae698c13c062750ef7931150eb2347a88 | d7737952f098cd9b145c52e672211da8f05dd785 | refs/heads/master | 2023-05-30T08:19:45.882762 | 2021-06-24T08:42:32 | 2021-06-24T08:42:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Hough line detection demo: edge map -> cv2.HoughLines -> draw lines.
img = cv2.imread("computer.jpg")
gray = cv2.cvtColor(img , cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize=3)
# Matplotlib expects RGB; keep an untouched copy for the side-by-side plot.
orgb =cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
oShow = orgb.copy()
# Accumulator resolution: 1 px and 1 degree; a line needs 140 votes.
# NOTE(review): HoughLines returns None when nothing is detected; the
# loop below would then raise TypeError.
lines = cv2.HoughLines(edges, 1, np.pi/180, 140)
for line in lines:
    rho,theta = line[0]
    # Convert (rho, theta) into two points far apart on the same line.
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0+1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0-1000*(-b))
    y2 = int(y0-1000*(a))
    cv2.line(orgb,(x1,y1),(x2,y2),(0,0,255),2)
# Show the original and the annotated image side by side.
plt.subplot(121)
plt.imshow(oShow)
plt.axis('off')
plt.subplot(122)
plt.imshow(orgb)
plt.axis('off')
plt.show()
| [
"1348800462@qq.com"
] | 1348800462@qq.com |
c84e27f853a3739f2c3a75ee9f54b29eab09c342 | a07248a9c15105dd7fad728d18fd381dea5ed17f | /aurevoir.py | 33a21a50e1bcd5bfe156d5807f98e13a1c8a3eb3 | [] | no_license | battyone/aurevoir | 207e84fd8aea68f4e864c93ed54a4cf782274016 | 143f637db81d35ad3e607bca17dd865791dd73e9 | refs/heads/master | 2020-09-28T21:48:55.414057 | 2017-01-23T04:21:31 | 2017-01-23T04:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | from flask import Flask, render_template
from logging import getLogger, basicConfig, DEBUG, INFO, WARNING
import os
from threading import Lock
from zeroconf import ServiceInfo, Zeroconf, ServiceBrowser
class RootListener(object):
    """Listens on the meta service type and starts one ServiceBrowser
    (with its own ServiceListener) per service type seen on the LAN."""

    def remove_service(self, z, service_type, service_name):
        """Log that the last announcer of a service type went away."""
        log.info("We're told that the last {} stopped announcing.".format(service_name))

    def add_service(self, z, service_type, service_name):
        """Start browsing a newly seen service type, at most once."""
        log.info("Discovered new service type {}".format(service_name))
        if service_name not in browsers:
            browsers[service_name] = ServiceBrowser(z, service_name, ServiceListener())
        else:
            log.info("Nothing to do, we already had a listener for {}".format(service_name))
class ServiceListener(object):
    """Zeroconf listener for one concrete service type.

    Resolved entries live in the module-level ``services`` dict, which is
    guarded by the module-level ``lock``.
    """

    def remove_service(self, z, service_type, service_name):
        """Forget a service instance that stopped announcing itself."""
        log.info("Removed {}".format(service_name))
        # Bug fix: the original did `del self.root_listener.services[...]`,
        # but no `root_listener` attribute is ever assigned on this class,
        # so every removal raised AttributeError. Entries actually live in
        # the module-level `services` dict.
        with lock:
            services.pop(service_name, None)

    def add_service(self, z, service_type, service_name):
        """Resolve a newly announced service and register it."""
        log.info("Discovered {}".format(service_name))
        # Resolve before taking the lock: this call may block on the network.
        info = z.get_service_info(service_type, service_name)
        if info is None:
            # Robustness: get_service_info() returns None on timeout.
            log.warning("Could not resolve info for {}; skipping".format(service_name))
            return
        if len(info.address) == 4:
            info.ipv4 = '.'.join(str(ord(b)) for b in info.address)
            log.debug("Expanded {!r} to {}".format(info.address, info.ipv4))
        else:
            log.warning("Address {!r} length != 4; setting IPV4 field to empty string!".format(info.address))
            info.ipv4 = ''
        # `with` releases the lock even if logging raises; the original
        # acquire()/release() pair could leave the lock held forever.
        with lock:
            services[service_name] = info
            log.debug("We now have {} services".format(len(services)))
log = getLogger(__name__)
basicConfig(level = DEBUG if os.environ.get("FLASK_DEBUG") else INFO)
# Shared mDNS state: one Zeroconf instance, one browser per service type,
# and the resolved services rendered by the web UI.
zeroconf = Zeroconf()
root_listener = RootListener()
services = {}   # service name -> resolved ServiceInfo (with .ipv4 added)
browsers = {}   # service type -> ServiceBrowser
lock = Lock()   # guards `services`
# Meta-browse: this special type announces every service type on the LAN.
root_browser = ServiceBrowser(zeroconf, "_services._dns-sd._udp.local.", root_listener)
app = Flask(__name__)
@app.route('/')
def index():
    """Render the service table from the current snapshot of `services`."""
    return render_template("index.html", services=services)
| [
"jerome.petazzoni@gmail.com"
] | jerome.petazzoni@gmail.com |
2145526225efcedfd80d26083e233b193732814f | 8ff6c3e513e17be6c51b484bed81d03150bdd175 | /2013-01-facegif/cutface.py | afb58002b5ace80bf5625eccd582ac4bee62a9f2 | [] | no_license | ricbit/Oldies | f1a2ac520b64e43d11c250cc372d526e9febeedd | 2d884c61ac777605f7260cd4d36a13ed5a2c6a58 | refs/heads/master | 2023-04-27T20:35:19.485763 | 2023-04-26T04:45:44 | 2023-04-26T04:45:44 | 2,050,140 | 40 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | import cv
import os
import sys
def DetectFaces(image):
    """Run the Haar frontal-face cascade on an image (legacy ``cv`` API).

    Returns the raw ((x, y, w, h), neighbours) detections from
    cv.HaarDetectObjects.
    """
    # NOTE: expects haarcascade_frontalface_alt.xml in the working directory.
    cascade = cv.Load('haarcascade_frontalface_alt.xml')
    storage = cv.CreateMemStorage(0)
    return cv.HaarDetectObjects(
        image, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING)
def cut_image(image):
    """Detect faces in a BGR image and return each as a 92x112 grayscale crop."""
    grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
    #cv.EqualizeHist(grayscale, grayscale)
    faces = DetectFaces(grayscale)
    ans = []
    for face in faces:
        x, y, dx, dy = face[0]
        # Crop the detected rectangle around its centre point...
        cropped = cv.CreateMat(dx, dy, cv.CV_8UC1)
        cv.GetRectSubPix(grayscale, cropped, (x + dx / 2, y + dy / 2))
        # ...then scale to 92x112 (the AT&T/ORL face-database size).
        resized = cv.CreateImage((92, 112), 8, 1)
        cv.Resize(cropped, resized)
        ans.append(resized)
    return ans
def main():
    """Cut faces from every image in argv[1] and save crops into argv[2].

    Output files are numbered sequentially across all input images.
    (Python 2 script: note the print statement below.)
    """
    path = sys.argv[1]
    i = 0
    for filename in os.listdir(path):
        fullpath = os.path.join(path, filename)
        print fullpath
        image = cv.LoadImage(fullpath)
        for cut in cut_image(image):
            output = os.path.join(sys.argv[2], '%d.jpg' % i)
            cv.SaveImage(output, cut)
            i += 1
if __name__ == '__main__':
    main()
| [
"bluepenguin@gmail.com"
] | bluepenguin@gmail.com |
317e5a23ae615ad38a5be8cd5a1acceaf1e40665 | 12b4051077b3c2c39f1dbdd565eacbf65eda4e9a | /benchmark2.py | ae78f23d86428e89d2c302db814e579b649cef60 | [
"BSD-3-Clause"
] | permissive | villawang/Continual_Learning_CV | b715295dfb66ee2f2fcd75a3874d63ac20cf8996 | 6715fa9c741df920e56aede11cbb85a4be41871e | refs/heads/master | 2022-10-08T09:29:51.004471 | 2020-06-08T09:51:26 | 2020-06-08T09:51:26 | 264,915,681 | 0 | 0 | BSD-3-Clause | 2020-05-18T11:15:51 | 2020-05-18T11:15:50 | null | UTF-8 | Python | false | false | 9,171 | py | import os, sys, shutil
# Rebuild the benchmark-2 task layout. For every data split, each
# (category, segment) pair maps onto one of 36 sequential task folders:
# illumination -> task1-9, occlusion -> task10-18, pixel -> task19-27,
# clutter -> task28-36. The loops reproduce the exact source/destination
# pairs and copy order of the original 108 flat shutil.copytree() calls.
_CATEGORIES = ("illumination", "occlusion", "pixel", "clutter")

for _split in ("train", "test", "validation"):
    for _cat_idx, _category in enumerate(_CATEGORIES):
        for _segment in range(1, 10):
            _task = _cat_idx * 9 + _segment  # 1-36 within this split
            shutil.copytree(
                "./{}/{}/segment{}".format(_split, _category, _segment),
                "./benchmark2/sequence/{}/task{}".format(_split, _task))
| [
"noreply@github.com"
] | villawang.noreply@github.com |
43144455fedd5a060eab88def5055aa00f29455f | 9db36c25c175cc5f064dbe2300a9e92a2c2684b6 | /students/KevinCavanaugh/session8/Lab8-6 Dynamic SQL Transactions.py | a35e3e459caa2f4b5d1741177b245062cade5d60 | [] | no_license | KevCav91/Python210-W19 | 2507e52922e8f59d5d4e5c9615c1048bbd22b945 | 8000611c99b18b15c2992192dc11127d6775e350 | refs/heads/master | 2020-04-19T21:28:27.083710 | 2019-03-13T03:59:20 | 2019-03-13T03:59:20 | 168,441,925 | 0 | 0 | null | 2019-01-31T01:20:45 | 2019-01-31T01:20:44 | null | UTF-8 | Python | false | false | 9,184 | py | import sqlite3
from sqlite3 import Error as sqlErr
def create_connection(db_file):
    """Open (creating if needed) a SQLite database and return the connection.

    :param db_file: path to the database file, or ':memory:' for an in-memory DB
    :return: an open sqlite3.Connection
    :raises Exception: wrapping any SQLite or general error
    """
    try:
        connection = sqlite3.connect(db_file)
        print('SQLite Version is: ', sqlite3.version)
        return connection
    except sqlErr as se:
        raise Exception('SQL Error in create_connection(): ' + se.__str__())
    except Exception as e:
        raise Exception('General Error in create_connection(): ' + e.__str__())
def execute_sql_code(db_con = None, sql_code=''):
    """Execute one SQL statement on an open connection and commit.

    :param db_con: an open sqlite3.Connection
    :param sql_code: the SQL statement to execute
    :return: the cursor used, so callers can iterate results (caller closes it)
    :raises Exception: if the connection/SQL is missing or execution fails
    """
    try:
        if db_con is not None and sql_code != '':
            csr = db_con.cursor()
            csr.execute(sql_code)
            db_con.commit()
        else:
            raise Exception('SQL Code or Connection is missing!')
    except sqlErr as se:
        # BUG FIX: these messages previously claimed create_connection(),
        # which made error reports point at the wrong function.
        raise Exception('SQL Error in execute_sql_code(): ' + se.__str__())
    except Exception as e:
        raise Exception('General Error in execute_sql_code(): ' + e.__str__())
    return csr
def create_table_code(name_of_table, col_names=None):
    """Build a CREATE TABLE statement with all-text columns.

    :param name_of_table: name of the table to create
    :param col_names: list of column names (at least one required)
    :return: the CREATE TABLE SQL string
    :raises Exception: if no column names are supplied
    """
    sql_str = ''
    try:
        # BUG FIX: the default used to be the mutable list [None], so the
        # guard below never fired and a nonsense 'None [text]' column was
        # generated when no columns were passed.
        if not col_names:
            raise Exception('You must provide at least one column!')
        else:
            sql_str = 'CREATE TABLE ' + name_of_table + '('
            for col in col_names:
                sql_str += str(col) + ' [text], '
            sql_str = sql_str[0:-2] + ');' # Strip off the last comma
    except Exception as e:
        raise Exception('Error in create_table_code(): ' + e.__str__())
    return sql_str
def create_select_code(name_of_table, col_names=None):
    """Build a SELECT statement for the given table and columns.

    :param name_of_table: table to select from
    :param col_names: list of column names (at least one required)
    :return: the SELECT SQL string
    :raises Exception: if no column names are supplied
    """
    sql_str = ''
    try:
        # BUG FIX: the default used to be [None], which produced
        # 'SELECT None' instead of triggering this guard.
        if not col_names:
            raise Exception('You must provide at least one column name!')
        else:
            sql_str = 'SELECT \n'
            for col in col_names:
                sql_str += str(col) + ', '
            sql_str = sql_str[0:-2] + '\n' # Strip off the last comma
            sql_str += 'FROM ' + name_of_table + ';'
    except Exception as e:
        raise Exception('Error in create_select_code(): ' + e.__str__())
    return sql_str
def create_insert_code(name_of_table, col_names=None, col_values=None):
    """Build an INSERT statement for the given table, columns, and values.

    :param name_of_table: table to insert into
    :param col_names: list of column names (at least one required)
    :param col_values: list of literal column values (at least one required)
    :return: the INSERT SQL string
    :raises Exception: if no column names or no values are supplied
    """
    sql_str = ''
    try:
        # BUG FIX: both defaults used to be [None], so the guards below
        # never fired and 'None' leaked into the generated SQL.
        if not col_names:
            raise Exception('You must provide at least one column name!')
        else:
            sql_str = 'INSERT INTO ' + str(name_of_table).strip() + '\n('
            for col in col_names:
                sql_str += str(col) + ', '
            sql_str = sql_str[0:-2] + ')' # Strip off the last comma
            if not col_values:
                raise Exception('You must provide at least one column value!')
            else:
                sql_str += '\nVALUES\n('
                for col in col_values:
                    sql_str += str(col) + ', '
                sql_str = sql_str[0:-2] + ');' # Strip off the last comma
    except Exception as e:
        raise Exception('Error in create_insert_code(): ' + e.__str__())
    return sql_str
def create_update_code(name_of_table, col_names=None, col_values=None, where_col = None, where_equals_value = None):
    """Build an UPDATE statement restricted by a single WHERE equality.

    :param name_of_table: table to update
    :param col_names: list of column names (at least one required)
    :param col_values: list of new values, one per column
    :param where_col: column name for the WHERE clause (required)
    :param where_equals_value: value the WHERE column must equal (required)
    :return: the UPDATE SQL string
    :raises Exception: on missing columns/values, length mismatch, or
        missing WHERE parts
    """
    sql_str = ''
    try:
        # BUG FIX: the list defaults used to be [None], so the emptiness
        # guards below never fired for the no-argument case.
        if not col_names:
            raise Exception('You must provide at least one column name!')
        elif not col_values:
            raise Exception('You must provide at least one column value!')
        elif len(col_names) != len(col_values):
            raise Exception('You must provide one value for each column')
        elif where_col is None or where_equals_value is None:
            raise Exception('You must provide a where column and an equals value')
        else:
            sql_str = 'UPDATE ' + str(name_of_table).strip() + '\nSET\n\t'
            counter = 0
            while counter < len(col_names):
                sql_str += str(col_names[counter]).strip() \
                           + ' = ' + str(col_values[counter]).strip() + ', \n\t'
                counter += 1
            sql_str = (sql_str.strip())[0:-1] + '' # Strip off the last comma
            sql_str += '\nWHERE ' + where_col + " = " + where_equals_value
    except Exception as e:
        raise Exception('Error in create_update_code(): ' + e.__str__())
    return sql_str
def create_delete_code(name_of_table, where_col = None, where_equals_value = None):
    """Build a DELETE statement restricted by a single WHERE equality.

    :param name_of_table: table to delete from
    :param where_col: column name for the WHERE clause (required)
    :param where_equals_value: value the WHERE column must equal (required)
    :return: the DELETE SQL string
    :raises Exception: if either WHERE part is missing
    """
    try:
        if where_col is None or where_equals_value is None:
            raise Exception('You must provide a where column and an equals value')
        sql_str = ('DELETE FROM ' + str(name_of_table).strip()
                   + '\nWHERE ' + where_col + " = " + str(where_equals_value).strip())
    except Exception as e:
        raise Exception('Error in create_delete_code(): ' + e.__str__())
    return sql_str
def main_menu():
    """Print the interactive menu of supported database operations."""
    menu_lines = (
        '\n' + '=' * 50,
        "Choose an option by number: ",
        "\t 1 = Create or Connect to a new file database",
        "\t 2 = Create a new memory database",
        "\t 3 = Create a new table",
        "\t [s] = Select from table",
        "\t [i] = Insert into table",
        "\t [u] = Update in table",
        "\t [d] = Delete from table",
        'Type exit to quit program!',
        '=' * 50 + '\n',
    )
    print('\n'.join(menu_lines))
if __name__ == '__main__':
    # Interactive REPL: show the menu, read a choice, and dispatch to the
    # matching SQL-builder helper until the user types "exit".
    dbconnection = None
    while True:
        try:
            main_menu()
            choice = input("Option: ").strip()
            if choice == '1':
                fn = input("Enter file name and path: ").strip()
                dbconnection = create_connection(fn)
            elif choice == '2':
                dbconnection = create_connection(':memory:')
            elif choice == '3':
                t = input("Enter a name for the table: ").strip()
                cols = input("Enter a comma separated list of column names (col1,col2,etc...): ").strip()
                sql = create_table_code(t, cols.split(','))
                opt = input('\nPreview:\n\n\t' + sql + '\n\nCreate the following table?(y/n):')
                if opt.lower() == 'y':
                    execute_sql_code(db_con=dbconnection, sql_code=sql).close() # Close Cursor
                else:
                    print('Info ->\tTable creation cancelled!')
            elif choice == 's':
                t = input("Enter a name for the table: ").strip()
                cols = input("Enter a comma separated list of column names (col1,col2,etc...): ").strip()
                sql = create_select_code(t, cols.split(','))
                print('\nCode Used : ' + sql + '\n')
                csrData = execute_sql_code(db_con=dbconnection, sql_code=sql) # Don't close cursor
                for row in csrData:
                    for col in row:
                        print(col, end=' | ')
                    print()
                csrData.close() # Now close cursor!
            elif choice == 'i':
                t = input("Enter a name for the table: ").strip()
                cols = input("Enter a comma separated list of column names (col1,col2,etc...): ").strip()
                colvals = input("Enter a comma separated list of column VALUES (col1,col2,etc...): ").strip()
                sql = create_insert_code(t, cols.split(','), colvals.split(','))
                opt = input('\nPreview:\n\n' + sql + '\n\nInsert this data?(y/n):')
                if opt.lower() == 'y':
                    execute_sql_code(db_con=dbconnection, sql_code=sql).close() # Close Cursor
            elif choice == 'u':
                t = input("Enter a name for the table: ").strip()
                cols = input("Enter a comma separated list of column names (col1,col2,etc...): ").strip()
                colvals = input("Enter a comma separated list of column VALUES (col1,col2,etc...): ").strip()
                wc = input("Enter one WHERE column Name: ").strip()
                wv = input("Enter one WHERE column Equals Value: ").strip()
                sql = create_update_code(t, cols.split(','), colvals.split(','),where_col=wc, where_equals_value=wv)
                opt = input('\nPreview:\n\n' + sql + '\n\nUpdate this data?(y/n):')
                if opt.lower() == 'y':
                    execute_sql_code(db_con=dbconnection, sql_code=sql).close() # Close Cursor
            elif choice == 'd':
                t = input("Enter a name for the table: ").strip()
                wc = input("Enter one WHERE column Name: ").strip()
                wv = input("Enter one WHERE column Equals Value: ").strip()
                sql = create_delete_code(t, where_col=wc, where_equals_value=wv)
                opt = input('\nPreview:\n\n' + sql + '\n\nDelete this data?(y/n):')
                if opt.lower() == 'y':
                    execute_sql_code(db_con=dbconnection, sql_code=sql).close() # Close Cursor
            elif choice.lower() == 'exit':
                break
            else:
                print('Please enter a number for the option you want!')
        except Exception as e:
            print('Error ->\t', e.__str__())
    # BUG FIX: this used to be "dbconnection()", which *called* the
    # Connection object (TypeError) instead of closing it, and also
    # failed with TypeError when no connection was ever opened.
    if dbconnection is not None:
        dbconnection.close()
| [
"kevincavanau@gmail.com"
] | kevincavanau@gmail.com |
aecde9e7abcdbfa41ccb962cbb2953491d867875 | 586a7372bc4f26082653e0a50926b5687dd9de40 | /src/tokens.py | 34c0ee689a4f8c96a19f316913f1084cf27ab981 | [] | no_license | mbucc/qiftoiif | 70ffe3a8e217d86a819d7e42225b2c5901d55168 | 75bc7ed3b5af58636e8520b5f2d0fa494f898b98 | refs/heads/master | 2016-09-05T14:02:38.539623 | 2010-03-10T00:56:31 | 2010-03-10T00:56:31 | 540,123 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,321 | py | #
# Copyright (c) 2010 Mark Bucciarelli <mkbucc@gmail.com>
#
# Permission to use, copy, modify, and distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys
import ply.lex as lex
#
# QIF Tokens:
#
# ref: http://en.wikipedia.org/wiki/Quicken_Interchange_Format
# read on Sun Jan 31 13:05:15 EST 2010
#
# Names of every token the lexer can emit; each entry must be matched by a
# corresponding t_<NAME> rule (function or string) defined below.
tokens = (
    #
    # Record types. Currently, we only support checking + investment.
    #
    'ASSET',
    'CASH',
    'CHECKING',
    'CREDIT_CARD',
    'END_RECORD',
    'INVESTMENT',
    'LIABILITY',
    'QUICKEN_ACCOUNT',
    'QUICKEN_CATEGORY_LIST',
    'QUICKEN_CLASS_LIST',
    'QUICKEN_MEMORIZED_TRANSACTION_LIST',
    #
    # Used in all record types
    #
    'AMOUNT',
    'CLEARED_STATUS',
    'DATE',
    'MEMO',
    #
    # Used in Banking and Investment
    #
    'PAYEE',
    'PAYEE_ADDRESS',
    #
    # Used in Banking and Splits
    #
    'CATEGORY_OR_TRANSFER',
    'CHECK_NUMBER',
    #
    # Used in Investment.
    #
    'COMMISSION_COST',
    'INVESTMENT_ACTION',
    'SECURITY_NAME',
    'SECURITY_PRICE',
    'SHARE_QUANTITY',
    #
    # Used in Splits
    #
    'SPLIT_CATEGORY',
    'SPLIT_MEMO',
    'SPLIT_PERCENT',
    #
    # Used in Investment or Splits
    #
    'SPLIT_OR_TRANSER_AMOUNT'
)
#
# Header tokens
#
# Simple string rules: each regex matches a QIF header line that announces
# which record type the following entries belong to.
t_CASH = r'^!Type:Cash\s*'
t_CHECKING = r'^!Type:Bank\s*'
t_CREDIT_CARD = r'^!Type:CCard\s*'
t_INVESTMENT = r'^!Type:Invst\s*'
t_ASSET = r'^!Type:Oth A\s*'
t_LIABILITY = r'^!Type:Oth L\s*'
#
# Quicken-specific types--currently not supported by parser.
#
t_QUICKEN_ACCOUNT = r'^!Account\s*'
t_QUICKEN_CATEGORY_LIST = r'^!Type:Cat\s*'
t_QUICKEN_CLASS_LIST = r'^!Type:Class\s*'
t_QUICKEN_MEMORIZED_TRANSACTION_LIST = r'^!Type:Memorized\s*'
#
# Detail tokens
#
# Date. Leading zeroes on month and day can be skipped. Year can
# be either 4 digits or 2 digits or '6 (=2006).
#
# Used in all types.
# D12/25/2006
#
def t_DATE(t):
    r'D[0-9]{1,2}\/[0-9]{1,2}\/[0-9]{4}\s*'
    # Drop the 'D' field tag, trim trailing whitespace, then normalise the
    # M/D/YYYY date to ISO YYYY-MM-DD with zero-padded month and day.
    month, day, year = t.value[1:].rstrip().split('/')
    t.value = '%s-%02d-%02d' % (year, int(month), int(day))
    return t
#
# Amount of the item. For payments, a leading minus sign is required.
# For deposits, either no sign or a leading plus sign is accepted.
# Do not include currency symbols ($, , , etc.). Comma separators
# between thousands are allowed.
#
# Used in all types.
# T-1,234.50
#
def t_AMOUNT(t):
    r'T-?([0-9]{1,3},?)*([0-9]{1,3}).[0-9]{2}\s*'
    # Strip the leading 'T' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Memo -- any text you want to record about the item.
#
# Used in all types.
# Mgasoline for my car
#
def t_MEMO(t):
    r'M.*\s*'
    # Strip the leading 'M' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Cleared status. Values are blank (not cleared), "*" or "c" (cleared)
# and "X" or "R" (reconciled).
#
# Used in all types.
# CR
#
def t_CLEARED_STATUS(t):
    r'C.*\s*'
    # Strip the leading 'C' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Also, Investment Action (Buy, Sell, etc).
#
# Used in Investment
# NBuy
#
# Note:
#
# Must come before t_CHECK_NUMBER. Order of functions
# determines precedence and if this comes later check #
# always matches first.
#
def t_INVESTMENT_ACTION(t):
    r'N(Buy|ReinvDiv|Div|Sell)\s*'
    # Strip the leading 'N' field tag and trailing whitespace.  This rule
    # must stay defined before t_CHECK_NUMBER so it wins on 'N<action>'.
    t.value = t.value[1:].rstrip()
    return t
#
# Number of the check. Blank for non-check transactions.
#
# Used in Banking, Splits
# N1001
#
def t_CHECK_NUMBER(t):
    r'N([0-9]+)*\s*'
    # Strip the leading 'N' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Payee. Or a description for deposits, transfers, etc.
#
# Used in Banking, Investment
# PStandard Oil, Inc.
#
def t_PAYEE(t):
    r'P.*\s*'
    # Strip the leading 'P' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Address of Payee. Up to 5 address lines are allowed. A 6th address
# line is a message that prints on the check. 1st line is normally
# the same as the Payee line -- the name of the Payee.
#
# Used in Banking, Splits
# A101 Main St.
#
def t_PAYEE_ADDRESS(t):
    r'A.*\s*'
    # Strip the leading 'A' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Category or Transfer and (optionally) Class. The literal values
# are those defined in the Quicken Category list. SubCategories can
# be indicated by a colon (":") followed by the subcategory literal.
# If the Quicken file uses Classes, this can be indicated by a slash
# ("/") followed by the class literal. For Investments, MiscIncX or
# MiscExpX actions, Category/class or transfer/class.
#
# Used in Banking, Splits
# LFuel:car
#
def t_CATEGORY_OR_TRANSFER(t):
    r'L.*\s*'
    # Strip the leading 'L' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Split category. Same format as L (Categorization) field.
#
# Used in Splits
# Sgas from Esso
def t_SPLIT_CATEGORY(t):
    r'S.*\s*'
    # Strip the leading 'S' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Split memo -- any text to go with this split item.
#
# Used in Splits
# Ework trips
#
def t_SPLIT_MEMO(t):
    r'E.*\s*'
    # Strip the leading 'E' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Amount for this split of the item. Same format as T field.
#
# Used in Splits
# $1,000.50
#
# Also, amount transferred, if cash is moved between accounts.
#
# Used in Investment
# $25,000.00
#
#
def t_SPLIT_OR_TRANSER_AMOUNT(t):
    r'\$-?([0-9]{1,3},)*([0-9]{1,3}).[0-9]{2}\s*'
    # Strip the leading '$' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Percent. Optional -- used if splits are done by percentage.
#
# Used in Splits
# %50
#
def t_SPLIT_PERCENT(t):
    r'%[0-9]\{1,}\s*'
    # Strip the leading '%' field tag and trailing whitespace.
    # NOTE(review): the escaped brace makes '{1,}' literal text in this
    # regex, so it matches a single digit -- likely meant [0-9]{1,}; verify.
    t.value = t.value[1:].rstrip()
    return t
#
# Security name.
#
# Used in Investment
# YIDS Federal Income
def t_SECURITY_NAME(t):
    r'Y.*\s*'
    # Strip the leading 'Y' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Price.
#
# Used in Investment
# I5.125
#
def t_SECURITY_PRICE(t):
    r'I([0-9]{1,3},?)*([0-9]{1,3}).[0-9]{1,}\s*'
    # Strip the leading 'I' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Quantity of shares (or split ratio, if Action is StkSplit).
#
# Used in Investment
# Q4,896.201
#
def t_SHARE_QUANTITY(t):
    r'Q([0-9]{1,3},?)*([0-9]{1,3})*.[0-9]{1,}\s*'
    # Strip the leading 'Q' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# Commission cost (generally found in stock trades)
#
# Used in Investment
# O14.95
# O.95
#
def t_COMMISSION_COST(t):
    r'O([0-9]{1,3},?)*([0-9]{1,3})*.[0-9]{2}\s*'
    # Strip the leading 'O' field tag and trailing whitespace.
    t.value = t.value[1:].rstrip()
    return t
#
# F, Flag this transaction as a reimbursable business expense.
# Banking F???
#
# End-of-record marker: every QIF entry is terminated by a '^' line.
t_END_RECORD = r'\^\s*'
def t_error(t):
    #
    # When I was debugging, I found it more convenient to send
    # errors to stdout, as the errors came out after the last
    # successful token parse.
    #
    #print >> sys.stderr, "Illegal character '%s'" % t.value[1]
    # BUG FIX: the offending character is the *first* character of the
    # remaining input, t.value[0]; t.value[1] reported the one after it.
    # Using print() as a function also keeps this valid on Python 3.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
def lexer():
    """Build and return the PLY lexer for this module's QIF token rules.

    NullLogger discards PLY's error/warning output during construction.
    """
    return lex.lex(errorlog=lex.NullLogger())
| [
"mark@x30.crosscutmedia.com"
] | mark@x30.crosscutmedia.com |
4e6ebbffaa7463d3cfd44e97df532f2dee48d07a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2788/60595/251681.py | ce493d0bf81d1f4beb8b4ad066205aea69c75fcb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | def Test():
n=int(input())
boys=eval("["+input().strip().replace(" ",",")+"]")
m=int(input())
girls=eval("["+input().strip().replace(" ",",")+"]")
z=min(m,n)
parts=[]
j=0
if(z==n):
while(j<len(girls)):
if(check(boys[0],girls[j])):
parts.append([boys[0],girls[j]])
boys.remove(boys[0])
girls.remove(girls[j])
else:
j=j+1
else:
while(j<len(boys)):
if (check(girls[0], boys[j])):
parts.append([boys[j], girls[0]])
boys.remove(boys[j])
girls.remove(girls[0])
else:
j=j+1
print(len(parts))
def check(a, b):
    """Return True when the two values differ by at most one."""
    return -1 <= a - b <= 1
if __name__ == "__main__":
Test() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
53fa7f6b3bcd54e0d7438f5e5e06baf0bcea2920 | cb5adbdf41bb23964d0dd8fc5c04bd4385673b9a | /twobuntu/api/v1_2/urls.py | 2313b4377987ef2bf82bacf565073d1820df99d5 | [
"Apache-2.0"
] | permissive | nitstorm/2buntu-Django-Blog-Nitin-fork | 109d5a4ffeb7e646485b4730d1b1dd63e4c1257a | 64d7b09c0678cecbb9e79c8bbd9869bf4301e418 | refs/heads/master | 2021-01-18T01:03:28.407392 | 2014-07-13T05:39:48 | 2014-07-13T05:39:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from django.conf.urls import patterns, url
# URL routes for v1.2 of the API.  Uses the Django 1.x patterns() helper
# with string view names; the prefix 'twobuntu.api.v1_2.views' is
# prepended to each name to resolve the view function.
# NOTE(review): patterns() and string view references were removed in
# later Django versions -- verify the Django version before upgrading.
urlpatterns = patterns('twobuntu.api.v1_2.views',
    url(r'^articles/$', 'articles'),
    url(r'^articles/(?P<id>\d+)/$', 'article_by_id'),
    url(r'^authors/$', 'authors'),
    url(r'^authors/(?P<id>\d+)/$', 'author_by_id'),
    url(r'^authors/(?P<id>\d+)/articles/$', 'articles_by_author'),
    url(r'^categories/$', 'categories'),
    url(r'^categories/(?P<id>\d+)/articles/$', 'articles_by_category'),
)
| [
"admin@quickmediasolutions.com"
] | admin@quickmediasolutions.com |
4ab524e5911fa1a0819ce8c309fbea89a1cb8c53 | 6c564b6ca7c47d20b139eec8012773e45e629f16 | /v_env/lib/python2.7/site-packages/djangocms_blog/views.py | a18a4d6838825cd02033b14b0ac070b34afe288e | [] | no_license | blowUA/oscarbg | f76fa3c47981044b3f2f13b53d0e999eeea8369e | 321f5bd792df6369fa2092c64ae73e036c1351d5 | refs/heads/master | 2021-03-19T11:35:16.337676 | 2017-08-07T18:25:18 | 2017-08-07T18:25:18 | 96,817,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,464 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os.path
from aldryn_apphooks_config.mixins import AppConfigMixin
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.timezone import now
from django.utils.translation import get_language
from django.views.generic import DetailView, ListView
from parler.views import TranslatableSlugMixin, ViewUrlMixin
from .models import BlogCategory, Post
from .settings import get_setting
User = get_user_model()
class BaseBlogView(AppConfigMixin, ViewUrlMixin):
    """Shared base for all blog views: queryset filtering and templates."""
    model = Post
    def optimize(self, qs):
        """
        Apply select_related / prefetch_related to optimize the view queries
        :param qs: queryset to optimize
        :return: optimized queryset
        """
        return qs.select_related('app_config').prefetch_related(
            'translations', 'categories', 'categories__translations', 'categories__app_config'
        )
    def get_view_url(self):
        """Return the absolute URL for this view, built from view_url_name."""
        if not self.view_url_name:
            raise ImproperlyConfigured(
                'Missing `view_url_name` attribute on {0}'.format(self.__class__.__name__)
            )
        url = reverse(
            self.view_url_name,
            args=self.args,
            kwargs=self.kwargs,
            current_app=self.namespace
        )
        return self.request.build_absolute_uri(url)
    def get_queryset(self):
        """Return posts for this namespace/language; published-only unless
        the CMS toolbar is in edit mode.  Also records the current
        namespace config on the request."""
        language = get_language()
        queryset = self.model._default_manager.namespace(
            self.namespace
        ).active_translations(
            language_code=language
        )
        if not getattr(self.request, 'toolbar', False) or not self.request.toolbar.edit_mode:
            queryset = queryset.published()
        setattr(self.request, get_setting('CURRENT_NAMESPACE'), self.config)
        return self.optimize(queryset.on_site())
    def get_template_names(self):
        """Build the template path from the app config's prefix (or the
        default 'djangocms_blog') plus the view's base_template_name."""
        template_path = (self.config and self.config.template_prefix) or 'djangocms_blog'
        return os.path.join(template_path, self.base_template_name)
class BaseBlogListView(BaseBlogView):
    """Shared base for list views: template, context, and pagination."""
    context_object_name = 'post_list'
    base_template_name = 'post_list.html'
    def get_context_data(self, **kwargs):
        # Expose the abstract truncation length to the list templates.
        context = super(BaseBlogListView, self).get_context_data(**kwargs)
        context['TRUNCWORDS_COUNT'] = get_setting('POSTS_LIST_TRUNCWORDS_COUNT')
        return context
    def get_paginate_by(self, queryset):
        # Per-app-config page size, falling back to the global setting.
        return (self.config and self.config.paginate_by) or get_setting('PAGINATION')
class PostDetailView(TranslatableSlugMixin, BaseBlogView, DetailView):
    """Single blog post, resolved by translated slug; supports an
    alternative Facebook-style 'instant article' rendering."""
    context_object_name = 'post'
    base_template_name = 'post_detail.html'
    slug_field = 'slug'
    view_url_name = 'djangocms_blog:post-detail'
    instant_article = False
    def get_template_names(self):
        # Instant-article rendering uses its own template instead of the
        # base_template_name resolution from BaseBlogView.
        if self.instant_article:
            template_path = (self.config and self.config.template_prefix) or 'djangocms_blog'
            return os.path.join(template_path, 'post_instant_article.html')
        else:
            return super(PostDetailView, self).get_template_names()
    def get_queryset(self):
        # Unlike the base class, no namespace/language filtering here;
        # published-only unless the toolbar is in edit mode.
        queryset = self.model._default_manager.all()
        if not getattr(self.request, 'toolbar', False) or not self.request.toolbar.edit_mode:
            queryset = queryset.published()
        return self.optimize(queryset)
    def get(self, *args, **kwargs):
        # submit object to cms to get correct language switcher and selected category behavior
        if hasattr(self.request, 'toolbar'):
            self.request.toolbar.set_object(self.get_object())
        return super(PostDetailView, self).get(*args, **kwargs)
    def get_context_data(self, **kwargs):
        """Add meta tags, instant-article flag, and placeholder setting;
        also records the current post on the request."""
        context = super(PostDetailView, self).get_context_data(**kwargs)
        context['meta'] = self.get_object().as_meta()
        context['instant_article'] = self.instant_article
        context['use_placeholder'] = get_setting('USE_PLACEHOLDER')
        setattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), self.get_object())
        return context
class PostListView(BaseBlogListView, ListView):
    """Paginated list of the latest blog posts."""
    view_url_name = 'djangocms_blog:posts-latest'
class PostArchiveView(BaseBlogListView, ListView):
    """Posts filtered by optional 'year' and 'month' URL kwargs."""
    date_field = 'date_published'
    allow_empty = True
    allow_future = True
    view_url_name = 'djangocms_blog:posts-archive'
    def get_queryset(self):
        # Narrow the base queryset by month and/or year when provided.
        qs = super(PostArchiveView, self).get_queryset()
        if 'month' in self.kwargs:
            qs = qs.filter(**{'%s__month' % self.date_field: self.kwargs['month']})
        if 'year' in self.kwargs:
            qs = qs.filter(**{'%s__year' % self.date_field: self.kwargs['year']})
        return self.optimize(qs)
    def get_context_data(self, **kwargs):
        # Expose the parsed month/year (or None) and, when a year is given,
        # a representative 'archive_date' (first day of month, or Jan 1).
        kwargs['month'] = int(self.kwargs.get('month')) if 'month' in self.kwargs else None
        kwargs['year'] = int(self.kwargs.get('year')) if 'year' in self.kwargs else None
        if kwargs['year']:
            kwargs['archive_date'] = now().replace(kwargs['year'], kwargs['month'] or 1, 1)
        context = super(PostArchiveView, self).get_context_data(**kwargs)
        return context
class TaggedListView(BaseBlogListView, ListView):
    """Posts filtered by the 'tag' slug from the URL kwargs."""
    view_url_name = 'djangocms_blog:posts-tagged'
    def get_queryset(self):
        qs = super(TaggedListView, self).get_queryset()
        return self.optimize(qs.filter(tags__slug=self.kwargs['tag']))
    def get_context_data(self, **kwargs):
        # Expose the active tag slug (or None) to the template.
        kwargs['tagged_entries'] = (self.kwargs.get('tag')
                                    if 'tag' in self.kwargs else None)
        context = super(TaggedListView, self).get_context_data(**kwargs)
        return context
class AuthorEntriesView(BaseBlogListView, ListView):
    """Posts written by the author named in the 'username' URL kwarg."""
    view_url_name = 'djangocms_blog:posts-author'
    def get_queryset(self):
        # Filter by the configured USERNAME_FIELD of the user model.
        qs = super(AuthorEntriesView, self).get_queryset()
        if 'username' in self.kwargs:
            qs = qs.filter(**{'author__%s' % User.USERNAME_FIELD: self.kwargs['username']})
        return self.optimize(qs)
    def get_context_data(self, **kwargs):
        # Expose the author object itself; raises if the user is unknown.
        kwargs['author'] = User.objects.get(**{User.USERNAME_FIELD: self.kwargs.get('username')})
        context = super(AuthorEntriesView, self).get_context_data(**kwargs)
        return context
class CategoryEntriesView(BaseBlogListView, ListView):
    """Posts belonging to the blog category named in the URL kwargs."""
    # Per-instance cache for the resolved BlogCategory (see `category`).
    _category = None
    view_url_name = 'djangocms_blog:posts-category'
    @property
    def category(self):
        # Lazily look up (and cache) the category for the current language
        # by the 'category' slug from the URL kwargs.
        if not self._category:
            self._category = BlogCategory.objects.active_translations(
                get_language(), slug=self.kwargs['category']
            ).get()
        return self._category
    def get(self, *args, **kwargs):
        # submit object to cms toolbar to get correct language switcher behavior
        if hasattr(self.request, 'toolbar'):
            self.request.toolbar.set_object(self.category)
        return super(CategoryEntriesView, self).get(*args, **kwargs)
    def get_queryset(self):
        qs = super(CategoryEntriesView, self).get_queryset()
        if 'category' in self.kwargs:
            qs = qs.filter(categories=self.category.pk)
        return self.optimize(qs)
    def get_context_data(self, **kwargs):
        # Expose the resolved category object to the template.
        kwargs['category'] = self.category
        context = super(CategoryEntriesView, self).get_context_data(**kwargs)
        return context
| [
"root@barneysfarm.com.ua"
] | root@barneysfarm.com.ua |
4b4271a8ba071eebed2374c64d25ea8b0cbc123c | 127882af5e2e0af03d7a917e534c00cf844b2529 | /size.py | 3770a3ec44094606df7f6d00eaf8c2907790624b | [] | no_license | aspencerpsu/PythonCookBook-Revamped | c496562a0796238af698fe54ffa7761cae0e334b | 444f978aba91607c250da20e82a99bdfd846a7f6 | refs/heads/master | 2020-06-10T22:36:11.196928 | 2017-03-14T16:43:46 | 2017-03-14T16:43:46 | 75,855,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from subprocess import Popen, PIPE
def command(directory):
    """Return the argv list that asks `du` for a human-readable total size."""
    return ["du", "-sh", directory]
def check_size(directory):
    """Run `du -sh` on *directory* and return its textual output.

    :param directory: path to measure
    :return: stdout text on success, stderr text when `du` exits non-zero
    """
    proc = Popen(command(directory), stdout=PIPE, stderr=PIPE, universal_newlines=True)
    # BUG FIX: wait() with both streams piped can deadlock once a pipe
    # buffer fills; communicate() drains both streams while waiting.
    stdout, stderr = proc.communicate()
    return stderr if proc.returncode != 0 else stdout
def main():
    """Prompt for a directory and print its size as reported by `du`."""
    target = input("directory to check: ")
    print(check_size(directory=target))
if __name__ == "__main__":
    main()
| [
"akeem@schwartzenegger.aint.on.no.joke.sh1t"
] | akeem@schwartzenegger.aint.on.no.joke.sh1t |
9835eecd936d4de08bae53466c78dbc12ef8eb9d | 0daa9cddd8a591fcc1b077bb7d5d6a4f5b801453 | /Live Video Feed/cam.py | fe77898088cb1e62368f4c92ce5181319029f70f | [
"MIT"
] | permissive | yuvaraj-06/EM_BOT.io | 5fbfc116d584d2c7ce1bd3024542833404ad2665 | b83fe9c223b99be49c1b7c0c575c04c54a9c429d | refs/heads/master | 2022-08-29T17:09:44.199304 | 2020-04-11T12:08:54 | 2020-04-11T12:08:54 | 244,861,061 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 23,181 | py | # Web streaming example
# Source code from the official PiCamera package
# http://picamera.readthedocs.io/en/latest/recipes2.html#web-streaming
import io
import picamera
import logging
import socketserver
from threading import Condition
from http import server
import os
import RPi.GPIO as GPIO
PAGE="""\
<html>
<head>
<title>Raspberry Pi - Surveillance Camera</title>
</head>
<body>
<center><h1>Raspberry Pi - Surveillance Camera</h1></center>
<img src="stream.mjpg" width="640" height="480">
<br><br>
<br>
<button style="height: 75px; width: 120px" ><a href="/forward"><img style="height: 65px "src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRqr4Fvs3wGNhXjMQ8cV_z9AjEOGU-a8zSNKiWNssZd0EXseribaw&s"></a></button>
<img hspace="900" vspace="60" style="padding-left: 5px">
<br>
<button style="height: 75px; width: 120px" <a href="/left"><img style="height: 65px"src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQqhLLAg-aZAGnWAifSuTROl4lA6GwlSaeg7F6xvGIjdAJqhjNG2Q&s"></button>
<img hspace="800" vspace="60" style="padding-left: 5px">
<button style="height: 75px; width: 120px" <a href="/stop"><img style="height: 65px"src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxATEBUTExIVFRQXFxYXFxcXFQ8XGhUaHRUYFhUYHxUYHSggGBslGxcVITEhJSkrLjIuFx8zOjMtNygtLisBCgoKDQ0NDw8NDysZFRkrLSsrKystKysrKysrKysrKy0tKysrKysrLSsrKysrKysrKysrKysrKysrKysrKysrK//AABEIAOEA4QMBIgACEQEDEQH/xAAcAAEAAwEBAQEBAAAAAAAAAAAAAQcIBgUCAwT/xABPEAABAgIHBAYDCwgJBAMAAAABAAIDMQQGESFBYXEHElGxBRMigbPxUpGhFCMkJTJCU4OTwdEWNURUYmNz0hUXMzRDZHKCo5Ki4/DCw+H/xAAVAQEBAAAAAAAAAAAAAAAAAAAAAf/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/ALvS3gh4KMh5IJJwCE4YqJXCaS1QSTZqhNiiWqSvM0E22TXFV9ruKI0wYJDqSRfIiCCJni6yTe83WA/Nfq7CigwYJDqSRebiIIOJ4vskO83WA06xsSLEsG9EiPdm5z3E+0koIpEd73F73Oc4m0ucSSdSV+aueplQYMCFv0ljIsZwvDg1zYYnugG4ni71XT6MVcoJ/RIFn8KFf7ERnVFor8naCf0SBZ/Chfghq7QTcKJA+xhfgis6otFGrtBkKJA+xhXexDV2gyFEgfZQvwQZ0tUrRRq7QR+iQLf4UL8E/Jygj9EgE/woX4IM6otFfk5QROiQPsYX4J+TlBmaJA+yhfggzqpY4g2gkESIJBHetEirlBmaJA+yhfgvKrHUeiUmCWw4cOBEF7HsY1t/BwaO00ojw9nle+s3aNSne+XCHEJ/tODXH0+Bx1nYoNuizX0n0fFo8V0KK0te2Y4jBwOLTgVZmz2vXWbtFpT+3KHFP+JwY4+lwOMpzKsgG3RLeCg8AmQ8kEk4BCcAolcElqgkn1qbV8yzKkCyc0EqVClB8k4BRK4TUk8JqJaoEtUlqktUleZoErzNcPX+u4ooMCAQaSRebiIAOPAv4DvOAPzX+u4owMCAQaQR2nXEQAebzgMJnAGoYMKJFiBrQ6JEe64Xuc9xPEzONpQIbIkWIAA6JEe64Xuc9xPtJ4q6ah1LZQ2dbFsdSHC8zEMH5rfvOOi+qiVLZQ2dZEsdSHC8zEMei37zjoutnpzQJ6c0nok9EyCBkEyHkmQ8klcECVwSWqS1SWqBLMpK8zSV5mmZQMz5JmUzKTvMkCd5kk9OaT05pPTmg8Ct9V4VPhWGxsRoPVxLLwfRPFhxHqVF9KdHRaPFdBit3XtmMCMHA4g4FaTncF4Nb6rwadC3DY2K23q4ll7TwPFpxHfNBymzyve/u0WlO7fyYUU/O4McfS4HHG+dkyuCzb0r0bFo8V0GM3de2fAjBwOLTxVkbO69727RaU7tXCFFcflYBjz6XB2MjfMLKlqksyksykrzNAleZqQMSozKkDEoJUqLVKD5Js1US1Uk2KJXmaBK8zXCbQK8CjAwIBBpBFjnXEQAeb8sJnAH52gV4FHto9HcDHNznCwiCDhnE5exVLR4ESLEDGNc+I91wFpc5xvJt9ZJOZQRAgxIsQNaHRIj3XC8uc43m8zMyScyrsqJUxlDZ1kSx1IcO07Bg9BuXE46Kai1NZQmb77HUhw7TsGD0G5cTjpcurnpzQJ6c0nok9EyCBkEyHkmQ8klcECVwSWqS1SWqBLVJXmaSvM0zKBmfJMymZSd5kgTvMknpzSenNJ6c0CenNMgmQTIeSBkPJJXBJXBJaoPCrdVeDTYO47sxW2mHEsvaeB4tOI+9UV0v0ZFo0V0GM3de31OGDgcWnitJSzK8KttWINNg7r+zEFv
VxAL2Hhm04j70HIbO6927tGpTu1c2FFcflYBjiceDsZG+dmZlZu6X6LjUaK6DGZuvb3hwwcDi08fvVibPK9W7tFpb77mworjPAMeTjwdjI3zCzcypF96iy28yUi/Tmg+rUREHybr1wG0GvHUb1Ho7gY5ue8XiCOA4xOSjaHXkQN6j0Z1saT3iUHIcYnLVVRRKLEjRGw4bS+I82AC8uJvJJPeSTmUEUWjxI0QMY1z4jzcBeXE3k2nvJJzKu6o1TodCZvvsdSHDtOwaPQblxOPqCmo9T4dCh7zrHx3DtvwaJ7jbZNzx9QHUT05oE9OaT0SeiZBAyCZDyTIeSSuCBK4JLVJapLMoEsykrzNJXmaZlAzPkmZTMpO8yQJ3mST05pPTmk9OaBPTmk7gk7gmQ8kDIeSSuCSuCS1QJapLMpLMpK8zQJXmaZlMymZQeHWyrMGnQd2J2XttMOIBew/e03Wj771RXTHRUajRXQYzd1w9Thg4HFp/wDb1pGd5kvErVVqDToO4/svbaYcQC9h+9pxGORsKDi9nlevk0Wlv4CFFcZ8GPPJ3cVaE9Fm7pnoqNRozoMZtjh3hwwc04tP/t6sDZ5XuzdotKfdc2FFce4MceTu48UFpoosUoMwRHlxLibSSSScSTaT61d2zqrUGj0Zkf5UWMxr3PIk1wDgwcBK3ie4CkCtFVWvoNG4dRC7/e2oj056c0nok9EyCKZBMh5JkPJJXBAlcElqktUlqgS1SV5mkrzNMz5IGZTMpmUneZIE7zJJ6c0npzSenNAnpzSdwTIJkPJAyHkkrgkrgktUCWqSzKSzKSvM0CV5mmZTMoOJQMyk7zJJ3mST05oE9OaT05pPTmk7hJB4ta6uQabB3Hjdc20siAXsP3tOI+8ArPhGC04+RA4epZkdM6oPQ/p6l/TxP+pyleaiqBWiqr30GjcOoheG1Z1K0VVc20GjAfQQvDaor08gmQ8kyHkkrgg8ysnTkKhUd0Z99lzWib3GTbfWbeAJVL9KV36QjOJ690MYMhEsA7x2j3lddtriENorLbiYziOJAhgH1Od61zGznoWBSqW5kdpcxsNz90Oc3eO81otLSDZ2jig8n8oqd+t0j7aN+KllZKeDaKXSLf4sU+wlXD/V70UJ0b/lpP8AOvOrFULo5lDjxGQSx7Ib3tIiRjYWtLhc5xBFyI82oNf4kSK2j0shznXQ4tgB3sGuAuvwIxuxtX9+1unxoUCA6FFfDJiOB3HuaSNwm8tN6qCFFLXNeLi0hwPAg2g+sLRXTXQVGpbWtpEPfa07zRvRG2Gyy21pGCKob8oqd+t0j7aN+KflFTv1ukfbRvxVxDZ70Uf0a7+LSb/+9VPXjo+FAp8aFBbuw27m6LXOsthscb3Ek3koj+X8oqd+t0j7aN+Kh1Y6dYfhdI+2jfiu32a1WodKor4keFvuEVzQ7fjN7IYwgWNcBMldY7Z70VL3N/y0n+dB0NAcTCh8dxtp/wBo9q4yuu0FlFcYFHDYkYXPcbSyGeF3ynZSHsXuVx6V9x0CI+Hc4NEOHjY53Zab52C//aqFo0B8WI1jAXRHuDQLb3OcbLyczeSivWpdb+kYhtdSooyY7qx6mWL+rouvfSMFwPXmKMWxe2D/ALvlDuK7/ofZlQocMe6N6PEM+3EY0Hg0MINmZJ7pLx66bOYcKC6PRC4bg3nQiS61omWuPatAvsJNtnrDsKn1tgU5hIG5GaBvwybSM2n5zbcfWuhzKzh0F0q+i0iHHZNhtI9Jvz294t9i0bBiBzQ8G1pAI0ItBQVJtKrPS2010CHFfChwwy5ji0uJaHElwv8AnWWSuXJflFTv1ukfbRvxXqbSz8aR/q/CYui2aVVoVLor4keFvuEZzAd+M24Q4ZAsa4Cbj60RxTayU8fpdI+2in2Er3ehto9OhECK4R4eIeA11mT2ieoKsN2zvosyo5GYi0n73qu6/wBTm0IsiQnudBe4t7Vm8x1hcG2iy0EA2f6UFu9BdMwa
XBEWCeybiDc5jsWkYGXrC9DIeSprZH0i5lNMG3sRmG0ftMG80+rfHerlyCKh8rBwWZHTOq02+4EYrMjpnVBCIiqBWiqrn4DRgPoIXd72FnUrRVVz8BowH0ELw2qK9OVwSWqS1SWqCr9tgvon1/8A9K83Y6fh0T+A7xIa9zbPQXmDR40wxz2Oy3w0tOlrLNXBcHVGsLqDSDGEMRAWFhaTu3Eg2h1hsNrRgg0FmfJfhT6I2NCiQ3khr2OYbLLQHAtJFuN6rf8Arb/yf/P/AONS3a2Cb6GbMow/kvQemNlVB+kpFn+uF/Iu7npzXh1ZrVRqcD1RLXNFrob7A8Z3EgtzB1sXuT05oE9OaonaX+dKR9V4LFe07gqJ2l/nSkfVeCxB3ex0/AYgH07vDhru5XBcJsdPwGIB9O7w4a7uWqDhNsZsoEMcY7Lc/e4i4LZs0f0pR7rb4h7+pfYu92xj4DD4+6G+FFXCbND8aUf6zwXoi9pXmaiI0WHe4HyU5lQ6RJRWYyFoqq19BopP6vB8Nt6zq7FaJqqLaBReHueD3+9tQU7tLPxpH+r8Ji7rY2LaDE4e6H+FCXC7Sz8aR/q/CYv6anV59wwHQeo6zeiGJvdZuWWtY2yzdPozzRF2zuElXu2eksFFgQre0Y2+B+y2G9pPre1efG2tu3bGUQA4ExSQO4MFvrXBdNdMRqVFMWM7edIWXNaMGgYBFe5swhE9JwrPmiI45Dq3N5uCvOVwmq92T1ddBhupURtj4rQ2GDMQ7bS48N42dzRxVhSzKCHXA8VmR0zqtNuuBJmsyOmdUEIiKogrRdV7qDRuPUQvDas6laKqvdQaNx6iF4YUV6ctUleZpK8zTMoPxptEhxYbmRWhzHCxzTKz8c1WPSuyiJvE0eOzcMmxd4FuW+0He9QVgdNVgotE3PdETc397cG7Edbu2b3yQbPlN9a/LoatFCpcQsgxd9zWlxG5FbcCBba5oEyEFbDZVTvpaP8A9Ub+RfydJ7OKbBgvjF0FzWNLnBrom9ugWuIDmgGwXzV2z05ry60m2g0rh1Ebw3IKE6C6SdR6TDjNNm44E5ttse3QttC0eeAWYTJaeyHkgZDyVE7Sx8aUj6rwWK9pXBUTtLHxpSPqvBYg7vY4fgMTj17vDhru5ZlcJscNlBicevd4cNd3K8zQcJtjHwGHx90N8KKuE2aH40o/1vgvXd7Yx8Bhn/MN7ve4q4PZofjSj2/vPBiIi98yodeLTJTO8yUOvB4c0VmNy0TVW+gUXh7ng+G1Z2K0TVb+40Uf5eD4TURTu0v86R/q/CYvirNS6TTYTosJ8JrWvLDvmIDaGtdg03WOC+9pf50j/V+Exd1sbPwGIB+sP8KEg5pmyqm22GLRxxsdGNnduLqKu7NaNAcHxne6IgvDS3dhg8dy073ebMl3MrhNJZlFJapK8zSV5mmZQQ6RJ4LMjpnVabdIk8FmR0zqghERVArRVVrqDRj+4heGFnUrRVVv7jRifoIXhtUV6eZ8kzKZlJ3mSCsNtYJ9yGy738evqrOR9S8HZb0jAg01xjRGw2uhOaHPIa23eYbC43C4GfBWjXCr4p1GMK0NcDvw3HB4BF/7JBIOtuCozpToak0dxbGhPYRjYS05h4ucNCgvs1loB/TKNZ/Hg/zLy601koJoVIa2lQHF0KI0NbEhuc4uYQ0BrTabyqKsUtYSbACTwAJ9iCGtJsAvJuGpuC07K4Kotn9SIz4zKRSIZhw2EPY14Ic9wvaS03hoN987BgrdlqgS1VE7S/zpSPqvBYr2lmVRO0sH+lKR9V4LEHd7HLqDEP793hw13eZXCbHLqDEJ+nd4cNd3mUHObQeinUno+K1ote2yIwcS28jUt3h3qjejKc+DGhxoZ7THBzeBswORFoORWlRfeZKsq7bO3Pe6kUMDtEl8G0C04uYTdf6Jsy4IOn6Gr1QKQwF0ZkF1naZFc1hByc6wOGnsXjV32gUdsF8GixBEivBbvsvZDBuJ3pOd
ZKy3PgappdBjQjuxIT2Hg5jm8wv6ei+gaXSHAQYER+e6Q0avdY0etEfzdG0B8eMyDDHbe4NGVszoBaTkFpGjQQxjYbPksa1oyAFg9gXKVGqU2hDrIhD6Q4WFwt3YYxa22ZOLuWPX5BFUTtLHxpH+r8Ji7vY2fgMTj7of4UJcJtLHxpH+r8Ji7vY2bKDE4+6H+FCQd3LVJXmaSvM0zKBmUzPkmZ8kneZIIdeCSsyOmdVpt14PDmsyOmdUEIiKoFaKqsPgNGJ+gheG1UD0z0ZFo0d8GK2xzT3OHzXDiCFY2zaugcGUOkEAixsF5sAcJNhuzEgcZTnBZc7zJJ6c0npzSenNFJ6Ib7hJMgmQ8kCzAeSWWXCaSuCS1QJapLMpLMpK8zQJXmaWYlMymZQAMSk7zJJ3mST05oE9OaT05pPTmk7hJAN92CZDyTIeSZBAyCSuE0lcJpLMoFgGZQCzVJXmaZlAzKZnyTM+STvMkCd5kk9OaT05pPTmgh94PDmsyOmdVc+0KuoorTAgEGkOF5uIgg4n9oiQ7zgDTlFo74j2w4bS97jY1omSg/JFYH9VVK+lh/8AciqO/rjVaFToVhsbFbb1cSz5J4Hi04jvVE9JUCLAiuhRWlr2m8ciDiDMFaVIt0XN10qpDp0K6xsZgPVxP/g7iw+yYxtiuf2dV367dotJd75KHEP+KMGE+nnjrOwzwCzTTaJFgxXQ4jSyIw2EYgzBBHcQQrY2eV368No1IdZHAsY8/wCKBgf3nOfFB32Q8klcElcElqgS1SWZSWZSV5mgSvM0zKZlMygZlJ3mSTvMknpzQJ6c0npzSenNJ3CSBO4STIeSZDyTIIGQSVwmkrhNJZlAlmUleZpK8zTMoGZTM+SZnySd5kgTvMknpzSenNJ6c0CenNcfX+ubaIzqoRBpDhdiIQPziOPAd8p/pXyuLKFD6uHY6kOHZbMMHpuy4DHS1Uo98SNEtO9EiRHZlz3E+0koDWxI0Swb0SJEdmXPcT7SSrqqHU5tCZvxLHUlw7RmIY9Bp5nHRflUGpjaI0RYoDqS4ZEQQfmg4u4nuF0+zlcglSoUoPki3RRO4KTwUZDyQcxXiqMOmw7WWNpDB2H4ET3HcQcDge8Gj6TAiQYhY8OhxGOsIk5rheDaO4gjIhaXlcFydfKnMpkPfh2NpLR2XYRB6Djw4HDS1B/Hs9rsKS0UeOQKQB2XXARgMcngTGMxiB3Esys0RGRIUQgh0OIx2bXMcDxwIOKuTZ/XRtLb1UYgUloyAigfOAwdxHeLpB2crzNMymZTMoGZSd5kk7zJJ6c0CenNJ6c0npzSdwkgTuEkyHkmQ8kyCBkElcJpK4TSWZQJZlJXmaSvM0zKBmUzPkmZ8kneZIE7zJJ6c0npzSenNAnpzXMV4rfDoUPdbY6O4dhmAHpu/ZHDE95H610rXDoULB0ZwPVs4/tHg0e2QyounUyLHiuiRHF8R5tJxJkABwwACCKRHiRohe8uiRHuvM3OcbgLB3AAZAK39n1ShRQI8YA0lwuFxEEESzecT3DEn8tnlSfc4FIpDQY5FrGG/qQcT+8I9UuK72VwmgSuE1Iu1USzKkXaoJUqFKD5JwCiVwUk4BRLVAlqksyksykrzNBx9fqmNpbOth2NpLRdgIo9A58HdxulTAMSFEt7UOJDdm1zHA+wgrS+ZXE7QalClNMeCAKQ0Xi4CMBIH9vge44EB/RUOuTKYzcikNpDBeJCIB89o5jDRddO8yWaIMWJCiBzS6HEY60G8OY4HgZGYIOiu2o1b2U6HuPsbSGDtsweJb7cuIwJ0JDqp6c0npzSenNJ3CSBO4STIeSZDyTIIGQSVwmkrhNJZlAlmUleZpK8zTMoGZTM+SZnySd5kgTvMknpzSenNJ6c0CenNeDXCtEKhQd42OiOtEOHbYXHieDRie6a/StlZYNCg77r3m0Q4YNhefuaLrXYakBUP0v0pFpMZ0aM7ee7uDRg0DBo4feg+ek+kItIiuix
XFz3G88OAAwAkArR2dVI6ndpNIb78RbDhkf2Q9Jw9PgMNZfhs5qRubtKpLe3OFDcPkcIjh6XAYTN8rKlcJoErhNJZlJZlJaoEtVIGJUSvKkDEoJUoiD5J9aiWq+ioAsvxQRK8zTMqQMSgGJQRmUneZKbLZpZbpzQcHtCqT7pBpNHbZHA7TRd1wHJ4EjjI4WVJRKTEgxWxIbiyIw2gyLSLiLD3gg5haXN+ir7aLUjr96k0ZvvwviMH+KOIHpj26zD26l1th06FYbGR2AdYzjhvt4tPslwJ6TfEgR+CzH7E3jx9qDTm8JAhN4CRvWZN48fam8eJ9aDTe8BjaU3gLyb1mTePH2qN48fag05vCZITeEyQsx7x4+1N48fag05vA3khN4HG7msybx4n1qN48fag05vA43c15NZqxQaHBMR5tMmMBG9Ed6I4DicFnrePH2qCUH93TnS8alRnRozrXG4AfJY3BrRgB/+rv8AZzUf5NKpLeDoUNwlwiOHJvfwX5bOqj7+7SqS3s3GFDcPlYiI4ejwGM5WW2tKSCJXCaSzKmyzMoBZqgiWqSvKkDEoBiUEZnyUi+8pZbeUnogm1SiIIRSiCEKlEAoiICgKUQUdXr+/xv8AUvBRFUEREQREQEREBERAX9XRX9vD/wBbeaIitCBFKKKhFKIIUoiCCpREEIiIP//Z"></button>
<button style="height: 75px; width: 120px" <a href="/right"><img style="height: 65px"src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOcAAADaCAMAAABqzqVhAAAAeFBMVEX///8AAABNTU1CQkI8PDyGhob39/f09PRISEj19fX5+fnW1tYiIiKLi4vQ0NCXl5fCwsIXFxeQkJAPDw8cHBxVVVUmJiaamppiYmJpaWkjIyPLy8sTExOAgIApKSkZGRk0NDTh4eHd3d13d3fq6upTU1NoaGhxcXH6kISsAAAEgUlEQVR4nO3d63LaMBAFYJMLhFwITZqSW2nI9f3fsAYyBNmSLC8L1jnrM/3HjOtvdmUcI8lF0VWmp53914fMdDD42/U5HCAl0wJ0xeSHfjPZoRsmN3SLORjwXnUdJi+0wmRt3RqTE+phMraul8kHDTDZoC7zhRXqMi+KOSe0yizO/zBCa0xOqIdZQula18vkgwaYJfSRCRpkckEjTKbWfYoxea66DUyW1m1kckATmAytm8TEh07SmOitm8wsoZe40It0JnLrtmLiQl/bMVFbtzWzCr3a9xmqZNGeiXjVFTHxoO8yJlrrjqRMLOjoVswsiiEMdPhvBybQGJ3vxIRp3ZPts3ySHAGjdY+2T/JMdAgMqFPQE9EhMFr3ZXcoREXdvzyIocNbBehvAKh7Q0RcUfcG91h0DEAoceuqVBQPSjxGF1bG6KsGFKF19aE3ymeoFDMVveih6YFo3R7aIkZbN1PoRAPqPETMtHU1oGOEMaoCtVJRiKtuP0ZbxEzrjn/BQYVP6vEqKoMCtm4PjWZkZYwCVvRIdAzAq64MCti6ZqAfomOYgUKMUXc2+Z3oGKNnAOhEA4pQUfeZ0b3oGK0qetRN7uYHhg7yiAb0GsA5+NwztCtXLV8K0EjrdsWq522v0K5Unsj2OXEn+wZbtyuUL7K5tmljtCuTN7Iv+6SKdkXyJ/rVsBN08/Hl2bFqzrazvDX4uCtzX/4r8/lQ5uvr7e3vMqdlrq5ubmaz2XBf0M2nmf7WlpbmMbr5MOMJ9wlprCiJsxHK4myC0jgboDzOOJTIGYUyOWNQKmcEyuUMQ8mcQSibMwSlcwagfE4/lNDphTI6fVBKZwU6K1iddSipsxjNXWjvhI6RvjVyHaozKZ3uqukVk9HpqSaj08vkc/qZdM4Ak80ZYpI5g0wuZ5hJ5TTynDpSTSZnlMnjNPK7oJHfedN/t7cyD+Py+EQjobk1q0m3PxNr7n0Ta64PMa8mixxgnlQOMTLvzcg8RiPzUjXmGYfHd1eqWoTzxtOqmY/zQcZEWwew93UdPOt0ZN+9e47+uqssmRMbTP11kVkyVVZFojFp1y3rMwGqqbHcnpYJsAa9ZyYH7hIk3K4m/11cVDarsVFNgD15jOyx1FczOUO0atLuO2myaWn3nDSyh6iRnbg1mABNq8K0Uc1+bOYSIy/s0GACjE0j1VzYYBp54ZU+00g1eZmX2TNH6swsx+b4VoGZf9O67zmlZbrvraVtWvc9xLxMjfdK5397oPKecARmUTjvpxe89x2haZcZP+8ExXhH+DLuXUJLKEo1l3mXQ5GYO0BxmnadhQx6/ojFFELRqrnMa3soIlMAxWRWn1E3Qt2/cnCYLaF4l6CfTNKhqE27TjIUm1mdIR6EumMTccltEhSfmQRFb9p1GqEczEYoQ9OuM41BeZhRqHt7gM2MQLmYQej5nIsZgPIxvVCmS9BPalBOZh1K2LTruNAXVmYVSssMQtmYASgf0wtlZHqgnMwalJVZgfIyHSgzcwvKzdxA2ZnfUH7mCirbDwwt04NW8z/kFFuW/TSJUgAAAABJRU5ErkJggg=="></button>
<br>
<button style="height: 75px; width: 120px" <a href="/back"><img style="height: 65px"src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAb1BMVEX////z8/MAAAD09PT+/v79/f319fX8/Pz39/f4+Pj29vb6+vr7+/v5+fkEBAQ6Ojo/Pz9TU1MuLi7Ly8vQ0NCrq6stLS0mJibt7e3k5OQdHR01NTWkpKQfHx/CwsLo6OhKSkpXV1fY2NgXFxeysrKYY5G/AAASkElEQVR4nM1df2ObOA8mCb8ha9fs1rS93u629/t/xjdgy5ZlG2wJ0uSPjaQ2lkDikaXHpijUpyy9gyJ2sH/bbU83f5pRfS2bxhyov1SjOahibb0mobZFpC00yWmbI6b6DJ36uTyf1c/VedDtulEdjF3DaHtebQunK6CJadtAW97QcN750/Xq57Lt1c9V36pTjP1Zj98PXls9St8VtO0IbbVEcLoG2g72dLqtN3SorTe0L+ZI2s5n7U7q57I+qVGaU616DodWdzjoU/Qn3fN00oIcen1yaNsetES1bltB29G27dy2ZugKhh4DQzPEnG120HrfhF7veaBtD3XhtrUKHnIUjLXtoK0/9IkO7Ys5G2+jLddexgNDwQrMY+zBr3Tnou3gQJ+l6cFXtLUVHW1btmCZnoK1HTouplap70q4j5PQB0/BnvSsrYJgokrB35fL5cf8CR38cA8SmuiD36Fra8UsIgqC8ZTKFjVq4LuSfwd/Hnf5/CniJrouprF40jPBuD0Fiyct0rdv3+DgGDn4FjiINflZSXwQ2qpv9bqJLjxkmuddFDz+LMjQATGjPugqGH8yrvvg7clYPO+h4Lfjy+q1XfBBx0RTACY6yoSDz3soCBqGTDRZzGr6r2zFOPi8h4JawzwFiYlWQzUFeb3EB+dRnndRcNYwBBPJCjbtjPjn1efvkg/ObZ/R42GTjzrdSx5M1BQmmnr6S6nnISyYgLbP+ia8/vX333+pj3cQ/wtqMh/871Wf7kUGE41qohGfFaqZtjcN50v/BIFZAdOa4qyjuQJmMw0EcWO07ZM+3UtBQ7UsEzVwrXvGLs2qiRaFRoubhuAr8WC7N8E29SvTtvjQp/tZcUI1KiZ848HE/Gkbo2G3pmBgNkEV7CqjYeENnSEmXK/5m8QHb0ID4h+fxmQFD1EFD/oeAh7KIsrSGSUfJrTQGvGVhqVV0NztuImG5oMf+nQvpC3DB2fEr3o+TOi78qwhbdJQ5oPz0B9Hi/isUM2Y6DhME8RzK57RP2vMvmm4xYz+wyK+zESHdhqoGfLvPU1ZAB7eNMzywUjK4sMgftaM3n8W1tOIJWQUlnyQmKiXkzF4eK7FJqo1VHiYEKrFYcIMHb00aT6oNAQ8pEJnwQS0/TCIzwnVvOTfuoKrWbXSalikKxjNqlnELyU+SBRkwsT8qS3iD1poaicoxbgEE2pog/h6BsyCCapgjnH7eVGC+CIfvLUliM+CCSNmqUbhhGo48esgvswHp6EdxGeFakbMZvpD04sz2xjxJTChh/7Qp5s0XPTBVRMdJiisurNUQYz4TBN10oYf+nQvTB80d7CbEX8cSc/UUM0qaBG/aZZM1Cu+RJ5vFg8TfHABJtp6jpOb8ChZxRc7Az5tUXxBeMiACf8+yBU0Oe8nuQ8qDTValBKYiCjIMNHbDAXw8ONAFMyDCT10SWbAQjSLjJKj4MnGNKOrYC5MqKFLgvgJxZeFOm1UwZwCKEF8aQGUIL4wolQSCRSc77aD+LxQDRdfHMSX+eBcHTVlfpYPzm0x4st8cBYaIz4rVDNizonKqhtCo+QoiBE/cAeza/QI8Vmhmhm6nZO0QLWRkBAs4g8SmIChLeLLTLSu58pTJVfQIn59YodqKPFrEZ8Tqvlwrb9xfXD6GMS/nt22GaEaqtranDdjRh9RUMaTM
TPgj5OnIIMnQ3Pe/FmdTEGU+CV4yPNBW3wheMj0wZiC+SY6Ce3gIRMmUPHFwUNu4iHIa8uHCS00VLlnDXmhGs6q4Zz3YkS56oPloHht+feemB1UuScNN+CqoZy3LKIkvDaeD85tocp90zCx+OIKTczZ5rxlPljVLq+N6YOgoZpbtCwfJEJbxM+a0fti9hFe2+ql8czOIP7hxA7VUNIJIb4AJmK8tiwThTKrmR9ejdnlw4ThySDEl5goW8FA+cwifgsKsmBCpyw8xOfAhKcg3wcRr03jIR8m1LWliM8J1Ta9gzeh3Sq3mNLsIr4ooiS8tnyY0BNeB/EFMKGHdhCf44NGTOC1iUx0EhohPssHidAY8XNm9D6kaF6bWEGM+EIfnIdGiJ8zo/fFdHltkmUFBvGHPgMmoiQEi/gimBgdXpts3QRC/PQZfZwngxA/f0bvi8nvaZkAPq8tP1RDVVuTEa44odqygryVL+u8tqyVL7TKveUdzIYJ1XaV15a38iXKa+NElPPXSmSiPeW15c7oqYJnwmsTBVyN4rUxjNtN/Lq8Nj5MOLw2ZaUyH9S8Nljnx1ZwmdeWv/oM5bxlEaXitY3DWs9FHyS8tpIdqlkFER5KEg+U18b2Qa2hepaeRTCBeG36WSoyUVNT4yuYwGvL9kGlIaAFJ1Tz4doZhbUCtKRV7jQfjCno89pEcM3vabNqlcdrW1VwiSdDEV8YcLEVRHnRGK+N44MTTY7y2iQmWqqfZQrGeG3sRcqU1yZQ0OW1MWBCZ9WCvDb+ImWH1yaKKF1eG88HZ2wL8doEi5QR4svuYNfOVe4xvyetTQR4bSyYCPDaRBFlPTWBKrdEwQCvTbRRAJoBi2Z1wCIL98xZ+YJ4bUbo7FDNZtUQHkp8EMTMvzR+damhiM+ECS20x2sTRZT5PQM8mZJUuVmRTJzXJosowz39qfLi6jNS5WYqaIaO8dpYCpbqZ4mJTn7l5LzjPJk0SnPpIr7IREvNa2M9RXFWDVe5WTDhFl8w4otMtJwZbRVsQCTYTwblvGX7yXi8NpmC8+4thteWEaot8NrO0VAtg3Uf4LXxIkqH18b2Qa2hwsPTKbH4UoRgAob2eW0iNEvuGefJ+Lw2zn4yC7w2UcDF6LnAa6tjCuasfPGq3BsquICDS1QuymsT7SfTeFXuu5tooLpEeW2MUA1n1dwqN09BPTTw2tgwEeK1CfeyqEmVW2SiFeW1pc3o/VEw4sd5MsmUZlzllkWUitd2huiE5YNzW4fXljuj94svqMotm/Q4vDbRXhaY18YN1QqbVbOIzzRRUAnz2mQbBaAqt9QHtYaA+BIfDLOiWIuUEa9Nm51oLwuE+ILEgxFz7dKkUJptlbuNCJ218iXKa2OJudYzMKP3y2eU1ybaT+Yc4LUJIsrSuYxZoRrOi9LdW7gwUYR4bbyIUg+teG29yEQpr4237ZiT+HUQX2SijbN7S/KMviB30OW1JfngMp0yxGvLCdXsfXB4bSyY6Dxemwgmzh6vTQQTLq8tP1SzCiJem2g/GZ/Xllcj8gxN8dqKyKXJWfkS4rW5QmetfPF4bTI0i/TMWfmC6vhG6DWYWODJoNXq3ORfgaqi6QrGaSQW8ZUawt3RvN1bJCEzKJgeA4UKoJTXJtpPpg3w2jJCNapgqUaR+OB0VxzEl+0nM90VB/FlPkh5bTmhGs6L4ip33j6vwQJokNeWZ6L6dC6vTbCXBapys0M1NDTevYUTqtm2Dq9NsJ+Mw2uT7CejhbZ4KJz0OLu35IVqJHVv8LCL38GMlS8+r00QUcJHtJeFv3uLbD8Zi4eSxMOqghnLCrzdW4T7ydg6fkaolqggb5Ey5bUJd0ejVW5ZRBm7NFkrXwjiy/ay8HhtLJgwQ5dKIh8H09dNTG0dxJftJzPdbQfxF3gyCS82GRWvjfIzAECKASg3A5Q54C+QBVNtMeILN+ugvLZigBJSeOigmGbos+K1DUjBt3+/53/++2MRn7ftmJsXBcT/9
l++LP++oaHbEK/tnyPrA1Vu+Q6FhUV8zucfPHSY12ZUXNoOP7Iv/lN/EptoYRGfsSX/JXBtkYlOgtTjha0g5rUJ9rIweLilgk4MdOEq6PPaWIuUDR7uo+A0yoWnIOK1iXZHAzzcQUFTfNGGmjuK4bUJfBDx2lgKehGlkojGQOM/5JyJn4+xWPbBNMbvx/pIWj5PQWo8+q1k3oy+Bl9Up3h9evlQnxd68IIPfn4f2aEaxszvP/F57Ygv7tBPr6t3UO/eYqgheJQLuoOvv/uubduuu15P80F7vapf6uu1nQ8O18Pt/05oovqBdO5Ot/O29rzz0AcYulZDH36/rt1BzWuriInqMP1ijeD4elUSAQkHduIvgdY9wi5mom3H7BMXYHUAFlmri8sFlJHK89uqgu5bybwo1kD/dIr3a5H0ZBTuJ5NRfOne3lcVBBaZ7umNUl7QKW4qynkybAX96VK+gsFRLvhp86vNXPkSVDBnkfLCjL5LMFGiYHiqjKD/+PlL95S880VoolbBT66J0gkvDsM/r4V7VwTFFxlPJsNEFeK38XTVgKH/pqLMB6WrAI0PJt9BxWuzbyULpasw9L9ez5IavWjly6IPxqjllfNWsli6CkP/n7ciIrQ8VHOGXlQw2UT1W8n0q4/jeVEM/TMublF8SVQwCSbiqx/m00V5baZnaaH/OEc3WxRfchU0dMp0mDCPCtVzqfhSXIyCs4pbFF/YJpqsoEEzPcpyXvRytMD4+ab/Iiq+7A4TDVFwrTZhffGoffHBQzVjomWagi70zzON3WFCFKqZeETz2hKKL04G7nYXvyRUyzfRCK8t/PzFGbjPX+0jwMTqHRzVW8kory0GMIEw/K6hWj5MDHr3llUFAxk4Pevfa7q0DUyYmhpVMHppnAzc9ETdf0Yv8UG7OxpRcGmBJA7D36/NV8JE+jrONQXdGb2TgXuroO0DhWqrd3BhL4tYBu6RZvR+4sHltSUsUvYzcA8ZqhkF6VvJEozbycB9hjJwXzej931wcHhtqXtZkAzc/UO1BUMjQ7u8trgP0p4Y+v9ABu4RYYK+lWzVB01PHIa/kwzcF87o/cwKkDVWFPSzajYDd5ynxA8BE/HkX0zBxawaDsNfr91DzOhjiQf3W3LxBfviu8nAfVnxZSH5p79lmKjuacJwm4F7gBm9r6DLa8vZdqzyMnBbFV+2CNVah9dm3kqWt0jZycB9XotHCtWivLZVmHCzam4GztDFvz5Uo7y2JtZztfiCM3CfJAP3haEa4rXNeaiK9MzZdiyegbtf8SV+B42YkUuTUnxpcAbuz/UxQjUqZrxnUvEFQ/+7ycB9aagWYSb6xp1Ym8DQ//pLouC2oVrMRBkFUD8D9wihmhFzbhl/K1lK8cXLwD0CTJhHhea1iXgyPcnAjQ8QqlFeW0MuY3Z90M3AleQy7l18WRCzd99KJuDJ4Azcp5uBux9MBMSMvZUsASZIyiKUgftCHySPCtKTx5MpcAbu9kS9X/Fl3ZP0KGKejJOBe2s9H9yr+JKooGw/GT8D966g/wt9kJho0q5cq8UXLwN3v+LLwt2eZxdtdqgWjHgHHIbffPF+oVobFRPeSpbjg0vFF7cQ3t19Ru97En0r2eKMvkgovoQycPcP1ayY7lvJNqFT4gycgv47zuh9MecmwGvbhk7pZeC2L77kixkRmsmTcTJwtyfq3Wf0gYBr9dJk8mQiGbhNeTI5zGv1LWeH2NXEL87A6UL4/UM1I6bmtW3KF/UzcPeb0Xtizohf9RuvfHEycO/X/YsvcTFHh9e2IZ0Sh+Hvv9oFE/2zD0zA9hyK1zZsaaL6UbaUgdu6+LJATHbeSrYxnRJn4BT03y9U8yA42lPCkxliGbjdZ/SB3dHCPcU8GZyB+zQZuDuEaol3cANKs5uBK7SCOaHaem4sYXFApOcmK1/8pShYwfSn6HqotmCipZJIHKqFFPQzcHsVX0IK6tNRXpvAREMz+mgGbu9QzdwH961kOT1Ta
xMkA7dXqBZDM/etZBvBBLGTUAZu71DNiql2b2lIz40pzX4GbuPiSxwm7OZhpOe2dEovA7dx8SXhPrg9d6A0uxm4MQPot0k80EuzA6XZWYryQxBssx4VYQU3plPiDNwxWUEeTFAfLNXP+/ig2R2twRm4RKDn73fuiOny2nZbpHxyMnD3gAkQ030r2a6rzy55dzAhVAuJSU207eZbaHht24RqkeILCsO3Lb4sPewJr22zUE2djszonTB8nxm9Lyaqke6/8gUycPfzwXVe2zYKBjNw9wjVqILbhmpe0ungZOD4D5l8T/J6bhWqEQVrnIHbvvgSFzPKa9t25YvZz2DOwG1ffAmIqYcu6VvJtoWJ0L4+l7uEajaZ4PDatlFwbRPjw+bFlwVDC7+VbOuVLymbSe8DE1WU1yZQMAwTqQpyii8LJhrhtW2k4CHgg6tCbxOqhRWErTGaGtJwNawsrSGKrfUode+1hWxdPcLptIlC2xHanqFtG2hb6LYdtKVD9/7QvphkaNWz6zQjo9Xjj63uMOh96IpzV623baAJpA07YO6YtnA625acrvGHbpOHLjoqpvo2wl5RsDF2BZtoN3Aw6q2yy4G2bWxb3WTUFcmFtuZ0MHSZMDRLzMr+iw7KqvQOSJOctqEmZcbpctp6Ypb/BxelQwgk6+6VAAAAAElFTkSuQmCC"></button>
<img hspace="800" vspace="60" style="padding-left: 5px">
</body>
</html>
"""
# Minimal page sent after each motor command: a meta-refresh that bounces
# the browser straight back to the control page.
# NOTE(review): the URL is '\index.html' -- the backslash is sent verbatim
# to the browser; presumably '/index.html' was intended. Confirm.
red ="""\
<html>
<head>
<meta http-equiv="refresh" content="0;url='\index.html'"/>
</head>
</html>
"""
# Configure the Pi's GPIO once at startup: BCM (chip) pin numbering,
# re-run warnings silenced, and BCM pin 18 driven as an output
# (presumably a motor/LED enable line -- not used elsewhere in this file,
# TODO confirm what it drives).
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(18,GPIO.OUT)
class StreamingOutput(object):
    """File-like sink that PiCamera writes the MJPEG stream into.

    Each time a chunk opens a new JPEG image, the bytes accumulated so far
    form a complete frame: it is published as ``self.frame`` and every HTTP
    client waiting on ``self.condition`` is woken up.
    """

    def __init__(self):
        self.frame = None             # most recently completed JPEG frame
        self.buffer = io.BytesIO()    # frame currently being assembled
        self.condition = Condition()  # wakes streaming clients on new frames

    def write(self, buf):
        """Accept one chunk from the encoder; returns bytes buffered."""
        if not buf.startswith(b'\xff\xd8'):
            # Continuation of the current frame: just keep accumulating.
            return self.buffer.write(buf)
        # JPEG start-of-image marker: everything buffered so far is a whole
        # frame. Publish it, notify the clients, then rewind for this chunk.
        self.buffer.truncate()
        with self.condition:
            self.frame = self.buffer.getvalue()
            self.condition.notify_all()
        self.buffer.seek(0)
        return self.buffer.write(buf)
class StreamingHandler(server.BaseHTTPRequestHandler):
    """Serves the control page, the MJPEG stream and the motor endpoints.

    Each motor endpoint shells out to its CGI script, then replies with the
    small ``red`` page that redirects the browser back to the control page.
    """

    # Control path -> CGI script that performs the corresponding motion.
    CGI_SCRIPTS = {
        '/forward': '/var/www/cgi-bin/forward.cgi',
        '/stop': '/var/www/cgi-bin/stop.cgi',
        '/back': '/var/www/cgi-bin/reverse.cgi',
        '/left': '/var/www/cgi-bin/left.cgi',
        '/right': '/var/www/cgi-bin/right.cgi',
    }

    def _send_html(self, page):
        """Send *page* (an HTML str) as a complete 200 text/html response."""
        content = page.encode('utf-8')
        self.send_response(200)
        self.send_header('Content-Type', 'text/html')
        self.send_header('Content-Length', len(content))
        self.end_headers()
        self.wfile.write(content)

    def _stream(self):
        """Push camera frames as a multipart MJPEG stream until the client
        disconnects (any write error ends the loop)."""
        self.send_response(200)
        self.send_header('Age', 0)
        self.send_header('Cache-Control', 'no-cache, private')
        self.send_header('Pragma', 'no-cache')
        self.send_header('Content-Type',
                         'multipart/x-mixed-replace; boundary=FRAME')
        self.end_headers()
        try:
            while True:
                # Block until StreamingOutput publishes the next frame.
                with output.condition:
                    output.condition.wait()
                    frame = output.frame
                self.wfile.write(b'--FRAME\r\n')
                self.send_header('Content-Type', 'image/jpeg')
                self.send_header('Content-Length', len(frame))
                self.end_headers()
                self.wfile.write(frame)
                self.wfile.write(b'\r\n')
        except Exception as e:
            # Client went away (broken pipe etc.): log it and return.
            logging.warning(
                'Removed streaming client %s: %s',
                self.client_address, str(e))

    def do_GET(self):
        """Route GET requests to the page, the stream or a motor action."""
        if self.path == '/':
            self.send_response(301)
            self.send_header('Location', '/index.html')
            self.end_headers()
        elif self.path == '/index.html':
            self._send_html(PAGE)
        elif self.path == '/stream.mjpg':
            self._stream()
        elif self.path in self.CGI_SCRIPTS:
            # Fire the motor action, then bounce the browser back to the UI.
            os.system(self.CGI_SCRIPTS[self.path])
            self._send_html(red)
        else:
            # Was send_error(6969): not a valid HTTP status code, and the
            # extra end_headers() after it was redundant (send_error already
            # completes the response). 404 Not Found is the correct reply.
            self.send_error(404)
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    """Threaded HTTP server: one thread per connected streaming client.

    ``allow_reuse_address`` lets the port be rebound right after a restart;
    ``daemon_threads`` makes client threads exit with the main process.
    """

    daemon_threads = True
    allow_reuse_address = True
# Entry point: open the camera at low resolution / high framerate, record
# MJPEG into the shared StreamingOutput, and serve HTTP until interrupted.
with picamera.PiCamera(resolution='320x240', framerate=100) as camera:
    output = StreamingOutput()
    #Uncomment the next line to change your Pi's Camera rotation (in degrees)
    #camera.rotation = 90
    camera.start_recording(output, format='mjpeg')
    try:
        address = ('', 8000)  # listen on all interfaces, port 8000
        # NOTE: this rebinds the name of the imported ``server`` module;
        # harmless here since the module is not used again afterwards.
        server = StreamingServer(address, StreamingHandler)
        server.serve_forever()
    finally:
        # Always stop the encoder, even if serve_forever() raises.
        camera.stop_recording()
| [
"noreply@github.com"
] | yuvaraj-06.noreply@github.com |
a4b1a60545658a8d8f78c60948c9a730f08c530f | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_sqr/trend_constant/cycle_12/ar_/test_artificial_1024_sqr_constant_12__100.py | 2239a2b9360d2647f6e4ca61b30fe39991f9a2e9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generated pyaf test case: daily series of length 1024 with a constant
# trend, a 12-step cycle, squared transform, zero noise (sigma=0.0),
# 100 exogenous variables and no autoregressive component (ar_order=0).
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 12, transform = "sqr", sigma = 0.0, exog_count = 100, ar_order = 0);
art.process_dataset(dataset);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
23e530aff4d8b428e1040aedebaf067d2e74e9d6 | ab1416f6fd6655094298e6c7bab1ac47d2533342 | /samples/Python/filenames/DEPS | 6e5d1268bdd60e43376eea30b5d172bd8dad0369 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | monkslc/hyperpolyglot | 6ddc09e9d10d30bd8ce5c80a3bd755fa5714d621 | a55a3b58eaed09b4314ef93d78e50a80cfec36f4 | refs/heads/master | 2023-05-26T15:26:14.686247 | 2023-05-17T13:51:31 | 2023-05-17T13:51:31 | 248,387,967 | 43 | 11 | Apache-2.0 | 2023-05-17T13:51:35 | 2020-03-19T02:02:35 | RenderScript | UTF-8 | Python | false | false | 4,536 | gclient_gn_args_file = 'src/build/config/gclient_args.gni'
gclient_gn_args = [
'build_with_chromium',
'checkout_android',
'checkout_android_native_support',
'checkout_libaom',
'checkout_nacl',
'checkout_oculus_sdk'
]
vars = {
'chromium_version':
'f200986dfaabd6aad6a4b37dad7aae42fec349e9',
'node_version':
'229bd3245b2f54c12ea9ad0abcadbc209f8023dc',
'nan_version':
'960dd6c70fc9eb136efdf37b4bef18fadbc3436f',
'boto_version': 'f7574aa6cc2c819430c1f05e9a1a1a666ef8169b',
'pyyaml_version': '3.12',
'requests_version': 'e4d59bedfd3c7f4f254f4f5d036587bcd8152458',
'boto_git': 'https://github.com/boto',
'chromium_git': 'https://chromium.googlesource.com',
'electron_git': 'https://github.com/electron',
# FIXME: Once https://github.com/nodejs/nan/pull/857 lands this should point at nodejs/nan
'nodejs_git': 'https://github.com/marshallofsound',
'requests_git': 'https://github.com/kennethreitz',
'yaml_git': 'https://github.com/yaml',
# KEEP IN SYNC WITH utils.js FILE
'yarn_version': '1.15.2',
# To be able to build clean Chromium from sources.
'apply_patches': True,
# Python interface to Amazon Web Services. Is used for releases only.
'checkout_boto': False,
# To allow in-house builds to checkout those manually.
'checkout_chromium': True,
'checkout_node': True,
'checkout_nan': True,
# It's only needed to parse the native tests configurations.
'checkout_pyyaml': False,
# Python "requests" module is used for releases only.
'checkout_requests': False,
# To allow running hooks without parsing the DEPS tree
'process_deps': True,
# It is always needed for normal Electron builds,
# but might be impossible for custom in-house builds.
'download_external_binaries': True,
'checkout_nacl':
False,
'checkout_libaom':
True,
'checkout_oculus_sdk':
False,
'build_with_chromium':
True,
'checkout_android':
False,
'checkout_android_native_support':
False,
}
deps = {
'src': {
'url': (Var("chromium_git")) + '/chromium/src.git@' + (Var("chromium_version")),
'condition': 'checkout_chromium and process_deps',
},
'src/third_party/nan': {
'url': (Var("nodejs_git")) + '/nan.git@' + (Var("nan_version")),
'condition': 'checkout_nan and process_deps',
},
'src/third_party/electron_node': {
'url': (Var("electron_git")) + '/node.git@' + (Var("node_version")),
'condition': 'checkout_node and process_deps',
},
'src/electron/vendor/pyyaml': {
'url': (Var("yaml_git")) + '/pyyaml.git@' + (Var("pyyaml_version")),
'condition': 'checkout_pyyaml and process_deps',
},
'src/electron/vendor/boto': {
'url': Var('boto_git') + '/boto.git' + '@' + Var('boto_version'),
'condition': 'checkout_boto and process_deps',
},
'src/electron/vendor/requests': {
'url': Var('requests_git') + '/requests.git' + '@' + Var('requests_version'),
'condition': 'checkout_requests and process_deps',
},
}
hooks = [
{
'name': 'patch_chromium',
'condition': '(checkout_chromium and apply_patches) and process_deps',
'pattern': 'src/electron',
'action': [
'python',
'src/electron/script/apply_all_patches.py',
'src/electron/patches/config.json',
],
},
{
'name': 'electron_external_binaries',
'pattern': 'src/electron/script/update-external-binaries.py',
'condition': 'download_external_binaries',
'action': [
'python',
'src/electron/script/update-external-binaries.py',
],
},
{
'name': 'electron_npm_deps',
'pattern': 'src/electron/package.json',
'action': [
'python',
'-c',
'import os, subprocess; os.chdir(os.path.join("src", "electron")); subprocess.check_call(["python", "script/lib/npx.py", "yarn@' + (Var("yarn_version")) + '", "install", "--frozen-lockfile"]);',
],
},
{
'name': 'setup_boto',
'pattern': 'src/electron',
'condition': 'checkout_boto and process_deps',
'action': [
'python',
'-c',
'import os, subprocess; os.chdir(os.path.join("src", "electron", "vendor", "boto")); subprocess.check_call(["python", "setup.py", "build"]);',
],
},
{
'name': 'setup_requests',
'pattern': 'src/electron',
'condition': 'checkout_requests and process_deps',
'action': [
'python',
'-c',
'import os, subprocess; os.chdir(os.path.join("src", "electron", "vendor", "requests")); subprocess.check_call(["python", "setup.py", "build"]);',
],
},
]
recursedeps = [
'src',
]
| [
"monkslc@gmail.com"
] | monkslc@gmail.com | |
082a1d0a91074a3a3545799fa89ac845b5d5790b | bf448de248ec95325839b5e355af6982b4e5632d | /todo_project/testing_app/views/index.py | ac307f51c5dad4b28290138bc92c859ef3195724 | [] | no_license | rusalinastaneva/Python-Web-Basics | b459b8f47918b52654d304f3db3de54156299f66 | ecdd72b44d4d0a1b107ef1f9fa10eb252bd4fd0e | refs/heads/master | 2023-01-05T13:15:58.691511 | 2020-11-02T20:32:16 | 2020-11-02T20:32:16 | 309,486,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from django.shortcuts import render, redirect
from testing_app.forms.profile import ProfileForm
from testing_app.models import Profile
def index(request):
    """List all profiles and handle creation of a new one.

    GET renders the profile list with an empty form. Any other method is
    treated as a submission: a valid form is saved and the user redirected
    to 'profiles'; an invalid one is re-rendered with its errors.
    """
    if request.method == 'GET':
        form = ProfileForm()
    else:
        form = ProfileForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('profiles')
    # Single rendering path shared by the GET page and the invalid-form
    # case; the original duplicated this context/render block verbatim.
    context = {
        'profiles': Profile.objects.all(),
        'form': form,
    }
    return render(request, 'testing/index.html', context)
| [
"rossavelrs@yahoo.com"
] | rossavelrs@yahoo.com |
5718784c595d36e780c62bac3c46b052db8c7ba1 | 308dbe988649bf2bdc21e65ea1546165675bd6f1 | /bin/countpep8 | 2a52665967c7957e9019b2698af1a19814287ff3 | [] | no_license | julie777/user_environment | 0ca424f8bf2bccc16959ef28821b34841232ca6d | 746c4516eaa1b801cb2f052969bc39c0737a460c | refs/heads/master | 2021-01-19T06:53:29.415289 | 2015-12-30T19:26:31 | 2015-12-30T19:26:31 | 42,820,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,512 | #!/usr/bin/python
import os
import sys
import threading
import subprocess
from collections import defaultdict
import multiprocessing
import argparse
config = None
counts = defaultdict(int)
def check_file(filepath):
    """pep8-check one file; return [count, code] pairs, or None if clean."""
    pep8_cmd = ['/usr/local/bin/pep8', '--statistics', '--max-line-length=132',
                '--config=', '-qq', filepath]
    try:
        subprocess.check_output(pep8_cmd)
    except subprocess.CalledProcessError as e:
        # pep8 exits non-zero when it finds problems; with -qq --statistics
        # stdout holds one "<count> <code> <description>" line per code.
        return [line.split()[:2] for line in e.output.split('\n') if line.strip()]
    return None
def get_result(result):
    """Pool callback: fold one file's (count, kind) pairs into ``counts``."""
    if not result:
        # check_file returns None for clean files -- nothing to record.
        return
    for how_many, kind in result:
        counts[kind] += int(how_many)
def main(dir, sortbycount=False, quiet=False):
    """Walk *dir*, pep8-check every .py file in parallel, print the totals.

    dir -- root directory to scan (made absolute first).
        NOTE: the parameter name shadows the ``dir`` builtin.
    sortbycount -- sort the summary by occurrence count instead of by code.
    quiet -- suppress progress messages; only the counts are printed.
    """
    dir = os.path.abspath(dir)
    if not quiet:
        print 'collecting files in %s...' % dir
    number_of_files = 0
    pool = multiprocessing.Pool()
    for root, subdirs, files in os.walk(dir):
        # Prune virtualenv/build directories in place so os.walk skips them.
        for s in ['pyenv', '.sdists-venv', 'static', 'waf', 'build-waf']:
            if s in subdirs:
                subdirs.remove(s)
        # NOTE(review): removing from ``subdirs`` while iterating over it can
        # skip an adjacent '.waf*' entry; iterating a copy would be safe.
        for s in subdirs:
            if s.startswith('.waf'):
                subdirs.remove(s)
        for f in files:
            if f.endswith('.py') and f != 'passogva.py':
                number_of_files += 1
                filepath = os.path.join(root, f)
                # Fan checks out to the worker pool; get_result merges each
                # file's statistics into the module-global ``counts``.
                pool.apply_async(check_file, args=(filepath,), callback=get_result)
    if not quiet:
        print 'processing %r files...' % number_of_files
    pool.close()
    pool.join()
    if not quiet:
        print 'Pep8 Results'
    items = counts.items()
    if sortbycount:
        # Ascending by total occurrences.
        items.sort(key=lambda item: item[1])
    else:
        # Alphabetical by error code.
        items.sort()
    for name, value in items:
        if config.itemidonly:
            # -i/--item-id-only: bare error codes, one per line.
            print name
        else:
            print '%s %6d %s' % (name, value, errorcodes[name])
errorcodes = {'E1': 'Indentation',
'E101': 'indentation contains mixed spaces and tabs',
'E111': 'indentation is not a multiple of four',
'E112': 'expected an indented block',
'E113': 'unexpected indentation',
'E121': 'continuation line indentation is not a multiple of four',
'E122': 'continuation line missing indentation or outdented',
'E123': "closing bracket does not match indentation of opening bracket's line",
'E124': 'closing bracket does not match visual indentation',
'E125': 'continuation line does not distinguish itself from next logical line',
'E126': 'continuation line over-indented for hanging indent',
'E127': 'continuation line over-indented for visual indent',
'E128': 'continuation line under-indented for visual indent',
'E2': 'Whitespace',
'E201': "whitespace after '('",
'E202': "whitespace before ')'",
'E203': "whitespace before ':'",
'E211': "whitespace before '('",
'E221': 'multiple spaces before operator',
'E222': 'multiple spaces after operator',
'E223': 'tab before operator',
'E224': 'tab after operator',
'E225': 'missing whitespace around operator',
'E226': '(*) missing whitespace around arithmetic operator',
'E227': 'missing whitespace around bitwise or shift operator',
'E228': 'missing whitespace around modulo operator',
'E231': "missing whitespace after ','",
'E241': "multiple spaces after ','",
'E242': "tab after ','",
'E251': 'unexpected spaces around keyword / parameter equals',
'E261': 'at least two spaces before inline comment',
'E262': "inline comment should start with '# '",
'E271': 'multiple spaces after keyword',
'E272': 'multiple spaces before keyword',
'E273': 'tab after keyword',
'E274': 'tab before keyword',
'E3': 'Blank line',
'E301': 'expected 1 blank line, found 0',
'E302': 'expected 2 blank lines, found 0',
'E303': 'too many blank lines (3)',
'E304': 'blank lines found after function decorator',
'E4': 'Import',
'E401': 'multiple imports on one line',
'E5': 'Line length',
'E501': 'line too long (82 > 79 characters)',
'E502': 'the backslash is redundant between brackets',
'E7': 'Statement',
'E701': 'multiple statements on one line (colon)',
'E702': 'multiple statements on one line (semicolon)',
'E703': 'statement ends with a semicolon',
'E711': "comparison to None should be 'if cond is None:'",
'E712': "comparison to True should be 'if cond:'",
'E721': "do not compare types, use 'isinstance()'",
'E9': 'Runtime',
'E901': 'SyntaxError or IndentationError',
'E902': 'IOError',
'W1': 'Indentation warning',
'W191': 'indentation contains tabs',
'W2': 'Whitespace warning',
'W291': 'trailing whitespace',
'W292': 'no newline at end of file',
'W293': 'blank line contains whitespace',
'W3': 'Blank line warning',
'W391': 'blank line at end of file',
'W6': 'Deprecation warning',
'W601': "has_key() is deprecated, use 'in'",
'W602': 'deprecated form of raising exception',
'W603': "'<>' is deprecated, use '!='",
'W604': "backticks are deprecated, use 'repr()'",
}
if __name__ == '__main__':
    # Command-line entry point: parse options, stash them in the module
    # global ``config`` (read by main()'s printing code), then run.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--sort-by-count', action='store_true', dest='sortbycount',
                        help='sort results by count instead of by item id')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='suppress informational messages and only show counts')
    parser.add_argument('-i', '--item-id-only', action='store_true', dest='itemidonly',
                        help='just show item id for items with count > 0')
    parser.add_argument('directory', default='.', nargs='?',
                        help='root directory for finding files to be processed')
    # The original had ``global config`` here, which is a no-op at module
    # level and misleading: this plain assignment already binds the
    # module-global name.
    config = parser.parse_args()
    main(config.directory, config.sortbycount, config.quiet)
| [
"julie.jones@movenetworks.com"
] | julie.jones@movenetworks.com | |
31451823a0339390e66441098803890d7228f75a | a017f3a0c2ae3e6286bb6409245f4dccd6b4ff53 | /forms/AllTasks.py | 4f82223abb7696fbf85501d93ec015c36cc4624b | [] | no_license | KaurJa/To-Do_App | cca528a72c89e44ca75859aa5d373b70ffed9846 | 986c1f3367edd274cb459ff49d3457287681b240 | refs/heads/master | 2020-04-04T17:51:03.092178 | 2018-10-08T04:59:04 | 2018-10-08T04:59:04 | 156,138,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | from anvil import *
import anvil.tables as tables
from anvil.tables import app_tables
class AllTasks(AllTasksTemplate):
    """Anvil form listing every open reminder and accepting new ones."""

    def __init__(self, **properties):
        # Components must be initialised before anything else touches them.
        self.init_components(**properties)
        # Show every reminder that has not been marked done yet.
        self.repeating_panel_1.items = app_tables.reminders.search(Done=False)

    def button_1_click(self, **event_args):
        """This method is called when the button is clicked"""
        # Persist the typed task, refresh the visible list, then clear the
        # entry box so the user can type the next task.
        app_tables.reminders.add_row(task=self.new_reminder_box.text, Done=False)
        self.repeating_panel_1.items = app_tables.reminders.search()
        self.new_reminder_box.text = ''

    def link_2_click(self, **event_args):
        """This method is called when the link is clicked"""
        open_form('AllCompletedTasks')

    def link_1_click(self, **event_args):
        """This method is called when the link is clicked"""
        open_form('AllTasks')
| [
"Jass9343@gmail.com"
] | Jass9343@gmail.com |
54b7de4e7b3aa0e145b36a1e60e08ff50247ff97 | c221d7c4ce09c40c5089a1dde8350eef1b22111b | /ball.py | dc1713bf914be4fffa975e723ff7f9c852ae09be | [] | no_license | karolina139/Arkanoid | e00057f8e54df3f8f0ee55bf471ba1a7e5417ff3 | 34251a6a2265fa8489b65c9d0e9c21480c8d00f9 | refs/heads/master | 2023-03-22T02:44:15.237815 | 2021-03-09T22:41:21 | 2021-03-09T22:41:21 | 346,149,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import pygame
class Ball(pygame.sprite.Sprite):
    """The ball sprite: loads its image and tracks position and velocity.

    Public attributes (``rect``, ``x_velocity``, ``y_velocity``) are read
    and written by the game loop, so their names are part of the interface.
    """

    def __init__(self, width, height):
        # Initialise the sprite base class first.
        pygame.sprite.Sprite.__init__(self)
        self.screen_width = 800
        self.screen_height = 600
        # Historical misspelling kept for backward compatibility with any
        # external reader of the old attribute; new code should use
        # ``screen_height``.
        self.scree_height = self.screen_height
        self.width = width
        self.height = height
        self.image = pygame.image.load("data/ball.png")
        self.image = pygame.transform.scale(self.image, (self.width, self.height))
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        # Start centred horizontally, just above the paddle.
        self.rect.centerx = 400
        self.rect.centery = 540
        self.x_velocity = 0
        self.y_velocity = 4

    def update(self):
        """Bounce the ball off the side walls and the top of the screen."""
        if self.rect.left < 0 or self.rect.right > self.screen_width:
            self.x_velocity = -self.x_velocity
        # Leaving through the bottom is intentionally not handled here:
        # a lost ball is the game loop's responsibility.
        if self.rect.top < 5:
            self.y_velocity = -self.y_velocity

    def get_pos(self):
        """Return the horizontal position (centre x) of the ball."""
        return self.rect.centerx
| [
"255746@student.pwr.edu.pl"
] | 255746@student.pwr.edu.pl |
80da9ab7c2c2ddbd96b52a1722c7d9c5776c612b | 2981370bed1988f2b1173a4c4217df608635da21 | /meiduo_mall/meiduo_mall/settings/prod.py | e577b92386f022455cced19df3f0e2c12df12d5f | [
"MIT"
] | permissive | Zasling/meiduo_mall33 | 15c0eec1e028b6a2502ac1ab944f7bd42b40d5e8 | ec55597758d5052b311d65aee44533b001f6ddd8 | refs/heads/master | 2020-04-08T22:21:35.765256 | 2018-12-04T07:14:59 | 2018-12-04T07:14:59 | 159,783,731 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,390 | py | """
Django settings for meiduo_mall project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in a production settings module;
# it should be loaded from the environment or a secrets store instead.
SECRET_KEY = '5&&c-2+0*w&c(j2v1w(6f@szv*!*li=wqs(uwyc3!z&fli2&^v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Host headers this server answers; '*' would allow any host.
# NOTE(review): with DEBUG = False an empty ALLOWED_HOSTS makes Django
# reject every request with HTTP 400 - real host names must be added
# before this settings file is deployed.
# ALLOWED_HOSTS = ['*']
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is commented out, so POST forms are
    # not CSRF-checked in production - confirm this is intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo_mall.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'meiduo_mall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'meiduo_mall',
        'USER': 'meiduo',
        # NOTE(review): database credentials are committed in plain text;
        # prefer environment variables for production deployments.
        'PASSWORD': 'meiduo',
        'HOST':'localhost',
        'PORT':'3306'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Redis caches: database 0 backs the default cache, database 1 holds
# session data (see SESSION_CACHE_ALIAS below).
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://10.211.55.5:6379/0",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    },
    "session": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://10.211.55.5:6379/1",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}
# Store sessions in the cache backend named "session" above.
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,  # keep loggers that already exist
    'formatters': {  # how log records are rendered
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
        },
    },
    'filters': {  # record filtering
        'require_debug_true': {  # emit only while DEBUG is on
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {  # where records are written
        'console': {  # stream records to the terminal
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'file': {  # rotate records into a log file
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(os.path.dirname(BASE_DIR), "logs/meiduo.log"),  # log file location
            'maxBytes': 300 * 1024 * 1024,
            'backupCount': 10,
            'formatter': 'verbose'
        },
    },
    'loggers': {  # named loggers
        'django': {  # logger named "django"
            'handlers': ['console', 'file'],  # write to console and file
            'propagate': True,  # pass records up to ancestor loggers
            'level': 'INFO',  # minimum accepted level
        },
    }
}
| [
"zasling@163.com"
] | zasling@163.com |
a8b064ab5954d9afce97d2fefae22cdc3f005b5a | f571169e1293ec37556b2f30844ab28db2479e46 | /RNN_retry/make_Word2Vec_data.py | 59d91e2b2cf0d6b3810734ffcfb80c0e858e07c1 | [] | no_license | swallowtail224/classification_tweet | 7ea9331e3d338256dc69642cf4c160a00d2bd808 | 8ba0708af909c0af63c955a0919591bab587a70e | refs/heads/master | 2020-04-17T08:43:36.905165 | 2020-01-17T14:04:59 | 2020-01-17T14:04:59 | 166,423,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from gensim.models import word2vec
import numpy as np
import pandas as pd
#word2vecモデルの作成と保存
def make_model(text_data, model_name):
    """Train a skip-gram Word2Vec model on *text_data* and persist it.

    text_data: path to a one-sentence-per-line corpus file.
    model_name: path the trained model is saved to.
    Returns the trained gensim Word2Vec model.
    """
    sentences = word2vec.LineSentence(text_data)
    trained = word2vec.Word2Vec(sentences, size=50, window=10, hs=1, sg=1)
    trained.save(model_name)
    return trained
# Load both tweet datasets and print their sizes for a quick sanity check.
C_use_data = pd.read_csv(filepath_or_buffer="Datas/pickup_data.csv", encoding="utf_8", sep=",")
print(len(C_use_data))
C_use_data.info()
D_use_data = pd.read_csv(filepath_or_buffer="Datas/A_pickup_data.csv", encoding="utf_8", sep=",")
print(len(D_use_data))
D_use_data.info()
# Build the text corpora: drop every metadata column (the remaining column
# presumably holds the tokenised tweet text - verify against the CSVs) and
# dump it one line per tweet.
C_use_data.drop(['screen_name','user_id','tweet_id', 'tweet','postdate', 'cos_day', 'sin_day', 'tag', 'image_url', 'image', 'retweet'], axis=1, inplace=True)
C_use_data.to_csv("Datas/Word2Vec_model/dC_tweet.txt",header=False, index=False, sep=",")
D_use_data.drop(['screen_name','user_id','tweet_id', 'tweet','postdate', 'cos_day', 'sin_day', 'tag', 'image_url', 'image', 'retweet'], axis=1, inplace=True)
D_use_data.to_csv("Datas/Word2Vec_model/dD_tweet.txt",header=False, index=False, sep=",")
# Train and save one Word2Vec model per corpus.
C_model = make_model("Datas/Word2Vec_model/dC_tweet.txt", "Datas/Word2Vec_model/dC.model")
D_model = make_model("Datas/Word2Vec_model/dD_tweet.txt", "Datas/Word2Vec_model/dD.model")
| [
"gandamexsia@yahoo.co.jp"
] | gandamexsia@yahoo.co.jp |
b4391290703dfc6a10438c48de9f21f63720a332 | cc0c0f99a5cf563ff52a76f2ac17cdad09d22f01 | /venv/Lib/site-packages/itk/itkReinitializeLevelSetImageFilterPython.py | 3d980dd5b7380f3bcd09a86b369d64e85eb438f6 | [] | no_license | Marxss/carck_detect_system | 9c0d338bde322b4c7304fd0addb524d8697c8a7b | d2480f2108052af8af0aa5265a5239c309885043 | refs/heads/master | 2022-04-15T23:34:20.988335 | 2020-03-29T16:24:00 | 2020-03-29T16:24:00 | 214,625,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,562 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# Hook used by SWIG to attach wrapped C functions as methods: Python 3 uses
# the extension module's helper; Python 2 falls back to new.instancemethod.
if version_info >= (3, 0, 0):
    new_instancemethod = lambda func, inst, cls: _itkReinitializeLevelSetImageFilterPython.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Locate and load the compiled extension module that lives next to
        # this generated wrapper file.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_itkReinitializeLevelSetImageFilterPython', [dirname(__file__)])
        except ImportError:
            # Not found beside the wrapper: fall back to a normal import
            # from sys.path.
            import _itkReinitializeLevelSetImageFilterPython
            return _itkReinitializeLevelSetImageFilterPython
        if fp is not None:
            try:
                _mod = imp.load_module('_itkReinitializeLevelSetImageFilterPython', fp, pathname, description)
            finally:
                # Always close the file handle imp.find_module opened.
                fp.close()
            return _mod
    _itkReinitializeLevelSetImageFilterPython = swig_import_helper()
    del swig_import_helper
else:
    import _itkReinitializeLevelSetImageFilterPython
# Keep a reference to the builtin property so the generated code below can
# use it even if the name is later shadowed.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # Route attribute writes through SWIG's setter table; with static=1 a
    # write to an unknown attribute raises instead of creating it.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown attributes are simply added.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # Route attribute reads through SWIG's getter table; with static=1 a
    # miss raises AttributeError, otherwise fall back to object lookup.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    # Non-static variant of the lookup above.
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim: detect whether new-style classes exist (they do on
# any modern Python; _newclass = 0 only on ancient interpreters).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    # Wrap a class's __setattr__ so only already-existing attributes (or
    # the SWIG bookkeeping names 'this'/'thisown') may be assigned.
    def set_attr(self, name, value):
        if (name == "thisown"):
            return self.this.own(value)
        if hasattr(self, name) or (name == "this"):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import itkImageToImageFilterAPython
import itkImageSourcePython
import itkImageSourceCommonPython
import ITKCommonBasePython
import pyBasePython
import itkImagePython
import itkOffsetPython
import itkSizePython
import itkPointPython
import vnl_vector_refPython
import stdcomplexPython
import vnl_vectorPython
import vnl_matrixPython
import itkVectorPython
import itkFixedArrayPython
import itkMatrixPython
import vnl_matrix_fixedPython
import itkCovariantVectorPython
import itkSymmetricSecondRankTensorPython
import itkImageRegionPython
import itkIndexPython
import itkRGBPixelPython
import itkRGBAPixelPython
import itkVectorImagePython
import itkVariableLengthVectorPython
import itkImageToImageFilterCommonPython
import ITKFastMarchingBasePython
import itkFastMarchingStoppingCriterionBasePython
import itkNodePairPython
import itkLevelSetNodePython
# Convenience factories following the SWIG naming convention; they defer to
# the classes' New() smart-pointer constructors defined below.
def itkReinitializeLevelSetImageFilterIF3_New():
  return itkReinitializeLevelSetImageFilterIF3.New()
def itkReinitializeLevelSetImageFilterIF2_New():
  return itkReinitializeLevelSetImageFilterIF2.New()
class itkReinitializeLevelSetImageFilterIF2(itkImageToImageFilterAPython.itkImageToImageFilterIF2IF2):
    """Proxy of C++ itkReinitializeLevelSetImageFilterIF2 class."""

    # SWIG-generated proxy: every method forwards to the flat C wrapper in
    # _itkReinitializeLevelSetImageFilterPython. Instances are created via
    # New(), never via __init__ (which deliberately raises).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkReinitializeLevelSetImageFilterIF2_Pointer":
        """__New_orig__() -> itkReinitializeLevelSetImageFilterIF2_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkReinitializeLevelSetImageFilterIF2_Pointer":
        """Clone(itkReinitializeLevelSetImageFilterIF2 self) -> itkReinitializeLevelSetImageFilterIF2_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_Clone(self)
    def SetLevelSetValue(self, _arg: 'double const') -> "void":
        """SetLevelSetValue(itkReinitializeLevelSetImageFilterIF2 self, double const _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, _arg)
    def GetLevelSetValue(self) -> "double":
        """GetLevelSetValue(itkReinitializeLevelSetImageFilterIF2 self) -> double"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetLevelSetValue(self)
    def SetNarrowBanding(self, _arg: 'bool const') -> "void":
        """SetNarrowBanding(itkReinitializeLevelSetImageFilterIF2 self, bool const _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetNarrowBanding(self, _arg)
    def GetNarrowBanding(self) -> "bool":
        """GetNarrowBanding(itkReinitializeLevelSetImageFilterIF2 self) -> bool"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetNarrowBanding(self)
    def NarrowBandingOn(self) -> "void":
        """NarrowBandingOn(itkReinitializeLevelSetImageFilterIF2 self)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_NarrowBandingOn(self)
    def NarrowBandingOff(self) -> "void":
        """NarrowBandingOff(itkReinitializeLevelSetImageFilterIF2 self)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_NarrowBandingOff(self)
    def SetInputNarrowBandwidth(self, _arg: 'double') -> "void":
        """SetInputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF2 self, double _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetInputNarrowBandwidth(self, _arg)
    def GetInputNarrowBandwidth(self) -> "double":
        """GetInputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF2 self) -> double"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetInputNarrowBandwidth(self)
    def SetOutputNarrowBandwidth(self, _arg: 'double') -> "void":
        """SetOutputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF2 self, double _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetOutputNarrowBandwidth(self, _arg)
    def GetOutputNarrowBandwidth(self) -> "double":
        """GetOutputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF2 self) -> double"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetOutputNarrowBandwidth(self)
    def SetNarrowBandwidth(self, value: 'double') -> "void":
        """SetNarrowBandwidth(itkReinitializeLevelSetImageFilterIF2 self, double value)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetNarrowBandwidth(self, value)
    def SetInputNarrowBand(self, ptr: 'itkVectorContainerUILSNF2') -> "void":
        """SetInputNarrowBand(itkReinitializeLevelSetImageFilterIF2 self, itkVectorContainerUILSNF2 ptr)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetInputNarrowBand(self, ptr)
    def GetInputNarrowBand(self) -> "itkVectorContainerUILSNF2_Pointer":
        """GetInputNarrowBand(itkReinitializeLevelSetImageFilterIF2 self) -> itkVectorContainerUILSNF2_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetInputNarrowBand(self)
    def GetOutputNarrowBand(self) -> "itkVectorContainerUILSNF2_Pointer":
        """GetOutputNarrowBand(itkReinitializeLevelSetImageFilterIF2 self) -> itkVectorContainerUILSNF2_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetOutputNarrowBand(self)
    # Compile-time concept checks exposed by the C++ side.
    LevelSetDoubleAdditiveOperatorsCheck = _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_LevelSetDoubleAdditiveOperatorsCheck
    LevelSetOStreamWritableCheck = _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_LevelSetOStreamWritableCheck
    __swig_destroy__ = _itkReinitializeLevelSetImageFilterPython.delete_itkReinitializeLevelSetImageFilterIF2
    def cast(obj: 'itkLightObject') -> "itkReinitializeLevelSetImageFilterIF2 *":
        """cast(itkLightObject obj) -> itkReinitializeLevelSetImageFilterIF2"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkReinitializeLevelSetImageFilterIF2
        Create a new object of the class itkReinitializeLevelSetImageFilterIF2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkReinitializeLevelSetImageFilterIF2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkReinitializeLevelSetImageFilterIF2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkReinitializeLevelSetImageFilterIF2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Attach the flat C wrapper functions as bound methods of the IF2 proxy
# class, then register the class with the SWIG runtime.
itkReinitializeLevelSetImageFilterIF2.Clone = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_Clone, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.SetLevelSetValue = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.GetLevelSetValue = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetLevelSetValue, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.SetNarrowBanding = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetNarrowBanding, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.GetNarrowBanding = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetNarrowBanding, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.NarrowBandingOn = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_NarrowBandingOn, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.NarrowBandingOff = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_NarrowBandingOff, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.SetInputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetInputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.GetInputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetInputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.SetOutputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetOutputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.GetOutputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetOutputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.SetNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.SetInputNarrowBand = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetInputNarrowBand, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.GetInputNarrowBand = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetInputNarrowBand, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2.GetOutputNarrowBand = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetOutputNarrowBand, None, itkReinitializeLevelSetImageFilterIF2)
itkReinitializeLevelSetImageFilterIF2_swigregister = _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_swigregister
itkReinitializeLevelSetImageFilterIF2_swigregister(itkReinitializeLevelSetImageFilterIF2)
# Module-level aliases for the static constructor and downcast helper.
def itkReinitializeLevelSetImageFilterIF2___New_orig__() -> "itkReinitializeLevelSetImageFilterIF2_Pointer":
    """itkReinitializeLevelSetImageFilterIF2___New_orig__() -> itkReinitializeLevelSetImageFilterIF2_Pointer"""
    return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2___New_orig__()
def itkReinitializeLevelSetImageFilterIF2_cast(obj: 'itkLightObject') -> "itkReinitializeLevelSetImageFilterIF2 *":
    """itkReinitializeLevelSetImageFilterIF2_cast(itkLightObject obj) -> itkReinitializeLevelSetImageFilterIF2"""
    return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_cast(obj)
class itkReinitializeLevelSetImageFilterIF3(itkImageToImageFilterAPython.itkImageToImageFilterIF3IF3):
    """Proxy of C++ itkReinitializeLevelSetImageFilterIF3 class."""

    # SWIG-generated proxy (3-D variant of the IF2 class above): every
    # method forwards to the flat C wrapper; use New() to create instances.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkReinitializeLevelSetImageFilterIF3_Pointer":
        """__New_orig__() -> itkReinitializeLevelSetImageFilterIF3_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkReinitializeLevelSetImageFilterIF3_Pointer":
        """Clone(itkReinitializeLevelSetImageFilterIF3 self) -> itkReinitializeLevelSetImageFilterIF3_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_Clone(self)
    def SetLevelSetValue(self, _arg: 'double const') -> "void":
        """SetLevelSetValue(itkReinitializeLevelSetImageFilterIF3 self, double const _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue(self, _arg)
    def GetLevelSetValue(self) -> "double":
        """GetLevelSetValue(itkReinitializeLevelSetImageFilterIF3 self) -> double"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetLevelSetValue(self)
    def SetNarrowBanding(self, _arg: 'bool const') -> "void":
        """SetNarrowBanding(itkReinitializeLevelSetImageFilterIF3 self, bool const _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetNarrowBanding(self, _arg)
    def GetNarrowBanding(self) -> "bool":
        """GetNarrowBanding(itkReinitializeLevelSetImageFilterIF3 self) -> bool"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetNarrowBanding(self)
    def NarrowBandingOn(self) -> "void":
        """NarrowBandingOn(itkReinitializeLevelSetImageFilterIF3 self)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_NarrowBandingOn(self)
    def NarrowBandingOff(self) -> "void":
        """NarrowBandingOff(itkReinitializeLevelSetImageFilterIF3 self)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_NarrowBandingOff(self)
    def SetInputNarrowBandwidth(self, _arg: 'double') -> "void":
        """SetInputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF3 self, double _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetInputNarrowBandwidth(self, _arg)
    def GetInputNarrowBandwidth(self) -> "double":
        """GetInputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF3 self) -> double"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetInputNarrowBandwidth(self)
    def SetOutputNarrowBandwidth(self, _arg: 'double') -> "void":
        """SetOutputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF3 self, double _arg)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetOutputNarrowBandwidth(self, _arg)
    def GetOutputNarrowBandwidth(self) -> "double":
        """GetOutputNarrowBandwidth(itkReinitializeLevelSetImageFilterIF3 self) -> double"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetOutputNarrowBandwidth(self)
    def SetNarrowBandwidth(self, value: 'double') -> "void":
        """SetNarrowBandwidth(itkReinitializeLevelSetImageFilterIF3 self, double value)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetNarrowBandwidth(self, value)
    def SetInputNarrowBand(self, ptr: 'itkVectorContainerUILSNF3') -> "void":
        """SetInputNarrowBand(itkReinitializeLevelSetImageFilterIF3 self, itkVectorContainerUILSNF3 ptr)"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetInputNarrowBand(self, ptr)
    def GetInputNarrowBand(self) -> "itkVectorContainerUILSNF3_Pointer":
        """GetInputNarrowBand(itkReinitializeLevelSetImageFilterIF3 self) -> itkVectorContainerUILSNF3_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetInputNarrowBand(self)
    def GetOutputNarrowBand(self) -> "itkVectorContainerUILSNF3_Pointer":
        """GetOutputNarrowBand(itkReinitializeLevelSetImageFilterIF3 self) -> itkVectorContainerUILSNF3_Pointer"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetOutputNarrowBand(self)
    # Compile-time concept checks exposed by the C++ side.
    LevelSetDoubleAdditiveOperatorsCheck = _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_LevelSetDoubleAdditiveOperatorsCheck
    LevelSetOStreamWritableCheck = _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_LevelSetOStreamWritableCheck
    __swig_destroy__ = _itkReinitializeLevelSetImageFilterPython.delete_itkReinitializeLevelSetImageFilterIF3
    def cast(obj: 'itkLightObject') -> "itkReinitializeLevelSetImageFilterIF3 *":
        """cast(itkLightObject obj) -> itkReinitializeLevelSetImageFilterIF3"""
        return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkReinitializeLevelSetImageFilterIF3
        Create a new object of the class itkReinitializeLevelSetImageFilterIF3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkReinitializeLevelSetImageFilterIF3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkReinitializeLevelSetImageFilterIF3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkReinitializeLevelSetImageFilterIF3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Attach the flat C wrapper functions as bound methods of the IF3 proxy
# class, then register the class with the SWIG runtime.
itkReinitializeLevelSetImageFilterIF3.Clone = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_Clone, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.SetLevelSetValue = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.GetLevelSetValue = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetLevelSetValue, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.SetNarrowBanding = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetNarrowBanding, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.GetNarrowBanding = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetNarrowBanding, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.NarrowBandingOn = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_NarrowBandingOn, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.NarrowBandingOff = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_NarrowBandingOff, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.SetInputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetInputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.GetInputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetInputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.SetOutputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetOutputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.GetOutputNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetOutputNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.SetNarrowBandwidth = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetNarrowBandwidth, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.SetInputNarrowBand = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetInputNarrowBand, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.GetInputNarrowBand = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetInputNarrowBand, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3.GetOutputNarrowBand = new_instancemethod(_itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetOutputNarrowBand, None, itkReinitializeLevelSetImageFilterIF3)
itkReinitializeLevelSetImageFilterIF3_swigregister = _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_swigregister
itkReinitializeLevelSetImageFilterIF3_swigregister(itkReinitializeLevelSetImageFilterIF3)
# Module-level aliases for the static constructor and downcast helper.
def itkReinitializeLevelSetImageFilterIF3___New_orig__() -> "itkReinitializeLevelSetImageFilterIF3_Pointer":
    """itkReinitializeLevelSetImageFilterIF3___New_orig__() -> itkReinitializeLevelSetImageFilterIF3_Pointer"""
    return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3___New_orig__()
def itkReinitializeLevelSetImageFilterIF3_cast(obj: 'itkLightObject') -> "itkReinitializeLevelSetImageFilterIF3 *":
    """itkReinitializeLevelSetImageFilterIF3_cast(itkLightObject obj) -> itkReinitializeLevelSetImageFilterIF3"""
    return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_cast(obj)
def reinitialize_level_set_image_filter(*args, **kwargs):
"""Procedural interface for ReinitializeLevelSetImageFilter"""
import itk
instance = itk.ReinitializeLevelSetImageFilter.New(*args, **kwargs)
return instance.__internal_call__()
def reinitialize_level_set_image_filter_init_docstring():
import itk
import itkTemplate
if isinstance(itk.ReinitializeLevelSetImageFilter, itkTemplate.itkTemplate):
reinitialize_level_set_image_filter.__doc__ = itk.ReinitializeLevelSetImageFilter.values()[0].__doc__
else:
reinitialize_level_set_image_filter.__doc__ = itk.ReinitializeLevelSetImageFilter.__doc__
| [
"274065539@qq.com"
] | 274065539@qq.com |
b9d28f8e40fad24068d2f2fe803f3add69834370 | 3486ac68422353638bdf593c9cdbd7cbc0f925ce | /classes_new.py | 904ab17768c08a9524c212e09a0ba06073ec5c70 | [
"MIT"
] | permissive | NickF40/Buisbot | 35fba26ba694f61ca144db8a553208478271682e | 7c16804a864fc43d92aba04f0220dac789059093 | refs/heads/master | 2022-05-11T18:34:42.170177 | 2022-05-08T19:04:50 | 2022-05-08T19:04:50 | 137,491,511 | 1 | 1 | MIT | 2022-05-08T19:05:37 | 2018-06-15T13:33:50 | Python | UTF-8 | Python | false | false | 9,261 | py | """
Updated version of classes.py
Added some base classes, wide usage of inheritance planned
Todo: Create LessonsBuilder & define msanagers
"""
import time
from math import fabs
from markups import choice_marup
import mongo
import postgres
format = "%H:%M"
class Lesson:
def __init__(self, id_, seq_id, day, time, data, more, link):
self._id_ = id_
self._seq_id_ = seq_id
self.day = day
self.time = time
self.data = data
self.more = more
self.link = link
self._update()
def _update(self):
postgres.update_lesson(self._seq_id_, self.__dict__)
class AbstractBuilder:
params = []
specifications = []
text = []
def __init__(self, bot, seq_id):
self.seq = Sequence(*[None for i in range(9)], key_id=seq_id)
self.bot = bot
def set(self, param, value):
self.seq.__dict__[param] = value
self.seq._update()
def func(self, *args):
# overridable function
return 0
def step_iterator(self, *args):
# overridable function
return 0
def iterate(self, step, *args):
def wrapper(message, step=step):
msg = self.func(message.text, step)
self.bot.register_next_step_handler(msg, self.iterate(self.step_iterator(step, args)))
return wrapper
def start(self, msg):
self.bot.register_next_step_handler(msg, self.iterate(0))
def yes_no(self, step, seq_id):
def wrapper(message, bot=self.bot, step=step, seq_id=self.seq.id_):
seq_builder = SequenceBuilder(bot, seq_id)
seq_builder.set(self.params[step], message.text)
next_step = self.step_iterator(step)
msg = seq_builder.bot.send_message(message.chat.id, self.text[next_step])
self.bot.register_next_step_handler(msg, self.iterate(next_step))
return wrapper
# 12.12.17
# Создать функцию в main, принимающую Да\Нет-ки, инициализирующая Builder (+)
# и запускающая\пропускающая приём с условием. (+)
# После шага lessons сделать переход на LessonsManager -> LessonBuilder (-)
#
# 13.12.17
# Создать функцию приема yes/not - ок, прописать её еспользование в main (+)
# После шага lessons сделать переход на LessonsManager -> LessonBuilder, оформить LessonBuilder с использованием
# готовых наработок, возможно, абстрагировать некоторые имеюиеся методы(больше - лучше)
#
# 14.12.17
# Просмотреть wrap_lesson в main, взять от туда все параметры
# !Важно! абстрагировать по типу specifications в некоторых случаях будут как yes/no, иногда в виде regexp, либо создать другой интсрумент
# Документировать сиё творение
# Проверить на баги + написать unit-тесты(!)
class SequenceBuilder(AbstractBuilder):
params = ['name', 'description', 'start_message', 'more', 'finish_message', 'price', 'lessons']
specifications = [None, None, None, None, None, 1, 1]
text = []
def func(self, message, step):
if not self.specifications[step]:
return self.bot.send_message(message.chat.id, self.text[step])
else:
if self.specifications[step] == 1:
# yes/no type
self.bot.send_message(message.from_user.id, self.text[step][0],
reply_markup=choice_marup(step, self.seq.id_))
def step_iterator(self, *args):
pass
class LessonsManager(AbstractBuilder):
pass
class LessonBuilder(AbstractBuilder):
pass
class Sequence:
def __init__(self, id_, name, key, lessons, start_message, finish_message, description, more, price, key_id=None):
if key_id:
data = postgres.get_sequence(key_id)
self.id_ = data.get('id')
self.name = data.get('name')
self.key = data.get('key')
self.lessons = data.get('lessons')
if self.lessons:
self.lessons.sort(key=lambda x: x['_id_'])
self.start_message = data.get('start_message')
self.finish_message = data.get('finish_message')
self.description = data.get('description')
self.more = data.get('more')
self.price = data.get('price')
else:
self.id_ = id_
self.name = name
self.key = key
self.lessons = lessons
if self.lessons:
self.lessons.sort(key=lambda x: x['_id_'])
self.start_message = start_message
self.finish_message = finish_message
self.description = description
self.more = more
self.price = price
self._update()
def _update(self):
postgres.update_sequence(self.__dict__)
@staticmethod
def get_links():
data = postgres.get_links()
return data
@staticmethod
def set_link(link, id_):
return postgres.upload_link(link, id_)
@staticmethod
def create():
return postgres.create_sequence()
def check_last(self, lesson_id):
return len(self.lessons) == lesson_id
def start(self, user_id):
time_ = mongo.add_lesson(self.lessons[0], user_id, int(self.lessons[0]['day']))
return time_
def next(self, lesson_id, user_id):
day = int(self.lessons[lesson_id]['day'])
nday = int(self.lessons[lesson_id + 1]['day'])
if nday - day < 0:
raise Exception('Fatal error occurred.\nWrong day identification!')
mongo.add_lesson(self.lessons[lesson_id + 1], user_id, nday - day)
def feed_stars(self, user_id, stars):
postgres.add_feedback(self.id_, user_id, stars=stars)
def feed_comment(self, user_id, comment):
postgres.add_feedback(self.id_, user_id, stars=None, comments=comment)
def get_lessons(self, day):
return [lesson for lesson in self.lessons if lesson['day'] and (int(lesson['day']) == int(day))]
def get_times(self, day):
result = []
for lesson in self.get_lessons(day):
if not lesson['time']: continue
result.append(time.strptime(lesson['time'], format))
return result
def is_max_day(self, day):
for lesson in self.lessons:
if not lesson['day']: continue
if int(lesson['day']) > int(day):
return False
return True
def check_time(self, day, time_):
time_ = time.strptime(time_, format)
for tm in self.get_times(day):
if tm > time_:
return False
return True
# только как интерфейс для хранения lesson'ов
class LessonsPool:
def __init__(self):
# should return dicts!!!
self.pool = [data for data in mongo.get_lessons()]
def reload(self):
self.pool = [data for data in mongo.get_lessons()]
@staticmethod
def time_comparator(time_, lesson):
if fabs(int(time_) - int(lesson['time'])) < 30:
return True
return False
# should return dicts!!!
def pop_lessons(self):
current = int(time.time())
result = []
if not self.pool:
return None
for i in self.pool:
if self.time_comparator(current, i):
result.append(i)
if result:
mongo.remove_lessons(result)
self.reload()
return result
def push_lesson(self, lesson):
# convert everything to dict
if isinstance(lesson, dict):
self.pool.append(lesson)
# mongo.set_next_lesson(lesson)
elif isinstance(lesson, Lesson):
self.pool.append(lesson.__dict__)
else:
raise Exception('Type Error Occured.\nUnsupported type %s in LessonsPool.push_lesson()' % str(type(lesson)))
def add_user(self):
self.reload()
def get_subscribes(self, user_id):
result = []
for lesson in self.pool:
if user_id in lesson['users']:
result.append(lesson['_seq_id_'])
return result
def delete_from(self, user_id, seq_id):
for lesson in self.pool:
if seq_id == lesson['_seq_id_']:
if user_id in lesson['users']:
lesson['users'] = [uid for uid in lesson['users'] if uid != user_id]
mongo.upd_lesson(lesson['time'], lesson['_seq_id_'], lesson['_id_'], lesson['users'])
else:
raise Exception('Fatal Error occurred\nIncorrect user_id')
| [
"e_n_k@list.ru"
] | e_n_k@list.ru |
b6afcf86358caeaa41b9c3f1ff61df606b231ec9 | 9f81a38d42b2fc096b601a556c58c1b3bdf5508d | /bincol.py | 448a45668efc4dcbc948f5efd7c249291f98edc0 | [] | no_license | rizzo00/bincol | 1e7aaa58e2a3105afa68784992ad803b1f22b754 | 89960cf256b774a5d9410216753d78fd30c75752 | refs/heads/main | 2023-02-28T01:29:19.764583 | 2021-02-03T11:34:48 | 2021-02-03T11:34:48 | 332,751,273 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | import requests
import sys
import os
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
sys.path.append(libdir)
from waveshare_epd import epd2in13_V2
from PIL import Image, ImageDraw, ImageFont
from bs4 import BeautifulSoup
url='https://secure.derby.gov.uk/binday/Binday?PremisesId=xxxxxxxxxx' # binday/Binday?PremisesId= need to change to the address of the site
req=requests.get(url)
soup=BeautifulSoup(req.text, "html.parser")
epd = epd2in13_V2.EPD() # get the display
epd.init(epd.FULL_UPDATE) # initialize the display
print("Clear...") # prints to console, not the display, for debugging
epd.Clear(0xFF) # clear the display
def printToDisplay(string):
HBlackImage = Image.new('1', (epd2in13_V2.EPD_HEIGHT, epd2in13_V2.EPD_WIDTH), 255)
draw = ImageDraw.Draw(HBlackImage) # Create draw object and pass in the image layer we want to work with (HBlackImage)
font = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 17) # Create our font, passing in the font file and font size
draw.text((0, 30), string, font = font, fill = 0)
epd.display(epd.getbuffer(HBlackImage))
for bin in soup.select('.binresult .mainbintext', limit = 1):
bindate=bin.find('strong').getText()
bintype=(bin.getText()).split(":")[1]
bin = "Next Bin Collection"
result = os.linesep.join([bin,bindate,bintype])
printToDisplay(result)
| [
"rizzo.00@gmail.com"
] | rizzo.00@gmail.com |
4fa0efb8dc0dd6987ee766db7a94b8729fca3054 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_Class168.py | 6263a03eb4672e046fa05de76c19787287b6648e | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,188 | py | # qubit number=3
# total number=30
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_Class168.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
c7de402e23ffc1e90db73f1063f4c6f29ac379e0 | a0f0bfa44979566732b76c609a9198457e031a2b | /docs/source/conf.py | de97daef2a9bc7e1ee09bea6783833a6f2872132 | [
"BSD-3-Clause"
] | permissive | davidfischer/sporco-cuda | 951aebd1001e9cfe58e490c470b6addb163ca271 | ffe633b9cf7055550a0f2250e29bedd99cd098f1 | refs/heads/master | 2020-03-16T11:41:31.922467 | 2018-05-08T00:41:26 | 2018-05-08T00:41:26 | 132,652,756 | 0 | 0 | null | 2018-05-08T19:03:16 | 2018-05-08T19:03:15 | null | UTF-8 | Python | false | false | 10,177 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sporco_cuda documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 22 21:03:01 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from ast import parse
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx_tabs.tabs',
'sphinx_fontawesome'
]
# generate autosummary pages
autosummary_generate = True
autodoc_mock_imports = ['sporco_cuda.util', 'sporco_cuda.cbpdn']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sporco-cuda'
copyright = '2017-2018, Gustavo Silva, Brendt Wohlberg'
author = 'Gustavo Silva, Brendt Wohlberg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open(os.path.join('../../sporco_cuda', '__init__.py')) as f:
version = parse(next(filter(
lambda line: line.startswith('__version__'),
f))).body[0].value.s
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['tmp', '*.tmp.*', '*.tmp']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = "haiku"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sporco.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sporco_cudadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sporco_cuda.tex', 'SPORCO-CUDA Documentation',
[author], 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sporco_cuda', 'SPORCO-CUDA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sporco_cuda', 'SPORCO-CUDA Documentation',
author, 'sporco_cuda', 'SParse Optimization Research COde (SPORCO) CUDA',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None,
'http://sporco.rtfd.io/en/latest/': None}
| [
"brendt@ieee.org"
] | brendt@ieee.org |
e20f1fad0a2da6986fc810ebc91ca2187ff03bae | 6415a3f0cc4dd8be73b57bb1352a312fe12fe306 | /Spotkanie3/Zad2.py | b98fb0d03b3349e405a3bce458223f1a21558339 | [] | no_license | Borys-karton/LabPython | f0689f2ac58dfa7bbae841fe973b5172bd74e0c9 | 37f8ab72a712090542ab8009b7f5ab826c6ff53a | refs/heads/main | 2023-01-29T04:44:38.514106 | 2020-12-10T01:24:38 | 2020-12-10T01:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | imiona = ["Adam", "Andrzej", "Ania", "Agata"]
for element in imiona:
print("Witaj uzytkowniku {}, jak Ci minął dzień?".format(element))
print("\n1.")
for x in range(0,5,1):
print(x)
print("\n2.")
for x in range(2,5,1):
print(x)
print("\n3.")
for x in range(0,10,2):
print(x)
| [
"mojkmac@gmail.com"
] | mojkmac@gmail.com |
ac11f4731115622edd1d97e678c31e4ac6f0883f | 255bf9d2516d8477b84c7b2513ffecaf10872713 | /simple3/migrations/0007_imagenes.py | d2d87428ad79a43fead94f9adcea2f46f6389149 | [] | no_license | tob447/simple3 | 5cbe4f2bb16fd2a2b1fe61de59be61fed424417b | b411793e31d55600f45da176353b81f5a0e55ba6 | refs/heads/master | 2021-01-24T09:12:01.055717 | 2018-04-27T04:57:07 | 2018-04-27T04:57:07 | 123,002,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-04-21 22:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('simple3', '0006_compresores'),
]
operations = [
migrations.CreateModel(
name='Imagenes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=10000, null=True)),
],
),
]
| [
"fagudel7@eafit.edu.co"
] | fagudel7@eafit.edu.co |
41728409217d12d86f0b8c641f03b7a620528261 | feae8f7c8b3f1d79dbd918095bd198db156cd740 | /Restriction_API/urls.py | d32440faae5b0694eeb2942b8b37a44c814cd835 | [
"MIT"
] | permissive | ksarthak4ever/Restrict_API | 9a16879009bbcc5a8243b6366732cfa9e977bd67 | e53e671965b825fa167b080fe7212ec6f3e4c6ca | refs/heads/master | 2020-05-18T01:07:34.639542 | 2019-04-30T01:02:09 | 2019-04-30T01:02:09 | 184,081,467 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | """Restriction_API URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('user_profile.urls'))
]
| [
"ksarthak4ever@gmail.com"
] | ksarthak4ever@gmail.com |
e35b160dbed289599189d98ce6fabebd210e1af1 | 27375f6266f47c325f67337a3851f358cfcc533b | /django_app/config/urls.py | 5581a01b03f431a82d21b613d37c8d2141d25bdc | [] | no_license | hongdonghyun/drf_insta | ec044e3bfc300329edcf1812238620d2c427453f | 3398cd59a873f4a0e213589ce4271be83cfef730 | refs/heads/master | 2020-12-02T08:09:52.545944 | 2017-07-10T13:11:35 | 2017-07-10T13:11:35 | 96,778,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | """instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework.urlpatterns import format_suffix_patterns
from post.views import api_views
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
# post앱의 index뷰를 root url에 연결시킨다.
# url(r'^$', post_views.index),
# 새 view를 만들어 redirect시키는 방법
url(r'^$', views.index, name='index'),
# Class-based View중 RedirectView를 사용하는 방법
# url(r'^$', RedirectView.as_view(pattern_name='post:post_list')),
# post앱의 urls.py모듈을 include시킨다
url(r'^post/', include('post.urls')),
url(r'^member/', include('member.urls')),
url(r'^api_post_list/', api_views.Api_Post_List.as_view()),
url(r'^api_post_detail/(?P<pk>[0-9]+)/$', api_views.Api_Post_Detail.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns)
urlpatterns += static(
prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT
)
| [
"hong4367@gmail.com"
] | hong4367@gmail.com |
f4a37d716fd65c1a64a19bac2998ce2d01b7e931 | cbfe0a03ecf3cd423dd33c5219be2293e5377305 | /task30.py | 669e99f90b053debd9230e2043edbda86541b6a9 | [] | no_license | astarostin/projecteulertasks | bdefafee6e24f15060adab5b9377eecfdceed56d | 2edc99f15838ec711ad3d551d823c8d2ef47787a | refs/heads/master | 2021-01-21T21:39:36.836295 | 2016-03-18T14:53:13 | 2016-03-18T14:53:13 | 16,258,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | import math
powers = {}
def main():
print 'task 30'
fill_powers(5)
res = 0
for i in range(10, 1000000):
if test(i):
res += i
print res
def test(n):
    """Return True when n equals the sum of the precomputed powers of its digits."""
    return n == sum(powers[int(digit)] for digit in str(n))
return n == s
def fill_powers(n):
    """Precompute digit ** n for every decimal digit into the global cache.

    Uses integer exponentiation instead of math.pow: the cached values stay
    exact integers rather than floats, which avoids any precision loss for
    large exponents while producing identical comparisons downstream.
    """
    for digit in range(10):
        powers[digit] = digit ** n
if __name__ == '__main__':
main()
| [
"astarfryazino@gmail.com"
] | astarfryazino@gmail.com |
b1baa37ccc2300a62d8d9375b75162e34c2989df | 5a281cb78335e06c631181720546f6876005d4e5 | /openstack-placement-1.0.0/placement/tests/fixtures.py | be235b355a8c4dee2334904091ad0f96969adaab | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 3,272 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Placement tests."""
from __future__ import absolute_import
from oslo_config import cfg
from oslo_db.sqlalchemy import test_fixtures
from placement.db.sqlalchemy import migration
from placement import db_api as placement_db
from placement import deploy
from placement.objects import resource_class
from placement.objects import trait
from placement import resource_class_cache as rc_cache
class Database(test_fixtures.GeneratesSchema, test_fixtures.AdHocDbFixture):
    """Test fixture providing a placement database backed by in-memory sqlite.

    Combines oslo.db's schema-generating and ad-hoc database fixtures and
    wires the created engine into placement's enginefacade.
    """

    def __init__(self, conf_fixture, set_config=False):
        """Create a database fixture.

        :param conf_fixture: oslo.config fixture carrying the test config.
        :param set_config: when True, register (if needed) and set an
            in-memory sqlite ``connection`` in the [placement_database] group.
        """
        super(Database, self).__init__()
        if set_config:
            try:
                conf_fixture.register_opt(
                    cfg.StrOpt('connection'), group='placement_database')
            except cfg.DuplicateOptError:
                # already registered
                pass
            conf_fixture.config(connection='sqlite://',
                                group='placement_database')
        self.conf_fixture = conf_fixture
        self.get_engine = placement_db.get_placement_engine
        placement_db.configure(self.conf_fixture.conf)

    def get_enginefacade(self):
        # Hand oslo.db the enginefacade context manager that placement uses.
        return placement_db.placement_context_manager

    def generate_schema_create_all(self, engine):
        # note: at this point in oslo_db's fixtures, the incoming
        # Engine has **not** been associated with the global
        # context manager yet.
        migration.create_schema(engine)
        # so, to work around that placement's setup code really wants to
        # use the enginefacade, we will patch the engine into it early.
        # oslo_db is going to patch it anyway later. So the bug in oslo.db
        # is that code these days really wants the facade to be set up fully
        # when it's time to create the database. When oslo_db's fixtures
        # were written, enginefacade was not in use yet so it was not
        # anticipated that everyone would be doing things this way
        _reset_facade = placement_db.placement_context_manager.patch_engine(
            engine)
        self.addCleanup(_reset_facade)
        # Make sure db flags are correct at both the start and finish
        # of the test.
        self.addCleanup(self.cleanup)
        self.cleanup()
        # Sync traits and resource classes.
        deploy.update_database(self.conf_fixture.conf)

    def cleanup(self):
        # Reset the module-level sync flags and the resource-class cache so
        # every test starts from a fresh database state.
        trait._TRAITS_SYNCED = False
        resource_class._RESOURCE_CLASSES_SYNCED = False
        rc_cache.RC_CACHE = None
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
393ccdde7ef08c546deeb32ee7f792b458c689fa | a63419b2c457a219c010876ece3980af8cfc3c1b | /_DJANGO_/django-player/gameplay/views.py | 556971862e1019a0cf46647d9fb7fb1688c69685 | [] | no_license | thomasm1/python_2018 | ba87560a1e25343c0429fcafe51bb867dc299223 | 6a57c7603055a2511a8734ab34ce21f76e4427ef | refs/heads/master | 2023-05-10T07:20:07.911734 | 2023-05-05T03:58:36 | 2023-05-05T03:58:36 | 147,065,041 | 2 | 5 | null | 2023-03-03T15:15:08 | 2018-09-02T07:41:32 | Rich Text Format | UTF-8 | Python | false | false | 1,150 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.views.generic import ListView
from .models import Game
from .forms import MoveForm
@login_required()
def game_detail(request, id):
    """Show one game; offer a move form only when it is the viewer's turn."""
    game = get_object_or_404(Game, pk=id)
    template_ctx = {'game': game}
    if game.is_users_move(request.user):
        template_ctx['form'] = MoveForm()
    return render(request, "gameplay/game_detail.html", template_ctx)
@login_required()
def make_move(request, id):
    """Validate and persist a move posted by the player whose turn it is."""
    game = get_object_or_404(Game, pk=id)
    if not game.is_users_move(request.user):
        # Only the player whose turn it is may submit a move.
        raise PermissionDenied
    move = game.new_move()
    form = MoveForm(instance=move, data=request.POST)
    if not form.is_valid():
        # Re-render the detail page so the form errors are shown.
        return render(request,
                      "gameplay/game_detail.html",
                      {'game': game, 'form': form})
    move.save()
    return redirect("gameplay_detail", id)
class AllGamesList(ListView):
    """Generic list view over every Game row, using ListView defaults."""
    model = Game
| [
"thomasm1.maestas@gmail.com"
] | thomasm1.maestas@gmail.com |
3110c67c6673e46cd839713921988a00f652a37e | e7290064b5df4731167bab10606f451b446a21f7 | /python/ray/runtime_context.py | 1ec4a38511968bc138256290746c4f4428646c0f | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | sven1977/ray | dce9f6fa114741837341f14aef0a8c64c442aba6 | b73a496af19bce627a611e7af2cb02a3c5d99684 | refs/heads/master | 2023-09-02T00:57:47.167794 | 2023-08-17T09:33:04 | 2023-08-17T09:33:04 | 229,269,728 | 2 | 5 | Apache-2.0 | 2023-07-29T07:08:41 | 2019-12-20T13:27:01 | Python | UTF-8 | Python | false | false | 13,583 | py | import logging
from typing import Any, Dict, Optional
import ray._private.worker
from ray._private.client_mode_hook import client_mode_hook
from ray._private.utils import pasre_pg_formatted_resources_to_original
from ray.runtime_env import RuntimeEnv
from ray.util.annotations import Deprecated, PublicAPI
logger = logging.getLogger(__name__)
@PublicAPI
class RuntimeContext(object):
    """A class used for getting runtime context."""

    def __init__(self, worker):
        # A worker must exist before any context can be queried.
        assert worker is not None
        self.worker = worker

    @Deprecated(
        message="Use get_xxx_id() methods to get relevant ids instead", warning=True
    )
    def get(self) -> Dict[str, Any]:
        """Get a dictionary of the current context.

        Returns:
            dict: Dictionary of the current context.
        """
        context = {
            "job_id": self.job_id,
            "node_id": self.node_id,
            "namespace": self.namespace,
        }
        if self.worker.mode == ray._private.worker.WORKER_MODE:
            if self.task_id is not None:
                context["task_id"] = self.task_id
            if self.actor_id is not None:
                context["actor_id"] = self.actor_id
        return context

    @property
    @Deprecated(message="Use get_job_id() instead", warning=True)
    def job_id(self):
        """Get current job ID for this worker or driver.

        Job ID is the id of your Ray drivers that create tasks or actors.

        Returns:
            If called by a driver, this returns the job ID. If called in
            a task, return the job ID of the associated driver.
        """
        job_id = self.worker.current_job_id
        assert not job_id.is_nil()
        return job_id

    def get_job_id(self) -> str:
        """Get current job ID for this worker or driver.

        Job ID is the id of your Ray drivers that create tasks or actors.

        Returns:
            If called by a driver, this returns the job ID. If called in
            a task, return the job ID of the associated driver. The
            job ID will be hex format.

        Raises:
            AssertionError: If not called in a driver or worker. Generally,
                this means that ray.init() was not called.
        """
        assert ray.is_initialized(), (
            "Job ID is not available because " "Ray has not been initialized."
        )
        job_id = self.worker.current_job_id
        return job_id.hex()

    @property
    @Deprecated(message="Use get_node_id() instead", warning=True)
    def node_id(self):
        """Get current node ID for this worker or driver.

        Node ID is the id of a node that your driver, task, or actor runs.

        Returns:
            A node id for this worker or driver.
        """
        node_id = self.worker.current_node_id
        assert not node_id.is_nil()
        return node_id

    def get_node_id(self) -> str:
        """Get current node ID for this worker or driver.

        Node ID is the id of a node that your driver, task, or actor runs.
        The ID will be in hex format.

        Returns:
            A node id in hex format for this worker or driver.

        Raises:
            AssertionError: If not called in a driver or worker. Generally,
                this means that ray.init() was not called.
        """
        assert ray.is_initialized(), (
            "Node ID is not available because " "Ray has not been initialized."
        )
        node_id = self.worker.current_node_id
        return node_id.hex()

    def get_worker_id(self) -> str:
        """Get current worker ID for this worker or driver process.

        Returns:
            A worker id in hex format for this worker or driver process.
        """
        assert (
            ray.is_initialized()
        ), "Worker ID is not available because Ray has not been initialized."
        return self.worker.worker_id.hex()

    @property
    @Deprecated(message="Use get_task_id() instead", warning=True)
    def task_id(self):
        """Get current task ID for this worker or driver.

        Task ID is the id of a Ray task.
        This shouldn't be used in a driver process.

        Example:

            .. testcode::

                import ray

                @ray.remote
                class Actor:
                    def ready(self):
                        return True

                @ray.remote
                def f():
                    return True

                # All the below code generates different task ids.
                # Task ids are available for actor creation.
                a = Actor.remote()
                # Task ids are available for actor tasks.
                a.ready.remote()
                # Task ids are available for normal tasks.
                f.remote()

        Returns:
            The current worker's task id. None if there's no task id.
        """
        # only worker mode has actor_id
        assert (
            self.worker.mode == ray._private.worker.WORKER_MODE
        ), f"This method is only available when the process is a\
                worker. Current mode: {self.worker.mode}"
        task_id = self.worker.current_task_id
        return task_id if not task_id.is_nil() else None

    def get_task_id(self) -> Optional[str]:
        """Get current task ID for this worker or driver.

        Task ID is the id of a Ray task. The ID will be in hex format.
        This shouldn't be used in a driver process.

        Example:

            .. testcode::

                import ray

                @ray.remote
                class Actor:
                    def get_task_id(self):
                        return ray.get_runtime_context().get_task_id()

                @ray.remote
                def get_task_id():
                    return ray.get_runtime_context().get_task_id()

                # All the below code generates different task ids.
                a = Actor.remote()
                # Task ids are available for actor tasks.
                print(ray.get(a.get_task_id.remote()))
                # Task ids are available for normal tasks.
                print(ray.get(get_task_id.remote()))

            .. testoutput::
                :options: +MOCK

                16310a0f0a45af5c2746a0e6efb235c0962896a201000000
                c2668a65bda616c1ffffffffffffffffffffffff01000000

        Returns:
            The current worker's task id in hex. None if there's no task id.
        """
        # only worker mode has actor_id
        if self.worker.mode != ray._private.worker.WORKER_MODE:
            logger.warning(
                "This method is only available when the process is a "
                f"worker. Current mode: {self.worker.mode}"
            )
            return None
        task_id = self.worker.current_task_id
        return task_id.hex() if not task_id.is_nil() else None

    @property
    @Deprecated(message="Use get_actor_id() instead", warning=True)
    def actor_id(self):
        """Get the current actor ID in this worker.

        ID of the actor of the current process.
        This shouldn't be used in a driver process.

        Returns:
            The current actor id in this worker. None if there's no actor id.
        """
        # only worker mode has actor_id
        assert (
            self.worker.mode == ray._private.worker.WORKER_MODE
        ), f"This method is only available when the process is a\
                worker. Current mode: {self.worker.mode}"
        actor_id = self.worker.actor_id
        return actor_id if not actor_id.is_nil() else None

    def get_actor_id(self) -> Optional[str]:
        """Get the current actor ID in this worker.

        ID of the actor of the current process.
        This shouldn't be used in a driver process.
        The ID will be in hex format.

        Returns:
            The current actor id in hex format in this worker. None if there's no
            actor id.
        """
        # only worker mode has actor_id
        if self.worker.mode != ray._private.worker.WORKER_MODE:
            # Bug fix: the second string previously lacked the f-prefix, so
            # "{self.worker.mode}" was logged literally instead of the mode
            # (compare the same warning in get_task_id above).
            logger.warning(
                "This method is only available when the process is a "
                f"worker. Current mode: {self.worker.mode}"
            )
            return None
        actor_id = self.worker.actor_id
        return actor_id.hex() if not actor_id.is_nil() else None

    @property
    def namespace(self):
        """Get the current namespace of this worker.

        Returns:
            The current namespace of this worker.
        """
        return self.worker.namespace

    @property
    def was_current_actor_reconstructed(self):
        """Check whether this actor has been restarted.

        Returns:
            Whether this actor has been ever restarted.
        """
        assert (
            not self.actor_id.is_nil()
        ), "This method should't be called inside Ray tasks."
        actor_info = ray._private.state.actors(self.actor_id.hex())
        return actor_info and actor_info["NumRestarts"] != 0

    @property
    @Deprecated(message="Use get_placement_group_id() instead", warning=True)
    def current_placement_group_id(self):
        """Get the current Placement group ID of this worker.

        Returns:
            The current placement group id of this worker.
        """
        return self.worker.placement_group_id

    def get_placement_group_id(self) -> Optional[str]:
        """Get the current Placement group ID of this worker.

        Returns:
            The current placement group id in hex format of this worker.
        """
        pg_id = self.worker.placement_group_id
        return pg_id.hex() if not pg_id.is_nil() else None

    @property
    def should_capture_child_tasks_in_placement_group(self):
        """Get if the current task should capture parent's placement group.

        This returns True if it is called inside a driver.

        Returns:
            Return True if the current task should implicitly
                capture the parent placement group.
        """
        return self.worker.should_capture_child_tasks_in_placement_group

    def get_assigned_resources(self):
        """Get the assigned resources to this worker.

        By default for tasks, this will return {"CPU": 1}.
        By default for actors, this will return {}. This is because
        actors do not have CPUs assigned to them by default.

        Returns:
            A dictionary mapping the name of a resource to a float, where
            the float represents the amount of that resource reserved
            for this worker.
        """
        assert (
            self.worker.mode == ray._private.worker.WORKER_MODE
        ), f"This method is only available when the process is a\
                worker. Current mode: {self.worker.mode}"
        self.worker.check_connected()
        resource_id_map = self.worker.core_worker.resource_ids()
        resource_map = {
            res: sum(amt for _, amt in mapping)
            for res, mapping in resource_id_map.items()
        }
        # (sic: the helper really is named "pasre..." upstream)
        return pasre_pg_formatted_resources_to_original(resource_map)

    def get_runtime_env_string(self):
        """Get the runtime env string used for the current driver or worker.

        Returns:
            The runtime env string currently using by this worker.
        """
        return self.worker.runtime_env

    @property
    def runtime_env(self):
        """Get the runtime env used for the current driver or worker.

        Returns:
            The runtime env currently using by this worker. The type of
                return value is ray.runtime_env.RuntimeEnv.
        """
        return RuntimeEnv.deserialize(self.get_runtime_env_string())

    @property
    def current_actor(self):
        """Get the current actor handle of this actor itself.

        Returns:
            The handle of current actor.
        """
        worker = self.worker
        worker.check_connected()
        actor_id = worker.actor_id
        if actor_id.is_nil():
            raise RuntimeError("This method is only available in an actor.")
        return worker.core_worker.get_actor_handle(actor_id)

    @property
    def gcs_address(self):
        """Get the GCS address of the ray cluster.

        Returns:
            The GCS address of the cluster.
        """
        self.worker.check_connected()
        return self.worker.gcs_client.address

    def _get_actor_call_stats(self):
        """Get the current worker's task counters.

        Returns:
            A dictionary keyed by the function name. The values are
            dictionaries with form ``{"pending": 0, "running": 1,
            "finished": 2}``.
        """
        worker = self.worker
        worker.check_connected()
        return worker.core_worker.get_actor_call_stats()
# Module-level singleton cache for get_runtime_context().
_runtime_context = None


@PublicAPI
@client_mode_hook
def get_runtime_context() -> RuntimeContext:
    """Get the runtime context of the current driver/worker.

    The obtained runtime context can be used to get the metadata
    of the current task and actor.

    Example:

        .. testcode::

            import ray
            # Get the job id.
            ray.get_runtime_context().get_job_id()
            # Get the actor id.
            ray.get_runtime_context().get_actor_id()
            # Get the task id.
            ray.get_runtime_context().get_task_id()

    """
    # Lazily create one RuntimeContext bound to the global worker; every
    # subsequent call returns the cached instance.
    global _runtime_context
    if _runtime_context is None:
        _runtime_context = RuntimeContext(ray._private.worker.global_worker)
    return _runtime_context
| [
"noreply@github.com"
] | sven1977.noreply@github.com |
e208d251cbdca4ad8acfc48abbf05d97815c3c72 | f18785b7fa3a12f06a29562282dc50f2d1a7bc67 | /src/mariaClust_image.py | c41de83ff1fc3e6d578b8c42a49d33861667c75e | [] | no_license | IuliaRadulescu/MariaClust | b487ae86391f05b2ea32fbbf221df9423ee76196 | c188b837704507bcd507eb0abdc77113e36a9f8e | refs/heads/master | 2020-03-19T03:24:50.960121 | 2018-10-14T19:36:24 | 2018-10-14T19:36:24 | 135,725,809 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,417 | py | from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import sys
import os
from random import randint
from random import shuffle
import math
import collections
import evaluation_measures
'''
=============================================
HELPER FUNCTIONS
'''
def agglomerative_clustering2(partitions, final_no_clusters, cluster_distance):
    '''
    Agglomerative hierarchical clustering seeded with already-created
    partitions.  Each partition is represented by its centroid: the
    identifiers are kept in ``intermediary_centroids`` and the member points
    in ``cluster_points`` (key = centroid coordinates as a tuple, value =
    the points assigned to that cluster).

    ``cluster_distance`` selects the merge criterion:
        1 = centroid linkage, 2 = average linkage, 3 = single linkage,
        4 = density-weighted average linkage; any other value falls back
        to centroid linkage.
    Merging repeats until ``final_no_clusters`` clusters remain, then
    returns (intermediary_centroids, cluster_points).
    '''
    global no_dims
    no_agg_clusters = len(partitions)
    intermediary_centroids = list()
    # cluster id (its centroid, as a tuple) -> member points
    cluster_points = dict()
    print("len partitions "+str(len(partitions)))
    # Register one (initially empty) cluster per partition, keyed by its
    # centroid, and remember the centroid as the cluster identifier.
    for k in partitions:
        centroid_partition = centroid(partitions[k])
        idx_dict = list()
        for dim in range(no_dims):
            idx_dict.append(centroid_partition[dim])
        cluster_points[tuple(idx_dict)] = []
        intermediary_centroids.append(centroid_partition)
    # Assign every point of each partition to its cluster.
    for k in partitions:
        centroid_partition = centroid(partitions[k])
        idx_dict = list()
        for dim in range(no_dims):
            idx_dict.append(centroid_partition[dim])
        for pixel in partitions[k]:
            cluster_points[tuple(idx_dict)].append(pixel)
    while(no_agg_clusters > final_no_clusters):
        uneste_a_idx = 0
        uneste_b_idx = 0
        minDist = 99999
        # (cleanup: removed minDistancesWeights, mdw_uneste_a_idx and
        # mdw_uneste_b_idx -- they were populated nowhere and never read)
        # Find the closest pair of distinct clusters under the selected
        # linkage criterion.
        for q in range(len(intermediary_centroids)):
            for p in range(q+1, len(intermediary_centroids)):
                idx_dict_q = list()
                idx_dict_p = list()
                for dim in range(no_dims):
                    idx_dict_q.append(intermediary_centroids[q][dim])
                    idx_dict_p.append(intermediary_centroids[p][dim])
                centroid_q = centroid(cluster_points[tuple(idx_dict_q)])
                centroid_p = centroid(cluster_points[tuple(idx_dict_p)])
                if(centroid_q!=centroid_p):
                    # calculate_smallest_pairwise works best for the jain
                    # and spiral datasets
                    if(cluster_distance==1):
                        dist = calculate_centroid(cluster_points[tuple(idx_dict_q)], cluster_points[tuple(idx_dict_p)])
                    elif(cluster_distance==2):
                        dist = calculate_average_pairwise(cluster_points[tuple(idx_dict_q)], cluster_points[tuple(idx_dict_p)])
                    elif(cluster_distance==3):
                        dist = calculate_smallest_pairwise(cluster_points[tuple(idx_dict_q)], cluster_points[tuple(idx_dict_p)])
                    elif(cluster_distance==4):
                        dist = calculate_weighted_average_pairwise(cluster_points[tuple(idx_dict_q)], cluster_points[tuple(idx_dict_p)])
                    else:
                        dist = calculate_centroid(cluster_points[tuple(idx_dict_q)], cluster_points[tuple(idx_dict_p)])
                    if(dist<minDist):
                        minDist = dist
                        uneste_a_idx = q
                        uneste_b_idx = p
    # Merge the two selected clusters into a new one identified by the
    # centroid of the union of their points.
        helperCluster = list()
        idx_uneste_a = list()
        idx_uneste_b = list()
        for dim in range(no_dims):
            idx_uneste_a.append(intermediary_centroids[uneste_a_idx][dim])
            idx_uneste_b.append(intermediary_centroids[uneste_b_idx][dim])
        for cluster_point in cluster_points[tuple(idx_uneste_a)]:
            helperCluster.append(cluster_point)
        for cluster_point in cluster_points[tuple(idx_uneste_b)]:
            helperCluster.append(cluster_point)
        newCluster = centroid(helperCluster)
        del cluster_points[tuple(idx_uneste_a)]
        del cluster_points[tuple(idx_uneste_b)]
        idx_cluster = list()
        for dim in range(no_dims):
            idx_cluster.append(newCluster[dim])
        cluster_points[tuple(idx_cluster)] = []
        for pointHelper in helperCluster:
            cluster_points[tuple(idx_cluster)].append(pointHelper)
        # Remove the merged identifiers (and any stale ones now inside the
        # merged cluster) from the identifier list, then register the new
        # cluster's centroid.
        value_a = intermediary_centroids[uneste_a_idx]
        value_b = intermediary_centroids[uneste_b_idx]
        for cluster_point in cluster_points[tuple(idx_cluster)]:
            if(cluster_point in intermediary_centroids):
                intermediary_centroids = list(filter(lambda a: a != cluster_point, intermediary_centroids))
        if(value_a in intermediary_centroids):
            intermediary_centroids = list(filter(lambda a: a != value_a, intermediary_centroids))
        if(value_b in intermediary_centroids):
            intermediary_centroids = list(filter(lambda a: a != value_b, intermediary_centroids))
        intermediary_centroids.append(newCluster)
        no_agg_clusters = len(cluster_points)
    return intermediary_centroids, cluster_points
def compute_pdf_kde(dataset_xy, each_dimension_values):
    '''
    Estimate the probability density of every point with a Gaussian kernel
    density estimate and return the density evaluated at each input point.

    ``dataset_xy`` is unused and kept only for interface compatibility with
    existing callers.  ``each_dimension_values`` maps a dimension index to
    the list of coordinates of all points along that dimension.

    (Cleanup: removed the leftover "who is scott?" debug print and the
    unused ``scott_fact`` local; the default bandwidth is still Scott's rule.)
    '''
    # Stack the per-dimension coordinate lists into a (no_dims, n_points)
    # array, as scipy's gaussian_kde expects.
    values = np.vstack([each_dimension_values[dim_id]
                        for dim_id in each_dimension_values])
    kernel = st.gaussian_kde(values)
    return kernel.evaluate(values)
def evaluate_pdf_kde(dataset_xy, each_dimension_values):
    '''
    Evaluate a 2-D Gaussian kernel density estimate on a 100x100 grid that
    covers the data range padded by 2 units on each side (used to draw the
    blue-shaded density plot).  Works for two dimensions only.

    ``dataset_xy`` is unused and kept for interface compatibility.

    Returns (f, xmin, xmax, ymin, ymax, xx, yy) where ``f`` is the density
    evaluated on the meshgrid (xx, yy).

    (Cleanup: removed the dead ``x = list()`` / ``y = list()`` assignments
    and the leftover "who is scott eval?" debug print.)
    '''
    x = each_dimension_values[0]
    y = each_dimension_values[1]
    xmin = min(x) - 2
    xmax = max(x) + 2
    ymin = min(y) - 2
    ymax = max(y) + 2
    # Perform the kernel density estimate on a 100x100 evaluation grid.
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = st.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)
    return (f, xmin, xmax, ymin, ymax, xx, yy)
def random_color_scaled():
    """Return a random BGR color as three floats scaled to [0, 1] (2 decimals)."""
    return [round(randint(0, 255) / 255, 2) for _ in range(3)]
# Euclidean distance between two points (first no_dims coordinates).
def DistFunc(x, y):
    """Return the Euclidean distance between x and y over no_dims dimensions."""
    global no_dims
    squared_diffs = (math.pow(x[dim] - y[dim], 2) for dim in range(no_dims))
    return math.sqrt(sum(squared_diffs))
def centroid(pixels):
    """Mean position of ``pixels`` over the first no_dims coordinates,
    with each coordinate rounded to 2 decimals; returned as a tuple."""
    global no_dims
    n_points = len(pixels)
    return tuple(
        round(sum(pixel[dim] for pixel in pixels) / n_points, 2)
        for dim in range(no_dims)
    )
def outliers_iqr(ys):
    '''
    Return the indices of the values lying outside the 1.5 * IQR band
    around the 25th-75th percentile range (inter-quartile outlier rule).
    '''
    quartile_1, quartile_3 = np.percentile(ys, [25, 75])
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return [idx for idx, value in enumerate(ys)
            if value > upper_bound or value < lower_bound]
def get_closest_mean(dataset_k):
    '''
    Mean of the nearest-neighbour distances gathered for points whose
    estimated density exceeds the average density of the partition.
    k starts as 10% of the partition size (rounded up).
    '''
    global no_dims
    # point[no_dims+1] holds the KDE-estimated density of the point.
    just_pdfs = [point[no_dims+1] for point in dataset_k]
    just_pdfs = list(set(just_pdfs))
    mean_pdf = sum(just_pdfs)/len(just_pdfs)
    k=int(math.ceil(0.1*len(dataset_k)))
    distances = list()
    for point in dataset_k:
        deja_parsati = list()  # neighbours already visited for this point
        if(point[no_dims+1] > mean_pdf):
            # NOTE(review): k is decremented here and never reset, so only
            # the first qualifying point contributes up to k neighbour
            # distances -- confirm whether this is intentional.
            while(k>0):
                neigh_id = 0
                minDist = 99999
                # Linear scan for the nearest not-yet-visited neighbour
                # (distance 0, i.e. the point itself, is excluded).
                for id_point_k in range(len(dataset_k)):
                    point_k = dataset_k[id_point_k]
                    if(point_k not in deja_parsati):
                        dist = DistFunc(point, point_k)
                        if(dist < minDist and dist > 0):
                            minDist = dist
                            neigh_id = id_point_k
                distances.append(minDist)
                neigh = dataset_k[neigh_id]
                deja_parsati.append(neigh)
                k=k-1
    # Deduplicate before averaging.
    distances = list(set(distances))
    return sum(distances)/len(distances)
def get_closestk_neigh(point, dataset_k, id_point, expand_factor):
    '''
    Collect the nearest neighbours of ``point``.  The neighbour count v is
    not fixed: for each point the search keeps expanding as long as the
    distance to the next nearest unvisited point stays below
    expand_factor * closest_mean (closest_mean is computed by the
    get_closest_mean function above).
    Returns the neighbour indices, ordered by the neighbours' point values.
    '''
    neigh_ids = list()
    distances = list()
    deja_parsati = list()  # points already accepted as neighbours
    pot_continua = 1  # flag: 1 while the expansion may continue
    closest_mean = get_closest_mean(dataset_k)
    while(pot_continua==1):
        minDist = 99999
        neigh_id = 0
        # Nearest unvisited point; distance 0 (the point itself) excluded.
        for id_point_k in range(len(dataset_k)):
            point_k = dataset_k[id_point_k]
            if(point_k not in deja_parsati):
                dist = DistFunc(point, point_k)
                if(dist < minDist and dist > 0):
                    minDist = dist
                    neigh_id = id_point_k
        if(minDist <= expand_factor*closest_mean):
            # Still within range: accept this neighbour and keep expanding.
            neigh = dataset_k[neigh_id]
            neigh_ids.append([neigh_id, neigh])
            distances.append(minDist)
            deja_parsati.append(neigh)
        else:
            pot_continua = 0
    neigh_ids.sort(key=lambda x: x[1])
    neigh_ids_final = [n_id[0] for n_id in neigh_ids]
    return neigh_ids_final
def expand_knn(point_id, expand_factor):
    '''
    Grow the current cluster starting from ``point_id``: fetch its nearest
    neighbours, add the point to the cluster, then recurse into every
    neighbour that has not been visited yet.  When all reachable points are
    exhausted a new cluster is started by the caller.  A point with no
    close-enough neighbour is marked as noise (cluster id -1).
    '''
    global id_cluster, clusters, pixels_partition_clusters, no_dims
    point = pixels_partition_clusters[point_id]
    neigh_ids = get_closestk_neigh(point, pixels_partition_clusters, point_id, expand_factor)
    clusters[id_cluster].append(point)
    if(len(neigh_ids)>0):
        # Slot no_dims stores the cluster id, slot no_dims+2 the visited flag.
        pixels_partition_clusters[point_id][no_dims] = id_cluster
        pixels_partition_clusters[point_id][no_dims+2] = 1
        for neigh_id in neigh_ids:
            if(pixels_partition_clusters[neigh_id][no_dims+2]==-1):
                expand_knn(neigh_id, expand_factor)
    else:
        # No neighbour within range: mark as noise but flag as visited.
        pixels_partition_clusters[point_id][no_dims] = -1
        pixels_partition_clusters[point_id][no_dims+2] = 1
def calculate_weighted_average_pairwise(cluster1, cluster2):
    '''
    Weighted average-link distance between two candidate clusters: the mean
    of the pairwise point distances, weighted by the absolute difference of
    the points' kernel-density estimates (stored at index no_dims+1).
    '''
    average_pairwise = 0
    sum_pairwise = 0
    sum_ponderi = 0  # accumulated weights
    for pixel1 in cluster1:
        for pixel2 in cluster2:
            distBetween = DistFunc(pixel1, pixel2)
            sum_pairwise = sum_pairwise + abs(pixel1[no_dims+1]-pixel2[no_dims+1])*distBetween
            sum_ponderi = sum_ponderi + abs(pixel1[no_dims+1]-pixel2[no_dims+1])
    # NOTE(review): divides by zero when every pair has identical density
    # estimates -- confirm callers can never hit that case.
    average_pairwise = sum_pairwise/sum_ponderi
    return average_pairwise
def calculate_average_pairwise(cluster1, cluster2):
    """Average-link distance: mean of all pairwise distances between the clusters."""
    pair_count = len(cluster1) * len(cluster2)
    total_distance = sum(DistFunc(p1, p2) for p1 in cluster1 for p2 in cluster2)
    return total_distance / pair_count
def calculate_smallest_pairwise(cluster1, cluster2):
    """Single-link distance: smallest pairwise distance between distinct points.

    Falls back to the sentinel 999999 when no pair of distinct points exists,
    matching the original initial value."""
    return min(
        (DistFunc(p1, p2)
         for p1 in cluster1 for p2 in cluster2 if p1 != p2),
        default=999999,
    )
def calculate_centroid(cluster1, cluster2):
    """Centroid-link distance: distance between the two cluster centroids."""
    return DistFunc(centroid(cluster1), centroid(cluster2))
def split_partitions(partition_dict, expand_factor):
    '''
    Split every density partition into connected clusters by growing
    neighbourhoods (expand_knn).  Returns (final_partitions, noise): the
    renumbered clusters plus the points that ended up in partitions of at
    most one point, which are collected to be reassigned later to the
    closest cluster.
    '''
    global id_cluster, clusters, pixels_partition_clusters, pdf, no_dims
    print(expand_factor)
    noise = list()
    part_id=0
    final_partitions = collections.defaultdict(list)
    clusters = collections.defaultdict(list)
    for k in partition_dict:
        pixels_partition = partition_dict[k]
        clusters = collections.defaultdict(list)
        id_cluster = -1
        pixels_partition_clusters = list()
        pixels_partition_anchors = list()
        pixels_partition_clusters = pixels_partition
        for pixel_id in range(len(pixels_partition_clusters)):
            pixel = pixels_partition_clusters[pixel_id]
            # Slot no_dims == -1 means the pixel has no cluster yet.
            if(pixels_partition_clusters[pixel_id][no_dims]==-1):
                id_cluster = id_cluster + 1
                pixels_partition_clusters[pixel_id][no_dims+2] = 1
                pixels_partition_clusters[pixel_id][no_dims] = id_cluster
                clusters[id_cluster].append(pixel)
                neigh_ids = get_closestk_neigh(pixel, pixels_partition_clusters, pixel_id, expand_factor)
                for neigh_id in neigh_ids:
                    if(pixels_partition_clusters[neigh_id][no_dims]==-1):
                        pixels_partition_clusters[neigh_id][no_dims+2]=1
                        pixels_partition_clusters[neigh_id][no_dims]=id_cluster
                        expand_knn(neigh_id, expand_factor)
        inner_partitions = collections.defaultdict(list)
        inner_partitions_filtered = collections.defaultdict(list)
        part_id_inner = 0
        # One inner partition per discovered cluster id.
        for i in range(len(clusters)):
            for pixel in pixels_partition_clusters:
                if(pixel[no_dims]==i):
                    inner_partitions[part_id_inner].append(pixel)
            part_id_inner = part_id_inner+1
        # Add the noise as well (each noise pixel in its own partition).
        for pixel in pixels_partition_clusters:
            if(pixel[no_dims]==-1):
                inner_partitions[part_id_inner].append(pixel)
                part_id_inner = part_id_inner+1
        # Filter partitions - drop the ones containing a single point.
        keys_to_delete = list()
        for k in inner_partitions:
            if(len(inner_partitions[k])<=1):
                keys_to_delete.append(k)
                # Keep these points and reassign them at the end to the
                # closest cluster.
                if(len(inner_partitions[k])>0):
                    for pinner in inner_partitions[k]:
                        noise.append(pinner)
        for k in keys_to_delete:
            del inner_partitions[k]
        part_id_filtered = 0
        for part_id_k in inner_partitions:
            inner_partitions_filtered[part_id_filtered] = inner_partitions[part_id_k]
            part_id_filtered = part_id_filtered + 1
        for part_id_inner in inner_partitions_filtered:
            final_partitions[part_id] = inner_partitions_filtered[part_id_inner]
            part_id = part_id + 1
    return (final_partitions, noise)
def create_validation_dict(clase_points, cluster_points, intermediary_centroids):
    '''
    Build a nested dict used for external validation, e.g.:
    {
    clasa_1 : {cluster_1: 140, cluster_2: 10},
    clasa_2 : {cluster_1: 20, cluster_2: 230}
    }
    i.e. for every ground-truth class, how many of its points fall in each
    cluster.  Clusters are keyed by the tuple of their (x, y) points.
    '''
    evaluation_dict = {}
    # clase_points is a dictionary keyed by the class labels.
    for clasa_pct in clase_points:
        print("clasa pct: "+str(clasa_pct))
        clusters_dict = {}
        # NOTE(review): the loop variable shadows the module-level centroid()
        # helper inside this function.
        for centroid in intermediary_centroids:
            # For each class, walk the clusters one by one and count how
            # many points of that class each cluster holds.
            pcte_clasa_in_cluster = list()
            print("=====centroid "+str(centroid))
            for pixel in cluster_points[centroid]:
                # pixel[6] holds the ground-truth class label.
                if(pixel[6]==clasa_pct):
                    pcte_clasa_in_cluster.append((pixel[0], pixel[1]))
            # (The block below explains, in Romanian, that the commented
            # line would apply only if cluster_1 held just the class points;
            # instead the key is the tuple of ALL points in the cluster.)
            '''
            Linia comentata se aplica in cazul in care cluster_1 este un tuplu format doar din punctele din clasa_1 care apartin de cluster_1
            Dar cred ca am inteles gresit si de fapt cluster1 este un tuplu cu toate punctele din acel cluster, ca mai jos
            tuplu_pcte_clasa_in_cluster = tuple(point for point in pcte_clasa_in_cluster)
            '''
            tuplu_pcte_clasa_in_cluster = tuple( (point[0], point[1]) for point in cluster_points[centroid])
            clusters_dict[tuplu_pcte_clasa_in_cluster] = len(pcte_clasa_in_cluster)
        # Sanity-check printout of the per-cluster counts.
        for clusterx in clusters_dict:
            print("=====nr_pcte in clasa pct in clusterul x "+str(clusters_dict[clusterx]))
        tuplu_pcte_in_clasa = tuple(point for point in clase_points[clasa_pct])
        evaluation_dict[clasa_pct] = clusters_dict
    print(evaluation_dict)
    return evaluation_dict
def evaluate_cluster(clase_points, cluster_points):
	"""Build a class-vs-cluster contingency table and print the external
	evaluation metrics (purity, entropy, Rand index, adjusted Rand index)."""
	global no_dims
	# Assign a dense integer index to every ground-truth class and remember,
	# for each point, the index of the class it belongs to.
	point2class = {}
	for class_idx, class_key in enumerate(clase_points):
		for pt in clase_points[class_key]:
			point2class[pt] = class_idx
	# Contingency table: evaluation_dict[class_idx][cluster_idx] -> point count.
	evaluation_dict = {class_idx: {} for class_idx in range(len(clase_points))}
	# Map each point (keyed by its first no_dims coordinates) to its cluster
	# index, zero-initialising the table column of every non-empty cluster.
	point2cluster = {}
	for cluster_idx, cluster_key in enumerate(cluster_points):
		for pt in cluster_points[cluster_key]:
			coords = tuple(pt[dim] for dim in range(no_dims))
			point2cluster[coords] = cluster_idx
			for class_idx in evaluation_dict:
				evaluation_dict[class_idx][cluster_idx] = 0
	# Accumulate the counts.
	for coords in point2cluster:
		evaluation_dict[point2class[coords]][point2cluster[coords]] += 1
	print('Purity: ', evaluation_measures.purity(evaluation_dict))
	print('Entropy: ', evaluation_measures.entropy(evaluation_dict)) # perfect results have entropy == 0
	print('RI ', evaluation_measures.rand_index(evaluation_dict))
	print('ARI ', evaluation_measures.adj_rand_index(evaluation_dict))
'''
=============================================
ALGORITM MARIACLUST
'''
if __name__ == "__main__":
	# CLI: image path, number of clusters, number of histogram bins,
	# expansion factor, inter-cluster distance mode, number of dimensions.
	filename = sys.argv[1]
	no_clusters = int(sys.argv[2]) #no clusters
	no_bins = int(sys.argv[3]) #no bins
	expand_factor = float(sys.argv[4]) # expansion factor: how much a cluster may expand based on its neighbours (multiplier applied to the closest mean)
	cluster_distance = int(sys.argv[5])
	no_dims = int(sys.argv[6]) #no dims
	'''
	how you compute the dinstance between clusters:
	1 = centroid linkage
	2 = average linkage
	3 = single linkage
	4 = average linkage ponderat
	'''
	# read the input image with OpenCV; each pixel becomes one 3-D data point (R, G, B)
	each_dimension_values = collections.defaultdict(list)
	dataset_xy = list()
	img = cv2.imread(filename,cv2.IMREAD_COLOR)
	img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
	image_width = len(img_rgb[0])
	image_height = len(img_rgb)
	dataset_xy = np.reshape(img_rgb, (image_width*image_height, 3))
	# NOTE(review): no_dims from argv is overwritten here; RGB pixels are always 3-D
	no_dims = 3
	for pixel in dataset_xy:
		for dim in range(no_dims):
			each_dimension_values[dim].append(float(pixel[dim]))
	pdf = compute_pdf_kde(dataset_xy, each_dimension_values) # estimate the probability density function with KDE
	# detect and remove outliers (points whose density is an IQR outlier)
	outliers_iqr_pdf = outliers_iqr(pdf)
	print("Am identificat urmatorii outlieri: ")
	for outlier_id in outliers_iqr_pdf:
		print(dataset_xy[outlier_id])
	print("======================================")
	dataset_xy_aux = list()
	each_dimension_values_aux = collections.defaultdict(list)
	# drop the outliers and rebuild dataset_xy and the per-dimension value lists
	for q in range(len(dataset_xy)):
		if(q not in outliers_iqr_pdf):
			dataset_xy_aux.append(dataset_xy[q])
			for dim in range(no_dims):
				each_dimension_values_aux[dim].append(dataset_xy[q][dim])
	dataset_xy = dataset_xy_aux
	each_dimension_values = each_dimension_values_aux
	# recompute the pdf on the filtered data, otherwise later indexing goes wrong
	pdf = compute_pdf_kde(dataset_xy, each_dimension_values) # recompute the probability density function
	if(no_dims==2):
		# the blue density contour is only plotted for 2 dimensions
		f,xmin, xmax, ymin, ymax, xx, yy = evaluate_pdf_kde(dataset_xy, each_dimension_values) # for displaying the dense blue areas
		plt.contourf(xx, yy, f, cmap='Blues') # for displaying the dense blue areas
	partition_dict = collections.defaultdict(list)
	'''
	Impart punctele din setul de date in n bin-uri in functie de densitatea probabilitatii.
	Numarul de bin-uri este numarul de clustere - 1
	'''
	# split the points into no_bins density bins; one bin = one initial partition
	pixels_per_bin, bins = np.histogram(pdf, bins=no_bins)
	# display the resulting bins and create the partitions
	for idx_bin in range( (len(bins)-1) ):
		culoare = random_color_scaled()
		for idx_point in range(len(dataset_xy)):
			if(pdf[idx_point]>=bins[idx_bin] and pdf[idx_point]<=bins[idx_bin+1]):
				# point record layout: [coords..., nn-cluster id, density, visited flag, original index]
				element_to_append = list()
				for dim in range(no_dims):
					element_to_append.append(dataset_xy[idx_point][dim])
				element_to_append.append(-1) # nearest-neighbour cluster the point belongs to (unset)
				element_to_append.append(pdf[idx_point])
				element_to_append.append(-1) # whether the point was already visited by nearest neighbour (unset)
				element_to_append.append(idx_point)
				partition_dict[idx_bin].append(element_to_append)
				# scatter plot only for 2 or 3 dimensions
				if(no_dims == 2):
					plt.scatter(dataset_xy[idx_point][0], dataset_xy[idx_point][1], color=culoare)
				elif(no_dims == 3):
					plt.scatter(dataset_xy[idx_point][0], dataset_xy[idx_point][1], dataset_xy[idx_point][2], color=culoare)
	if(no_dims == 2 or no_dims == 3):
		plt.show()
	'''
	Pasul anterior atribuie zonele care au aceeasi densitate aceluiasi cluster, chiar daca aceste zone se afla la distanta mare una fata de cealalta.
	De aceea aplic un algoritm similar DBSCAN pentru a determina cat de mult se extinde o zona de densitate, si astfel partitionez zonele care se afla la distanta mare una fata de alta.
	Unesc partitiile rezultate in urma separarii utilizand clusterizarea ierarhica aglomerativa modificata (utilizeaza media ponderata pentru unirea clusterelor)
	'''
	# DBSCAN-like split of same-density areas that are far apart, then merge with agglomerative clustering
	final_partitions, noise = split_partitions(partition_dict, expand_factor) # function that splits the partitions
	if(no_dims==2):
		for k in final_partitions:
			color = random_color_scaled()
			for pixel in final_partitions[k]:
				plt.scatter(pixel[0], pixel[1], color=color)
		plt.show()
	intermediary_centroids, cluster_points = agglomerative_clustering2(final_partitions, no_clusters, cluster_distance) # params: resulting partitions, number of clusters
	print(intermediary_centroids)
	# reassign every noise point to its closest cluster
	for noise_point in noise:
		# find the cluster closest to noise_point
		closest_centroid = 0
		minDist = 99999
		for centroid in intermediary_centroids:
			for pixel in cluster_points[centroid]:
				dist = DistFunc(noise_point, pixel)
				if(dist < minDist):
					minDist = dist
					closest_centroid = centroid
		cluster_points[closest_centroid].append(noise_point)
	# recolor each pixel with its cluster centroid and rebuild the image
	# NOTE(review): dataset_xy holds numpy rows here; `pixel in dataset_xy` /
	# list.index on numpy arrays can raise an ambiguous-truth-value error -- verify
	for centroid in cluster_points:
		for pixel in cluster_points[centroid]:
			if(pixel in dataset_xy):
				idx_to_change = dataset_xy.index(pixel)
				dataset_xy[idx_to_change] = centroid
	img_final = np.reshape(dataset_xy, (image_height, image_width, 3))
	img_final = cv2.cvtColor(img_final, cv2.COLOR_RGB2BGR)
	cv2.imshow('image',img_final)
	cv2.waitKey(0)
	cv2.destroyAllWindows()
| [
"iulia.radulescu@prospectiuni.local"
] | iulia.radulescu@prospectiuni.local |
dd1a357c59491004293be2c4a668cf2b765e501c | 5a21320778eff1159fee232320dc3c22309d542b | /lineByLine.py | 20f5a0bd274662324e0fea9f792d59b17581890a | [] | no_license | TheRochVoices/LSTM | 5dc337a83a6bba535828795e94190a77e66542d6 | 6515d7127bb1af9104887bf9b7839b4fc6dfbc3e | refs/heads/master | 2020-03-21T22:31:32.223499 | 2018-06-29T17:19:12 | 2018-06-29T17:19:12 | 139,132,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,770 | py | import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
def genSeq(model, tokenizer, max_length, seed_text, n_words):
	"""Generate n_words words, one at a time, by repeatedly feeding the
	growing text back through the tokenizer and the trained model."""
	generated = seed_text
	for _ in range(n_words):
		# Encode the text produced so far and left-pad it to the model input width.
		token_ids = tokenizer.texts_to_sequences([generated])[0]
		padded = pad_sequences([token_ids], maxlen=max_length, padding='pre')
		# Index of the most probable next word according to the model.
		predicted_idx = model.predict_classes(padded, verbose=0)
		# Reverse-lookup the predicted index in the tokenizer vocabulary.
		next_word = ''
		for word, index in tokenizer.word_index.items():
			if index == predicted_idx:
				next_word = word
				break
		generated = generated + ' ' + next_word
	return generated
# Training corpus: one nursery-rhyme line per '\n'-separated segment.
data = """ Jack and Jill went up the hill\n
To fetch a pail of water\n
Jack fell down and broke his crown\n
And Jill came tumbling after\n """
# 1. Tokenize the input
tokenizer = Tokenizer()
tokenizer.fit_on_texts([data])
# +1 because Keras reserves index 0 for padding.
vocab_size = len(tokenizer.word_index) + 1
print('vocab size %d' % vocab_size)
print(tokenizer.texts_to_sequences([data])[0])
#[2, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 14, 15, 1, 16, 17, 18, 1, 3, 19, 20, 21]
# Build every prefix of every line as a training sequence (last token = target).
sequences = []
for line in data.split('\n'):
	encoded = tokenizer.texts_to_sequences([line])[0]
	for i in range(1, len(encoded)):
		sequence = encoded[: i+1]
		sequences.append(sequence)
print(sequences)
#[[2, 1], [2, 1, 3], [2, 1, 3, 4], [2, 1, 3, 4, 5], [2, 1, 3, 4, 5, 6], [2, 1, 3, 4, 5, 6, 7], [8, 9], [8, 9, 10], [8, 9, 10, 11], [8, 9, 10, 11, 12], [8, 9, 10, 11, 12, 13], [2, 14], [2, 14, 15], [2, 14, 15, 1], [2, 14, 15, 1, 16], [2, 14, 15, 1, 16, 17], [2, 14, 15, 1, 16, 17, 18], [1, 3], [1, 3, 19], [1, 3, 19, 20], [1, 3, 19, 20, 21]]
# 2. normalize all the sequences
max_length = max([len(seq) for seq in sequences])
sequences = pad_sequences(sequences, maxlen=max_length, padding='pre')
print('Max Sequence Length: %d' % max_length)
# 3. one shot the output of the sequences
sequences = np.asarray(sequences)
# NOTE(review): the hard-coded 6 assumes max_length == 7 for this corpus;
# x width must equal the Embedding input_length (max_length-1) -- confirm if data changes.
x, y = sequences[:,:6],sequences[:,6]
y = to_categorical(y, num_classes = vocab_size)
print(y)
'''[[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
[0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]'''
# Model: learned word embeddings -> single LSTM layer -> softmax over the vocabulary.
model = Sequential()
model.add(Embedding(vocab_size, 10, input_length = max_length-1))
model.add(LSTM(50))
model.add(Dense(vocab_size, activation='softmax'))
print(model.summary())
'''Layer (type) Output Shape Param #
=================================================================
embedding_1 (Embedding) (None, 6, 10) 220
_________________________________________________________________
lstm_1 (LSTM) (None, 50) 12200
_________________________________________________________________
dense_1 (Dense) (None, 22) 1122
=================================================================
Total params: 13,542
Trainable params: 13,542
Non-trainable params: 0
_________________________________________________________________
None'''
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x, y, epochs=500, verbose=2)
# Sample the trained model from two different seed words.
print(genSeq(model, tokenizer, max_length-1, 'Jack', 4))
print(genSeq(model, tokenizer, max_length-1, 'Jill', 4))
| [
"rochaks001@gmail.com"
] | rochaks001@gmail.com |
ae77d3ac091bad1efdaf9614fef017f8eeb6c022 | ade9a658d227f8b91c9c19ceb7dfaf1bab24f909 | /tests/test_items/test_contacts.py | 4e77130f21744d24f35265713b9da73ed45cedc5 | [
"BSD-2-Clause"
] | permissive | spmsh/exchangelib | 3da14e89e5606178f0594ac0498063db0c3e62a2 | 9cf3d9f08926d8a923c00d3a3cb9d96203cb25b1 | refs/heads/master | 2023-08-30T23:01:03.929611 | 2021-11-23T00:41:03 | 2021-11-23T00:41:03 | 203,696,045 | 0 | 0 | BSD-2-Clause | 2021-11-23T00:43:06 | 2019-08-22T02:04:23 | Python | UTF-8 | Python | false | false | 10,088 | py | import datetime
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
from exchangelib.errors import ErrorInvalidIdMalformed
from exchangelib.folders import Contacts
from exchangelib.indexed_properties import EmailAddress, PhysicalAddress, PhoneNumber
from exchangelib.items import Contact, DistributionList, Persona
from exchangelib.properties import Mailbox, Member, Attribution, SourceId, FolderId, StringAttributedValue, \
PhoneNumberAttributedValue, PersonaPhoneNumberTypeValue
from exchangelib.services import GetPersona
from ..common import get_random_string, get_random_email
from .test_basics import CommonItemTest
class ContactsTest(CommonItemTest):
    """Integration tests for Contact items: indexed-field ordering/updates,
    distribution lists, and the FindPeople/GetPersona services."""

    TEST_FOLDER = 'contacts'
    FOLDER_CLASS = Contacts
    ITEM_CLASS = Contact

    def test_order_by_on_indexed_field(self):
        """order_by() on indexed fields must sort by the labelled sub-value."""
        # Test order_by() on IndexedField (simple and multi-subfield). Only Contact items have these
        test_items = []
        label = self.random_val(EmailAddress.get_field_by_fieldname('label'))
        for i in range(4):
            item = self.get_test_item()
            item.email_addresses = [EmailAddress(email='%s@foo.com' % i, label=label)]
            test_items.append(item)
        self.test_folder.bulk_create(items=test_items)
        qs = self.test_folder.filter(categories__contains=self.categories)
        self.assertEqual(
            [i[0].email for i in qs.order_by('email_addresses__%s' % label)
             .values_list('email_addresses', flat=True)],
            ['0@foo.com', '1@foo.com', '2@foo.com', '3@foo.com']
        )
        self.assertEqual(
            [i[0].email for i in qs.order_by('-email_addresses__%s' % label)
             .values_list('email_addresses', flat=True)],
            ['3@foo.com', '2@foo.com', '1@foo.com', '0@foo.com']
        )
        self.bulk_delete(qs)
        # Repeat for a multi-subfield indexed property (PhysicalAddress.street)
        test_items = []
        label = self.random_val(PhysicalAddress.get_field_by_fieldname('label'))
        for i in range(4):
            item = self.get_test_item()
            item.physical_addresses = [PhysicalAddress(street='Elm St %s' % i, label=label)]
            test_items.append(item)
        self.test_folder.bulk_create(items=test_items)
        qs = self.test_folder.filter(categories__contains=self.categories)
        self.assertEqual(
            [i[0].street for i in qs.order_by('physical_addresses__%s__street' % label)
             .values_list('physical_addresses', flat=True)],
            ['Elm St 0', 'Elm St 1', 'Elm St 2', 'Elm St 3']
        )
        self.assertEqual(
            [i[0].street for i in qs.order_by('-physical_addresses__%s__street' % label)
             .values_list('physical_addresses', flat=True)],
            ['Elm St 3', 'Elm St 2', 'Elm St 1', 'Elm St 0']
        )
        self.bulk_delete(qs)

    def test_order_by_failure(self):
        """Invalid label/subfield combinations in order_by() must raise."""
        # Test error handling on indexed properties with labels and subfields
        qs = self.test_folder.filter(categories__contains=self.categories)
        with self.assertRaises(ValueError):
            qs.order_by('email_addresses')  # Must have label
        with self.assertRaises(ValueError):
            qs.order_by('email_addresses__FOO')  # Must have a valid label
        with self.assertRaises(ValueError):
            qs.order_by('email_addresses__EmailAddress1__FOO')  # Must not have a subfield
        with self.assertRaises(ValueError):
            qs.order_by('physical_addresses__Business')  # Must have a subfield
        with self.assertRaises(ValueError):
            qs.order_by('physical_addresses__Business__FOO')  # Must have a valid subfield

    def test_update_on_single_field_indexed_field(self):
        """Saving with update_fields must replace a single-field indexed value."""
        home = PhoneNumber(label='HomePhone', phone_number='123')
        business = PhoneNumber(label='BusinessPhone', phone_number='456')
        item = self.get_test_item()
        item.phone_numbers = [home]
        item.save()
        item.phone_numbers = [business]
        item.save(update_fields=['phone_numbers'])
        item.refresh()
        self.assertListEqual(item.phone_numbers, [business])

    def test_update_on_multi_field_indexed_field(self):
        """Saving with update_fields must replace a multi-field indexed value."""
        home = PhysicalAddress(label='Home', street='ABC')
        business = PhysicalAddress(label='Business', street='DEF', city='GHI')
        item = self.get_test_item()
        item.physical_addresses = [home]
        item.save()
        item.physical_addresses = [business]
        item.save(update_fields=['physical_addresses'])
        item.refresh()
        self.assertListEqual(item.physical_addresses, [business])

    def test_distribution_lists(self):
        """Round-trip a DistributionList, with and without members."""
        dl = DistributionList(folder=self.test_folder, display_name=get_random_string(255), categories=self.categories)
        dl.save()
        new_dl = self.test_folder.get(categories__contains=dl.categories)
        self.assertEqual(new_dl.display_name, dl.display_name)
        self.assertEqual(new_dl.members, None)
        dl.refresh()
        # We set mailbox_type to OneOff because otherwise the email address must be an actual account
        dl.members = {
            Member(mailbox=Mailbox(email_address=get_random_email(), mailbox_type='OneOff')) for _ in range(4)
        }
        dl.save()
        new_dl = self.test_folder.get(categories__contains=dl.categories)
        self.assertEqual({m.mailbox.email_address for m in new_dl.members}, dl.members)
        dl.delete()

    def test_find_people(self):
        """FindPeople must work with and without only()/filter()/order_by()."""
        # The test server may not have any contacts. Just test that the FindPeople and GetPersona services work.
        self.assertGreaterEqual(len(list(self.test_folder.people())), 0)
        self.assertGreaterEqual(
            len(list(
                self.test_folder.people().only('display_name').filter(display_name='john').order_by('display_name')
            )),
            0
        )

    def test_get_persona(self):
        """GetPersona response parsing must map all fields of a canned payload."""
        xml = b'''\
<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
  <s:Body>
    <m:GetPersonaResponseMessage ResponseClass="Success"
        xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
        xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
      <m:ResponseCode>NoError</m:ResponseCode>
      <m:Persona>
        <t:PersonaId Id="AAQkADEzAQAKtOtR="/>
        <t:PersonaType>Person</t:PersonaType>
        <t:CreationTime>2012-06-01T17:00:34Z</t:CreationTime>
        <t:DisplayName>Brian Johnson</t:DisplayName>
        <t:RelevanceScore>4255550110</t:RelevanceScore>
        <t:Attributions>
          <t:Attribution>
            <t:Id>0</t:Id>
            <t:SourceId Id="AAMkA =" ChangeKey="EQAAABY+"/>
            <t:DisplayName>Outlook</t:DisplayName>
            <t:IsWritable>true</t:IsWritable>
            <t:IsQuickContact>false</t:IsQuickContact>
            <t:IsHidden>false</t:IsHidden>
            <t:FolderId Id="AAMkA=" ChangeKey="AQAAAA=="/>
          </t:Attribution>
        </t:Attributions>
        <t:DisplayNames>
          <t:StringAttributedValue>
            <t:Value>Brian Johnson</t:Value>
            <t:Attributions>
              <t:Attribution>2</t:Attribution>
              <t:Attribution>3</t:Attribution>
            </t:Attributions>
          </t:StringAttributedValue>
        </t:DisplayNames>
        <t:MobilePhones>
          <t:PhoneNumberAttributedValue>
            <t:Value>
              <t:Number>(425)555-0110</t:Number>
              <t:Type>Mobile</t:Type>
            </t:Value>
            <t:Attributions>
              <t:Attribution>0</t:Attribution>
            </t:Attributions>
          </t:PhoneNumberAttributedValue>
          <t:PhoneNumberAttributedValue>
            <t:Value>
              <t:Number>(425)555-0111</t:Number>
              <t:Type>Mobile</t:Type>
            </t:Value>
            <t:Attributions>
              <t:Attribution>1</t:Attribution>
            </t:Attributions>
          </t:PhoneNumberAttributedValue>
        </t:MobilePhones>
      </m:Persona>
    </m:GetPersonaResponseMessage>
  </s:Body>
</s:Envelope>'''
        ws = GetPersona(account=self.account)
        persona = ws.parse(xml)
        self.assertEqual(persona.id, 'AAQkADEzAQAKtOtR=')
        self.assertEqual(persona.persona_type, 'Person')
        self.assertEqual(
            persona.creation_time, datetime.datetime(2012, 6, 1, 17, 0, 34, tzinfo=zoneinfo.ZoneInfo('UTC'))
        )
        self.assertEqual(persona.display_name, 'Brian Johnson')
        self.assertEqual(persona.relevance_score, '4255550110')
        self.assertEqual(persona.attributions[0], Attribution(
            ID=None,
            _id=SourceId(id='AAMkA =', changekey='EQAAABY+'),
            display_name='Outlook',
            is_writable=True,
            is_quick_contact=False,
            is_hidden=False,
            folder_id=FolderId(id='AAMkA=', changekey='AQAAAA==')
        ))
        self.assertEqual(persona.display_names, [
            StringAttributedValue(value='Brian Johnson', attributions=['2', '3']),
        ])
        self.assertEqual(persona.mobile_phones, [
            PhoneNumberAttributedValue(
                value=PersonaPhoneNumberTypeValue(number='(425)555-0110', type='Mobile'),
                attributions=['0'],
            ),
            PhoneNumberAttributedValue(
                value=PersonaPhoneNumberTypeValue(number='(425)555-0111', type='Mobile'),
                attributions=['1'],
            )
        ])

    def test_get_persona_failure(self):
        """A malformed persona ID must surface as ErrorInvalidIdMalformed."""
        # The test server may not have any personas. Just test that the service responds with something we can parse
        persona = Persona(id='AAA=', changekey='xxx')
        try:
            GetPersona(account=self.account).call(persona=persona)
        except ErrorInvalidIdMalformed:
            pass
| [
"erik@cederstrand.dk"
] | erik@cederstrand.dk |
e1eca6386c795d0c9133574f9c9d774114791f16 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/5cf7181856df4d0a963c76fedfbdd36a.py | f9930a4d6aa5f919c9e249a5f9a7b5e2abcc8d31 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 908 | py | """Word Counting."""
from collections import Counter
import re
import string
# A regular expression that matches any punctuation character.
PUNCTUATION_REGEX = re.compile("[{}]".format(re.escape(string.punctuation)))
class Phrase(str):
    """A subclass of str that supports word counting."""

    # Compiled once per class; matches any single punctuation character.
    _PUNCTUATION_REGEX = re.compile("[{}]".format(re.escape(string.punctuation)))

    def __init__(self, phrase=''):
        # str is immutable: its value is already set by str.__new__. The
        # previous py2-style super(Phrase, self).__init__(phrase) call passes
        # an extra argument to object.__init__ and raises TypeError on
        # Python 3, so construction of any Phrase failed.
        super().__init__()
        self._counter = None  # lazily-built word-frequency cache

    def __repr__(self):
        return "{!s}({!r})".format(self.__class__.__name__, str(self))

    def word_count(self):
        """Return a word frequency dictionary.

        A word is delimited by runs of consecutive whitespace or punctuation.
        Words are lower-cased; the result is computed once and cached.
        """
        if self._counter is None:
            punctuation_erased = re.sub(self._PUNCTUATION_REGEX, ' ', self)
            self._counter = Counter(
                word.lower() for word in punctuation_erased.split())
        return self._counter
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
126ea475038316b83e1fc50e4731c75444167e40 | 77eee7ae5859c3aabaf20697265fb618334a83e2 | /funcionario/migrations/0001_initial.py | afb792f4b535a5d9a50414febdec372416610aaa | [] | no_license | IvesCostaBr/sistema-protocolo | fac2d9bb3b5b67bdf0986ec9482e034837d231e2 | b0ca415b91c9168a4a70e42c84e185472e65802d | refs/heads/master | 2023-04-19T18:52:29.533153 | 2021-05-17T02:52:14 | 2021-05-17T02:52:14 | 367,528,044 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # Generated by Django 3.2.2 on 2021-05-16 04:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('setor', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Funcionario',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_completo', models.CharField(max_length=90)),
('cpf', models.CharField(max_length=15)),
('data_nascimento', models.DateField(blank=True, null=True)),
('status_empresa', models.CharField(max_length=20)),
('setor', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.PROTECT, to='setor.setor')),
],
),
]
| [
"ivespauiniam@gmail.com"
] | ivespauiniam@gmail.com |
36b4cbfa903749ebd0abeca63bc54624190c90ac | b22cdd12b2cd7dcf7f32e02c74792e2a13315a94 | /redis/carsaddition.py | 3016fc5ab7b7c22bd2ff641c700856d97d955348 | [] | no_license | rabichith10/teamOrange_CRS | 7e86fa10be720db3b1ce9c7ba399af109d0ce2a3 | 81c533bbc1c87b2c290b4ff18e6122c10706c25f | refs/heads/main | 2023-06-18T13:26:44.478433 | 2021-07-23T06:34:06 | 2021-07-23T06:34:06 | 388,608,646 | 0 | 1 | null | 2021-07-22T22:32:21 | 2021-07-22T22:02:16 | null | UTF-8 | Python | false | false | 25,134 | py | import redis
redis = redis.Redis(
host= '127.0.0.1',
port= '6379')
redis.geoadd("carlocation",52.5167, 13.3833, 1001)
redis.geoadd("carlocation",52.5167, 13.3833, 1002)
redis.geoadd("carlocation",48.1372, 11.5755, 1003)
redis.geoadd("carlocation",50.9422, 6.9578, 1004)
redis.geoadd("carlocation",50.1136, 8.6797, 1005)
redis.geoadd("carlocation",48.7761, 9.1775, 1006)
redis.geoadd("carlocation",51.2311, 6.7724, 1007)
redis.geoadd("carlocation",51.2311, 6.7724, 1008)
redis.geoadd("carlocation",51.5139, 7.4653, 1009)
redis.geoadd("carlocation",51.4508, 7.0131, 1010)
redis.geoadd("carlocation",51.0493, 13.7384, 1011)
redis.geoadd("carlocation",51.35, 12.3833, 1012)
redis.geoadd("carlocation",52.3744, 9.7386, 1013)
redis.geoadd("carlocation",49.4528, 11.0778, 1014)
redis.geoadd("carlocation",51.4322, 6.7611, 1015)
redis.geoadd("carlocation",51.4833, 7.2167, 1016)
redis.geoadd("carlocation",51.2667, 7.1833, 1017)
redis.geoadd("carlocation",52.0167, 8.5333, 1018)
redis.geoadd("carlocation",50.7339, 7.0997, 1019)
redis.geoadd("carlocation",51.9625, 7.6256, 1020)
redis.geoadd("carlocation",52.5167, 13.3833, 1021)
redis.geoadd("carlocation",52.5167, 13.3833, 1022)
redis.geoadd("carlocation",48.1372, 11.5755, 1046)
redis.geoadd("carlocation",50.9422, 6.9578, 1024)
redis.geoadd("carlocation",50.1136, 8.6797, 1025)
redis.geoadd("carlocation",48.7761, 9.1775, 1026)
redis.geoadd("carlocation",51.2311, 6.7724, 1027)
redis.geoadd("carlocation",51.2311, 6.7724, 1028)
redis.geoadd("carlocation",51.5139, 7.4653, 1029)
redis.geoadd("carlocation",51.4508, 7.0131, 1030)
redis.geoadd("carlocation",51.0493, 13.7384, 1031)
redis.geoadd("carlocation",51.35, 12.3833, 1032)
redis.geoadd("carlocation",52.3744, 9.7386, 1033)
redis.geoadd("carlocation",49.4528, 11.0778, 1034)
redis.geoadd("carlocation",51.4322, 6.7611, 1035)
redis.geoadd("carlocation",51.4833, 7.2167, 1036)
redis.geoadd("carlocation",51.2667, 7.1833, 1058)
redis.geoadd("carlocation",52.0167, 8.5333, 1038)
redis.geoadd("carlocation",50.7339, 7.0997, 1039)
redis.geoadd("carlocation",51.9625, 7.6256, 1040)
redis.geoadd("carlocation",52.5167, 13.3833, 1041)
redis.geoadd("carlocation",52.5167, 13.3833, 1042)
redis.geoadd("carlocation",48.1372, 11.5755, 1043)
redis.geoadd("carlocation",50.9422, 6.9578, 1044)
redis.geoadd("carlocation",50.1136, 8.6797, 1045)
redis.geoadd("carlocation",48.7761, 9.1775, 1064)
redis.geoadd("carlocation",51.2311, 6.7724, 1047)
redis.geoadd("carlocation",51.2311, 6.7724, 1048)
redis.geoadd("carlocation",51.5139, 7.4653, 1049)
redis.geoadd("carlocation",51.4508, 7.0131, 1050)
redis.geoadd("carlocation",51.0493, 13.7384, 1051)
redis.geoadd("carlocation",51.35, 12.3833, 1052)
redis.geoadd("carlocation",52.3744, 9.7386, 1053)
redis.geoadd("carlocation",49.4528, 11.0778, 1054)
redis.geoadd("carlocation",51.4322, 6.7611, 1055)
redis.geoadd("carlocation",51.4833, 7.2167, 1056)
redis.geoadd("carlocation",51.2667, 7.1833, 1057)
redis.geoadd("carlocation",52.0167, 8.5333, 1100)
redis.geoadd("carlocation",50.7339, 7.0997, 1059)
redis.geoadd("carlocation",51.9625, 7.6256, 1060)
redis.geoadd("carlocation",52.5167, 13.3833, 1110)
redis.geoadd("carlocation",53.55, 10, 1062)
redis.geoadd("carlocation",48.1372, 11.5755, 1063)
redis.geoadd("carlocation",50.9422, 6.9578, 1149)
redis.geoadd("carlocation",50.1136, 8.6797, 1065)
redis.geoadd("carlocation",53.1153, 8.7975, 1066)
redis.geoadd("carlocation",48.7761, 9.1775, 1067)
redis.geoadd("carlocation",51.2311, 6.7724, 1068)
redis.geoadd("carlocation",51.5139, 7.4653, 1069)
redis.geoadd("carlocation",51.4508, 7.0131, 1070)
redis.geoadd("carlocation",51.0493, 13.7384, 1071)
redis.geoadd("carlocation",51.35, 12.3833, 1072)
redis.geoadd("carlocation",52.3744, 9.7386, 1073)
redis.geoadd("carlocation",49.4528, 11.0778, 1074)
redis.geoadd("carlocation",51.4322, 6.7611, 1075)
redis.geoadd("carlocation",51.4833, 7.2167, 1076)
redis.geoadd("carlocation",51.2667, 7.1833, 1160)
redis.geoadd("carlocation",52.0167, 8.5333, 1078)
redis.geoadd("carlocation",50.7339, 7.0997, 1079)
redis.geoadd("carlocation",51.9625, 7.6256, 1080)
redis.geoadd("carlocation",52.5167, 13.3833, 1189)
redis.geoadd("carlocation",53.55, 10, 1082)
redis.geoadd("carlocation",48.1372, 11.5755, 1230)
redis.geoadd("carlocation",50.9422, 6.9578, 1084)
redis.geoadd("carlocation",50.1136, 8.6797, 1085)
redis.geoadd("carlocation",53.1153, 8.7975, 1086)
redis.geoadd("carlocation",48.7761, 9.1775, 1241)
redis.geoadd("carlocation",51.2311, 6.7724, 1088)
redis.geoadd("carlocation",51.5139, 7.4653, 1089)
redis.geoadd("carlocation",51.4508, 7.0131, 1090)
redis.geoadd("carlocation",51.0493, 13.7384, 1091)
redis.geoadd("carlocation",51.35, 12.3833, 1092)
redis.geoadd("carlocation",52.3744, 9.7386, 1093)
redis.geoadd("carlocation",49.4528, 11.0778, 1094)
redis.geoadd("carlocation",51.4322, 6.7611, 1095)
redis.geoadd("carlocation",51.4833, 7.2167, 1096)
redis.geoadd("carlocation",51.2667, 7.1833, 1097)
redis.geoadd("carlocation",52.0167, 8.5333, 1098)
redis.geoadd("carlocation",50.7339, 7.0997, 1099)
redis.geoadd("carlocation",51.9625, 7.6256, 1256)
redis.geoadd("carlocation",52.5167, 13.3833, 1101)
redis.geoadd("carlocation",53.55, 10, 1102)
redis.geoadd("carlocation",48.1372, 11.5755, 1103)
redis.geoadd("carlocation",50.9422, 6.9578, 1104)
redis.geoadd("carlocation",50.1136, 8.6797, 1105)
redis.geoadd("carlocation",53.1153, 8.7975, 1106)
redis.geoadd("carlocation",48.7761, 9.1775, 1107)
redis.geoadd("carlocation",51.2311, 6.7724, 1108)
redis.geoadd("carlocation",51.5139, 7.4653, 1109)
redis.geoadd("carlocation",51.4508, 7.0131, 1262)
redis.geoadd("carlocation",51.0493, 13.7384, 1111)
redis.geoadd("carlocation",51.35, 12.3833, 1112)
redis.geoadd("carlocation",52.3744, 9.7386, 1275)
redis.geoadd("carlocation",49.4528, 11.0778, 1114)
redis.geoadd("carlocation",51.4322, 6.7611, 1115)
redis.geoadd("carlocation",51.4833, 7.2167, 1116)
redis.geoadd("carlocation",51.2667, 7.1833, 1117)
redis.geoadd("carlocation",52.0167, 8.5333, 1118)
redis.geoadd("carlocation",50.7339, 7.0997, 1119)
redis.geoadd("carlocation",51.9625, 7.6256, 1120)
redis.geoadd("carlocation",52.5167, 13.3833, 1121)
redis.geoadd("carlocation",53.55, 10, 1122)
redis.geoadd("carlocation",48.1372, 11.5755, 1123)
redis.geoadd("carlocation",50.9422, 6.9578, 1124)
redis.geoadd("carlocation",50.1136, 8.6797, 1125)
redis.geoadd("carlocation",53.1153, 8.7975, 1126)
redis.geoadd("carlocation",48.7761, 9.1775, 1127)
redis.geoadd("carlocation",51.2311, 6.7724, 1128)
redis.geoadd("carlocation",51.5139, 7.4653, 1129)
redis.geoadd("carlocation",51.4508, 7.0131, 1130)
redis.geoadd("carlocation",51.0493, 13.7384, 1131)
redis.geoadd("carlocation",51.35, 12.3833, 1132)
redis.geoadd("carlocation",52.3744, 9.7386, 1133)
redis.geoadd("carlocation",49.4528, 11.0778, 1134)
redis.geoadd("carlocation",51.4322, 6.7611, 1135)
redis.geoadd("carlocation",51.4833, 7.2167, 1136)
redis.geoadd("carlocation",51.2667, 7.1833, 1137)
redis.geoadd("carlocation",52.0167, 8.5333, 1138)
redis.geoadd("carlocation",50.7339, 7.0997, 1139)
# Seed the Redis geo index "carlocation": each call registers one vehicle
# (the trailing numeric car id) at a fixed city coordinate in Germany.
# NOTE(review): redis-py's GEOADD expects (longitude, latitude) ordering,
# but the first value on every line looks like a latitude (e.g. 51.9625 /
# 7.6256 is Muenster as lat/lon) -- confirm the argument order against the
# redis-py version in use (the flat 4-argument form is the pre-4.0 API),
# otherwise every stored position is transposed.
redis.geoadd("carlocation",51.9625, 7.6256, 1140)
redis.geoadd("carlocation",52.5167, 13.3833, 1141)
redis.geoadd("carlocation",53.55, 10, 1142)
redis.geoadd("carlocation",48.1372, 11.5755, 1143)
redis.geoadd("carlocation",50.9422, 6.9578, 1144)
redis.geoadd("carlocation",50.1136, 8.6797, 1145)
redis.geoadd("carlocation",53.1153, 8.7975, 1146)
redis.geoadd("carlocation",48.7761, 9.1775, 1283)
redis.geoadd("carlocation",51.2311, 6.7724, 1148)
redis.geoadd("carlocation",51.5139, 7.4653, 1293)
redis.geoadd("carlocation",51.4508, 7.0131, 1150)
redis.geoadd("carlocation",51.0493, 13.7384, 1151)
redis.geoadd("carlocation",51.35, 12.3833, 1152)
redis.geoadd("carlocation",52.3744, 9.7386, 1153)
redis.geoadd("carlocation",49.4528, 11.0778, 1154)
redis.geoadd("carlocation",51.4322, 6.7611, 1155)
redis.geoadd("carlocation",51.4833, 7.2167, 1156)
redis.geoadd("carlocation",51.2667, 7.1833, 1157)
redis.geoadd("carlocation",52.0167, 8.5333, 1158)
redis.geoadd("carlocation",50.7339, 7.0997, 1159)
redis.geoadd("carlocation",51.9625, 7.6256, 1309)
redis.geoadd("carlocation",52.5167, 13.3833, 1161)
redis.geoadd("carlocation",48.1372, 11.5755, 1162)
redis.geoadd("carlocation",50.9422, 6.9578, 1163)
redis.geoadd("carlocation",50.1136, 8.6797, 1164)
redis.geoadd("carlocation",48.7761, 9.1775, 1165)
redis.geoadd("carlocation",51.2311, 6.7724, 1166)
redis.geoadd("carlocation",51.5139, 7.4653, 1167)
redis.geoadd("carlocation",51.4508, 7.0131, 1168)
redis.geoadd("carlocation",51.0493, 13.7384, 1169)
redis.geoadd("carlocation",51.35, 12.3833, 1170)
redis.geoadd("carlocation",52.3744, 9.7386, 1171)
redis.geoadd("carlocation",49.4528, 11.0778, 1172)
redis.geoadd("carlocation",51.4322, 6.7611, 1173)
redis.geoadd("carlocation",51.4833, 7.2167, 1174)
redis.geoadd("carlocation",51.2667, 7.1833, 1175)
redis.geoadd("carlocation",52.0167, 8.5333, 1176)
redis.geoadd("carlocation",50.7339, 7.0997, 1177)
redis.geoadd("carlocation",51.9625, 7.6256, 1178)
redis.geoadd("carlocation",52.5167, 13.3833, 1179)
redis.geoadd("carlocation",48.1372, 11.5755, 1180)
redis.geoadd("carlocation",50.9422, 6.9578, 1181)
redis.geoadd("carlocation",50.1136, 8.6797, 1182)
redis.geoadd("carlocation",48.7761, 9.1775, 1183)
redis.geoadd("carlocation",51.2311, 6.7724, 1184)
redis.geoadd("carlocation",51.5139, 7.4653, 1185)
redis.geoadd("carlocation",51.4508, 7.0131, 1186)
redis.geoadd("carlocation",51.0493, 13.7384, 1187)
redis.geoadd("carlocation",51.35, 12.3833, 1188)
redis.geoadd("carlocation",52.3744, 9.7386, 1316)
redis.geoadd("carlocation",49.4528, 11.0778, 1190)
redis.geoadd("carlocation",51.4322, 6.7611, 1191)
redis.geoadd("carlocation",51.4833, 7.2167, 1192)
redis.geoadd("carlocation",51.2667, 7.1833, 1193)
redis.geoadd("carlocation",52.0167, 8.5333, 1194)
redis.geoadd("carlocation",50.7339, 7.0997, 1195)
redis.geoadd("carlocation",51.9625, 7.6256, 1196)
redis.geoadd("carlocation",52.5167, 13.3833, 1197)
redis.geoadd("carlocation",48.1372, 11.5755, 1198)
redis.geoadd("carlocation",50.9422, 6.9578, 1199)
redis.geoadd("carlocation",50.1136, 8.6797, 1200)
redis.geoadd("carlocation",48.7761, 9.1775, 1201)
redis.geoadd("carlocation",51.2311, 6.7724, 1202)
redis.geoadd("carlocation",51.5139, 7.4653, 1203)
redis.geoadd("carlocation",51.4508, 7.0131, 1204)
redis.geoadd("carlocation",51.0493, 13.7384, 1205)
redis.geoadd("carlocation",51.35, 12.3833, 1206)
redis.geoadd("carlocation",52.3744, 9.7386, 1207)
redis.geoadd("carlocation",49.4528, 11.0778, 1208)
redis.geoadd("carlocation",51.4322, 6.7611, 1385)
redis.geoadd("carlocation",51.4833, 7.2167, 1210)
redis.geoadd("carlocation",51.2667, 7.1833, 1211)
redis.geoadd("carlocation",52.0167, 8.5333, 1212)
redis.geoadd("carlocation",50.7339, 7.0997, 1213)
redis.geoadd("carlocation",51.9625, 7.6256, 1214)
redis.geoadd("carlocation",52.5167, 13.3833, 1215)
redis.geoadd("carlocation",48.1372, 11.5755, 1216)
redis.geoadd("carlocation",50.9422, 6.9578, 1217)
redis.geoadd("carlocation",50.1136, 8.6797, 1218)
redis.geoadd("carlocation",48.7761, 9.1775, 1411)
redis.geoadd("carlocation",51.2311, 6.7724, 1220)
redis.geoadd("carlocation",51.5139, 7.4653, 1221)
redis.geoadd("carlocation",51.4508, 7.0131, 1222)
redis.geoadd("carlocation",51.0493, 13.7384, 1223)
redis.geoadd("carlocation",51.35, 12.3833, 1224)
redis.geoadd("carlocation",52.3744, 9.7386, 1225)
redis.geoadd("carlocation",49.4528, 11.0778, 1226)
redis.geoadd("carlocation",51.4322, 6.7611, 1417)
redis.geoadd("carlocation",51.4833, 7.2167, 1228)
redis.geoadd("carlocation",51.2667, 7.1833, 1229)
redis.geoadd("carlocation",52.0167, 8.5333, 1446)
redis.geoadd("carlocation",50.7339, 7.0997, 1231)
redis.geoadd("carlocation",51.9625, 7.6256, 1232)
redis.geoadd("carlocation",49.4528, 11.0778, 1233)
redis.geoadd("carlocation",51.4322, 6.7611, 1234)
redis.geoadd("carlocation",51.4833, 7.2167, 1235)
redis.geoadd("carlocation",51.2667, 7.1833, 1236)
redis.geoadd("carlocation",52.0167, 8.5333, 1237)
redis.geoadd("carlocation",50.7339, 7.0997, 1492)
redis.geoadd("carlocation",51.9625, 7.6256, 1239)
redis.geoadd("carlocation",52.5167, 13.3833, 1240)
redis.geoadd("carlocation",48.1372, 11.5755, 1023)
redis.geoadd("carlocation",50.9422, 6.9578, 1242)
redis.geoadd("carlocation",50.1136, 8.6797, 1243)
redis.geoadd("carlocation",48.7761, 9.1775, 1244)
redis.geoadd("carlocation",51.2311, 6.7724, 1245)
redis.geoadd("carlocation",51.5139, 7.4653, 1246)
redis.geoadd("carlocation",51.4508, 7.0131, 1247)
redis.geoadd("carlocation",51.0493, 13.7384, 1248)
redis.geoadd("carlocation",51.35, 12.3833, 1249)
redis.geoadd("carlocation",52.3744, 9.7386, 1250)
redis.geoadd("carlocation",49.4528, 11.0778, 1251)
redis.geoadd("carlocation",51.4322, 6.7611, 1252)
redis.geoadd("carlocation",51.4833, 7.2167, 1253)
redis.geoadd("carlocation",51.2667, 7.1833, 1254)
redis.geoadd("carlocation",52.0167, 8.5333, 1255)
redis.geoadd("carlocation",50.7339, 7.0997, 1037)
redis.geoadd("carlocation",51.9625, 7.6256, 1257)
redis.geoadd("carlocation",52.5167, 13.3833, 1258)
redis.geoadd("carlocation",48.1372, 11.5755, 1259)
redis.geoadd("carlocation",50.9422, 6.9578, 1260)
redis.geoadd("carlocation",50.1136, 8.6797, 1261)
redis.geoadd("carlocation",48.7761, 9.1775, 1061)
redis.geoadd("carlocation",51.2311, 6.7724, 1263)
redis.geoadd("carlocation",51.5139, 7.4653, 1264)
redis.geoadd("carlocation",51.4508, 7.0131, 1265)
redis.geoadd("carlocation",51.0493, 13.7384, 1266)
redis.geoadd("carlocation",51.35, 12.3833, 1267)
redis.geoadd("carlocation",52.3744, 9.7386, 1268)
redis.geoadd("carlocation",49.4528, 11.0778, 1269)
redis.geoadd("carlocation",51.4322, 6.7611, 1270)
redis.geoadd("carlocation",51.4833, 7.2167, 1271)
redis.geoadd("carlocation",51.2667, 7.1833, 1077)
redis.geoadd("carlocation",52.0167, 8.5333, 1081)
redis.geoadd("carlocation",50.7339, 7.0997, 1083)
redis.geoadd("carlocation",51.9625, 7.6256, 1087)
redis.geoadd("carlocation",49.4528, 11.0778, 1276)
redis.geoadd("carlocation",51.4322, 6.7611, 1277)
redis.geoadd("carlocation",51.4833, 7.2167, 1278)
redis.geoadd("carlocation",51.2667, 7.1833, 1279)
redis.geoadd("carlocation",52.0167, 8.5333, 1280)
redis.geoadd("carlocation",50.7339, 7.0997, 1281)
redis.geoadd("carlocation",51.9625, 7.6256, 1282)
redis.geoadd("carlocation",52.5167, 13.3833, 1113)
redis.geoadd("carlocation",48.1372, 11.5755, 1284)
redis.geoadd("carlocation",50.9422, 6.9578, 1285)
redis.geoadd("carlocation",50.1136, 8.6797, 1286)
redis.geoadd("carlocation",48.7761, 9.1775, 1287)
redis.geoadd("carlocation",51.2311, 6.7724, 1288)
redis.geoadd("carlocation",51.5139, 7.4653, 1289)
redis.geoadd("carlocation",51.4508, 7.0131, 1290)
redis.geoadd("carlocation",51.0493, 13.7384, 1291)
redis.geoadd("carlocation",51.35, 12.3833, 1292)
redis.geoadd("carlocation",52.3744, 9.7386, 1147)
redis.geoadd("carlocation",49.4528, 11.0778, 1294)
redis.geoadd("carlocation",51.4322, 6.7611, 1295)
redis.geoadd("carlocation",51.4833, 7.2167, 1296)
redis.geoadd("carlocation",51.2667, 7.1833, 1297)
redis.geoadd("carlocation",52.0167, 8.5333, 1298)
redis.geoadd("carlocation",50.7339, 7.0997, 1299)
redis.geoadd("carlocation",51.9625, 7.6256, 1300)
redis.geoadd("carlocation",52.5167, 13.3833, 1301)
redis.geoadd("carlocation",48.1372, 11.5755, 1302)
redis.geoadd("carlocation",50.9422, 6.9578, 1303)
redis.geoadd("carlocation",50.1136, 8.6797, 1304)
redis.geoadd("carlocation",48.7761, 9.1775, 1305)
redis.geoadd("carlocation",51.2311, 6.7724, 1209)
redis.geoadd("carlocation",51.5139, 7.4653, 1307)
redis.geoadd("carlocation",51.4508, 7.0131, 1308)
redis.geoadd("carlocation",51.0493, 13.7384, 1219)
redis.geoadd("carlocation",51.35, 12.3833, 1310)
redis.geoadd("carlocation",52.3744, 9.7386, 1227)
redis.geoadd("carlocation",49.4528, 11.0778, 1312)
redis.geoadd("carlocation",51.4322, 6.7611, 1313)
redis.geoadd("carlocation",51.4833, 7.2167, 1314)
redis.geoadd("carlocation",51.2667, 7.1833, 1315)
redis.geoadd("carlocation",52.0167, 8.5333, 1238)
redis.geoadd("carlocation",50.7339, 7.0997, 1317)
redis.geoadd("carlocation",51.9625, 7.6256, 1318)
redis.geoadd("carlocation",49.4528, 11.0778, 1319)
redis.geoadd("carlocation",51.4322, 6.7611, 1320)
redis.geoadd("carlocation",51.4833, 7.2167, 1321)
redis.geoadd("carlocation",51.2667, 7.1833, 1322)
redis.geoadd("carlocation",52.0167, 8.5333, 1323)
redis.geoadd("carlocation",50.7339, 7.0997, 1324)
redis.geoadd("carlocation",51.9625, 7.6256, 1325)
redis.geoadd("carlocation",52.5167, 13.3833, 1326)
redis.geoadd("carlocation",48.1372, 11.5755, 1327)
redis.geoadd("carlocation",50.9422, 6.9578, 1328)
redis.geoadd("carlocation",50.1136, 8.6797, 1329)
redis.geoadd("carlocation",48.7761, 9.1775, 1330)
redis.geoadd("carlocation",51.2311, 6.7724, 1331)
redis.geoadd("carlocation",51.5139, 7.4653, 1332)
redis.geoadd("carlocation",51.4508, 7.0131, 1333)
redis.geoadd("carlocation",51.0493, 13.7384, 1334)
redis.geoadd("carlocation",51.35, 12.3833, 1335)
redis.geoadd("carlocation",52.3744, 9.7386, 1336)
redis.geoadd("carlocation",49.4528, 11.0778, 1337)
redis.geoadd("carlocation",51.4322, 6.7611, 1338)
redis.geoadd("carlocation",51.4833, 7.2167, 1339)
redis.geoadd("carlocation",51.2667, 7.1833, 1340)
redis.geoadd("carlocation",52.0167, 8.5333, 1341)
redis.geoadd("carlocation",50.7339, 7.0997, 1342)
redis.geoadd("carlocation",51.9625, 7.6256, 1343)
redis.geoadd("carlocation",52.5167, 13.3833, 1344)
redis.geoadd("carlocation",48.1372, 11.5755, 1345)
redis.geoadd("carlocation",50.9422, 6.9578, 1346)
redis.geoadd("carlocation",50.1136, 8.6797, 1347)
redis.geoadd("carlocation",48.7761, 9.1775, 1348)
redis.geoadd("carlocation",51.2311, 6.7724, 1349)
redis.geoadd("carlocation",51.5139, 7.4653, 1350)
redis.geoadd("carlocation",51.4508, 7.0131, 1351)
redis.geoadd("carlocation",51.0493, 13.7384, 1352)
redis.geoadd("carlocation",51.35, 12.3833, 1353)
redis.geoadd("carlocation",52.3744, 9.7386, 1354)
redis.geoadd("carlocation",49.4528, 11.0778, 1355)
redis.geoadd("carlocation",51.4322, 6.7611, 1356)
redis.geoadd("carlocation",51.4833, 7.2167, 1357)
redis.geoadd("carlocation",51.2667, 7.1833, 1358)
redis.geoadd("carlocation",52.0167, 8.5333, 1359)
redis.geoadd("carlocation",50.7339, 7.0997, 1360)
redis.geoadd("carlocation",51.9625, 7.6256, 1361)
redis.geoadd("carlocation",49.4528, 11.0778, 1362)
redis.geoadd("carlocation",51.4322, 6.7611, 1363)
redis.geoadd("carlocation",51.4833, 7.2167, 1364)
redis.geoadd("carlocation",51.2667, 7.1833, 1365)
redis.geoadd("carlocation",52.0167, 8.5333, 1366)
redis.geoadd("carlocation",50.7339, 7.0997, 1367)
redis.geoadd("carlocation",51.9625, 7.6256, 1368)
redis.geoadd("carlocation",52.5167, 13.3833, 1369)
redis.geoadd("carlocation",48.1372, 11.5755, 1370)
redis.geoadd("carlocation",50.9422, 6.9578, 1371)
redis.geoadd("carlocation",50.1136, 8.6797, 1272)
redis.geoadd("carlocation",48.7761, 9.1775, 1373)
redis.geoadd("carlocation",51.2311, 6.7724, 1374)
redis.geoadd("carlocation",51.5139, 7.4653, 1375)
redis.geoadd("carlocation",51.4508, 7.0131, 1376)
redis.geoadd("carlocation",51.0493, 13.7384, 1377)
redis.geoadd("carlocation",51.35, 12.3833, 1378)
redis.geoadd("carlocation",52.3744, 9.7386, 1273)
redis.geoadd("carlocation",49.4528, 11.0778, 1380)
redis.geoadd("carlocation",51.4322, 6.7611, 1381)
redis.geoadd("carlocation",51.4833, 7.2167, 1382)
redis.geoadd("carlocation",51.2667, 7.1833, 1383)
redis.geoadd("carlocation",52.0167, 8.5333, 1384)
redis.geoadd("carlocation",50.7339, 7.0997, 1274)
redis.geoadd("carlocation",51.9625, 7.6256, 1386)
redis.geoadd("carlocation",52.5167, 13.3833, 1387)
redis.geoadd("carlocation",48.1372, 11.5755, 1388)
redis.geoadd("carlocation",50.9422, 6.9578, 1389)
redis.geoadd("carlocation",50.1136, 8.6797, 1390)
redis.geoadd("carlocation",48.7761, 9.1775, 1391)
redis.geoadd("carlocation",51.2311, 6.7724, 1392)
redis.geoadd("carlocation",51.5139, 7.4653, 1393)
redis.geoadd("carlocation",51.4508, 7.0131, 1394)
redis.geoadd("carlocation",51.0493, 13.7384, 1395)
redis.geoadd("carlocation",51.35, 12.3833, 1396)
redis.geoadd("carlocation",52.3744, 9.7386, 1397)
redis.geoadd("carlocation",49.4528, 11.0778, 1398)
redis.geoadd("carlocation",51.4322, 6.7611, 1399)
redis.geoadd("carlocation",51.4833, 7.2167, 1400)
redis.geoadd("carlocation",51.2667, 7.1833, 1306)
redis.geoadd("carlocation",52.0167, 8.5333, 1402)
redis.geoadd("carlocation",50.7339, 7.0997, 1403)
redis.geoadd("carlocation",51.9625, 7.6256, 1404)
redis.geoadd("carlocation",49.4528, 11.0778, 1405)
redis.geoadd("carlocation",51.4322, 6.7611, 1406)
redis.geoadd("carlocation",51.4833, 7.2167, 1407)
redis.geoadd("carlocation",51.2667, 7.1833, 1408)
redis.geoadd("carlocation",52.0167, 8.5333, 1409)
redis.geoadd("carlocation",50.7339, 7.0997, 1410)
redis.geoadd("carlocation",51.9625, 7.6256, 1311)
redis.geoadd("carlocation",52.5167, 13.3833, 1412)
redis.geoadd("carlocation",48.1372, 11.5755, 1413)
redis.geoadd("carlocation",50.9422, 6.9578, 1414)
redis.geoadd("carlocation",50.1136, 8.6797, 1415)
redis.geoadd("carlocation",48.7761, 9.1775, 1416)
redis.geoadd("carlocation",51.2311, 6.7724, 1372)
redis.geoadd("carlocation",51.5139, 7.4653, 1418)
redis.geoadd("carlocation",51.4508, 7.0131, 1419)
redis.geoadd("carlocation",51.0493, 13.7384, 1420)
redis.geoadd("carlocation",51.35, 12.3833, 1421)
redis.geoadd("carlocation",52.3744, 9.7386, 1422)
redis.geoadd("carlocation",49.4528, 11.0778, 1423)
redis.geoadd("carlocation",51.4322, 6.7611, 1424)
redis.geoadd("carlocation",51.4833, 7.2167, 1425)
redis.geoadd("carlocation",51.2667, 7.1833, 1426)
redis.geoadd("carlocation",52.0167, 8.5333, 1427)
redis.geoadd("carlocation",50.7339, 7.0997, 1428)
redis.geoadd("carlocation",51.9625, 7.6256, 1429)
redis.geoadd("carlocation",52.5167, 13.3833, 1379)
redis.geoadd("carlocation",48.1372, 11.5755, 1431)
redis.geoadd("carlocation",50.9422, 6.9578, 1432)
redis.geoadd("carlocation",50.1136, 8.6797, 1433)
redis.geoadd("carlocation",48.7761, 9.1775, 1434)
redis.geoadd("carlocation",51.2311, 6.7724, 1435)
redis.geoadd("carlocation",51.5139, 7.4653, 1436)
redis.geoadd("carlocation",51.4508, 7.0131, 1437)
redis.geoadd("carlocation",51.0493, 13.7384, 1438)
redis.geoadd("carlocation",51.35, 12.3833, 1439)
redis.geoadd("carlocation",52.3744, 9.7386, 1440)
redis.geoadd("carlocation",49.4528, 11.0778, 1441)
redis.geoadd("carlocation",51.4322, 6.7611, 1442)
redis.geoadd("carlocation",51.4833, 7.2167, 1443)
redis.geoadd("carlocation",51.2667, 7.1833, 1401)
redis.geoadd("carlocation",52.0167, 8.5333, 1445)
redis.geoadd("carlocation",50.7339, 7.0997, 1430)
redis.geoadd("carlocation",51.9625, 7.6256, 1447)
redis.geoadd("carlocation",49.4528, 11.0778, 1448)
redis.geoadd("carlocation",51.4322, 6.7611, 1449)
redis.geoadd("carlocation",51.4833, 7.2167, 1450)
redis.geoadd("carlocation",51.2667, 7.1833, 1451)
redis.geoadd("carlocation",52.0167, 8.5333, 1452)
redis.geoadd("carlocation",50.7339, 7.0997, 1453)
redis.geoadd("carlocation",51.9625, 7.6256, 1454)
redis.geoadd("carlocation",52.5167, 13.3833, 1455)
redis.geoadd("carlocation",48.1372, 11.5755, 1456)
redis.geoadd("carlocation",50.9422, 6.9578, 1457)
redis.geoadd("carlocation",50.1136, 8.6797, 1458)
redis.geoadd("carlocation",48.7761, 9.1775, 1459)
redis.geoadd("carlocation",51.2311, 6.7724, 1460)
redis.geoadd("carlocation",51.5139, 7.4653, 1461)
redis.geoadd("carlocation",51.4508, 7.0131, 1462)
redis.geoadd("carlocation",51.0493, 13.7384, 1463)
redis.geoadd("carlocation",51.35, 12.3833, 1464)
redis.geoadd("carlocation",52.3744, 9.7386, 1465)
redis.geoadd("carlocation",49.4528, 11.0778, 1466)
redis.geoadd("carlocation",51.4322, 6.7611, 1467)
redis.geoadd("carlocation",51.4833, 7.2167, 1468)
redis.geoadd("carlocation",51.2667, 7.1833, 1469)
redis.geoadd("carlocation",52.0167, 8.5333, 1470)
redis.geoadd("carlocation",50.7339, 7.0997, 1471)
redis.geoadd("carlocation",51.9625, 7.6256, 1472)
redis.geoadd("carlocation",52.5167, 13.3833, 1473)
redis.geoadd("carlocation",48.1372, 11.5755, 1474)
redis.geoadd("carlocation",50.9422, 6.9578, 1475)
redis.geoadd("carlocation",50.1136, 8.6797, 1476)
redis.geoadd("carlocation",48.7761, 9.1775, 1477)
redis.geoadd("carlocation",51.2311, 6.7724, 1478)
redis.geoadd("carlocation",51.5139, 7.4653, 1479)
redis.geoadd("carlocation",51.4508, 7.0131, 1480)
redis.geoadd("carlocation",51.0493, 13.7384, 1481)
redis.geoadd("carlocation",51.35, 12.3833, 1482)
redis.geoadd("carlocation",52.3744, 9.7386, 1483)
redis.geoadd("carlocation",49.4528, 11.0778, 1484)
redis.geoadd("carlocation",51.4322, 6.7611, 1485)
redis.geoadd("carlocation",51.4833, 7.2167, 1486)
redis.geoadd("carlocation",51.2667, 7.1833, 1487)
redis.geoadd("carlocation",52.0167, 8.5333, 1488)
redis.geoadd("carlocation",50.7339, 7.0997, 1489)
redis.geoadd("carlocation",51.9625, 7.6256, 1444)
redis.geoadd("carlocation",49.4528, 11.0778, 1491)
redis.geoadd("carlocation",51.4322, 6.7611, 1490)
redis.geoadd("carlocation",51.4833, 7.2167, 1493)
redis.geoadd("carlocation",51.2667, 7.1833, 1494)
redis.geoadd("carlocation",52.0167, 8.5333, 1495)
redis.geoadd("carlocation",50.7339, 7.0997, 1496)
redis.geoadd("carlocation",51.9625, 7.6256, 1497)
redis.geoadd("carlocation",52.5167, 13.3833, 1498)
redis.geoadd("carlocation",48.1372, 11.5755, 1499)
redis.geoadd("carlocation",50.9422, 6.9578, 1500)
| [
"noreply@github.com"
] | rabichith10.noreply@github.com |
5f1ecd3237ac06fa0500003d3ce52987a8a22090 | c0afbbc2dc0079ddbbcf63c206d37b5db529c879 | /webapp/migrations/0002_auto_20170906_1407.py | 4eed98a7940044708ff87583202c8bdf78240135 | [] | no_license | bchangip/ratemyprofessor | 615fe2bb2eb12c3b51e7b116b885069e477ef7e7 | 2af92904b46641f6b8c941ee7bcab5ccc8d9ad47 | refs/heads/master | 2021-01-18T16:21:32.313000 | 2017-10-11T22:39:02 | 2017-10-11T22:39:02 | 100,443,082 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-09-06 14:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema change for the Student model: the single free-form ``interests``
    # field is dropped and replaced by three optional, fixed-length slots
    # (``interest1`` .. ``interest3``, each CharField(max_length=10, blank=True)).
    # NOTE(review): RemoveField discards any existing ``interests`` data --
    # there is no data migration here, so prior values are lost on apply.

    dependencies = [
        # Must be applied after the app's initial schema migration.
        ('webapp', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='student',
            name='interests',
        ),
        migrations.AddField(
            model_name='student',
            name='interest1',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='student',
            name='interest2',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='student',
            name='interest3',
            field=models.CharField(blank=True, max_length=10),
        ),
    ]
| [
"xchangip@gmail.com"
] | xchangip@gmail.com |
1b0df24d4c5b13149929ecf7e8e911387dd5a6e3 | b1aa109324d1b9b11edffe4bce89282ae195b3b5 | /sentimentanalysis.py | bf7aba8e6ee7378d40c754815642a00982c7490d | [] | no_license | kaivalyavohra/sentimentanalysis | 1a21fd95062ba6d7d012eebf1a784b41277a35ce | 8d6627011f25ead1b4915abc1defa7837fdc3689 | refs/heads/master | 2020-06-14T16:56:14.153675 | 2019-07-03T13:57:41 | 2019-07-03T13:57:41 | 195,064,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import tweepy
from textblob import TextBlob

# Count of tweets that passed the subjectivity filter below.
numoftweets = 0
# Twitter account authentication (placeholders -- supply real API credentials).
auth = tweepy.OAuthHandler("SEE TWITTER API", "SEE TWITTER API")
auth.set_access_token("SEE TWITTER API", "SEE TWITTER API")
api = tweepy.API(auth)
# Running totals of polarity (score) and subjectivity (score2) over the
# subjective tweets only.
score, score2 = 0, 0
searchstring = input("Enter a subject to search: ")
# Search recent tweets about the subject.
# NOTE(review): the standard search endpoint caps results per request well
# below 1500 -- confirm whether pagination (tweepy.Cursor) is intended here.
public_tweets = api.search(q=searchstring, count=1500)
for tweet in public_tweets:
    blob = TextBlob(tweet.text)
    # Only opinionated tweets (subjectivity > 0.5) contribute to the averages.
    if blob.sentiment.subjectivity > 0.5:
        score += blob.sentiment.polarity
        score2 += blob.sentiment.subjectivity
        numoftweets += 1

# Bug fix: the original divided by numoftweets unconditionally, raising
# ZeroDivisionError whenever no subjective tweet matched the search.
if numoftweets == 0:
    print("No subjective tweets found for this subject.")
else:
    # Mean polarity is in [-1, 1]; map it to a 0-100 "approval" percentage.
    print(round(((score / numoftweets) + 1) * 50), "percent approval")
    # Mean subjectivity is in [0, 1]; report it as a percentage.
    print(round((score2 / numoftweets) * 100), "percent subjective")
"noreply@github.com"
] | kaivalyavohra.noreply@github.com |
88441b7e0974e4fc5de5bd965e9a9ad800acd21e | c7dfacea4969b4fef264429e7c21d6c2d4c932b4 | /src/baxter_examples/src/baxter_examples/recorder.py | 032f365f567755301f351b69f2b1e7a75a832475 | [
"BSD-2-Clause"
] | permissive | DeepBlue14/arm_wkspc | 697944c72be9a8efaf97a84b6c26a84ebc8de3a6 | 04009550321868722d207924eed3609be7f54882 | refs/heads/master | 2020-03-21T10:10:05.644158 | 2018-06-23T23:16:40 | 2018-06-23T23:16:40 | 138,436,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,350 | py | # Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
class JointRecorder(object):
    def __init__(self, filename, rate):
        """
        Records joint data to a file at a specified rate.

        :param filename: path of the CSV file to write (falsy disables writing)
        :param rate: sampling rate in Hz
        """
        self._filename = filename
        self._raw_rate = rate
        self._rate = rospy.Rate(rate)
        # Timestamps in the output are measured relative to construction time.
        self._start_time = rospy.get_time()
        self._done = False

        # Interfaces for both arms, both grippers and the four cuff buttons
        # (lower button opens a gripper, upper button closes it).
        self._limb_left = baxter_interface.Limb("left")
        self._limb_right = baxter_interface.Limb("right")
        self._gripper_left = baxter_interface.Gripper("left", CHECK_VERSION)
        self._gripper_right = baxter_interface.Gripper("right", CHECK_VERSION)
        self._io_left_lower = baxter_interface.DigitalIO('left_lower_button')
        self._io_left_upper = baxter_interface.DigitalIO('left_upper_button')
        self._io_right_lower = baxter_interface.DigitalIO('right_lower_button')
        self._io_right_upper = baxter_interface.DigitalIO('right_upper_button')

        # Verify Grippers Have No Errors and are Calibrated
        # ('custom' grippers have no calibration routine, so they are skipped).
        if self._gripper_left.error():
            self._gripper_left.reset()
        if self._gripper_right.error():
            self._gripper_right.reset()
        if (not self._gripper_left.calibrated() and
                self._gripper_left.type() != 'custom'):
            self._gripper_left.calibrate()
        if (not self._gripper_right.calibrated() and
                self._gripper_right.type() != 'custom'):
            self._gripper_right.calibrate()

    def _time_stamp(self):
        # Seconds elapsed since the recorder was constructed.
        return rospy.get_time() - self._start_time

    def stop(self):
        """
        Stop recording.
        """
        self._done = True

    def done(self):
        """
        Return whether or not recording is done.
        """
        # A ROS shutdown also terminates the recording loop.
        if rospy.is_shutdown():
            self.stop()
        return self._done

    def record(self):
        """
        Records the current joint positions to a csv file if outputFilename was
        provided at construction this function will record the latest set of
        joint angles in a csv format.

        This function does not test to see if a file exists and will overwrite
        existing files.
        """
        if self._filename:
            joints_left = self._limb_left.joint_names()
            joints_right = self._limb_right.joint_names()
            with open(self._filename, 'w') as f:
                # CSV header: timestamp, left joints, left gripper,
                # right joints, right gripper.
                f.write('time,')
                f.write(','.join([j for j in joints_left]) + ',')
                f.write('left_gripper,')
                f.write(','.join([j for j in joints_right]) + ',')
                f.write('right_gripper\n')

                # Sample at self._rate until stop() is called or ROS shuts down.
                while not self.done():
                    # Look for gripper button presses
                    if self._io_left_lower.state:
                        self._gripper_left.open()
                    elif self._io_left_upper.state:
                        self._gripper_left.close()
                    if self._io_right_lower.state:
                        self._gripper_right.open()
                    elif self._io_right_upper.state:
                        self._gripper_right.close()
                    angles_left = [self._limb_left.joint_angle(j)
                                   for j in joints_left]
                    angles_right = [self._limb_right.joint_angle(j)
                                    for j in joints_right]

                    # One CSV row per sample, in the same order as the header.
                    f.write("%f," % (self._time_stamp(),))
                    f.write(','.join([str(x) for x in angles_left]) + ',')
                    f.write(str(self._gripper_left.position()) + ',')
                    f.write(','.join([str(x) for x in angles_right]) + ',')
                    f.write(str(self._gripper_right.position()) + '\n')

                    self._rate.sleep()
"james.perl12@gmail.com"
] | james.perl12@gmail.com |
5c7619b4c3404664eee56b77a698ea3584697135 | c3c4a3b1ae65f6855d5b333bf7e3e1e9c427fa8d | /code in python/7-3 查询水果价格.py | 5385788c8503a2a2994df8cba45aac8bf01dd02d | [] | no_license | Yif1999/Programming-in-Python | fb400f19e6e123e1afcf768b678c580a2c0d3324 | 515995cd9f383aba60fdbacc6476b679f6c9595a | refs/heads/master | 2023-06-22T18:52:27.574016 | 2021-07-02T07:15:34 | 2021-07-02T07:15:34 | 373,847,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | print('[1] apple\n[2] pear\n[3] orange\n[4] grape\n[0] exit')
# Read up to five menu choices from a single input line.
cmd = list(map(int, input().split()))

# Price lines for the known menu items; any other (non-zero) choice
# falls back to 'price = 0.00'.
_PRICES = {
    1: 'price = 3.00',   # apple
    2: 'price = 2.50',   # pear
    3: 'price = 4.10',   # orange
    4: 'price = 10.20',  # grape
}

# Bug fix: the original used `for i in range(5)` and indexed cmd[i], which
# raises IndexError when fewer than five numbers are entered.  Iterate over
# at most the first five values instead; 0 still terminates the queries.
for choice in cmd[:5]:
    if choice == 0:
        break
    print(_PRICES.get(choice, 'price = 0.00'))
| [
"3180101376@zju.edu.cn"
] | 3180101376@zju.edu.cn |
1b619b69b89bc115f7076f0884b11de16029f768 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_graph_ql_api_resolver_policy_operations.py | 38e54b6b1cfaa067458f84d88918cd32ea53c32f | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 38,418 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import ApiManagementClientMixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_resolver_request(
    resource_group_name: str, service_name: str, api_id: str, resolver_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all policies of a GraphQL API resolver.

    Auto-generated (AutoRest) request builder: it only assembles the URL,
    query parameters and headers -- it does not send the request.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied api_version/headers win over the generated defaults.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies",
    )  # pylint: disable=line-too-long
    # Serialize each path segment, enforcing the ARM length/pattern constraints.
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serviceName": _SERIALIZER.url(
            "service_name",
            service_name,
            "str",
            max_length=50,
            min_length=1,
            pattern=r"^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$",
        ),
        "apiId": _SERIALIZER.url("api_id", api_id, "str", max_length=256, min_length=1, pattern=r"^[^*#&+:<>?]+$"),
        "resolverId": _SERIALIZER.url("resolver_id", resolver_id, "str", max_length=80, min_length=1),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }

    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_entity_tag_request(
    resource_group_name: str,
    service_name: str,
    api_id: str,
    resolver_id: str,
    policy_id: Union[str, _models.PolicyIdName],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HEAD request that fetches the ETag of a resolver policy.

    Auto-generated (AutoRest) request builder: it only assembles the URL,
    query parameters and headers -- it does not send the request.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied api_version/headers win over the generated defaults.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}",
    )  # pylint: disable=line-too-long
    # Serialize each path segment, enforcing the ARM length/pattern constraints.
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serviceName": _SERIALIZER.url(
            "service_name",
            service_name,
            "str",
            max_length=50,
            min_length=1,
            pattern=r"^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$",
        ),
        "apiId": _SERIALIZER.url("api_id", api_id, "str", max_length=256, min_length=1, pattern=r"^[^*#&+:<>?]+$"),
        "resolverId": _SERIALIZER.url("resolver_id", resolver_id, "str", max_length=80, min_length=1),
        "policyId": _SERIALIZER.url("policy_id", policy_id, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }

    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
    resource_group_name: str,
    service_name: str,
    api_id: str,
    resolver_id: str,
    policy_id: Union[str, _models.PolicyIdName],
    subscription_id: str,
    *,
    format: Union[str, _models.PolicyExportFormat] = "xml",
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that retrieves a resolver policy document.

    Auto-generated (AutoRest) request builder: it only assembles the URL,
    query parameters and headers -- it does not send the request.
    ``format`` selects the export format of the returned policy (default XML).
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied api_version/headers win over the generated defaults.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}",
    )  # pylint: disable=line-too-long
    # Serialize each path segment, enforcing the ARM length/pattern constraints.
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serviceName": _SERIALIZER.url(
            "service_name",
            service_name,
            "str",
            max_length=50,
            min_length=1,
            pattern=r"^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$",
        ),
        "apiId": _SERIALIZER.url("api_id", api_id, "str", max_length=256, min_length=1, pattern=r"^[^*#&+:<>?]+$"),
        "resolverId": _SERIALIZER.url("resolver_id", resolver_id, "str", max_length=80, min_length=1),
        "policyId": _SERIALIZER.url("policy_id", policy_id, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }

    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore

    # Construct parameters
    # ``format`` is optional on the wire; only emitted when provided.
    if format is not None:
        _params["format"] = _SERIALIZER.query("format", format, "str")
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str,
    service_name: str,
    api_id: str,
    resolver_id: str,
    policy_id: Union[str, _models.PolicyIdName],
    subscription_id: str,
    *,
    if_match: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a resolver-level policy.

    ``if_match`` carries the optimistic-concurrency ETag (omitted on create);
    caller-supplied ``headers``/``params`` in *kwargs* are merged case-insensitively.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Explicit api_version / content_type kwargs take precedence over anything
    # the caller pre-set in the query string or headers.
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2022-08-01"))
    content_type: Optional[str] = kwargs.pop("content_type", headers.pop("Content-Type", None))
    accept = headers.pop("Accept", "application/json")

    # URL: a caller may override the template via "template_url".
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serviceName": _SERIALIZER.url(
            "service_name",
            service_name,
            "str",
            max_length=50,
            min_length=1,
            pattern=r"^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$",
        ),
        "apiId": _SERIALIZER.url("api_id", api_id, "str", max_length=256, min_length=1, pattern=r"^[^*#&+:<>?]+$"),
        "resolverId": _SERIALIZER.url("resolver_id", resolver_id, "str", max_length=80, min_length=1),
        "policyId": _SERIALIZER.url("policy_id", policy_id, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    url = _format_url_section(url, **path_args)  # type: ignore

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers: the optional concurrency/content headers first, Accept last.
    if if_match is not None:
        headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(
    resource_group_name: str,
    service_name: str,
    api_id: str,
    resolver_id: str,
    policy_id: Union[str, _models.PolicyIdName],
    subscription_id: str,
    *,
    if_match: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for a resolver-level policy.

    ``if_match`` is required here (unlike create/update): deletes must be
    conditional on the current entity ETag (or ``*`` for unconditional).
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # An explicit api_version kwarg (or pre-set query param) wins over the default.
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2022-08-01"))
    accept = headers.pop("Accept", "application/json")

    # URL: a caller may override the template via "template_url".
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serviceName": _SERIALIZER.url(
            "service_name",
            service_name,
            "str",
            max_length=50,
            min_length=1,
            pattern=r"^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$",
        ),
        "apiId": _SERIALIZER.url("api_id", api_id, "str", max_length=256, min_length=1, pattern=r"^[^*#&+:<>?]+$"),
        "resolverId": _SERIALIZER.url("resolver_id", resolver_id, "str", max_length=80, min_length=1),
        "policyId": _SERIALIZER.url("policy_id", policy_id, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    url = _format_url_section(url, **path_args)  # type: ignore

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers.
    headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
class GraphQLApiResolverPolicyOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.apimanagement.ApiManagementClient`'s
        :attr:`graph_ql_api_resolver_policy` attribute.
    """

    # NOTE(review): this looks like Azure-SDK code-generator output — manual edits
    # here are presumably overwritten on regeneration; confirm before hand-editing.

    models = _models

    def __init__(self, *args, **kwargs):
        # The parent client injects (client, config, serializer, deserializer),
        # either positionally in that order or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_by_resolver(
        self, resource_group_name: str, service_name: str, api_id: str, resolver_id: str, **kwargs: Any
    ) -> Iterable["_models.PolicyContract"]:
        """Get the list of policy configuration at the GraphQL API Resolver level.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyContract or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.PolicyContract]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.PolicyCollection] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: use the operation's URL template. Later pages: follow
            # the service-returned next_link, re-stamped with the client api-version.
            if not next_link:
                request = build_list_by_resolver_request(
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    api_id=api_id,
                    resolver_id=resolver_id,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_resolver.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("PolicyCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_by_resolver.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies"
    }

    @distributed_trace
    def get_entity_tag(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        resolver_id: str,
        policy_id: Union[str, _models.PolicyIdName],
        **kwargs: Any
    ) -> bool:
        """Gets the entity state (Etag) version of the GraphQL API resolver policy specified by its
        identifier.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :param policy_id: The identifier of the Policy. "policy" Required.
        :type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: bool or the result of cls(response)
        :rtype: bool
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)

        request = build_get_entity_tag_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            api_id=api_id,
            resolver_id=resolver_id,
            policy_id=policy_id,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get_entity_tag.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # The entity version is carried in the ETag response header.
        response_headers = {}
        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))

        if cls:
            return cls(pipeline_response, None, response_headers)
        return 200 <= response.status_code <= 299

    get_entity_tag.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}"
    }

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        resolver_id: str,
        policy_id: Union[str, _models.PolicyIdName],
        format: Union[str, _models.PolicyExportFormat] = "xml",
        **kwargs: Any
    ) -> _models.PolicyContract:
        """Get the policy configuration at the GraphQL API Resolver level.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :param policy_id: The identifier of the Policy. "policy" Required.
        :type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
        :param format: Policy Export Format. Known values are: "xml" and "rawxml". Default value is
         "xml".
        :type format: str or ~azure.mgmt.apimanagement.models.PolicyExportFormat
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyContract or the result of cls(response)
        :rtype: ~azure.mgmt.apimanagement.models.PolicyContract
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.PolicyContract] = kwargs.pop("cls", None)

        request = build_get_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            api_id=api_id,
            resolver_id=resolver_id,
            policy_id=policy_id,
            subscription_id=self._config.subscription_id,
            format=format,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Surface the entity ETag alongside the deserialized body (used for
        # optimistic concurrency on subsequent update/delete calls).
        response_headers = {}
        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))

        deserialized = self._deserialize("PolicyContract", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}"
    }

    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        resolver_id: str,
        policy_id: Union[str, _models.PolicyIdName],
        parameters: _models.PolicyContract,
        if_match: Optional[str] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PolicyContract:
        """Creates or updates policy configuration for the GraphQL API Resolver level.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :param policy_id: The identifier of the Policy. "policy" Required.
        :type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
        :param parameters: The policy contents to apply. Required.
        :type parameters: ~azure.mgmt.apimanagement.models.PolicyContract
        :param if_match: ETag of the Entity. Not required when creating an entity, but required when
         updating an entity. Default value is None.
        :type if_match: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyContract or the result of cls(response)
        :rtype: ~azure.mgmt.apimanagement.models.PolicyContract
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        resolver_id: str,
        policy_id: Union[str, _models.PolicyIdName],
        parameters: IO,
        if_match: Optional[str] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PolicyContract:
        """Creates or updates policy configuration for the GraphQL API Resolver level.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :param policy_id: The identifier of the Policy. "policy" Required.
        :type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
        :param parameters: The policy contents to apply. Required.
        :type parameters: IO
        :param if_match: ETag of the Entity. Not required when creating an entity, but required when
         updating an entity. Default value is None.
        :type if_match: str
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyContract or the result of cls(response)
        :rtype: ~azure.mgmt.apimanagement.models.PolicyContract
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        resolver_id: str,
        policy_id: Union[str, _models.PolicyIdName],
        parameters: Union[_models.PolicyContract, IO],
        if_match: Optional[str] = None,
        **kwargs: Any
    ) -> _models.PolicyContract:
        """Creates or updates policy configuration for the GraphQL API Resolver level.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :param policy_id: The identifier of the Policy. "policy" Required.
        :type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
        :param parameters: The policy contents to apply. Is either a PolicyContract type or a IO type.
         Required.
        :type parameters: ~azure.mgmt.apimanagement.models.PolicyContract or IO
        :param if_match: ETag of the Entity. Not required when creating an entity, but required when
         updating an entity. Default value is None.
        :type if_match: str
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyContract or the result of cls(response)
        :rtype: ~azure.mgmt.apimanagement.models.PolicyContract
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.PolicyContract] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        # Stream/bytes payloads are sent raw; model payloads are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "PolicyContract")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            api_id=api_id,
            resolver_id=resolver_id,
            policy_id=policy_id,
            subscription_id=self._config.subscription_id,
            if_match=if_match,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 = updated, 201 = created; both return the contract plus its ETag.
        response_headers = {}
        if response.status_code == 200:
            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))

            deserialized = self._deserialize("PolicyContract", pipeline_response)

        if response.status_code == 201:
            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))

            deserialized = self._deserialize("PolicyContract", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}"
    }

    @distributed_trace
    def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        resolver_id: str,
        policy_id: Union[str, _models.PolicyIdName],
        if_match: str,
        **kwargs: Any
    ) -> None:
        """Deletes the policy configuration at the GraphQL Api Resolver.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param service_name: The name of the API Management service. Required.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number. Required.
        :type api_id: str
        :param resolver_id: Resolver identifier within a GraphQL API. Must be unique in the current API
         Management service instance. Required.
        :type resolver_id: str
        :param policy_id: The identifier of the Policy. "policy" Required.
        :type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
        :param if_match: ETag of the Entity. ETag should match the current entity state from the header
         response of the GET request or it should be * for unconditional update. Required.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)

        request = build_delete_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            api_id=api_id,
            resolver_id=resolver_id,
            policy_id=policy_id,
            subscription_id=self._config.subscription_id,
            if_match=if_match,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/resolvers/{resolverId}/policies/{policyId}"
    }
| [
"noreply@github.com"
] | Azure.noreply@github.com |
d019d9c4441d4b9861d5ae74a62bd4d78938c1aa | d271c9ab8686ab3f76f409fc1a73b3b06347c6d8 | /AgentNet.py | 7cdeb4c70255af750e98389c6ad977a1f932c77d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | guoyaq/ElegantRL | a741269341d0c02b9f150c9e174ab5d9f7693eaa | 271f7ee37c524f3184990303880c1056dd67312a | refs/heads/master | 2023-01-20T23:45:35.230154 | 2020-11-26T01:42:50 | 2020-11-26T01:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,085 | py | import torch
import torch.nn as nn # import torch.nn.functional as F
import numpy as np # import numpy.random as rd
"""ZenJiaHao, GitHub: YonV1943 ElegantRL (Pytorch model-free DRL)
Issay, Easy Essay, EAsy esSAY 谐音: 意识
"""
class InterDPG(nn.Module): # class AgentIntelAC
def __init__(self, state_dim, action_dim, mid_dim):
super().__init__()
self.enc_s = nn.Sequential(
nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim),
)
self.enc_a = nn.Sequential(
nn.Linear(action_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim),
)
self.net = DenseNet(mid_dim)
net_out_dim = self.net.out_dim
self.dec_a = nn.Sequential(nn.Linear(net_out_dim, mid_dim), HardSwish(),
nn.Linear(mid_dim, action_dim), nn.Tanh(), )
self.dec_q = nn.Sequential(nn.Linear(net_out_dim, mid_dim), HardSwish(),
nn.utils.spectral_norm(nn.Linear(mid_dim, 1)), )
@staticmethod
def add_noise(a, noise_std): # 2020-03-03
# noise_normal = torch.randn_like(a) * noise_std
# a_temp = a + noise_normal
a_temp = torch.normal(a, noise_std)
mask = ((a_temp < -1.0) + (a_temp > 1.0)).type(torch.float32) # 2019-12-30
noise_uniform = torch.rand_like(a)
a_noise = noise_uniform * mask + a_temp * (-mask + 1)
return a_noise
def forward(self, s, noise_std=0.0): # actor
s_ = self.enc_s(s)
a_ = self.net(s_)
a = self.dec_a(a_)
return a if noise_std == 0.0 else self.add_noise(a, noise_std)
def critic(self, s, a):
s_ = self.enc_s(s)
a_ = self.enc_a(a)
q_ = self.net(s_ + a_)
q = self.dec_q(q_)
return q
def next__q_a(self, s, s_next, noise_std):
s_ = self.enc_s(s)
a_ = self.net(s_)
a = self.dec_a(a_)
'''q_target (without noise)'''
a_ = self.enc_a(a)
s_next_ = self.enc_s(s_next)
q_target0_ = self.net(s_next_ + a_)
q_target0 = self.dec_q(q_target0_)
'''q_target (with noise)'''
a_noise = self.add_noise(a, noise_std)
a_noise_ = self.enc_a(a_noise)
q_target1_ = self.net(s_next_ + a_noise_)
q_target1 = self.dec_q(q_target1_)
q_target = (q_target0 + q_target1) * 0.5
return q_target, a
class InterSPG(nn.Module):  # class AgentIntelAC for SAC (SPG means stochastic policy gradient)
    """Shared actor-critic network for SAC.

    One state encoder plus a shared DenseNet trunk feed four decoder heads:
    action mean (``dec_a``), action log-std (``dec_d``), and twin Q-values
    (``dec_q1``/``dec_q2``). ``enc_a`` lifts actions into the same feature
    space so state and action features can be summed for the critics.
    """

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        # Clamp bounds for the policy's log standard deviation.
        self.log_std_min = -20
        self.log_std_max = 2
        # log(sqrt(2*pi)): the Gaussian log-density normalization constant.
        self.constant_log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # encoder
        self.enc_s = nn.Sequential(
            nn.Linear(state_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, mid_dim),
        )  # state
        self.enc_a = nn.Sequential(
            nn.Linear(action_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, mid_dim),
        )  # action without nn.Tanh()

        self.net = DenseNet(mid_dim)
        net_out_dim = self.net.out_dim

        # decoder
        self.dec_a = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, action_dim),
        )  # action_mean
        self.dec_d = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, action_dim),
        )  # action_std_log (d means standard dev.)

        self.dec_q1 = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, 1),
        )  # q_value1 SharedTwinCritic
        self.dec_q2 = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, 1),
        )  # q_value2 SharedTwinCritic

        # Small-std init of the output layers keeps early actions/Q-values near zero.
        layer_norm(self.dec_a[-1], std=0.01)  # net[-1] is output layer for action, it is no necessary.
        layer_norm(self.dec_q1[-1], std=0.1)
        layer_norm(self.dec_q2[-1], std=0.1)

        '''Not need to use both SpectralNorm and TwinCritic
        I choose TwinCritc instead of SpectralNorm,
        because SpectralNorm is conflict with soft target update,

        if is_spectral_norm:
            self.dec_q1[1] = nn.utils.spectral_norm(self.dec_q1[1])
            self.dec_q2[1] = nn.utils.spectral_norm(self.dec_q2[1])
        '''

    def forward(self, s, noise_std=0.0):  # actor, in fact, noise_std is a boolean
        """Tanh-squashed action; when noise_std != 0, sample from the policy Gaussian."""
        s_ = self.enc_s(s)
        a_ = self.net(s_)
        a_mean = self.dec_a(a_)  # NOTICE! it is a_mean without tensor.tanh()

        if noise_std != 0.0:
            a_std_log = self.dec_d(a_).clamp(self.log_std_min, self.log_std_max)
            a_std = a_std_log.exp()
            a_mean = torch.normal(a_mean, a_std)  # NOTICE! it is a_mean without .tanh()

        return a_mean.tanh()

    def get__a__log_prob(self, state):  # actor
        """Sample a squashed action and return it with its "log_prob" term.

        NOTE(review): with these signs, ``log_prob`` equals delta + log(std) +
        log(sqrt(2*pi)) + log(1 - tanh(a)^2 + eps), i.e. the NEGATIVE
        log-density of the squashed Gaussian — the downstream SAC loss must
        account for this sign convention; confirm against the agent code.
        """
        s_ = self.enc_s(state)
        a_ = self.net(s_)
        a_mean = self.dec_a(a_)  # NOTICE! it is a_mean without .tanh()
        a_std_log = self.dec_d(a_).clamp(self.log_std_min, self.log_std_max)
        a_std = a_std_log.exp()

        """add noise to action, stochastic policy"""
        # a_noise = torch.normal(a_mean, a_std, requires_grad=True)
        # the above is not same as below, because it needs gradient
        # (reparameterization trick: mean + std * eps keeps gradients flowing).
        a_noise = a_mean + a_std * torch.randn_like(a_mean, requires_grad=True, device=self.device)

        '''compute log_prob according to mean and std of action (stochastic policy)'''
        # Gaussian term: ((x - mean) / std)^2 / 2 + log(std) + log(sqrt(2*pi))
        a_delta = ((a_noise - a_mean) / a_std).pow(2) * 0.5
        log_prob_noise = a_delta + a_std_log + self.constant_log_sqrt_2pi

        a_noise_tanh = a_noise.tanh()
        # log_prob = log_prob_noise - (1 - a_noise_tanh.pow(2) + epsilon).log() # epsilon = 1e-6
        # same as:  (the 1.000001 folds in the epsilon to avoid log(0))
        log_prob = log_prob_noise + (-a_noise_tanh.pow(2) + 1.000001).log()
        return a_noise_tanh, log_prob.sum(1, keepdim=True)

    def get__a__std(self, state):
        """Return (tanh-squashed action mean, clamped log-std)."""
        s_ = self.enc_s(state)
        a_ = self.net(s_)
        a_mean = self.dec_a(a_)  # NOTICE! it is a_mean without .tanh()
        a_std_log = self.dec_d(a_).clamp(self.log_std_min, self.log_std_max)
        return a_mean.tanh(), a_std_log

    def get__a__avg_std_noise_prob(self, state):  # actor
        """Like get__a__log_prob, but also returns the squashed mean and log-std.

        NOTE(review): ``log_prob`` follows the same sign convention as in
        ``get__a__log_prob`` (negative log-density) — see the note there.
        """
        s_ = self.enc_s(state)
        a_ = self.net(s_)
        a_mean = self.dec_a(a_)  # NOTICE! it is a_mean without .tanh()
        a_std_log = self.dec_d(a_).clamp(self.log_std_min, self.log_std_max)
        a_std = a_std_log.exp()

        """add noise to action, stochastic policy"""
        # a_noise = torch.normal(a_mean, a_std, requires_grad=True)
        # the above is not same as below, because it needs gradient
        noise = torch.randn_like(a_mean, requires_grad=True, device=self.device)
        a_noise = a_mean + a_std * noise

        '''compute log_prob according to mean and std of action (stochastic policy)'''
        # Gaussian term: ((x - mean) / std)^2 / 2 + log(std) + log(sqrt(2*pi))
        a_delta = ((a_noise - a_mean) / a_std).pow(2) * 0.5
        log_prob_noise = a_delta + a_std_log + self.constant_log_sqrt_2pi

        a_noise_tanh = a_noise.tanh()
        # log_prob = log_prob_noise - (1 - a_noise_tanh.pow(2) + epsilon).log() # epsilon = 1e-6
        # same as:  (the 1.000001 folds in the epsilon to avoid log(0))
        log_prob = log_prob_noise + (-a_noise_tanh.pow(2) + 1.000001).log()
        return a_mean.tanh(), a_std_log, a_noise_tanh, log_prob.sum(1, keepdim=True)

    def get__q1_q2(self, s, a):  # critic
        """Twin Q-values for (s, a); both heads share the encoders and trunk."""
        s_ = self.enc_s(s)
        a_ = self.enc_a(a)
        q_ = self.net(s_ + a_)
        q1 = self.dec_q1(q_)
        q2 = self.dec_q2(q_)
        return q1, q2
class InterGAE(nn.Module):
    """Shared-body network for GAE-style training: a state encoder feeding a
    DenseNet trunk with an action-mean head (dec_a), an action-log-std head
    (dec_d) and twin state-value heads (dec_q1 / dec_q2).
    """

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.log_std_min = -20
        self.log_std_max = 2
        self.constant_log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # state encoder
        self.enc_s = nn.Sequential(
            nn.Linear(state_dim, mid_dim), nn.ReLU(),
        )

        # shared trunk: DenseNet mixes shallow and deep features
        self.net = DenseNet(mid_dim)
        net_out_dim = self.net.out_dim

        # two-layer decoder heads
        self.dec_a = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), HardSwish(),
            nn.Linear(mid_dim, action_dim),
        )  # action_mean
        self.dec_d = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), HardSwish(),
            nn.Linear(mid_dim, action_dim),
        )  # action_std_log (d means standard dev.)

        self.dec_q1 = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), HardSwish(),
            nn.Linear(mid_dim, 1),
        )  # q_value1 SharedTwinCritic
        self.dec_q2 = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim), HardSwish(),
            nn.Linear(mid_dim, 1),
        )  # q_value2 SharedTwinCritic

        layer_norm(self.dec_a[-1], std=0.01)  # output layer for action mean
        layer_norm(self.dec_d[-1], std=0.01)  # output layer for std_log
        layer_norm(self.dec_q1[-1], std=0.1)  # output layer for q value 1
        # FIX: this line previously re-initialized dec_q1, leaving dec_q2's
        # output layer with PyTorch's default init instead of std=0.1.
        layer_norm(self.dec_q2[-1], std=0.1)  # output layer for q value 2

    def forward(self, s):
        """Deterministic action: tanh of the decoded mean."""
        x = self.enc_s(s)
        x = self.net(x)
        a_mean = self.dec_a(x)
        return a_mean.tanh()

    def get__a__log_prob(self, state):
        """Reparameterized Gaussian sample (no tanh squashing) and its
        per-sample summed log density."""
        x = self.enc_s(state)
        x = self.net(x)
        a_mean = self.dec_a(x)
        a_log_std = self.dec_d(x).clamp(self.log_std_min, self.log_std_max)
        a_std = torch.exp(a_log_std)

        # reparameterization trick keeps the sample differentiable
        a_noise = a_mean + a_std * torch.randn_like(a_mean, requires_grad=True, device=self.device)

        a_delta = (a_noise - a_mean).pow(2) / (2 * a_std.pow(2))
        log_prob = -(a_delta + a_log_std + self.constant_log_sqrt_2pi)
        return a_noise, log_prob.sum(1)

    def compute__log_prob(self, state, a_noise):
        """Log density of a previously sampled *a_noise* under the current policy."""
        x = self.enc_s(state)
        x = self.net(x)
        a_mean = self.dec_a(x)
        a_log_std = self.dec_d(x).clamp(self.log_std_min, self.log_std_max)
        a_std = torch.exp(a_log_std)

        a_delta = (a_noise - a_mean).pow(2) / (2 * a_std.pow(2))
        log_prob = -(a_delta + a_log_std + self.constant_log_sqrt_2pi)
        log_prob = log_prob.sum(1)
        return log_prob

    def get__q1_q2(self, s):
        """Both value heads for the state (twin critics share the trunk)."""
        x = self.enc_s(s)
        x = self.net(x)
        q1 = self.dec_q1(x)
        q2 = self.dec_q2(x)
        return q1, q2
class Actor(nn.Module):
    """Deterministic policy: a tanh-bounded MLP mapping state -> action."""

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        layers = [
            nn.Linear(state_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, action_dim), nn.Tanh(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, s, noise_std=0.0):
        action = self.net(s)
        if noise_std == 0:
            return action
        return self.add_noise(action, noise_std)

    @staticmethod
    def add_noise(action, noise_std):
        """Add clipped Gaussian exploration noise, keeping actions in [-1, 1]."""
        perturbation = (torch.randn_like(action) * noise_std).clamp(-0.5, 0.5)
        return (action + perturbation).clamp(-1.0, 1.0)
class ActorDN(nn.Module):  # dn: DenseNet
    """Deterministic actor with an optional DenseNet trunk.

    NOTE(review): unlike Actor, there is no final Tanh layer — the network
    output is unbounded, while ``add_noise`` assumes actions live in
    [-1, 1]. Confirm callers squash or rely on the resampling below.
    """

    def __init__(self, state_dim, action_dim, mid_dim, use_dn):
        super().__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if use_dn:  # use DenseNet (there are both shallow and deep network in DenseNet)
            nn_dense_net = DenseNet(mid_dim)
            self.net = nn.Sequential(
                nn.Linear(state_dim, mid_dim), nn.ReLU(),
                nn_dense_net,
                nn.Linear(nn_dense_net.out_dim, action_dim),
            )
        else:  # use a simple network for actor. In RL, deeper network does not mean better performance.
            self.net = nn.Sequential(
                nn.Linear(state_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, action_dim),
            )

        layer_norm(self.net[-1], std=0.01)  # net[-1] is output layer for action; not strictly necessary.

    def forward(self, s, noise_std=0.0):
        a = self.net(s)
        return a if noise_std == 0.0 else self.add_noise(a, noise_std)

    @staticmethod
    def add_noise(a, noise_std):  # 2020-03-03
        """Gaussian-perturb *a*; entries pushed outside [-1, 1] are replaced
        with a fresh uniform sample instead of being clipped.

        NOTE(review): the replacement is drawn from [0, 1) (torch.rand_like),
        not from [-1, 1] — confirm this asymmetry is intended.
        """
        # noise_normal = torch.randn_like(a, device=self.device) * noise_std
        # a_temp = a + noise_normal
        a_temp = torch.normal(a, noise_std)
        # mask == 1.0 exactly where the perturbed action left [-1, 1]
        mask = ((a_temp < -1.0) + (a_temp > 1.0)).type(torch.float32)  # 2019-12-30

        noise_uniform = torch.rand_like(a)  # , device=self.device)
        # keep in-range entries, substitute uniform noise for out-of-range ones
        a_noise = noise_uniform * mask + a_temp * (-mask + 1)
        return a_noise
class ActorSAC(nn.Module):
    """SAC actor: Gaussian policy with tanh squashing.

    ``forward`` returns the squashed mean (deterministic) or a squashed
    sample when ``noise_std`` is truthy; ``get__a__log_prob`` returns a
    reparameterized squashed sample plus a log-probability term.
    """

    def __init__(self, state_dim, action_dim, mid_dim, use_dn):
        super().__init__()
        if use_dn:  # use DenseNet (DenseNet has both shallow and deep linear layer)
            nn_dense_net = DenseNet(mid_dim)
            self.net__mid = nn.Sequential(
                nn.Linear(state_dim, mid_dim), nn.ReLU(),
                nn_dense_net,
            )
            lay_dim = nn_dense_net.out_dim
        else:  # use a simple network for actor. Deeper network does not mean better performance in RL.
            self.net__mid = nn.Sequential(
                nn.Linear(state_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, mid_dim),
            )
            lay_dim = mid_dim

        self.net__mean = nn.Linear(lay_dim, action_dim)  # action-mean head
        self.net__std_log = nn.Linear(lay_dim, action_dim)  # state-dependent log-std head

        layer_norm(self.net__mean, std=0.01)  # output layer for action; not strictly necessary.

        self.log_std_min = -20
        self.log_std_max = 2
        self.constant_log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, state, noise_std=0.0):  # in fact, noise_std is a boolean
        x = self.net__mid(state)
        a_mean = self.net__mean(x)  # NOTICE! it is a_mean without .tanh()

        if noise_std != 0.0:
            a_std_log = self.net__std_log(x).clamp(self.log_std_min, self.log_std_max)
            a_std = a_std_log.exp()
            a_mean = torch.normal(a_mean, a_std)  # NOTICE! it needs .tanh()

        return a_mean.tanh()

    def get__a__log_prob(self, state):
        """Sample a tanh-squashed action and its summed log-prob term.

        NOTE(review): ``log_prob_noise`` below is the *negated* Gaussian log
        density — the commented ``Normal(...).log_prob`` equivalents have the
        opposite sign — and the tanh correction is added rather than
        subtracted, so the returned value is -log pi(a|s). Confirm the
        training loop expects this sign convention.
        """
        x = self.net__mid(state)
        a_mean = self.net__mean(x)  # NOTICE! it needs a_mean.tanh()
        a_std_log = self.net__std_log(x).clamp(self.log_std_min, self.log_std_max)
        a_std = a_std_log.exp()

        """add noise to action in stochastic policy"""
        a_noise = a_mean + a_std * torch.randn_like(a_mean, requires_grad=True, device=self.device)
        # Can only use above code instead of below, because the tensor need gradients here.
        # a_noise = torch.normal(a_mean, a_std, requires_grad=True)

        '''compute log_prob according to mean and std of action (stochastic policy)'''
        a_delta = ((a_noise - a_mean) / a_std).pow(2) * 0.5
        # self.constant_log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        log_prob_noise = a_delta + a_std_log + self.constant_log_sqrt_2pi
        # negation of:
        # from torch.distributions.normal import Normal
        # log_prob_noise = Normal(a_mean, a_std).log_prob(a_noise)
        # negation of:
        # a_delta = a_noise - a_mean).pow(2) /(2* a_std.pow(2)
        # log_prob_noise = -a_delta - a_std.log() - np.log(np.sqrt(2 * np.pi))

        a_noise_tanh = a_noise.tanh()
        log_prob = log_prob_noise + (-a_noise_tanh.pow(2) + 1.000001).log()
        # negation of (1.000001 plays the role of 1 + epsilon):
        # epsilon = 1e-6
        # log_prob = log_prob_noise - (1 - a_noise_tanh.pow(2) + epsilon).log()
        return a_noise_tanh, log_prob.sum(1, keepdim=True)
class ActorPPO(nn.Module):
    """PPO actor: Gaussian policy with a state-independent learned log-std.

    ``get__a__log_prob``/``compute__log_prob`` return the log density of the
    *pre-tanh* sampled action; ``forward`` returns the tanh-squashed mean.
    """

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.net__mean = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                       nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                       nn.Linear(mid_dim, action_dim), )
        # one learned log-std per action dimension, shared across states
        self.net__std_log = nn.Parameter(torch.zeros(1, action_dim), requires_grad=True)
        self.constant_log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))

        layer_norm(self.net__mean[0], std=1.0)
        layer_norm(self.net__mean[2], std=1.0)
        layer_norm(self.net__mean[4], std=0.01)  # output layer for action

    def forward(self, s):
        a_mean = self.net__mean(s)
        return a_mean.tanh()

    def get__a__log_prob(self, state):
        """Sample an (unsquashed) action and its summed Gaussian log density."""
        a_mean = self.net__mean(state)
        a_log_std = self.net__std_log.expand_as(a_mean)
        a_std = torch.exp(a_log_std)

        # non-reparameterized sample: no gradient flows through the draw
        a_noise = torch.normal(a_mean, a_std)
        a_delta = (a_noise - a_mean).pow(2) / (2 * a_std.pow(2))
        log_prob = -(a_delta + a_log_std + self.constant_log_sqrt_2pi)
        log_prob = log_prob.sum(1)
        return a_noise, log_prob

    def compute__log_prob(self, state, a_noise):
        """Log density of a previously sampled *a_noise* under the current policy."""
        a_mean = self.net__mean(state)
        a_log_std = self.net__std_log.expand_as(a_mean)
        a_std = torch.exp(a_log_std)

        a_delta = (a_noise - a_mean).pow(2) / (2 * a_std.pow(2))
        log_prob = -(a_delta + a_log_std + self.constant_log_sqrt_2pi)
        return log_prob.sum(1)
class ActorGAE(nn.Module):
    """GAE/PPO-style actor with a DenseNet trunk and a *state-dependent*
    clamped log-std head (unlike ActorPPO's shared parameter)."""

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        nn_dense = DenseNet(mid_dim)
        self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                 nn_dense, )
        layer_dim = nn_dense.out_dim

        self.net__mean = nn.Linear(layer_dim, action_dim)
        self.net__std_log = nn.Linear(layer_dim, action_dim)

        self.log_std_min = -20
        self.log_std_max = 2
        self.constant_log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        layer_norm(self.net[0], std=1.0)
        layer_norm(self.net__mean, std=0.01)  # output layer for action
        layer_norm(self.net__std_log, std=0.01)  # output layer for std_log

    def forward(self, s):
        x = self.net(s)
        a_mean = self.net__mean(x)
        return a_mean.tanh()

    def get__a__log_prob(self, state):
        """Reparameterized (unsquashed) sample and its summed log density."""
        x = self.net(state)
        a_mean = self.net__mean(x)
        a_log_std = self.net__std_log(x).clamp(self.log_std_min, self.log_std_max)
        a_std = torch.exp(a_log_std)

        # reparameterization trick keeps the sample differentiable
        a_noise = a_mean + a_std * torch.randn_like(a_mean, requires_grad=True, device=self.device)
        a_delta = (a_noise - a_mean).pow(2) / (2 * a_std.pow(2))
        log_prob = -(a_delta + a_log_std + self.constant_log_sqrt_2pi)
        return a_noise, log_prob.sum(1)

    def compute__log_prob(self, state, a_noise):
        """Log density of a previously sampled *a_noise* under the current policy."""
        x = self.net(state)
        a_mean = self.net__mean(x)
        a_log_std = self.net__std_log(x).clamp(self.log_std_min, self.log_std_max)
        a_std = torch.exp(a_log_std)

        a_delta = (a_noise - a_mean).pow(2) / (2 * a_std.pow(2))
        log_prob = -(a_delta + a_log_std + self.constant_log_sqrt_2pi)
        log_prob = log_prob.sum(1)
        return log_prob
class Critic(nn.Module):  # 2020-05-05 fix bug
    """Single Q-network: Q(s, a) computed from the concatenated state-action vector."""

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, 1),
        )

    def forward(self, s, a):
        state_action = torch.cat((s, a), dim=1)
        return self.net(state_action)
class CriticTwin(nn.Module):  # TwinSAC <- TD3(TwinDDD) <- DoubleDQN <- Double Q-learning
    """Two independent Q-networks with identical architecture.

    ``forward`` returns only Q1; ``get__q1_q2`` returns both estimates for
    clipped double-Q targets.
    """

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()

        def build_critic_network():
            net = nn.Sequential(
                nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, 1),
            )
            layer_norm(net[-1], std=0.01)  # output-layer init; not strictly necessary
            return net

        self.net1 = build_critic_network()
        self.net2 = build_critic_network()

    def forward(self, state, action):
        state_action = torch.cat((state, action), dim=1)
        return self.net1(state_action)

    def get__q1_q2(self, state, action):
        """Both Q-estimates for the same (state, action) batch."""
        state_action = torch.cat((state, action), dim=1)
        return self.net1(state_action), self.net2(state_action)
class CriticTwinShared(nn.Module):  # 2020-06-18
    """Twin Q-heads on a single shared trunk.

    ``forward`` returns Q1 only; ``get__q1_q2`` returns both heads for
    clipped double-Q learning.
    """

    def __init__(self, state_dim, action_dim, mid_dim, use_dn):
        super().__init__()
        if use_dn:  # use DenseNet (DenseNet has both shallow and deep linear layer)
            # FIX: DenseNet was previously constructed unconditionally, wasting
            # work (and RNG draws) when use_dn is False; build it only here.
            nn_dense = DenseNet(mid_dim)
            self.net__mid = nn.Sequential(
                nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                nn_dense,
            )
            lay_dim = nn_dense.out_dim
        else:  # use a simple network for actor. Deeper network does not mean better performance in RL.
            self.net__mid = nn.Sequential(
                nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, mid_dim),
            )
            lay_dim = mid_dim

        self.net__q1 = nn.Linear(lay_dim, 1)
        self.net__q2 = nn.Linear(lay_dim, 1)
        layer_norm(self.net__q1, std=0.1)
        layer_norm(self.net__q2, std=0.1)

        # NOTE: SpectralNorm is deliberately not combined with TwinCritic here;
        # the author found it conflicts with soft target updates:
        # if is_spectral_norm:
        #     self.net1[1] = nn.utils.spectral_norm(self.dec_q1[1])
        #     self.net2[1] = nn.utils.spectral_norm(self.dec_q2[1])

    def forward(self, state, action):
        x = torch.cat((state, action), dim=1)
        # FIX: was ``self.net_mid`` (single underscore), which raised
        # AttributeError — the attribute is ``self.net__mid``.
        x = self.net__mid(x)
        q_value = self.net__q1(x)
        return q_value

    def get__q1_q2(self, state, action):
        """Both Q-estimates for the same (state, action) batch."""
        x = torch.cat((state, action), dim=1)
        x = self.net__mid(x)
        q_value1 = self.net__q1(x)
        q_value2 = self.net__q2(x)
        return q_value1, q_value2
class CriticAdv(nn.Module):  # 2020-05-05 fix bug
    """State-value network V(s) used for advantage estimation."""

    def __init__(self, state_dim, mid_dim):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, 1), )

        layer_norm(self.net[0], std=1.0)
        layer_norm(self.net[2], std=1.0)
        layer_norm(self.net[4], std=1.0)  # output layer for the state value

    def forward(self, s):
        q = self.net(s)
        return q
class CriticAdvTwin(nn.Module):  # 2020-05-05 fix bug
    """Twin state-value heads on a shared DenseNet trunk.

    Unlike CriticAdv, ``forward`` returns the pair (q1, q2).
    """

    def __init__(self, state_dim, mid_dim):
        super().__init__()
        nn_dense = DenseNet(mid_dim)
        self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                 nn_dense, )
        layer_dim = nn_dense.out_dim

        self.net_q1 = nn.Linear(layer_dim, 1)
        self.net_q2 = nn.Linear(layer_dim, 1)

        layer_norm(self.net[0], std=1.0)
        layer_norm(self.net_q1, std=0.1)  # output layer for q value
        layer_norm(self.net_q2, std=0.1)  # output layer for q value

    def forward(self, s):
        x = self.net(s)
        q1 = self.net_q1(x)
        q2 = self.net_q2(x)
        return q1, q2
class QNet(nn.Module):  # class AgentQLearning
    """Plain DQN head: maps a state to one Q-value per discrete action."""

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()  # same as super(QNet, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, action_dim),
        )

    def forward(self, s):
        return self.net(s)
class QNetTwin(nn.Module):  # class AgentQLearning
    """Double-Q network with a shared DenseNet trunk.

    ``forward`` returns the elementwise minimum of the two heads (clipped
    double-Q); the ``noise_std`` parameter is accepted but unused.
    """

    def __init__(self, state_dim, action_dim, mid_dim, ):
        super().__init__()
        nn_dense = DenseNet(mid_dim)
        self.net = nn.Sequential(
            nn.Linear(state_dim, mid_dim), nn.ReLU(),
            nn_dense,
        )
        layer_dim = nn_dense.out_dim

        self.net_q1 = nn.Linear(layer_dim, action_dim)
        self.net_q2 = nn.Linear(layer_dim, action_dim)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, s, noise_std=0.0):  # noise_std kept for API parity; unused
        x = self.net(s)
        q1 = self.net_q1(x)
        q2 = self.net_q2(x)
        return torch.min(q1, q2)

    def get__q1_q2(self, s):
        """Both Q-heads, e.g. for computing the twin TD targets."""
        x = self.net(s)
        q1 = self.net_q1(x)
        q2 = self.net_q2(x)
        return q1, q2
class QNetDuel(nn.Module):
    """Dueling DQN: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)."""

    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.net__head = nn.Sequential(
            nn.Linear(state_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, mid_dim), nn.ReLU(),
        )
        self.net_val = nn.Sequential(  # state-value branch V(s)
            nn.Linear(mid_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, 1),
        )
        self.net_adv = nn.Sequential(  # advantage branch A(s, a)
            nn.Linear(mid_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, action_dim),
        )

    def forward(self, state, noise_std=0.0):  # noise_std kept for API parity; unused
        features = self.net__head(state)
        value = self.net_val(features)
        advantage = self.net_adv(features)
        # subtracting the mean advantage keeps V and A identifiable
        return value + advantage - advantage.mean(dim=1, keepdim=True)
"""utils"""
class NnnReshape(nn.Module):
    """Reshape layer: keeps the batch dimension and views the rest as *args."""

    def __init__(self, *args):
        super().__init__()
        self.args = args

    def forward(self, x):
        batch_size = x.size(0)
        return x.view((batch_size,) + self.args)
class DenseNet(nn.Module):  # plan to hyper-param: layer_number
    """Two-stage dense block: each stage concatenates its input with a
    half-width projection, growing mid_dim -> 1.5*mid_dim -> 2.25*mid_dim
    (the final width is exposed as ``out_dim``)."""

    def __init__(self, mid_dim):
        super().__init__()
        assert (mid_dim / (2 ** 3)) % 1 == 0  # widths below must stay integral

        def id2dim(i):
            # feature width entering stage i: (3/2)^i * mid_dim
            return int((3 / 2) ** i * mid_dim)

        self.dense1 = nn.Sequential(nn.Linear(id2dim(0), id2dim(0) // 2), nn.ReLU(), )
        self.dense2 = nn.Sequential(nn.Linear(id2dim(1), id2dim(1) // 2), HardSwish(), )
        self.out_dim = id2dim(2)

        layer_norm(self.dense1[0], std=1.0)
        layer_norm(self.dense2[0], std=1.0)

    def forward(self, x1):
        x2 = torch.cat((x1, self.dense1(x1)), dim=1)  # mid_dim -> 1.5 * mid_dim
        x3 = torch.cat((x2, self.dense2(x2)), dim=1)  # -> 2.25 * mid_dim
        return x3
class HardSwish(nn.Module):
    """Hard-swish activation: x * relu6(x + 3) / 6 (as in MobileNetV3)."""

    def __init__(self):
        super().__init__()
        self.relu6 = nn.ReLU6()

    def forward(self, x):
        gate = self.relu6(x + 3.0) / 6.0
        return gate * x
def layer_norm(layer, std=1.0, bias_const=1e-6):
    """Orthogonal-initialize *layer*'s weight with gain *std* and set its
    bias to the constant *bias_const* (in place)."""
    torch.nn.init.orthogonal_(layer.weight, gain=std)
    torch.nn.init.constant_(layer.bias, val=bias_const)
| [
"noreply@github.com"
] | guoyaq.noreply@github.com |
05df3df23b0a20d367651cc969838f802024aa98 | ea16dd8717b39f45fb1450658bca94e641909120 | /src/core/views.py | 7585e36f8a1b2e313e4723b4687eb6eb2bd06167 | [] | no_license | beingabeer/MMI-CRM | 80e39e946f579a465ed69898e47ef09bad669569 | 941c5511715f857609ebd8b232b01b4d4b102b4d | refs/heads/master | 2022-12-09T09:15:15.244283 | 2020-05-10T22:56:34 | 2020-05-10T22:56:34 | 252,863,189 | 0 | 0 | null | 2022-12-08T03:58:46 | 2020-04-03T23:16:15 | Python | UTF-8 | Python | false | false | 6,850 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.forms import inlineformset_factory
from .models import *
from .forms import OrderForm, CreateUserForm, CustomerForm
from .filters import OrderFilter
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .decorators import unauthenticated_user, allowed_users, admin_only
from django.contrib.auth.models import Group
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import get_template
def index(request):
    """Landing page; on POST, forwards the contact form to the site owner by
    email, sends the visitor an acknowledgement and redirects back to the
    contact anchor."""
    if request.method == 'POST':
        name = request.POST.get('name')
        email = request.POST.get('email')
        message = request.POST.get('message')

        # forward the form contents to the site owner
        subject = 'Contact Form from MMInc'
        from_email = settings.DEFAULT_FROM_EMAIL
        to_email = [settings.DEFAULT_FROM_EMAIL]
        contact_message = f"{message}, from {name} with email {email}"
        # contact_message = get_template('contact_message.txt').render(context)
        send_mail(subject, contact_message, from_email,
                  to_email, fail_silently=True)
        messages.success(request, "Hi, Your email has been sent!")

        # acknowledge receipt to the visitor
        subject_reply = 'Message from MMInc'
        from_email = settings.DEFAULT_FROM_EMAIL
        to_email = [email]
        contact_message_reply = "Thank you for your Email. We will get back to you asap."
        # FIX: the reply was previously sent with the owner-facing `subject`;
        # `subject_reply` was computed but never used.
        send_mail(subject_reply, contact_message_reply, from_email,
                  to_email, fail_silently=True)
        return redirect('/#contact')
    return render(request, 'index.html')
@unauthenticated_user
def login_page(request):
    """Authenticate a user from the posted credentials; redirect to the
    dashboard on success, re-render the login form with a warning otherwise."""
    context = {}
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')

        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('dashboard')
        else:
            messages.warning(request, "Username or password incorrect")
    return render(request, 'login.html', context)
def logout_user(request):
    """Log the current user out and return to the login page."""
    logout(request)
    messages.success(request, 'You have been successfully logged out')
    return redirect('login')
@unauthenticated_user
def register_page(request):
    """Create a new account; on success, flash a greeting and send the
    visitor to the login page."""
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            username = form.cleaned_data.get('username')
            messages.success(
                request, f"Hi {username}! Your Account has been created. You can now log in.")
            return redirect('login')

    context = {'form': form}
    return render(request, 'register.html', context)
@login_required(login_url='login')
@admin_only
def dashboard(request):
    """Admin dashboard: order/customer totals, delivery-status counts and
    the seven most recent orders."""
    orders = Order.objects.all()
    customers = Customer.objects.all()

    total_orders = orders.count()
    total_customers = customers.count()
    delivered = orders.filter(status='Delivered').count()
    pending = orders.filter(status='Pending').count()
    # newest first — assumes auto-increment ids follow creation order
    last_seven_orders = Order.objects.all().order_by('-id')[:7]

    context = {
        'orders': orders,
        'customers': customers,
        'total_orders': total_orders,
        'total_customers': total_customers,
        'delivered': delivered,
        'pending': pending,
        'last_seven_orders': last_seven_orders
    }
    return render(request, 'dashboard.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def products(request):
    """Admin-only list of all products."""
    products = Product.objects.all()
    context = {'products': products}
    return render(request, 'products.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def customer(request, pk):
    """Admin view of a single customer with a filterable list of their orders.

    NOTE(review): ``Customer.objects.get`` raises DoesNotExist (HTTP 500)
    for an unknown pk; consider ``get_object_or_404``.
    """
    customer = Customer.objects.get(id=pk)

    orders = customer.order_set.all()
    total_orders = orders.count()

    # django-filter: narrow the order list from the GET query string
    myfilter = OrderFilter(request.GET, queryset=orders)
    orders = myfilter.qs

    context = {
        'customer': customer,
        'total_orders': total_orders,
        'orders': orders,
        'myfilter': myfilter
    }
    return render(request, 'customer.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def createOrder(request, pk):
    """Create up to five new orders for customer *pk* via an inline formset."""
    OrderFormSet = inlineformset_factory(
        Customer, Order, fields=('product', 'status'), extra=5)
    customer = Customer.objects.get(id=pk)
    # start from empty forms only: hide the customer's existing orders
    formset = OrderFormSet(queryset=Order.objects.none(), instance=customer)
    # form = OrderForm(initial={'customer': customer})
    if request.method == 'POST':
        # form = OrderForm(request.POST)
        formset = OrderFormSet(request.POST, instance=customer)
        if formset.is_valid():
            formset.save()
            return redirect('/dashboard/')

    context = {'formset': formset}
    return render(request, 'order_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def updateOrder(request, pk):
    """Edit an existing order; redirect to the dashboard on success."""
    order = Order.objects.get(id=pk)
    form = OrderForm(instance=order)

    if request.method == 'POST':
        form = OrderForm(request.POST, instance=order)
        if form.is_valid():
            form.save()
            return redirect('/dashboard/')

    context = {'form': form}
    return render(request, 'order_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def deleteOrder(request, pk):
    """Confirmation page for deleting an order; the actual delete happens on POST."""
    order = Order.objects.get(id=pk)
    if request.method == 'POST':
        order.delete()
        return redirect('/dashboard/')

    context = {'item': order}
    return render(request, 'delete_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def user_profile(request):
    """Customer-facing profile page: the logged-in user's orders and
    delivery-status counts."""
    orders = request.user.customer.order_set.all()

    total_orders = orders.count()
    delivered = orders.filter(status='Delivered').count()
    pending = orders.filter(status='Pending').count()

    context = {'orders': orders, 'total_orders': total_orders,
               'delivered': delivered,
               'pending': pending, }
    return render(request, 'user.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def account_settings(request):
    """Let the logged-in customer edit their own profile (including file
    uploads via request.FILES).

    NOTE(review): there is no redirect after a successful POST; the bound
    form is re-rendered, so a browser refresh re-submits the form.
    """
    customer = request.user.customer
    form = CustomerForm(instance=customer)

    if request.method == 'POST':
        form = CustomerForm(request.POST, request.FILES, instance=customer)
        if form.is_valid():
            form.save()

    context = {'form': form, 'customer': customer}
    return render(request, 'account_settings.html', context)
| [
"abeer.sewak@gmail.com"
] | abeer.sewak@gmail.com |
94b0a380ce4543b547a5176caef0e9ce5901f3ca | ad71b2aaab2bf1127f40fef008ac6f6d1334c32c | /share/rpcauth/rpcauth.py | 219286929731362f0553450cf62c1d98e5fd84b4 | [
"MIT"
] | permissive | minblock/carpaticoin | 00eb755770f370d54d73ae9b227e4d4bbd60babb | f65cf89970b36a073b49435a3833a2a83a7f2145 | refs/heads/master | 2021-05-22T01:38:29.187393 | 2020-04-04T04:41:16 | 2020-04-04T04:41:16 | 252,909,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
    """Create size byte hex salt (returned as a 2*size character hex string)."""
    return urandom(size).hex()
def generate_password():
    """Create 32 byte b64 password (URL-safe base64, returned as str)."""
    raw = urandom(32)
    return urlsafe_b64encode(raw).decode('utf-8')
def password_to_hmac(salt, password):
    """Return the hex HMAC-SHA256 of *password* keyed by *salt*."""
    digest = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256')
    return digest.hexdigest()
def main():
    """CLI entry point: print an rpcauth config line (username:salt$hmac)
    plus the plaintext password.

    The password is generated randomly when omitted, read interactively when
    given as "-", otherwise taken verbatim from argv.
    """
    parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
    parser.add_argument('username', help='the username for authentication')
    parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
    args = parser.parse_args()

    if not args.password:
        args.password = generate_password()
    elif args.password == '-':
        args.password = getpass()

    # Create 16 byte hex salt
    salt = generate_salt(16)
    password_hmac = password_to_hmac(salt, args.password)

    print('String to be appended to carpaticoin.conf:')
    print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
    print('Your password:\n{0}'.format(args.password))
main()
| [
"POSTMASTER@provgn.com"
] | POSTMASTER@provgn.com |
5a0a258f467174abea5efd18a4d45fb9abc6a76c | 76c7ad077dfb1685f01117b17e5240638e52f1d5 | /env/demo/demo/apps/home/urls.py | 7ef80347855f3f1839a25ff26c0d5ee89e40d3ea | [] | no_license | simaski/pythonExample | 5394b07c66b4a1326e3628a92d135fcae5cc76ba | 5682857e084a34f2796b71e4a191b2a14f1535de | refs/heads/master | 2020-05-15T08:32:03.536714 | 2019-04-18T21:28:46 | 2019-04-18T21:28:46 | 182,159,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | __author__= 'jimmy'
from django.urls import path
from . import views
# URL routes for the "home" app; the `name` values are used for reverse
# lookups in templates ({% url %}) and redirects.
urlpatterns = [
    path('', views.index_view, name="index"),  # landing page
    path('about', views.about_view, name="about"),
    path('products', views.products_view, name="products"),
    path('contact', views.contact_view, name="contact"),
]
"jhernandez@technisys.com"
] | jhernandez@technisys.com |
ce0673f87c6f91128d89ed3e4bce6d17338ace3c | 49aede51568d7c93aff1c0d21853954724bb2f46 | /src/index.py | 46e61f8b2beb26c8bdb45eb216ac049c18cdecfc | [] | no_license | crackoscl/pruebaGYM | fd5a0fdd22b5229fda603ca98978afab85017e95 | 6899e714cfd07f17e6620c538bd5088ec55c94b0 | refs/heads/main | 2023-02-19T16:35:07.595853 | 2021-01-27T02:23:37 | 2021-01-27T02:23:37 | 332,944,291 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | from flask import Flask,request,jsonify
import psycopg2
from psycopg2.extras import RealDictCursor
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
def conexion():
    """Open a new PostgreSQL connection to the local ``gym`` database.

    SECURITY(review): credentials are hard-coded in source; move them to
    environment variables or a config file before deploying.
    """
    return psycopg2.connect(
        host="localhost",
        database="gym",
        user="postgres",
        password="Lagox69"
    )
@app.route('/')
def principal():
    """Root endpoint: static HTML welcome banner."""
    banner = '<h1>Bienvenidos a la api GYM </h1>'
    return banner
@app.route('/clientes', methods=['GET'])
def index():
    """Return every row of the ``cliente`` table as a JSON array.

    FIX: the connection and cursor were previously leaked when the query
    raised; they are now closed in ``finally`` blocks.
    """
    conn = conexion()
    try:
        cur = conn.cursor(cursor_factory=RealDictCursor)
        try:
            cur.execute("SELECT * FROM cliente")
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return jsonify(rows)
@app.route('/suscripciones', methods=['GET'])
def suscripciones():
    """Return every row of the ``tipo_suscripcion`` table as a JSON array.

    FIX: the connection and cursor were previously leaked when the query
    raised; they are now closed in ``finally`` blocks.
    """
    conn = conexion()
    try:
        cur = conn.cursor(cursor_factory=RealDictCursor)
        try:
            cur.execute("SELECT * FROM tipo_suscripcion")
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return jsonify(rows)
@app.route('/pases', methods=['GET'])
def pases():
    """Return every row of the ``pase_diario`` table as a JSON array.

    FIX: the connection and cursor were previously leaked when the query
    raised; they are now closed in ``finally`` blocks.
    """
    conn = conexion()
    try:
        cur = conn.cursor(cursor_factory=RealDictCursor)
        try:
            cur.execute("SELECT * FROM pase_diario")
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return jsonify(rows)
@app.route('/productos', methods=['GET'])
def productos():
    """Return every row of the ``producto`` table as a JSON array.

    FIX: the connection and cursor were previously leaked when the query
    raised; they are now closed in ``finally`` blocks.
    """
    conn = conexion()
    try:
        cur = conn.cursor(cursor_factory=RealDictCursor)
        try:
            cur.execute("SELECT * FROM producto")
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return jsonify(rows)
@app.route('/procesar', methods=['POST'])
def procesar():
    """Echo the submitted form data back as JSON.

    FIX: ``request.form.get`` was referenced without being called (it
    printed the bound method), and the view returned None, which makes
    Flask raise a 500. The form is now materialized and returned.
    """
    if request.method == 'POST':
        data = request.form.to_dict()
        print(data)
        return jsonify(data)
if __name__ == '__main__':
app.run(host='127.0.0.1',port=5000,debug=True)
| [
"gilbert.ata@gmail.com"
] | gilbert.ata@gmail.com |
e300b1c61215c776dc8a07dcd130919a3eb92c8f | 58938fe66aaba6dc6281901ae603decfa8c8e43a | /src/utils.py | 977090bb5b830f8cd8a6b2c86ada2e108aeaf0dd | [] | no_license | Jigar3/pharmazon-backend | d7a428c72bf5db69d583fc95d065a7e60d2c2eb8 | df7023edbc40c82a659bb87734ab4f935e676ed3 | refs/heads/master | 2020-05-03T06:33:12.580164 | 2019-04-04T05:35:30 | 2019-04-04T05:35:30 | 178,476,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | from bs4 import BeautifulSoup
import requests
agent = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
url = "https://www.medplusmart.com/product/"
def getMedList(query):
    """Scrape medplusmart search results for *query*.

    Returns a list of {name, link, price} dicts; entries with no link or no
    visible name are skipped.

    NOTE(review): assumes the ``.wbrk`` (name) and ``.cursor td:nth-of-type(3)``
    (price) selectors yield parallel lists of equal length — if the page
    returns fewer price cells than medicine cells this raises IndexError.
    """
    page = requests.get(url + query, headers=agent).text
    soup = BeautifulSoup(page, 'html.parser')
    meds = soup.select(".wbrk")
    price = soup.select(".cursor td:nth-of-type(3)")
    count = -1  # index into the parallel price list
    medList = []
    for med in meds:
        count += 1
        link = med.select("a")
        if(len(link) != 0):
            link = link[0].attrs['href']
        else:
            link = ""
        if(link == "" or med.text == ""):
            pass  # skip entries with no link or no visible name
        else:
            temp = {
                "name": med.text.strip(),
                "link": link.strip(),
                "price": price[count].text.strip().replace("\t", "")
            }
            medList.append(temp)
    return medList
| [
"chavada.jigar.3@gmail.com"
] | chavada.jigar.3@gmail.com |
401839f8d0b5a34685c9b6be91601ad227d91c4d | ffce81084d0414f30ffff8527a4b85ef4bca7942 | /bin/screenshot-sway | c42533d4731bd3a02e71984c1528f8aac9a39d82 | [
"Unlicense"
] | permissive | jtprince/dotfiles | a054df68c3db35f5a1278b2ccec4b69ae6151ecf | 64cd2776832e3d651d1a9b4a62c0921823fc5677 | refs/heads/main | 2023-08-31T09:26:13.271873 | 2023-08-24T21:27:41 | 2023-08-24T21:27:41 | 9,928,741 | 4 | 1 | Unlicense | 2023-02-11T01:26:54 | 2013-05-08T04:59:36 | TeX | UTF-8 | Python | false | false | 7,209 | #!/usr/bin/env python
"""
Arch:
yay -S grimshot tesseract tesseract-data-eng swappy \
python-pytesseract espeak pico-tts sox
If you are using a different python env:
pip install pytesseract
"""
import time
import re
import datetime
import subprocess
import argparse
from pathlib import Path
class Screenshot:
    """Capture a screenshot via grimshot; optionally OCR it (tesseract),
    speak the recognized text (espeak / pico-tts), annotate it (swappy)
    or open it in an image viewer."""

    # where captures are stored
    DEFAULT_DIR = Path.home() / "screenshots"

    # shell pipeline per TTS engine; {tempo} is filled in for pico-tts
    SPEAK_COMMANDS = dict(
        espeak="espeak --stdin",
        picotts="pico-tts | play -t raw -r 16k -e signed -b 16 -c 1 - tempo {tempo}",
    )

    # split after sentence-ending punctuation followed by whitespace
    SENTENCE_SPLIT_RE = re.compile(r"(?<=[\.\?\!])\s+")
    # Could use PyICU (see http://userguide.icu-project.org/boundaryanalysis)
    # But this is probably about as good. Sentence endings are inherently
    # ambiguous in English. But more rare to end a sentence on an
    # abbreviation so assume an abbreviation is not a sentence end.
    NOT_SENTENCE_ENDS = re.compile(r"(Mr|Mrs|Dr|Ms)\.")

    DEFAULT_SENTENCE_PAUSE_SECONDS = 0.2
    DEFAULT_TTS_TEMPO = 1.5
@staticmethod
def _concatenate_lines(text):
return re.sub(r"(?<!\n)\n(?!\n)", " ", text).replace("- ", "")
    def get_args(self):
        """Parse the CLI: capture type plus OCR / TTS / annotation options.

        Side effect: ``--tts`` implies ``--ocr`` (speech needs the text).
        """
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "type",
            default="screen",
            choices=["screen", "area", "active", "output", "window"],
            help="grab a specific area",
        )
        parser.add_argument(
            "--annotate",
            action="store_true",
            help="annotate the image",
        )
        parser.add_argument(
            "-o",
            "--ocr",
            action="store_true",
            help="run ocr, write txt file, and copy to clipboard",
        )
        parser.add_argument(
            "--ocr-separate-lines",  # ocr_separate_lines
            action="store_true",
            help="do not concatenate all the text.",
        )
        parser.add_argument(
            "-t", "--tts", action="store_true", help="run ocr and then say it"
        )
        parser.add_argument(
            "--tts-sentence-pause",  # tts_sentence_pause
            type=float,
            default=self.DEFAULT_SENTENCE_PAUSE_SECONDS,
            help="seconds to pause between sentences",
        )
        parser.add_argument(
            "--tts-tempo",  # tts_tempo
            type=float,
            default=self.DEFAULT_TTS_TEMPO,
            help="how fast to read",
        )
        parser.add_argument(
            "--tts-engine",  # tts_engine
            default="picotts",
            choices=["picotts", "espeak"],
            help="run ocr and then say it",
        )
        parser.add_argument(
            "--eog",
            action="store_true",
            help="open final image in eog",
        )
        args = parser.parse_args()
        if args.tts:
            args.ocr = True  # speaking requires the OCR output
        return args
def create_path_noext(self):
now = datetime.datetime.now(tz=datetime.timezone.utc)
now_stamp = (
now.isoformat().replace("T", "--").replace(":", "").split(".")[0]
)
filename_noext = f"{self.args.type}-{now_stamp}"
return self.DEFAULT_DIR / filename_noext
    def __init__(self):
        """Parse args and precompute every output path for this run."""
        self.args = self.get_args()
        print(self.args)
        self.noext_path = self.create_path_noext()
        self.image_path = self.noext_path.with_suffix(".png")
        # Make sure the output directory exists before anything writes to it.
        self.image_path.parent.mkdir(parents=True, exist_ok=True)
        # e.g. foo.png -> foo.annotated.png
        self.annotated_path = self.noext_path.with_suffix(
            ".annotated" + self.image_path.suffix
        )
        self.text_path = self.image_path.with_suffix(".txt")
def run(self):
print("CAPTURE SCREENSHOT!")
self.capture_screenshot()
print("WRITING IMAGET TO TEXT!")
self.write_image_to_text()
print("TEXT to SPEECH!")
self.text_to_speech()
print("ANNOTATING iamge!")
self.annotate_image()
print("OPEN WITH EOG!")
self.open_with_eog()
    def capture_screenshot(self):
        """Grab the screenshot via grimshot into self.image_path."""
        grimshot_cmd = self._create_screenshot_cmd()
        subprocess.run(grimshot_cmd)
    def annotate_image(self):
        """Open the capture in swappy for manual annotation (if --annotate).

        The annotated copy is written to self.annotated_path; the original
        screenshot is left untouched.
        """
        if self.args.annotate:
            cmd = [
                "swappy",
                "--file",
                str(self.image_path),
                "--output-file",
                str(self.annotated_path),
            ]
            subprocess.run(cmd)
    def write_image_to_text(self):
        """OCR the screenshot, save the text, and copy it to the clipboard.

        Runs only when --ocr (or --tts, which implies it) was given.
        """
        if self.args.ocr:
            output = subprocess.check_output(
                ["pytesseract", str(self.image_path)], text=True
            )
            # NOTE(review): both strip() calls remove "'" — the second was
            # probably meant to strip double quotes; confirm intent.
            text = output.strip().strip("'").strip("'")
            if not self.args.ocr_separate_lines:
                text = self._concatenate_lines(text)
            print("The text used for OCR:", text)
            print("The repr of text used for OCR:", repr(text))
            self.text_path.write_text(text)
            self._copy_text_file_to_clipboard(self.text_path)
    def _create_screenshot_cmd(self):
        """Return the grimshot argv for the requested capture type."""
        return ["grimshot", "save", self.args.type, str(self.image_path)]
    @staticmethod
    def _copy_text_file_to_clipboard(text_path):
        """Pipe the text file into the `clip` helper via the shell.

        NOTE(review): the path is interpolated unquoted into a shell=True
        string; paths containing spaces or shell metacharacters will break
        (paths here are self-generated, so this is low risk).
        """
        subprocess.run(f"cat {str(text_path)} | clip", shell=True)
    def _split_into_sentences(self, text):
        """Split text into sentences, re-joining false splits after titles.

        First splits on whitespace that follows ./?/!, then merges any
        fragment that ends with an abbreviation (Mr./Mrs./Dr./Ms.) back
        into the sentence that follows it.
        """
        sentences = re.split(self.SENTENCE_SPLIT_RE, text)
        # Indexes of fragments that end with a title abbreviation and were
        # therefore split in the wrong place (last fragment excluded).
        indexes_of_partials = [
            idx
            for idx, sentence in enumerate(sentences[0:-1])
            if self.NOT_SENTENCE_ENDS.search(sentence)
        ]
        final_sentences = []
        for index, sentence in enumerate(sentences):
            if (index - 1) in indexes_of_partials:
                # Previous fragment was a false split: glue this one onto it.
                final_sentences[-1] = " ".join([final_sentences[-1], sentence])
            else:
                final_sentences.append(sentence)
        return final_sentences
    def text_to_speech(self):
        """Read the OCR'd text aloud, one sentence at a time (if --tts)."""
        if self.args.tts:
            # picotts can only process a certain amount of text.
            # So, split into sentences and read each one individually.
            # As a bonus, gives us ability to dial in wait between sentences.
            ocr_cmd = self.SPEAK_COMMANDS[self.args.tts_engine].format(
                tempo=self.args.tts_tempo
            )
            text = self.text_path.read_text()
            sentences = self._split_into_sentences(text)
            for sentence in sentences:
                # Strip characters that would break the double-quoted echo.
                sentence = sentence.replace('"', r"")
                sentence = sentence.replace(r"`", "'")
                # NOTE(review): sentence text is interpolated into a shell
                # command; OCR output is semi-trusted but could still contain
                # metacharacters (e.g. $) — consider passing via stdin.
                subprocess.run(f'echo "{sentence}" | {ocr_cmd}', shell=True)
                time.sleep(self.args.tts_sentence_pause)
def open_with_eog(self):
if self.args.eog:
image_path_to_open = (
self.annotated_path
if self.annotated_path.exists()
else self.image_path
)
subprocess.run(["eog", str(image_path_to_open)])
# Script entry point: run the full capture/OCR/TTS/annotate pipeline.
if __name__ == "__main__":
    screenshot = Screenshot()
    screenshot.run()
    # Manual test for the sentence splitter (kept for reference):
    # text = str(
    #     "This is my text! A real sentence. "
    #     "A fake ending Mr. Jones, Dr. Hannibal, "
    #     "Mrs. Sweeney, and Ms. Limey. How about that?"
    # )
    # screenshot._split_into_sentences(text)
| [
"jtprince@gmail.com"
] | jtprince@gmail.com | |
31d8faa2e645049e29d423b77cba64a42291c2bc | ac4df2d4c859eb9fbcff8c41d6add8cf5beb0e4a | /env/bin/sqlformat | 9e8ba2026764967ef1a71d241dcf96d1cfc03e1c | [] | no_license | SudhanshuJoshi09/Simple_Ticket_Booking_Simulation | 351eae7b54e9aee014d5c4bba3a52c39eaacaa54 | 539c80c6f0bc4dc9959d64105514b6cda2da874a | refs/heads/master | 2023-08-16T00:05:49.607474 | 2021-09-24T01:48:12 | 2021-09-24T01:48:12 | 332,390,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | #!/Users/sudhanshujoshi/Documents/Development/trash/railway-reservation/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
# Console-script shim generated by setuptools: strip the "-script.pyw"/".exe"
# suffix so argv[0] matches the command name, then delegate to sqlparse's CLI.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"sudhanshujoshi49@gmail.com"
] | sudhanshujoshi49@gmail.com | |
7d27b0ea4af2d7ba836d9fc62e317a4481780e97 | 9a5b835f4ea3c9f7585d59ac48218ba4488c4e63 | /Easy/Day 17 - Breaking the Records/solution.py | e33cc60aaddbb2dd1b2844a6a7c883bde1514339 | [] | no_license | JiaweiTan/hackerrank | 95cbf5595e314af1b1cb6440fdcd7f162f740364 | 2ad1e8bb71d5bb7cb0ebf246cda84fa4ce37a73b | refs/heads/master | 2021-05-23T00:43:23.758814 | 2020-04-22T15:44:31 | 2020-04-22T15:44:31 | 253,159,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the breakingRecords function below.
def breakingRecords(scores):
    """Count how often the running best and worst scores are broken.

    Returns [times the maximum record was broken,
             times the minimum record was broken].
    """
    best = worst = scores[0]
    max_breaks = min_breaks = 0
    for score in scores[1:]:
        # A score cannot beat both records at once, so elif is safe.
        if score > best:
            best = score
            max_breaks += 1
        elif score < worst:
            worst = score
            min_breaks += 1
    return [max_breaks, min_breaks]
# HackerRank harness: read the score count and scores from stdin, then write
# the two record-break counts to the file named by OUTPUT_PATH.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # n (the declared count) is consumed but not otherwise used.
    n = int(input())
    scores = list(map(int, input().rstrip().split()))
    result = breakingRecords(scores)
    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
| [
"JiaweiTan1996@gmail.com"
] | JiaweiTan1996@gmail.com |
6ab3e1a39cd93671912027393c5a68e9026211cb | 7941390ad02fca9f8c66ceaf1d71a9fd0815f50e | /simple_NER/annotators/remote/allenai.py | b54499e860da539887f6b0098ac177a14e67b49b | [
"MIT"
] | permissive | msgpo/simple_NER | 08cde36758f1d97560c3db9e36918a7e4abe08dd | 5eaed615d9075d879e4b2af461f2e99acc0f9e68 | refs/heads/master | 2022-04-19T13:07:56.507469 | 2020-04-12T18:43:42 | 2020-04-12T18:43:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | from simple_NER.annotators import NERWrapper
from simple_NER import Entity
import requests
def allen_NER(text, host):
    """POST `text` to an AllenNLP demo server's NER endpoint.

    Returns the decoded JSON response; callers rely on its 'words' and
    'tags' keys (BIO-tagged tokens).
    """
    url = host + "named-entity-recognition"
    data = {"sentence": text}
    return requests.post(url, json=data).json()
class AllenNlpNER(NERWrapper):
    """Named-entity annotator backed by a remote AllenNLP demo server."""

    def __init__(self, host="http://demo.allennlp.org/predict/"):
        super().__init__()
        self.host = host
        self.add_detector(self.annotate)

    def annotate(self, text):
        """Yield an Entity for every token whose BIO tag is not 'O'."""
        res = allen_NER(text, self.host)
        tags = res["tags"]
        words = res["words"]
        for idx, tag in enumerate(tags):
            if tag != 'O':
                yield Entity(words[idx], tag, source_text=text)
# Smoke test against the public AllenNLP demo server (requires network).
if __name__ == "__main__":
    ner = AllenNlpNER()
    ents = [r for r in
            ner.extract_entities("Lisbon is the capital of Portugal")]
    assert ents[0].as_json() == {'confidence': 1,
                                 'data': {},
                                 'entity_type': 'U-LOC',
                                 'rules': [],
                                 'source_text': 'Lisbon is the capital of Portugal',
                                 'spans': [(0, 6)],
                                 'value': 'Lisbon'}
    assert ents[1].as_json() == {'confidence': 1,
                                 'data': {},
                                 'entity_type': 'U-LOC',
                                 'rules': [],
                                 'source_text': 'Lisbon is the capital of Portugal',
                                 'spans': [(25, 33)],
                                 'value': 'Portugal'}
| [
"jarbasai@mailfence.com"
] | jarbasai@mailfence.com |
99b238bea7127df3c68b95e414f228fb460c563e | 17c8725147ac6dc3a61333affb2a2dd1102e348f | /read_statistics/migrations/0002_readdetail.py | 1c3f135b954dc085408e7c1271722683cd38767d | [] | no_license | Rcurtain/YSHSite | 4dad0ad071f6eba31d6131148ff3b20d0ce37002 | f32243ab316515d725cdb4453ebeac3aa855d394 | refs/heads/master | 2020-04-07T02:44:27.814418 | 2018-12-16T07:44:59 | 2018-12-16T07:44:59 | 157,987,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # Generated by Django 2.1.2 on 2018-11-19 10:23
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated Django migration: adds the ReadDetail model, which tracks
# per-day read counts for arbitrary objects via the contenttypes framework.
class Migration(migrations.Migration):
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('read_statistics', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReadDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(default=django.utils.timezone.now)),
                ('read_num', models.IntegerField(default=0)),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
            ],
        ),
    ]
| [
"r_curtain@163.com"
] | r_curtain@163.com |
21b2985688bc49f39e57f1aa2c6b541719f2a644 | 581bd715fcbfa6094e63e90fad8b3abc031c9346 | /lambda/dp_patch_story.py | 9ee7383407e324f5e06876a0e4439d24e2b83faf | [] | no_license | stevekinney/dogpointing | f602497090e004d707c48f177a67e190150665b3 | ef03d21bad3df0acd1aa8d663261d001c08cb4db | refs/heads/master | 2021-09-01T22:29:09.977341 | 2017-12-28T23:08:24 | 2017-12-28T23:08:24 | 115,666,508 | 0 | 0 | null | 2017-12-28T23:10:23 | 2017-12-28T23:10:23 | null | UTF-8 | Python | false | false | 3,050 | py | import logging
import boto3
import botocore
import time
import random
from datetime import datetime
from datetime import timedelta
from boto3.dynamodb.conditions import Key
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource("dynamodb", region_name='us-east-2', endpoint_url="http://dynamodb.us-east-2.amazonaws.com")
table = dynamodb.Table('dp_stories')
user_table = dynamodb.Table('dp_users')
def vote_to_point(vote):
    """Map a dog-breed vote string to its point value.

    Any unrecognized vote (including the "cat" placeholder) scores 9.
    """
    points = {
        "Chihuahua": 0,
        "Corgi": 1,
        "Pitbull": 2,
        "Labrador": 3,
        "Great Dane": 4,
    }
    return points.get(vote, 9)
def cmp_votes(a, b):
    """Python-2-style comparator ordering two vote dicts by point value."""
    left = vote_to_point(a["value"])
    right = vote_to_point(b["value"])
    if left > right:
        return 1
    if left < right:
        return -1
    return 0
def get_user(session_key, user_key):
    """Fetch a user item from the dp_users DynamoDB table.

    Returns the item dict, or None when the request fails with a client
    error.  NOTE(review): a missing item raises KeyError on 'Item', which
    is NOT caught here — confirm whether absent users should also yield None.
    """
    try:
        response = user_table.get_item(Key={'session_key': str(session_key), 'user_key': user_key})
        return response['Item']
    except botocore.exceptions.ClientError as e:
        return None
def lambda_handler(event, context):
    """Record a user's vote on a story and extend the story's expiration.

    Expects `event` to carry 'story_key', 'session_key', and a 'story'
    dict with 'user_key' and 'vote'.  The story is marked complete once
    no vote still holds the "cat" placeholder.  Raises when the voter is
    unknown or the conditional DynamoDB update fails.
    """
    logger.debug('got event{}'.format(event))
    story_key = event['story_key']
    session_key = event['session_key']
    # New expiration: 24 hours from now, in epoch seconds.
    expiration = time.mktime((datetime.now() + timedelta(days=1)).timetuple())
    # validate vote
    user_key = event['story']['user_key']
    vote = event['story']['vote']
    user = get_user(session_key, user_key)
    if user is None:
        raise Exception('Vote not valid')
    response = table.get_item(Key={'story_key': story_key, 'session_key': session_key})
    story = response['Item']
    logger.debug('got story{}'.format(story))
    old_expiration = story['expiration']
    # Re-key existing votes by user so this user's new vote overwrites any
    # earlier one.
    votes = {}
    for v in story["votes"]:
        votes[v["key"]] = v
    votes[user_key] = {"key": user_key, "name": user['name'], "value": vote}
    # Complete once nobody still has the "cat" (not-yet-voted) placeholder.
    complete = True
    for vote in votes.values():
        if vote["value"] == "cat":
            complete = False
            break
    votes_sorted = list(votes.values())
    # NOTE(review): list.sort(cmp) is Python-2-only; Python 3 would need
    # key=functools.cmp_to_key(cmp_votes) — confirm the Lambda runtime.
    votes_sorted.sort(cmp_votes)
    # Save votes; the condition guards against concurrent updates and
    # against voting on closed/complete stories (optimistic locking on
    # the old expiration value).
    try:
        response = table.update_item(
            Key={'story_key': str(story_key), 'session_key': session_key},
            UpdateExpression="set votes = :votes, expiration = :expiration, complete = :complete",
            ConditionExpression='closed = :exp_closed and complete = :exp_complete and expiration = :exp_expiration',
            ExpressionAttributeValues={
                ':votes': votes_sorted,
                ':expiration': int(expiration),
                ':complete': complete,
                ':exp_expiration': old_expiration,
                ':exp_closed': False,
                ':exp_complete': False,
            },
            ReturnValues="UPDATED_NEW"
        )
    except botocore.exceptions.ClientError as e:
        raise Exception("Error updating dynamo")
    # Re-read so the caller gets the fully updated story item.
    response = table.get_item(Key={'story_key': story_key, 'session_key': session_key})
    return response["Item"]
| [
"brad.culberson@sendgrid.com"
] | brad.culberson@sendgrid.com |
d5710f1de78a66cab1a6ea2d011e6ecbcf373c71 | e3d8f5b953b1cf605fbbaccacdc83ede462e714e | /Game Control/directxkeypress.py | d1ac59f5343dc9aac5accac0676aeea047535fb2 | [
"MIT"
] | permissive | alanx0401/TrovePyFishing | a703bd1f69aff0a0012ac7302a09f53c3c7aef54 | d9ea8dbf199a42d1afd5c164914b5c4c020c0f44 | refs/heads/master | 2021-04-26T22:26:35.769945 | 2018-03-06T14:52:18 | 2018-03-06T14:52:18 | 124,093,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | # direct inputs
# source to this solution and code:
# http://stackoverflow.com/questions/14489013/simulate-python-keypresses-for-controlling-a-game
# http://www.gamespp.com/directx/directInputKeyboardScanCodes.html
import ctypes
import time
SendInput = ctypes.windll.user32.SendInput
W = 0x11
A = 0x1E
S = 0x1F
D = 0x20
F = 0x21
M = 0x32
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
# ctypes mirrors of the Win32 INPUT structure family consumed by SendInput.
class KeyBdInput(ctypes.Structure):
    # KEYBDINPUT: virtual key, scan code, flags, timestamp, extra info.
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]

class HardwareInput(ctypes.Structure):
    # HARDWAREINPUT: message plus its low/high word parameters.
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]

class MouseInput(ctypes.Structure):
    # MOUSEINPUT: movement deltas, button data, flags, timestamp, extra info.
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time",ctypes.c_ulong),
                ("dwExtraInfo", PUL)]

class Input_I(ctypes.Union):
    # Union over the three possible input payloads.
    _fields_ = [("ki", KeyBdInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]

class Input(ctypes.Structure):
    # INPUT: type tag (1 = keyboard) plus the payload union.
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", Input_I)]
# Actuals Functions
def PressKey(hexKeyCode):
    """Send a key-down event for the given DirectInput scan code.

    dwFlags=0x0008 (KEYEVENTF_SCANCODE) tells Windows to treat the value
    as a hardware scan code, which DirectX games read directly.
    """
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
    x = Input( ctypes.c_ulong(1), ii_ )
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
    """Send a key-up event for the given DirectInput scan code.

    Flags 0x0008 | 0x0002 = KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP.
    """
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )
    x = Input( ctypes.c_ulong(1), ii_ )
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
# Demo: hold the W key (scan code 0x11) for one second, then release it.
if __name__ == '__main__':
    PressKey(0x11)
    time.sleep(1)
    ReleaseKey(0x11)
time.sleep(1) | [
"noreply@github.com"
] | alanx0401.noreply@github.com |
3dd267f794d2b0b929fd7ea3529b59d9507ba38a | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/app/__init__.py | 9826abe1d2a81963769b6af01ae5ebf38641f8f6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 2,758 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud app group."""
from googlecloudsdk.calliope import base
DETAILED_HELP = {
'brief': 'Manage your App Engine deployments.',
'DESCRIPTION': """
The gcloud app command group lets you deploy and manage your Google App
Engine apps. These commands replace their equivalents in the appcfg
tool.
App Engine is a platform for building scalable web applications
and mobile backends. App Engine provides you with built-in services and
APIs such as NoSQL datastores, memcache, and a user authentication API,
common to most applications.
More information on App Engine can be found here:
https://cloud.google.com/appengine and detailed documentation can be
found here: https://cloud.google.com/appengine/docs/
""",
'EXAMPLES': """\
To run your app locally in the development application server
to simulate your application running in production App Engine with
sandbox restrictions and services provided by App Engine SDK libraries,
use the `dev_appserver.py` command and your app's `app.yaml`
configuration file to run:
$ dev_appserver.py ~/my_app/app.yaml
For an in-depth look into using the local development server, follow
this guide : https://cloud.google.com/appengine/docs/standard/python/tools/using-local-server.
To deploy the code and configuration of your app to the App Engine
server, run:
$ {command} deploy ~/my_app/app.yaml
To list all versions of all services of your existing deployments, run:
$ {command} versions list
To generate all relevant config files for `~/my_app` (or emit an error
message if the directory contents are not recognized), run:
$ {command} gen-config ~/my_app
"""
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA,
                    base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class AppengineGA(base.Group):
  """Root command group for `gcloud app` (registered on all tracks)."""

  def Filter(self, context, args):
    # App Engine commands do not bill quota against the user project, so
    # the user-project quota override is disabled for the whole group.
    del context, args
    base.DisableUserProjectQuota()
AppengineGA.detailed_help = DETAILED_HELP
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
3e8b2173d23506f90b22a85f3582bec15dfce6a7 | 8c4f78cc3fcc00a8a46b9e3803d6f6bedddb2a68 | /contact/migrations/0003_auto_20170307_1320.py | bdc3b4946633b48dcd1ce65001026ab7f070b1c5 | [] | no_license | kidsfm/app | d09a345d9c6dfa5d3bd880d6101511bed6faa5f1 | 611e21f6e5747ac587833ca2ef77a740b6a71c24 | refs/heads/master | 2023-08-29T03:06:12.322901 | 2017-09-12T16:48:29 | 2017-09-12T16:48:29 | 81,123,507 | 0 | 0 | null | 2021-09-07T23:26:35 | 2017-02-06T19:28:49 | HTML | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-07 18:20
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: shortens Message.email to a 50-char
# EmailField.
class Migration(migrations.Migration):
    dependencies = [
        ('contact', '0002_auto_20170307_1044'),
    ]
    operations = [
        migrations.AlterField(
            model_name='message',
            name='email',
            field=models.EmailField(max_length=50),
        ),
    ]
| [
"r-champagne@neiu.edu"
] | r-champagne@neiu.edu |
92567d4c350a444364ac3fa59dfdf8eb6606265e | 3b9737755475a851677547c20384c3b545c88774 | /gvc3202/exploit.py | 50a62ed684f9400cd2500243ba785f6913d14f0b | [] | no_license | up777/grandstream_exploits | 7af6fe7a2076b0311569bb1a2e77dbf3096c1816 | 0bf7a719477b304990bb98247f3b5e025d3e56b7 | refs/heads/master | 2020-06-18T03:01:32.838911 | 2019-03-31T10:55:19 | 2019-03-31T10:56:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | #!/usr/bin/env python2
"""
CVE-2019-10655: Grandstream - GVC3200 (Video Conferencing Unit) Unauthenticated RCE
Author: Brendan Scarvell
Vulnerable versions: < 1.0.3.51
A command injection vulnerability exists in the `starttraceroute` parameter.
Additionally, supplying 93 characters in the "phonecookie" cookie overwrites
the return value of the valid_connection() function, bypassing authentication.
"""
import sys, urllib2, os, base64
# Python 2 script; edit LHOST/LPORT to point at your listener before use.
if len(sys.argv) == 1:
    print "Usage: {} <host> <port -- default: 80>".format(sys.argv[0])
    sys.exit()
HOST = sys.argv[1]
port = 80 if len(sys.argv) < 3 else sys.argv[2]
# required -- device has no telnetd available
LHOST = "10.1.1.78"
LPORT = 4444
# Reverse-shell one-liner, base64-encoded so it survives URL transport.
revshell = base64.b64encode("rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/system/xbin/sh -i 2>&1|nc {} {} >/tmp/f".format(LHOST, LPORT))
# $IFS substitutes for spaces, which the injection point does not accept.
payload = "echo$IFS\"{}\"|base64$IFS-d|sh".format(revshell)
# Buffer overflow to bypass auth: 93 bytes overwrite valid_connection()'s
# return value (see module docstring).
cookie = "phonecookie=\"{}\"".format("A"*93)
print "[*] Trying to run command.."
# Inject the payload through the unauthenticated starttraceroute action.
req = urllib2.Request('http://' + HOST + '/manager?action=starttraceroute&addr=||'+payload)
req.add_header('Cookie', cookie)
res = urllib2.urlopen(req)
if 'Response=Success' not in res.read():
    print "[!] Exploit failed. Host may not be vulnerable"
else:
    print '[*] Finished.'
| [
"bscarvell@gmail.com"
] | bscarvell@gmail.com |
541e28ec93c85cc1adc61eecd87bdde2a641136b | c91d029b59f4e6090a523bf571b3094e09852258 | /src/utils/middlewares.py | 64ca375d393cdaff4d003588764a00f79181c0a1 | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 2,927 | py | import re
import threading
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect
from geral.functions import is_alternativa
from utils.functions import get_client_ip
request_cfg = threading.local()
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
from .classes import LoggedInUser, AcessoInterno
class LoggedInUserMiddleware(MiddlewareMixin):
    '''
    Records the current request's user in the LoggedInUser singleton.

    Insert this middleware after
    django.contrib.auth.middleware.AuthenticationMiddleware
    '''
    def process_request(self, request):
        '''
        Store the request's user; returns None so processing continues.
        '''
        logged_in_user = LoggedInUser()
        logged_in_user.set_user(request)
        return None
class NeedToLoginOrLocalMiddleware(object):
    """Allow requests that are authenticated, come from a whitelisted IP
    block, or target a whitelisted URL; redirect everything else.

    Settings used: N2LOL_ALLOWED_IP_BLOCKS, N2LOL_ALLOWED_URLS (both lists
    of regex patterns) and N2LOL_REDIRECT (redirect target).
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        user_ip = get_client_ip(request)
        # Check the client IP against the allowed regex blocks.
        authenticated_by_ip = False
        for ip in settings.N2LOL_ALLOWED_IP_BLOCKS:
            if re.compile(ip).match(user_ip) is not None:
                authenticated_by_ip = True
                break
        # Record whether this request is "internal" for later use.
        acesso_interno = AcessoInterno()
        acesso_interno.set_interno(authenticated_by_ip)
        acesso_interno.set_ip(user_ip)
        if request.user.is_authenticated:
            return self.get_response(request)
        if authenticated_by_ip:
            return self.get_response(request)
        # Anonymous, external request: only whitelisted URLs pass through.
        user_url = request.META['PATH_INFO']
        for url in settings.N2LOL_ALLOWED_URLS:
            if re.compile(url).match(user_url) is not None:
                return self.get_response(request)
        return redirect(settings.N2LOL_REDIRECT)
class AlterRouterMiddleware:
    """
    Based on
    https://gist.github.com/gijzelaerr/7a3130c494215a0dd9b2/

    The Alternative db router middleware.
    Before the view sets some context from the URL into thread local storage.
    After, deletes it.
    In between, any database operation will call the router, which checks for
    the thread local storage and returns an appropriate database alias.
    Add this to your middleware, for example:
    MIDDLEWARE += ['utils.middlewares.AlterRouterMiddleware']
    """
    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.
    def __call__(self, request):
        # Before the view: publish the DB choice in thread-local storage so
        # the database router can read it, and mirror it on the request.
        request_cfg.alter_db = is_alternativa(request)
        request.alter_db = request_cfg.alter_db
        response = self.get_response(request)
        # After the view: clear the thread-local flag so the worker thread
        # does not leak this request's DB choice into the next request.
        if hasattr(request_cfg, 'alter_db'):
            del request_cfg.alter_db
        return response
| [
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
b1c5aa3c510fbdbecf5b9cd9ff4fb9a6c6e66c83 | a86bd65fc4d901ea7a33fb666caa430a1ae6d1a0 | /setting.py | fe2c8828c8bb73997e531f0e6d0c423994798fc4 | [
"Apache-2.0"
] | permissive | andyhuzhill/433QQBot | a2e2700ac7a361eb1470889ac403a26342b6fdae | 211a4757008042fb8ca4bdd5f4e356e5fca6ff12 | refs/heads/master | 2022-07-16T22:36:57.502924 | 2020-05-15T08:17:40 | 2020-05-15T08:17:40 | 265,263,012 | 1 | 0 | Apache-2.0 | 2020-05-19T14:02:00 | 2020-05-19T14:01:59 | null | UTF-8 | Python | false | false | 2,586 | py | import os
import json
import configparser
cfg_name = 'setting.conf'
BASE_DIR = os.path.dirname(__file__)
FILE_PATH = os.path.join(BASE_DIR, cfg_name)
cf = configparser.ConfigParser()
def read_config(section: str, option: str) -> str:
    """Read a single value from the config file.

    Args:
        section: section name in the conf file.
        option: option name in the conf file.

    Returns:
        The configured value, as a string.
    """
    with open(FILE_PATH, 'r', encoding='utf-8') as cfgfile:
        cf.read_file(cfgfile)
        result = cf.get(section, option)
    return str(result)
def write_config(section: str, option: str, value: str) -> None:
    """Write a single value back to the config file.

    Args:
        section: section name in the conf file.
        option: option name in the conf file.
        value: the value to store.
    """
    # Load current contents first so the rewrite preserves other options.
    with open(FILE_PATH, 'r', encoding='utf-8') as cfgfile:
        cf.read_file(cfgfile)
    with open(FILE_PATH, 'w+', encoding='utf-8') as cfgfile2:
        cf.set(section, option, value)
        cf.write(cfgfile2)
def group_id() -> list:
    """Return the QQ group ids (ints) that should receive messages."""
    raw = read_config('QQgroup', 'id')
    return [int(gid) for gid in raw.split(',')]
def dev_group_id() -> list:
    """Return the QQ group ids (ints) used for development and testing."""
    group_id = read_config('QQgroup', 'dev_id')
    array = list(map(int, group_id.split(',')))
    return array
def welcome() -> str:
    """Return the group welcome message, restoring newlines that were
    stored as literal "\\n" sequences in the conf file."""
    message = read_config('QQgroup', 'welcome')
    return message.replace('\\n', '\n')
def shutword() -> list:
    """Return the configured banned-word list (empty when unconfigured)."""
    raw = read_config('QQgroup', 'shutword')
    return raw.split(',') if raw else []
def db_link() -> str:
    """Return an SQLAlchemy-compatible SQLite database URL."""
    return 'sqlite:///'+read_config('system', 'database')
def pk_datas() -> list:
    """Return the PK configurations, each entry JSON-decoded from its file.

    The conf option 'pk_lists' names the files (comma-separated); each file
    lives under the configured 'config_folder'.
    """
    pk_configs = read_config('pk', 'pk_lists')
    if pk_configs:
        pk_array = list(pk_configs.split(','))
    else:
        pk_array = list()
    pk_datas = list()
    for config in pk_array:
        with open(read_config('pk', 'config_folder') + '/' + config, "r") as f:
            pk_datas.append(json.loads(f.read()))
    return pk_datas
| [
"xuanking@gmail.com"
] | xuanking@gmail.com |
3b70ca2b35afdf97f401ed2f67c039305769db02 | 6740ccdcdb62b5d1afd92c1ad9f57353e93bcfa1 | /checkout/migrations/0006_orderitem_name.py | 211703860e55053977c4ce4ffc68b1e4f8948049 | [] | no_license | Amyh97/miletone-project-4 | 4f2a10a8364ce0f3f1fd2da88705dddd46f16de8 | 57658de674d5de15312e1c26ecbd773f4b7607dd | refs/heads/master | 2023-04-01T09:24:58.938068 | 2021-03-31T10:29:52 | 2021-03-31T10:29:52 | 335,349,196 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # Generated by Django 3.1.6 on 2021-03-24 12:04
from django.db import migrations, models
# Auto-generated Django migration: adds a non-null `name` field to OrderItem
# (existing rows are backfilled with 'placeholder').
class Migration(migrations.Migration):
    dependencies = [
        ('checkout', '0005_auto_20210324_1133'),
    ]
    operations = [
        migrations.AddField(
            model_name='orderitem',
            name='name',
            field=models.CharField(default='placeholder', max_length=50),
            preserve_default=False,
        ),
    ]
| [
"amy.hollis@btinternet.com"
] | amy.hollis@btinternet.com |
f3e46e8de53108b8175863fac2003556b51fdbdc | 5dc393ffb3d65094d2c4f6bc8b9980e2fc167670 | /pandas/stats/tests/test_fama_macbeth.py | f48dde20f138ac2a2f78bf479e668b579e96ac1f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | JWCornV/pandas | 913db2a34cb9f9820f986412e9c3cf868ecef24d | 6078fba9410918baa486ca008cc9e3ba066c03ec | refs/heads/master | 2020-12-25T10:14:13.384789 | 2012-06-27T17:10:54 | 2012-06-27T17:10:54 | 4,813,052 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | from pandas import DataFrame, Panel
from pandas.stats.api import fama_macbeth
from common import assert_almost_equal, BaseTest
import numpy as np
class TestFamaMacBeth(BaseTest):
    """Smoke tests for pandas' Fama-MacBeth regression (Python 2 era:
    uses xrange/iteritems and the long-removed Panel)."""

    def testFamaMacBethRolling(self):
        # self.checkFamaMacBethExtended('rolling', self.panel_x, self.panel_y,
        #                               nw_lags_beta=2)
        # df = DataFrame(np.random.randn(50, 10))
        # Random x panel and a y that is a noisy copy of another random draw.
        x = dict((k, DataFrame(np.random.randn(50, 10))) for k in 'abcdefg')
        x = Panel.from_dict(x)
        y = (DataFrame(np.random.randn(50, 10)) +
             DataFrame(0.01 * np.random.randn(50, 10)))
        self.checkFamaMacBethExtended('rolling', x, y, nw_lags_beta=2)
        self.checkFamaMacBethExtended('expanding', x, y, nw_lags_beta=2)

    def checkFamaMacBethExtended(self, window_type, x, y, **kwds):
        """Compare each window of the extended result against a fresh
        static regression over the same truncated data."""
        window = 25
        result = fama_macbeth(y=y, x=x, window_type=window_type, window=window,
                              **kwds)
        self._check_stuff_works(result)
        index = result._index
        time = len(index)
        for i in xrange(time - window + 1):
            # Rolling windows slide their start; expanding windows keep it.
            if window_type == 'rolling':
                start = index[i]
            else:
                start = index[0]
            end = index[i + window - 1]
            x2 = {}
            for k, v in x.iteritems():
                x2[k] = v.truncate(start, end)
            y2 = y.truncate(start, end)
            reference = fama_macbeth(y=y2, x=x2, **kwds)
            assert_almost_equal(reference._stats, result._stats[:, i])
        # One more static run on the final window's data.
        static = fama_macbeth(y=y2, x=x2, **kwds)
        self._check_stuff_works(static)

    def _check_stuff_works(self, result):
        """Touch the result's public attributes to ensure they compute."""
        attrs = ['mean_beta', 'std_beta', 't_stat']
        for attr in attrs:
            getattr(result, attr)
        # Accessing .summary also exercises the report formatting path.
        result.summary
# Run this module's tests under nose, dropping into pdb on failure.
if __name__ == '__main__':
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
| [
"wesmckinn@gmail.com"
] | wesmckinn@gmail.com |
29400e5d502de8984c008549b041a52bea849c26 | a5feab13bb857953bd741c6e282ab0c996971841 | /basicforms/basicapp/migrations/0027_auto_20190403_1126.py | 4a7ae193d66922f5d2a605e62a3d9b181725ee7a | [] | no_license | sbt4104/django-busapp-deployment | 823e215a01516626ebc1385f2135f878d8f741c9 | 3fc8f31583243e98739d362c629143fde07645de | refs/heads/master | 2020-06-28T22:21:48.229863 | 2020-03-14T18:56:45 | 2020-03-14T18:56:45 | 200,357,220 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # Generated by Django 2.1.7 on 2019-04-03 11:26
from django.db import migrations
# Auto-generated Django migration: renames the `notifi` model to
# `Notifications`.
class Migration(migrations.Migration):
    dependencies = [
        ('basicapp', '0026_auto_20190403_1055'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='notifi',
            new_name='Notifications',
        ),
    ]
| [
"sthorat661@gmail.com"
] | sthorat661@gmail.com |
b0969a4a527c8e3a025346cc9521c1e038958cc3 | 38946ffd967f0db87b8847cc3e7dc446e83a9c7e | /hw3/p2/baseline.py | 066dc1ea10e1bf658517bacd5a993edff92cc0de | [] | no_license | cigarbeer/ML2018SPRING | 8fe2d3baf637fbc17e777a504e3b815664b2da41 | 1559a7625256d433222ec3d419af78993b8d21e8 | refs/heads/master | 2021-09-17T19:33:59.317093 | 2018-07-04T14:26:43 | 2018-07-04T14:26:43 | 124,334,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,172 | py | SEED = 0
import numpy as np
np.random.seed(SEED)
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Reshape
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
VALIDATION_SPLIT = 0.1
BATCH_SIZE = 128
FULLY_CONNECTED_UNIT_NUM = 512
FLATTEN_IMAGE_SIZE = 48 * 48
FLATTEN_IMAGE_SHAPE = (FLATTEN_IMAGE_SIZE,)
IMAGE_SHAPE = (48, 48, 1)
OUTPUT_CLASSES_NUM = 7
def read_raw_training_data(file_name):
    """Load the labeled CSV and return shuffled (X, y).

    X: pixel matrix scaled to [0, 1], one flattened 48x48 image per row.
    y: one-hot labels over the seven emotion classes.
    """
    df = pd.read_csv(file_name)
    label = df.label
    # Each 'feature' cell is a space-separated pixel string.
    feature = df.feature.apply(np.fromstring, sep=' ', dtype=np.float64)
    X = np.stack(feature, axis=0)
    y = np_utils.to_categorical(label, num_classes=OUTPUT_CLASSES_NUM)
    # Shuffle X and y together with one permutation.
    m, n = X.shape
    rand_idx = np.random.permutation(np.arange(m))
    X = X[rand_idx]
    y = y[rand_idx]
    return X / 255, y
def read_raw_testing_data(file_name):
    """Load the unlabeled test CSV and return (ids, pixels in [0, 1]).

    NOTE(review): parses pixels as float32 while the training loader uses
    float64 — confirm the mismatch is intentional.
    """
    df = pd.read_csv(file_name)
    ids = df.id.values
    feature = df.feature.apply(np.fromstring, sep=' ', dtype=np.float32)
    t = np.stack(feature, axis=0)
    return ids, t / 255
def read_selected_training_data(X_file, y_file):
    """Load previously saved .npy feature and label arrays."""
    return np.load(X_file), np.load(y_file)
def preprocess_training_data(X):
    """Reshape flat pixel rows into 48x48x1 images.

    Normalization is currently disabled; zeros are returned where the
    (mu, sigma) statistics used to be so existing call sites still unpack
    three values.
    """
    # X = samplewise_normalization(X)
    # X, mu, sigma = featurewise_normalize(X)
    X = X.reshape((-1, *IMAGE_SHAPE))
    # return X, mu, sigma
    return X, 0, 0
def preprocess_testing_data(t, mu, sigma):
    """Reshape flat test pixels into 48x48x1 images.

    mu and sigma are accepted for interface symmetry with the training
    path but are unused while normalization stays disabled.
    """
    # t = samplewise_normalization(t)
    # t = (t - mu) / sigma
    # t = np.nan_to_num(t)
    t = t.reshape((-1, *IMAGE_SHAPE))
    return t
def split_validation_set(X, y, rate):
    """Split (X, y) into training and validation portions.

    `rate` is the validation fraction; the first (1 - rate) of the rows
    become the training set (no shuffling is done here).

    Returns:
        ((X_train, y_train), (X_val, y_val))
    """
    n_samples = X.shape[0]
    cut = int((1 - rate) * n_samples)
    return (X[:cut], y[:cut]), (X[cut:], y[cut:])
def get_training_data_generator(X):
    """Build the Keras ImageDataGenerator used for training batches.

    All augmentation is currently switched off (zero ranges, no flips),
    so the generator only yields raw batches.  ``X`` is unused because
    the featurewise ``fit`` call below is commented out.
    """
    training_data_generator = ImageDataGenerator(
        # samplewise_center=False,
        # samplewise_std_normalization=False,
        # featurewise_center=False,
        # featurewise_std_normalization=False,
        rotation_range=0.0, ########################
        width_shift_range=0, ######################
        height_shift_range=0, ######################
        horizontal_flip=False, #######################
        # zca_whitening=False,
        # zca_epsilon=1e-06,
        # shear_range=0.0,
        # zoom_range=0,
        # channel_shift_range=0.0,
        # fill_mode='nearest',
        # cval=0.0,
        # vertical_flip=False,
        # rescale=None,
        # preprocessing_function=None,
        data_format='channels_last',
    )
    # training_data_generator.fit(X, augment=False, rounds=1, seed=SEED)
    return training_data_generator
# def get_testing_data_generator(t):
# testing_data_generator = ImageDataGenerator(
# samplewise_center=False,
# samplewise_std_normalization=False,
# featurewise_center=False,
# featurewise_std_normalization=False,
# zca_whitening=False,
# zca_epsilon=1e-06,
# rotation_range=0.0,
# width_shift_range=0.0,
# height_shift_range=0.0,
# shear_range=0.0,
# zoom_range=0.0,
# channel_shift_range=0.0,
# fill_mode='nearest',
# cval=0.0,
# horizontal_flip=False,
# vertical_flip=False,
# rescale=None,
# preprocessing_function=None,
# data_format='channels_last'
# )
# testing_data_generator.fit(t, augment=False, rounds=1, seed=SEED)
# return testing_data_generator
# def get_validation_data_generator(V):
# return get_testing_data_generator(V)
def write_prediction(file_name, prediction):
    """Write class predictions to ``file_name`` as an id-indexed CSV and
    return the frame that was written."""
    frame = pd.DataFrame(columns=['label'], data=prediction)
    frame.to_csv(file_name, index=True, index_label='id')
    return frame
def input_block(model, input_shape, output_shape):
    """Add the input reshape layer (flat vector -> image tensor) to ``model``."""
    reshape_layer = Reshape(target_shape=output_shape, input_shape=input_shape)
    model.add(reshape_layer)
    return model
def cnn_block(model, filters, kernel_size, n_layers, dropout_rate):
    """Append one convolutional stage to ``model``.

    The stage is ``n_layers`` x (Conv2D -> SELU -> BatchNorm) followed by
    a single 2x2 max-pooling and a dropout layer.
    """
    for _ in range(n_layers):
        conv = Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=(1, 1),
            padding='same',
            data_format='channels_last',
            use_bias=True,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros'
        )
        model.add(conv)
        model.add(Activation('selu'))
        model.add(BatchNormalization())
    model.add(MaxPooling2D(
        pool_size=(2, 2),
        strides=(2, 2),
        padding='same',
        data_format='channels_last'
    ))
    model.add(Dropout(rate=dropout_rate))
    return model
def nn_block(model, units, n_layers, dropout_rate):
    """Append ``n_layers`` fully connected stages to ``model``.

    Each stage is Dense -> SELU -> BatchNorm -> Dropout.
    """
    for _ in range(n_layers):
        dense = Dense(
            units=units,
            use_bias=True,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros'
        )
        model.add(dense)
        model.add(Activation('selu'))
        model.add(BatchNormalization())
        model.add(Dropout(rate=dropout_rate))
    return model
def output_block(model, output_shape):
    """Append the softmax classification head with ``output_shape`` classes."""
    head = Dense(
        units=output_shape,
        activation='softmax',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros'
    )
    model.add(head)
    return model
def net(input_shape, output_shape):
    """Assemble the CNN: input reshape, three conv stages (64/128/256
    filters), a 3-layer dense head, and a softmax over ``output_shape``
    classes.
    """
    model = Sequential()
    model = input_block(model, input_shape=input_shape, output_shape=IMAGE_SHAPE)
    # Three convolutional stages with doubling filter counts.
    model = cnn_block(model, filters=64, kernel_size=(3, 3), n_layers=2, dropout_rate=0.5)
    model = cnn_block(model, filters=128, kernel_size=(3, 3), n_layers=2, dropout_rate=0.5)
    model = cnn_block(model, filters=256, kernel_size=(3, 3), n_layers=2, dropout_rate=0.5)
    model.add(Flatten())
    model = nn_block(model, units=FULLY_CONNECTED_UNIT_NUM, n_layers=3, dropout_rate=0.5)
    model = output_block(model, output_shape=output_shape)
    return model
def compile_model(model):
    """Compile the model with the Nadam optimizer and categorical
    cross-entropy loss, tracking accuracy."""
    # A hand-tuned Adam was tried previously:
    # adam = Adam(lr=1e-4, amsgrad=True)
    model.compile(optimizer='nadam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def fit_generator(model, X, y, epochs, batch_size, model_saving_path):
    """Train ``model`` on (X, y) with an augmentation generator.

    The best weights (lowest validation loss) are checkpointed to
    ``model_saving_path``; training stops early after 30 epochs without a
    val_loss improvement of at least 1e-4.
    """
    callbacks = [
        ModelCheckpoint(model_saving_path, monitor='val_loss', save_best_only=True, verbose=1),
        # ModelCheckpoint(model_saving_path+'weights.{epoch:02d}-{val_loss:.4f}-{val_acc:.4f}.hdf5', monitor='val_loss', verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=30, verbose=1)
    ]
    (X_train, y_train), (X_val, y_val) = split_validation_set(X, y, VALIDATION_SPLIT)
    m_train, *n = X_train.shape
    # m_val is computed but currently unused (validation_steps is None below).
    m_val, *n = X_val.shape
    training_data_generator = get_training_data_generator(X_train).flow(X_train, y_train, batch_size=batch_size, shuffle=True, seed=SEED)
    # validation_data_generator = get_validation_data_generator(X_train).flow(X_val, y_val, batch_size=batch_size, shuffle=True, seed=SEED)
    model.fit_generator(
        generator=training_data_generator,
        steps_per_epoch=int(m_train/batch_size),
        epochs=epochs,
        verbose=1,
        callbacks=callbacks,
        validation_data=(X_val, y_val),
        validation_steps=None,
        # validation_steps=int(m_val/batch_size)+1,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        initial_epoch=0
    )
    return model
def predict(model, t, batch_size):
    """Return the arg-max class index for every sample in ``t``."""
    probabilities = model.predict(t, batch_size=batch_size, verbose=1)
    return np.argmax(probabilities, axis=1)
def featurewise_normalize(X):
    """Standardise each feature column to zero mean / unit variance.

    Returns (normalized X, per-feature mean, per-feature std); NaN/inf
    artefacts from constant columns are replaced with finite values.
    """
    mu = np.mean(X, axis=0)
    sigma = np.nan_to_num(np.std(X, axis=0))
    normalized = np.nan_to_num((X - mu) / sigma)
    return normalized, mu, sigma
def samplewise_normalization(X):
    """Standardise every row (sample) to zero mean / unit variance."""
    mu = np.mean(X, axis=1)
    sigma = np.nan_to_num(np.std(X, axis=1))
    # Transpose so the per-row statistics broadcast over each sample.
    normalized = np.nan_to_num(((X.T - mu) / sigma).T)
    return normalized
def save_statistics(s_path, mu, sigma):
    """Persist the normalisation statistics as a single ``.npy`` file."""
    stats = np.array([mu, sigma])
    np.save(s_path, stats)
if __name__ == '__main__':
    # Load the labelled data, reshape it and persist the (currently
    # disabled, i.e. zero) normalisation statistics for reuse at test time.
    X, y = read_raw_training_data('../dataset/train.csv')
    X, mu, sigma = preprocess_training_data(X)
    save_statistics('./statistics.npy', mu, sigma)
    # Build and train the CNN; the best checkpoint goes to baseline.hdf5.
    model = net(input_shape=IMAGE_SHAPE, output_shape=OUTPUT_CLASSES_NUM)
    model = compile_model(model)
    model.summary()
    model = fit_generator(model, X, y, epochs=50, batch_size=BATCH_SIZE, model_saving_path='./baseline.hdf5')
    # Predict on the test set and write the submission CSV.
    idx, t = read_raw_testing_data('../dataset/test.csv')
    t = preprocess_testing_data(t, mu, sigma)
    pred = predict(model, t, batch_size=BATCH_SIZE)
    write_prediction('baseline.csv', pred)
| [
"r06921038@ntu.edu.tw"
] | r06921038@ntu.edu.tw |
be5cdea3551cb340c36e10d3d17ba7a15f6f5d02 | 8c88eb1d81fd36172bc95099eed628a60f356aee | /dppractice/codechef.py | c4ec948fc54405301cd38fcb5bb399c7ebd2cacd | [] | no_license | Roshan84ya/data-structure-with-python | 2d6aa9baa1901740f152e02d6422f45ef7bd1c6f | 7ef8dea5691685a0e9c194651d123b655a3bf131 | refs/heads/main | 2022-12-26T21:48:25.027318 | 2020-10-04T04:13:06 | 2020-10-04T04:13:06 | 301,036,232 | 5 | 1 | null | 2020-10-14T22:59:58 | 2020-10-04T04:04:48 | Python | UTF-8 | Python | false | false | 1,562 | py | def findminsubsetsum(arr,row,column):
dp=[False]*row
for i in range(row):
dp[i]=[False]*column
dp[i][0]=True
for i in range(1,row):
for j in range(1,column):
if j<arr[i-1]:
dp[i][j]=dp[i-1][j]
else:
dp[i][j]=dp[i-1][j] or dp[i-1][j-arr[i-1]]
count=0
for i in dp[row-1][::-1]:
if i:
break
else:
count+=2
return count,dp
# Reads exponent k, then for each test case appears to split the values
# 1**k .. n**k into two sets with minimal sum difference, printing the
# difference and a 0/1 membership string.
k=int(input())
for _ in range(int(input())):
    n=int(input())
    # Candidate values: i**k for i in 1..n.
    arr=[i**k for i in range(1,n+1)]
    summ=sum(arr)
    kt=summ//2
    # st: distance from the half-sum derived from the dp table's last row;
    # dp: the boolean subset-sum table.
    st,dp=findminsubsetsum(arr,n+1,kt+1)
    if 2*kt==summ:
        print(st)
        at = (summ-st)//2
    elif 2*kt +1==summ:
        print(1+st)
        at = (summ-(1+st))//2
    i=n+1
    j=at
    a=[]
    d=dict()
    # NOTE(review): this loop appends *every* element to `a` and drives
    # `at` toward a negative value, so the backtracking below looks
    # suspect -- verify against the intended subset-sum reconstruction.
    for i in range(len(arr)):
        d[arr[i]]=i
        a.append(arr[i])
        at-=arr[i]
    j=at
    while i>=0 and j>=0:
        if dp[i-1][j]:
            i-=1
        else:
            a.append(arr[i-1])
            j=at-arr[i-1]
            at-=arr[i-1]
    # Mark the chosen indices with "1" in the output string.
    result = ["0"]*n
    for i in a:
        result[d[i]]="1"
    print("".join(result))
| [
"noreply@github.com"
] | Roshan84ya.noreply@github.com |
7dbb8959b233092833590760e034f6ebe7360014 | 1065a2782e4947b5bf14ec4536e4ad7addc7aec3 | /strategy/cryptoalpha/casubc.py | f4bb09a5cb12cc696d58f6af909445370b6e56b8 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Johannesduvenage/siis | 0bf6875d4a5f3638cadb01ed5541aab29ba1d77a | 57e537cf9b6a71c8ad0b3bb0759772d126496a17 | refs/heads/master | 2020-09-10T21:51:56.814014 | 2019-11-13T23:57:34 | 2019-11-13T23:57:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,900 | py | # @date 2018-08-24
# @author Frederic SCHERMA
# @license Copyright (c) 2018 Dream Overflow
# Crypto Alpha strategy, sub-strategy B.
from strategy.indicator import utils
from strategy.strategysignal import StrategySignal
from monitor.streamable import StreamMemberFloatSerie, StreamMemberSerie, StreamMemberFloatBarSerie, StreamMemberOhlcSerie
from .casub import CryptoAlphaStrategySub
import logging
logger = logging.getLogger('siis.strategy.cryptoalpha')
class CryptoAlphaStrategySubC(CryptoAlphaStrategySub):
    """
    Crypto Alpha strategy, sub-strategy C.

    Computes RSI / StochRSI / SMA-EMA / ATR / pivot-point indicators on
    each candle batch and emits trade signals, de-duplicating signals
    that share the same base candle.
    """
    def __init__(self, strategy_trader, params):
        super().__init__(strategy_trader, params)
        # RSI oversold/overbought thresholds from the strategy parameters.
        self.rsi_low = params['constants']['rsi_low']
        self.rsi_high = params['constants']['rsi_high']
    def process(self, timestamp):
        """Update price/volume series, run the signal logic and return a
        StrategySignal or None; duplicate signals are filtered out."""
        candles = self.get_candles()
        if len(candles) < self.depth:
            # not enought samples
            return
        last_timestamp = candles[-1].timestamp
        prices = self.price.compute(last_timestamp, candles)
        volumes = self.volume.compute(last_timestamp, candles)
        signal = self.process1(timestamp, last_timestamp, candles, prices, volumes)
        # avoid duplicates signals
        if signal and self.need_signal:
            # self.last_signal = signal
            if (self.last_signal and (signal.signal == self.last_signal.signal) and
                (signal.dir == self.last_signal.dir) and
                (signal.base_time() == self.last_signal.base_time())): # or (signal.ts - self.last_signal.ts) < (self.tf * 0.5):
                # same base time avoid multiple entries on the same candle
                signal = None
            else:
                # retains the last valid signal only if valid
                self.last_signal = signal
        self.complete(candles)
        return signal
    def process1(self, timestamp, last_timestamp, candles, prices, volumes):
        """Compute the indicator battery for the latest bar.

        NOTE(review): every branch only updates local flags and indicator
        state; ``signal`` is never set, so this currently always returns
        None -- confirm whether the signal-building logic was removed on
        purpose.
        """
        signal = None
        # volume sma, increase signal strength when volume increase over its SMA
        # volume_sma = utils.MM_n(self.depth-1, self.volume.volumes)
        rsi_30_70 = 0 # 1 <30, -1 >70
        rsi_40_60 = 0 # 1 if RSI in 40-60
        stochrsi_20_80 = 0 # 1 <20, -1 >80
        stochrsi_40_60 = 0 # 1 if stochRSI in 40-60
        volume_signal = 0
        ema_sma_cross = 0
        ema_sma_height = 0
        if self.rsi:
            self.rsi.compute(last_timestamp, prices)
            rsi = self.rsi.last
            # Flag oversold (+1) / overbought (-1) against configured thresholds.
            if self.rsi.last < self.rsi_low:
                rsi_30_70 = 1.0
            elif self.rsi.last > self.rsi_high:
                rsi_30_70 = -1.0
            if self.rsi.last > 0.4 and self.rsi.last < 0.6:
                rsi_40_60 = 1
        if self.stochrsi:
            self.stochrsi.compute(last_timestamp, prices)
            if self.stochrsi.last_k < 0.2:
                stochrsi_20_80 = 1.0
            elif self.stochrsi.last_k > 0.8:
                stochrsi_20_80 = -1.0
            if self.stochrsi.last_k > 0.4 and self.stochrsi.last_k < 0.6:
                stochrsi_40_60 = 1
        # if self.volume.last > volume_sma[-1]:
        #     volume_signal = 1
        # elif self.volume.last < volume_sma[-1]:
        #     volume_signal = -1
        if self.sma and self.ema:
            self.sma.compute(last_timestamp, prices)
            self.ema.compute(last_timestamp, prices)
            # ema over sma crossing
            ema_sma_cross = utils.cross((self.ema.prev, self.sma.prev), (self.ema.last, self.sma.last))
            if self.ema.last > self.sma.last:
                ema_sma_height = 1
            elif self.ema.last < self.sma.last:
                ema_sma_height = -1
        if self.atr:
            # ATR and pivot points are only refreshed on a closed candle.
            if self.last_closed:
                self.atr.compute(last_timestamp, self.price.high, self.price.low, self.price.close)
        if self.pivotpoint:
            if self.pivotpoint.compute_at_close and self.last_closed:
                self.pivotpoint.compute(last_timestamp, self.price.open, self.price.high, self.price.low, self.price.close)
        return signal
    def setup_streamer(self, streamer):
        """Register the chart series (panels 0-3) exposed to the monitor."""
        streamer.add_member(StreamMemberSerie('begin'))
        streamer.add_member(StreamMemberOhlcSerie('ohlc'))
        streamer.add_member(StreamMemberFloatSerie('price', 0))
        streamer.add_member(StreamMemberFloatBarSerie('volume', 1))
        streamer.add_member(StreamMemberFloatSerie('rsi-low', 2))
        streamer.add_member(StreamMemberFloatSerie('rsi-high', 2))
        streamer.add_member(StreamMemberFloatSerie('rsi', 2))
        streamer.add_member(StreamMemberFloatSerie('stochrsi-low', 3))
        streamer.add_member(StreamMemberFloatSerie('stochrsi-high', 3))
        streamer.add_member(StreamMemberFloatSerie('stochrsi-k', 3))
        streamer.add_member(StreamMemberFloatSerie('stochrsi-d', 3))
        streamer.add_member(StreamMemberFloatSerie('sma', 0))
        streamer.add_member(StreamMemberFloatSerie('ema', 0))
        streamer.add_member(StreamMemberFloatSerie('hma', 0))
        streamer.add_member(StreamMemberFloatSerie('vwma', 0))
        streamer.add_member(StreamMemberFloatSerie('perf', 3))
        streamer.add_member(StreamMemberSerie('end'))
        streamer.next_timestamp = self.next_timestamp
    def stream(self, streamer):
        """Push every bar produced since the streamer's last timestamp."""
        # Number of new bars to emit, capped by the stored price history.
        delta = min(int((self.next_timestamp - streamer.next_timestamp) / self.tf) + 1, len(self.price.prices))
        for i in range(-delta, 0, 1):
            ts = self.price.timestamp[i]
            streamer.member('begin').update(ts)
            streamer.member('ohlc').update((self.price.open[i], self.price.high[i], self.price.low[i], self.price.close[i]), ts)
            streamer.member('price').update(self.price.prices[i], ts)
            streamer.member('volume').update(self.volume.volumes[i], ts)
            streamer.member('rsi-low').update(self.rsi_low, ts)
            streamer.member('rsi-high').update(self.rsi_high, ts)
            streamer.member('rsi').update(self.rsi.rsis[i], ts)
            # streamer.member('stochrsi-low').update(20, ts)
            # streamer.member('stochrsi-high').update(80, ts)
            # streamer.member('stochrsi-k').update(self.stochrsi.stochrsis[i], ts)
            # streamer.member('stochrsi-d').update(self.stochrsi.stochrsis[i], ts)
            streamer.member('sma').update(self.sma.smas[i], ts)
            streamer.member('ema').update(self.ema.emas[i], ts)
            # streamer.member('hma').update(self.hma.hmas[i], ts)
            # streamer.member('vwma').update(self.vwma.vwmas[i], ts)
            streamer.member('perf').update(self.strategy_trader._stats['perf']*100, ts)
            streamer.member('end').update(ts)
            # push per frame
            streamer.push()
        streamer.next_timestamp = self.next_timestamp
| [
"frederic.scherma@gmail.com"
] | frederic.scherma@gmail.com |
c460a2b642fc7a313a2110c6c89ada18de78db9c | 92f87f8852045b0e3fb161f516c9e4c8931fd6ae | /src/server/oasisapi/data_files/migrations/0001_initial.py | a8fffad4f2aa30b3f486a2ef20e6fd08d5c1feec | [
"BSD-3-Clause"
] | permissive | OasisLMF/OasisPlatform | 5dc9d83924f06481ef58f2f2bcf0d36c7e1ebb2b | 4a301ec37e6c07af73abbbeaeb9e747a248a44e1 | refs/heads/main | 2023-08-14T20:28:32.951816 | 2023-08-03T12:02:08 | 2023-08-03T12:02:08 | 57,235,529 | 43 | 22 | BSD-3-Clause | 2023-09-13T12:11:26 | 2016-04-27T18:13:16 | Python | UTF-8 | Python | false | false | 1,733 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-15 10:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Initial migration for the data_files app.

    Creates the ComplexModelDataFile table with created/modified
    timestamps, name/description fields, a link to the creating user and
    an optional link to an uploaded RelatedFile.
    """
    initial = True
    dependencies = [
        ('files', '0002_auto_20190109_1715'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='ComplexModelDataFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('file_name', models.CharField(help_text='The name of the complex model input file that will be used at runtime.', max_length=255)),
                ('file_description', models.CharField(help_text='Type of data contained within the file.', max_length=255)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='complex_model_file', to=settings.AUTH_USER_MODEL)),
                ('data_file', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='data_file_complex_model_file', to='files.RelatedFile')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"daniel.evans@jbarisk.com"
] | daniel.evans@jbarisk.com |
901ed40757453f72482e04347a567f48a4452388 | b29acb2e230b3cf2f8be070850c34ed5d62dc80c | /Python/YPS/10/Sample1.py | ff99a7a4d11b7f13bd9fd07db5ef310bd2a798dc | [] | no_license | MasatakaShibataSS/lesson | be6e3557c52c6157b303be268822cad613a7e0f7 | 4f3f81ba0161b820410e2a481b63a999d0d4338c | refs/heads/master | 2020-06-17T13:42:08.383167 | 2019-11-11T07:23:14 | 2019-11-11T07:23:14 | 195,940,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | f = open("Sample.txt", "w")
f.write("こんにちは\n")
f.write("さようなら\n")
f.close()
| [
"masataka.shibata.ss@gmail.com"
] | masataka.shibata.ss@gmail.com |
111773a94a919d20fdbcc7a6f24eb9824d940be9 | e024fb3705970ac0b4d18ef39d9eb90856272039 | /python/learn_python_the_heard_way/ex14.py | 3f3347832ce3b0e3320827a880fc653254238c9f | [] | no_license | pengzhangdev/scripts | 4fc9c54b38aff587e832e5ac4360145caef0f7e1 | 66eec17ccd2e2774d48a14815bfd70e201efbaf5 | refs/heads/master | 2021-04-12T07:14:43.874821 | 2017-09-01T08:48:17 | 2017-09-01T08:48:17 | 20,473,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | #! /usr/bin/python
# "Learn Python the Hard Way" exercise 14 (Python 2 syntax): prompting
# and passing.  The user name is taken from the command line.
from sys import argv
# Bare print emits an empty line in Python 2.
print
script, user_name = argv
prompt = '> '
print "Hi %s. I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "What do you live %s?" % user_name
lives = raw_input(prompt)
print "What kind of computer do you have?"
computer = raw_input(prompt)
# %r prints the quoted repr of each answer.
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer)
| [
"werther0331@gmail.com"
] | werther0331@gmail.com |
67ae1842593a37877143fd006841397092c661e2 | 787c04c7bd8dd51dafc030d21e75891808df3dbf | /src/example.py | dd171e96187ec2072eb469b36902accf0366cac3 | [] | no_license | rasyadhs/random-json-generator | a7cd0670f5b986cb428815724c2fc0bb4bd8839d | cb4585771d0d4efdfe9f69cb40d7e25514bc4b0b | refs/heads/master | 2022-01-05T18:36:36.263799 | 2019-05-01T13:01:28 | 2019-05-01T13:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | #!/usr/bin/python3
# This is a helper file that helps you to generate random json data.
import multi_json_generator
import sys
import time
import argparse
import json_generator
def custom_method(main_object):
    """Post-processing hook applied to each generated JSON object.

    Currently a no-op; the commented example below shows how to expand a
    card_index template with the object's user_id.
    """
    # main_object["payment_methods"]["card_index"] = \
    #     main_object["payment_methods"]["card_index"].format(user_id=main_object["user_id"])
    pass
def generate_single_object(schema_file):
    """Generate and print one random JSON object for the given schema.

    Fix: the ``schema_file`` argument was previously ignored in favour of
    a hard-coded './example-schema.json' path, so the --schema option had
    no effect in single-object mode.
    """
    random_object = json_generator.JsonGenerator(schema_file,
                                                 [custom_method])
    print(random_object.get_json_data())
def generate_multi_objects(schema_file, file_folder_location, file_count, objects_per_file):
    """Generate ``file_count`` files of random objects and report where
    they were saved."""
    generator = multi_json_generator.MultiJsonGenerator(
        schema_file, file_folder_location, file_count, objects_per_file,
        [custom_method])
    generator.generate_random_data()
    print("\n\n================================================")
    print("Data is generated. All files are saved at", file_folder_location)
    print("================================================\n\n")
def args_generator():
    """Parse the command-line options controlling generation mode and size."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--type", default="s",
                            help="Single object or multiple[S/m]")
    arg_parser.add_argument("--schema", default="./example-schema.json",
                            help="Schema file path")
    arg_parser.add_argument("--folder", default="./data/",
                            help="File folder location for multi object generation")
    arg_parser.add_argument("--count", default=1, type=int,
                            help="Number of files to generate")
    arg_parser.add_argument("--objects", default=20, type=int,
                            help="Number of objects per file")
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = args_generator()
    # Default (or explicit 's'/'S') runs single-object mode; any other
    # value triggers multi-file generation and prints the elapsed time.
    if (not args.type or (args.type.lower() == 's')):
        # single object
        generate_single_object(args.schema)
    else:
        # multi objects
        start_time = time.time()
        generate_multi_objects(args.schema, args.folder, args.count, args.objects)
        print("\n\n", time.time() - start_time, "\n\n")
| [
"ankur3.kumar@paytmmall.com"
] | ankur3.kumar@paytmmall.com |
3fc5667310c4f116f9078ae8b07b622e7b91b154 | bbb5447fa5c39c050f23e8b62a91df052818570d | /modules/ibm_app_domain_shared_info.py | e47b4b30b53100306ee9d568fa4aca3a21e41555 | [] | no_license | Trickster4/ansible-collection-ibm | b28b79174b1f1b3f0dcba09e4888396e16b945bd | 14455103b7cb4845e6cfa3803cce7b2a0546af03 | refs/heads/master | 2021-01-03T22:33:46.082228 | 2020-02-09T19:49:09 | 2020-02-11T04:55:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,417 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ibm_app_domain_shared_info
short_description: Retrieve IBM Cloud 'ibm_app_domain_shared' resource
version_added: "2.8"
description:
- Retrieve an IBM Cloud 'ibm_app_domain_shared' resource
requirements:
- IBM-Cloud terraform-provider-ibm v1.2.0
- Terraform v0.12.20
options:
name:
description:
- The name of the shared domain
required: True
type: str
ibmcloud_api_key:
description:
- The API Key used for authentification. This can also be provided
via the environment variable 'IC_API_KEY'.
required: True
ibmcloud_region:
description:
- Denotes which IBM Cloud region to connect to
default: us-south
required: False
author:
- Jay Carman (@jaywcarman)
'''
# Top level parameter keys required by Terraform module
TL_REQUIRED_PARAMETERS = [
    ('name', 'str'),
]
# All top level parameter keys supported by Terraform module
TL_ALL_PARAMETERS = [
    'name',
]
# define available arguments/parameters a user can pass to the module
from ansible.module_utils.basic import env_fallback
module_args = dict(
    # Name of the shared domain to look up (required).
    name=dict(
        required=True,
        type='str'),
    # API key is masked in logs (no_log) and may come from IC_API_KEY.
    ibmcloud_api_key=dict(
        type='str',
        no_log=True,
        fallback=(env_fallback, ['IC_API_KEY']),
        required=True),
    # Target region; may come from IC_REGION, defaults to us-south.
    ibmcloud_region=dict(
        type='str',
        fallback=(env_fallback, ['IC_REGION']),
        default='us-south')
)
def run_module():
    """Instantiate the Ansible module and delegate to the ibmcloud Terraform runner.

    Exits the process via module.exit_json on success or module.fail_json with a
    parsed Terraform stderr message on a non-zero return code.
    """
    # Imported lazily so the module file can be inspected without Ansible installed.
    from ansible.module_utils.basic import AnsibleModule
    import ansible.module_utils.ibmcloud as ibmcloud

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
    )

    tf_result = ibmcloud.ibmcloud_terraform(
        resource_type='ibm_app_domain_shared',
        tf_type='data',
        parameters=module.params,
        ibm_provider_version='1.2.0',
        tl_required_params=TL_REQUIRED_PARAMETERS,
        tl_all_params=TL_ALL_PARAMETERS)

    if tf_result['rc'] > 0:
        # Terraform failed: surface its stderr as the Ansible failure message.
        module.fail_json(
            msg=ibmcloud.Terraform.parse_stderr(tf_result['stderr']), **tf_result)

    module.exit_json(**tf_result)
def main():
    """Module entry point invoked by the __main__ guard; delegates to run_module()."""
    run_module()
if __name__ == '__main__':
main() | [
"amallak1@in.ibm.com"
] | amallak1@in.ibm.com |
c9fcd284c3525e263a2c0ac08e2878e5632f28cc | bc09a1ecdd1ad75aef569ee09ce6f265777936bc | /accounts/views.py | 789b59d9eb80aa6711f91fab50c7e679823b0ce2 | [] | no_license | Erik-FN/Python_Real_State | 9cd708189e722fdea833b6e4e5e538d034e90c7e | afde2cdca623efc07fb00e359b5bd8b14e02ff19 | refs/heads/master | 2022-10-03T01:12:57.021861 | 2020-06-02T20:19:49 | 2020-06-02T20:19:49 | 254,611,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django. contrib.auth.models import User
from contacts.models import Contact
# Create your views here.
def register(request):
    """Handle the sign-up form.

    GET renders the registration template; POST validates the submitted
    values, creates the user, and redirects with a flash message.
    """
    if request.method != 'POST':
        return render(request, 'accounts/register.html')

    # Submitted form values
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    username = request.POST['username']
    email = request.POST['email']
    password = request.POST['password']
    password_confirmation = request.POST['password2']

    # Guard clauses: bail out on the first validation failure.
    if password != password_confirmation:
        messages.error(request, 'passwords do not match')
        return redirect('register')

    if User.objects.filter(username=username).exists():
        messages.error(request, 'Username already exists')
        return redirect('register')

    if User.objects.filter(email=email).exists():
        messages.error(request, 'Email already exists')
        return redirect('register')

    # All validations passed: create and persist the account.
    user = User.objects.create_user(username=username, password=password, email=email,
                                    first_name=first_name, last_name=last_name)
    user.save()
    messages.success(request, 'User registered')
    return redirect('index')
def login(request):
    """Authenticate a user from the login form.

    GET renders the login template; POST attempts authentication and
    redirects to the dashboard on success or back to the form on failure.
    """
    if request.method == 'POST':
        user = auth.authenticate(
            username=request.POST['username'],
            password=request.POST['password'],
        )
        if user is None:
            messages.error(request, 'invalid credentials')
            return redirect('login')
        auth.login(request, user)
        messages.success(request, 'Logged in')
        return redirect('dashboard')
    return render(request, 'accounts/login.html')
def dashboard(request):
    """Render the logged-in user's contact requests, newest first."""
    user_contacts = Contact.objects.order_by('-contact_date').filter(user_id=request.user.id)
    return render(request, 'accounts/dashboard.html', {'contacts': user_contacts})
def logout(request):
    """Log the user out (POST only, so a crafted GET cannot end the session)
    and redirect to the home page.

    Fixes: the original returned None for non-POST requests (Django raises
    "view didn't return an HttpResponse"), and the flash message had a typo
    ('Loggin out').
    """
    if request.method == 'POST':
        auth.logout(request)
        messages.success(request, 'Logged out')
    # Always respond with a redirect, even for non-POST requests.
    return redirect('index')
"erik.fnoble@gmail.com"
] | erik.fnoble@gmail.com |
4da7fd4f09f7351dad66cba40cd0625849491560 | 1efa59e25582bef2338cec96f0f80caad9ae9dbb | /swagger_server/models/schedule_option_section.py | 17a4b456e4de1139b7d80f253d5491b93dfd34bc | [
"MIT"
] | permissive | pablokvitca/cs3200-project-backend | ee1cd2d1da0dd54e3106c94dc267e7b257dab0cd | aec890bb4fb2b532cb8099016ebbec7b273bb266 | refs/heads/master | 2021-10-27T21:14:10.818409 | 2019-04-19T21:06:05 | 2019-04-19T21:06:05 | 178,476,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ScheduleOptionSection(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually — regenerate from the OpenAPI spec instead.
    Represents one (schedule_id, crn) pairing: a course section (CRN) that is
    part of a schedule option.
    """
    def __init__(self, schedule_id: int=None, crn: int=None):  # noqa: E501
        """ScheduleOptionSection - a model defined in Swagger

        :param schedule_id: The schedule_id of this ScheduleOptionSection.  # noqa: E501
        :type schedule_id: int
        :param crn: The crn of this ScheduleOptionSection.  # noqa: E501
        :type crn: int
        """
        # Maps attribute name -> declared swagger type (used by the serializer).
        self.swagger_types = {
            'schedule_id': int,
            'crn': int
        }
        # Maps attribute name -> JSON key name.
        self.attribute_map = {
            'schedule_id': 'schedule_id',
            'crn': 'crn'
        }
        self._schedule_id = schedule_id
        self._crn = crn
    @classmethod
    def from_dict(cls, dikt) -> 'ScheduleOptionSection':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The ScheduleOptionSection of this ScheduleOptionSection.  # noqa: E501
        :rtype: ScheduleOptionSection
        """
        return util.deserialize_model(dikt, cls)
    @property
    def schedule_id(self) -> int:
        """Gets the schedule_id of this ScheduleOptionSection.

        :return: The schedule_id of this ScheduleOptionSection.
        :rtype: int
        """
        return self._schedule_id
    @schedule_id.setter
    def schedule_id(self, schedule_id: int):
        """Sets the schedule_id of this ScheduleOptionSection.

        Rejects negative ids; None is allowed (field not set).

        :param schedule_id: The schedule_id of this ScheduleOptionSection.
        :type schedule_id: int
        """
        if schedule_id is not None and schedule_id < 0:  # noqa: E501
            raise ValueError("Invalid value for `schedule_id`, must be a value greater than or equal to `0`")  # noqa: E501
        self._schedule_id = schedule_id
    @property
    def crn(self) -> int:
        """Gets the crn of this ScheduleOptionSection.

        :return: The crn of this ScheduleOptionSection.
        :rtype: int
        """
        return self._crn
    @crn.setter
    def crn(self, crn: int):
        """Sets the crn of this ScheduleOptionSection.

        CRNs are constrained to at most five digits (0..99999); None is allowed.

        :param crn: The crn of this ScheduleOptionSection.
        :type crn: int
        """
        if crn is not None and crn > 99999:  # noqa: E501
            raise ValueError("Invalid value for `crn`, must be a value less than or equal to `99999`")  # noqa: E501
        if crn is not None and crn < 0:  # noqa: E501
            raise ValueError("Invalid value for `crn`, must be a value greater than or equal to `0`")  # noqa: E501
        self._crn = crn
| [
"pablokvitca@gmail.com"
] | pablokvitca@gmail.com |
ba6aeae64431208cbabea1456729c92c602f9921 | 386d5d4f8f102e701d02b326cd066f520e3dff9f | /ProjectApplication/project_core/migrations/0163_add_account_number.py | b5f637bb6f9f02103a1375f7fd3e3ed9b338b0fa | [
"MIT"
] | permissive | Swiss-Polar-Institute/project-application | ae2561c3ae2c1d5412d165d959ce2e5886135e0a | 7dc4a9f7e0f8d28c89977b85f99bc5e35ea77d43 | refs/heads/master | 2023-08-31T04:01:23.492272 | 2023-08-25T14:33:02 | 2023-08-25T14:33:02 | 206,330,401 | 7 | 5 | MIT | 2023-09-13T08:03:53 | 2019-09-04T13:49:39 | Python | UTF-8 | Python | false | false | 2,043 | py | # Generated by Django 3.2 on 2021-04-23 10:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds FinancialKey.account_number (unique, nullable accounting code) and
    refreshes help_text metadata on several existing fields. Auto-generated by
    Django; the AlterField operations only touch field options, not data.
    """

    dependencies = [
        ('project_core', '0162_callpartfile_proposal_keywords_null'),
    ]

    operations = [
        # New column: nullable so existing rows stay valid, unique once set.
        migrations.AddField(
            model_name='financialkey',
            name='account_number',
            field=models.IntegerField(help_text='Code use by the accounting department', null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='call',
            name='scientific_clusters_question',
            field=models.BooleanField(default=False, help_text='True if the Research Cluster question is enabled'),
        ),
        migrations.AlterField(
            model_name='historicalcall',
            name='scientific_clusters_question',
            field=models.BooleanField(default=False, help_text='True if the Research Cluster question is enabled'),
        ),
        migrations.AlterField(
            model_name='proposalscientificcluster',
            name='keywords',
            field=models.ManyToManyField(help_text='Keywords that describe the research cluster', to='project_core.Keyword'),
        ),
        migrations.AlterField(
            model_name='proposalscientificcluster',
            name='proposal',
            field=models.ForeignKey(help_text='Proposal that this Research Cluster refers to', on_delete=django.db.models.deletion.PROTECT, to='project_core.proposal'),
        ),
        migrations.AlterField(
            model_name='proposalscientificcluster',
            name='sub_pi',
            field=models.ForeignKey(help_text='Main person of this research cluster', on_delete=django.db.models.deletion.PROTECT, to='project_core.personposition'),
        ),
        migrations.AlterField(
            model_name='proposalscientificcluster',
            name='title',
            field=models.CharField(help_text='Title of the research cluster', max_length=500),
        ),
    ]
| [
"carles@pina.cat"
] | carles@pina.cat |
0b57e8f2b54b12010a3ec4c8bbbf67d4f7f346d4 | 8cf8fe0e08b7a0aafb6058b158f3218751a17fc2 | /retinanet/model/msdnet.py | 001d1472d704cc5b4a2b2f8f92ffb4e606927d8a | [] | no_license | zl1994/DLSFPN | 09aa4d9ce8bfd7dc43a3ee215d8edb1d4cd86326 | 50f77c2d27c79c6bed8b24bb96e5e7a11d1549c3 | refs/heads/main | 2023-09-03T10:12:48.817407 | 2021-11-09T03:07:00 | 2021-11-09T03:07:00 | 426,065,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,586 | py | import torch.nn as nn
import torch
import math
from mmcv.runner import load_checkpoint
import logging
from mmcv.utils import get_logger
# c3 c4 c5
feature_extract_layer = [ [4, 7, 10, 14, 18], [5, 10, 15, 21, 27], [7, 14, 21, 28, 35] ]
# fpn_sizes = [ [192, 448, 576], [288, 768, 640], [384, 704, 608], [320, 608, 1056], [448, 992, 976] ]
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the project-wide 'mmdet' logger via mmcv's get_logger.

    :param log_file: optional path; when given, log records are also written there.
    :param log_level: logging level for the logger (default logging.INFO).
    """
    logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
    return logger
class ConvBasic(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU(inplace): the elementary conv block of MSDNet."""

    def __init__(self, nIn, nOut, kernel=3, stride=1,
                 padding=1):
        super(ConvBasic, self).__init__()
        stages = [
            nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride,
                      padding=padding, bias=False),
            nn.BatchNorm2d(nOut),
            nn.ReLU(True),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        out = self.net(x)
        return out
class ConvBN(nn.Module):
    """MSDNet conv unit: optional 1x1 bottleneck, then a 3x3 conv -> BN -> ReLU.

    :param nIn: input channel count
    :param nOut: output channel count
    :param type: 'normal' (stride 1, same resolution) or 'down' (stride 2, halves it)
    :param bottleneck: when True, prepend a 1x1 conv reducing channels to
        min(nIn, bnWidth * nOut) before the 3x3 conv
    :param bnWidth: bottleneck width factor
    :raises ValueError: on an unrecognised type string
    """

    def __init__(self, nIn, nOut, type: str, bottleneck,
                 bnWidth):
        super(ConvBN, self).__init__()
        if type == 'normal':
            stride = 1
        elif type == 'down':
            stride = 2
        else:
            raise ValueError

        nInner = nIn
        stages = []
        if bottleneck is True:
            # 1x1 bottleneck shrinks the channel count fed to the 3x3 conv.
            nInner = min(nInner, bnWidth * nOut)
            stages += [
                nn.Conv2d(nIn, nInner, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(nInner),
                nn.ReLU(True),
            ]
        stages += [
            nn.Conv2d(nInner, nOut, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(nOut),
            nn.ReLU(True),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
class ConvDownNormal(nn.Module):
    """Dense two-scale fusion: concatenate the passthrough of the current scale
    with a strided ('down') conv of the finer scale and a 'normal' conv of the
    current scale. Each conv produces nOut // 2 channels.

    forward input: [finer_scale_tensor, current_scale_tensor]
    """

    def __init__(self, nIn1, nIn2, nOut, bottleneck, bnWidth1, bnWidth2):
        super(ConvDownNormal, self).__init__()
        self.conv_down = ConvBN(nIn1, nOut // 2, 'down',
                                bottleneck, bnWidth1)
        self.conv_normal = ConvBN(nIn2, nOut // 2, 'normal',
                                  bottleneck, bnWidth2)

    def forward(self, x):
        finer, current = x[0], x[1]
        pieces = [current,
                  self.conv_down(finer),
                  self.conv_normal(current)]
        return torch.cat(pieces, dim=1)
class ConvNormal(nn.Module):
    """Dense single-scale step: concatenate the input with a 'normal' ConvBN of it.

    forward accepts either a tensor or a one-element list for compatibility
    with the multi-scale layers.
    """

    def __init__(self, nIn, nOut, bottleneck, bnWidth):
        super(ConvNormal, self).__init__()
        self.conv_normal = ConvBN(nIn, nOut, 'normal',
                                  bottleneck, bnWidth)

    def forward(self, x):
        if not isinstance(x, list):
            x = [x]
        current = x[0]
        return torch.cat([current, self.conv_normal(current)], dim=1)
class ParallelModule(nn.Module):
    """Apply the i-th submodule to the i-th input tensor (Lua Torch ParallelTable).

    forward input/output: a 2-tuple (list of tensors, feature list); the
    feature list is passed through untouched.
    """

    def __init__(self, parallel_modules):
        super(ParallelModule, self).__init__()
        self.m = nn.ModuleList(parallel_modules)

    def forward(self, x):
        tensors, feature = x[0], x[1]
        outputs = [self.m[idx](tensors[idx]) for idx in range(len(tensors))]
        return tuple([outputs, feature])
class MSDNFirstLayer(nn.Module):
    """Build the initial multi-scale feature pyramid.

    Scale 0 is a 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool (1/4
    resolution); every further scale halves the resolution with a stride-2
    ConvBasic. Channel width at scale i is nOut * grFactor[i].
    """

    def __init__(self, nIn, nOut, grFactor, nScales):
        super(MSDNFirstLayer, self).__init__()
        self.layers = nn.ModuleList()
        stem = nn.Sequential(
            nn.Conv2d(nIn, nOut * grFactor[0], 7, 2, 3),
            nn.BatchNorm2d(nOut * grFactor[0]),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1))
        self.layers.append(stem)
        prev_width = nOut * grFactor[0]
        for scale in range(1, nScales):
            width = nOut * grFactor[scale]
            self.layers.append(ConvBasic(prev_width, width,
                                         kernel=3, stride=2, padding=1))
            prev_width = width

    def forward(self, x):
        x, feature = x[0], x[1]
        scales = []
        for layer in self.layers:
            x = layer(x)
            scales.append(x)
        return tuple([scales, feature])
class MSDNLayer(nn.Module):
    """One dense multi-scale layer of MSDNet.

    Consumes `inScales` scales and produces `outScales` scales (outScales <=
    inScales; when smaller, the finest scale is discarded). The coarsest-scale
    outputs are additionally appended to the shared `feature` accumulator when
    this layer's index appears in the module-level `feature_extract_layer`
    table (used to tap c3/c4/c5-style features for an FPN).

    :param nIn: base input channel count (scaled per-scale by grFactor)
    :param nOut: channel growth added at this layer (growthRate)
    :param nScales: total number of scales in the network
    :param grFactor: per-scale channel growth multipliers
    :param bnFactor: per-scale bottleneck width factors
    :param bottleneck: whether ConvBN units use a 1x1 bottleneck
    :param inScales / outScales: scales consumed / produced (default nScales)
    :param n_layer_curr: 1-based index of this layer in the whole network
    """
    def __init__(self, nIn, nOut, nScales, grFactor, bnFactor, bottleneck, inScales=None, outScales=None, n_layer_curr=-1):
        super(MSDNLayer, self).__init__()
        self.nIn = nIn
        self.nOut = nOut
        self.inScales = inScales if inScales is not None else nScales
        self.outScales = outScales if outScales is not None else nScales
        self.nScales = nScales
        # discard > 0 means the finest input scale is dropped at this layer.
        self.discard = self.inScales - self.outScales
        # offset: index of the finest *kept* scale within the grFactor/bnFactor tables.
        self.offset = self.nScales - self.outScales
        self.layers = nn.ModuleList()
        self.n_layer_curr = n_layer_curr
        if self.discard > 0:
            # Finest kept scale fuses the (discarded) finer scale via a 'down' conv.
            nIn1 = nIn * grFactor[self.offset - 1]
            nIn2 = nIn * grFactor[self.offset]
            _nOut = nOut * grFactor[self.offset]
            self.layers.append(ConvDownNormal(nIn1, nIn2, _nOut, bottleneck,
                                              bnFactor[self.offset - 1],
                                              bnFactor[self.offset]))
        else:
            # No scale dropped: the finest scale only convolves itself.
            self.layers.append(ConvNormal(nIn * grFactor[self.offset],
                                          nOut * grFactor[self.offset],
                                          bottleneck,
                                          bnFactor[self.offset]))
        # Every coarser scale fuses itself with the next finer scale.
        for i in range(self.offset + 1, self.nScales):
            nIn1 = nIn * grFactor[i - 1]
            nIn2 = nIn * grFactor[i]
            _nOut = nOut * grFactor[i]
            self.layers.append(ConvDownNormal(nIn1, nIn2, _nOut, bottleneck,
                                              bnFactor[i - 1],
                                              bnFactor[i]))
    def forward(self, x):
        x, feature = x[0], x[1]
        # Pair each output scale with its inputs: [finer, current] (or just
        # [current] for the finest scale when nothing is discarded).
        if self.discard > 0:
            inp = []
            for i in range(1, self.outScales + 1):
                inp.append([x[i - 1], x[i]])
        else:
            inp = [[x[0]]]
            for i in range(1, self.outScales):
                inp.append([x[i - 1], x[i]])
        res = []
        for i in range(self.outScales):
            res.append(self.layers[i](inp[i]))
        # Tap features for the FPN: res[i-3] with i in 0..2 picks the three
        # coarsest scales via negative indexing (res[-3], res[-2], res[-1]).
        # NOTE(review): assumes len(feature_extract_layer) == 3 and
        # outScales >= 3 whenever a tap fires — confirm against the schedule.
        for i in range(len(feature_extract_layer)):
            if self.n_layer_curr in feature_extract_layer[i]:
                feature[feature_extract_layer[i].index(self.n_layer_curr)].append(res[i-3])
        return tuple([res, feature])
class MSDNet(nn.Module):
    """Multi-Scale Dense Network backbone (MSDNet) used as a feature extractor.

    The network is a sequence of `nBlocks` blocks, each a stack of MSDNLayers;
    the number of maintained scales shrinks over depth according to `prune`.
    forward() returns the `feature` accumulator filled by the MSDNLayers at
    the indices listed in the module-level `feature_extract_layer` table.
    """
    def __init__(self, nBlocks = 5, in_channels=3, stem_channels = 32, base = 4, step=4, stepmode = "even",
                 growthRate=16, grFactor="1-2-4-4", bnFactor="1-2-4-4", prune="max", reduction=0.5, bottleneck = True):
        super(MSDNet, self).__init__()
        self.blocks = nn.ModuleList()
        # NOTE(review): self.classifier is created but never populated/used here.
        self.classifier = nn.ModuleList()
        self.nBlocks = nBlocks  # nBlocks = 5 on ImageNet
        self.in_channels = in_channels
        # NOTE(review): this overwrites the `base` argument with `step`, so the
        # `base` parameter has no effect — confirm whether that is intended.
        base = step
        self.steps = [base]  # default: step 4, stepmode even, base 4
        # Factor strings like "1-2-4-4" become per-scale integer multipliers.
        self.grFactor = list(map(int, grFactor.split('-')))
        self.bnFactor = list(map(int, bnFactor.split('-')))
        self.nScales = len(self.grFactor)
        self.prune = prune
        self.reduction = reduction
        self.bottleneck = bottleneck
        self.growthRate = growthRate
        n_layers_all, n_layer_curr = base, 0
        for i in range(1, self.nBlocks):
            self.steps.append(step if stepmode == 'even'  # steps [4, 4, 4, 4, 4]
                              else step * i + 1)
            n_layers_all += self.steps[-1]  # n_layers_all 20
        # n_layers_cur 0
        print("building network of steps: ")
        print(self.steps, n_layers_all)
        nIn = stem_channels  # 32
        for i in range(self.nBlocks):
            print(' ********************** Block {} '
                  ' **********************'.format(i + 1))
            m, nIn = \
                self._build_block(nIn, self.steps[i],
                                  n_layers_all, n_layer_curr)
            self.blocks.append(m)
            n_layer_curr += self.steps[i]
    def init_weights(self, pretrained):
        """Load weights from a checkpoint path, or Kaiming/constant-init from scratch."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.blocks:
                if hasattr(m, '__iter__'):
                    for _m in m:
                        self._init_weights(_m)
                else:
                    self._init_weights(m)
    def _init_weights(self, m):
        """Per-module init: Kaiming-normal convs, unit BN, zero biases."""
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.bias.data.zero_()
    def _build_block(self, nIn, step, n_layer_all, n_layer_curr):
        """Build one block of `step` MSDNLayers (plus the stem for the first block).

        Returns (nn.Sequential, output channel base). Depending on `prune`,
        inserts channel-reducing transition layers when the scale count drops.
        """
        layers = [MSDNFirstLayer(self.in_channels, nIn, self.grFactor, self.nScales)] \
            if n_layer_curr == 0 else []
        for i in range(step):
            n_layer_curr += 1
            if self.prune == 'min':
                inScales = min(self.nScales, n_layer_all - n_layer_curr + 2)
                outScales = min(self.nScales, n_layer_all - n_layer_curr + 1)
            elif self.prune == 'max':
                # Depth is split into nScales equal intervals; one scale is
                # dropped at each interval boundary.
                interval = math.ceil(1.0 * n_layer_all / self.nScales)  # 5
                inScales = self.nScales - math.floor(1.0 * (max(0, n_layer_curr - 2)) / interval)
                outScales = self.nScales - math.floor(1.0 * (n_layer_curr - 1) / interval)
            else:
                raise ValueError
            layers.append(MSDNLayer(nIn, self.growthRate, self.nScales,
                                    self.grFactor, self.bnFactor, self.bottleneck, inScales, outScales, n_layer_curr))
            print('|\t\tinScales {} outScales {} inChannels {} outChannels {}\t\t|'.format(inScales, outScales, nIn,
                                                                                           self.growthRate))
            nIn += self.growthRate
            if self.prune == 'max' and inScales > outScales and \
                    self.reduction > 0:
                # Scale count just dropped: compress channels by `reduction`.
                offset = self.nScales - outScales
                layers.append(
                    self._build_transition(nIn, math.floor(1.0 * self.reduction * nIn),
                                           outScales, offset))
                _t = nIn
                nIn = math.floor(1.0 * self.reduction * nIn)
                print('|\t\tTransition layer inserted! (max), inChannels {}, outChannels {}\t|'.format(_t, math.floor(
                    1.0 * self.reduction * _t)))
            elif self.prune == 'min' and self.reduction > 0 and \
                    ((n_layer_curr == math.floor(1.0 * n_layer_all / 3)) or
                     n_layer_curr == math.floor(2.0 * n_layer_all / 3)):
                # 'min' pruning compresses at 1/3 and 2/3 of the total depth.
                offset = self.nScales - outScales
                layers.append(self._build_transition(nIn, math.floor(1.0 * self.reduction * nIn),
                                                     outScales, offset))
                nIn = math.floor(1.0 * self.reduction * nIn)
                print('|\t\tTransition layer inserted! (min)\t|')
            print("")
        return nn.Sequential(*layers), nIn
    def _build_transition(self, nIn, nOut, outScales, offset):
        """1x1 convs (one per surviving scale) that shrink channels nIn -> nOut."""
        net = []
        for i in range(outScales):
            net.append(ConvBasic(nIn * self.grFactor[offset + i],
                                 nOut * self.grFactor[offset + i],
                                 kernel=1, stride=1, padding=0))
        return ParallelModule(net)
    def forward(self, x):
        """Run all blocks; return the list of tapped multi-level feature lists."""
        x = tuple([x, [[], [], [], [], []]])
        for i in range(self.nBlocks):
            x = self.blocks[i](x)
        feature = x[1]
        return feature
def msdnet(nBlocks, step, pretrained=None, **kwargs):
    """Factory: build an MSDNet backbone and initialise (or load) its weights.

    :param pretrained: checkpoint path to load, or None for random init.
    """
    net = MSDNet(nBlocks=nBlocks, step=step, **kwargs)
    net.init_weights(pretrained)
    return net
| [
"noreply@github.com"
] | zl1994.noreply@github.com |
31274141d59b98537ab248800a734a0c04d967d3 | 34ee52d56f37fd9d56e7980e2ffe3dfccfc03ad7 | /07 - HTTP Request/passport_update_password.py | 827dd35adc3737d43f5344c817befacf6f292bc7 | [] | no_license | daydreamboy/HelloPythonScripts | 8206f19988f7a2ecb4e34c18e74f89ce48959ba3 | ddd65ffa513f984c43010f5a90aa8dab825acba3 | refs/heads/master | 2022-04-15T18:36:34.211981 | 2020-02-05T13:26:10 | 2020-02-05T13:26:10 | 108,149,240 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Usage:
#
#
# Example:
# '/passport/user/resetPassword'
#
from PassportRequest import *
import sys
import time
def main():
# parser = argparse.ArgumentParser()
# parser.add_argument('--path', default='/', type=str, nargs=1, help='the path of default test domain')
# parser.add_argument('--params', type=dict, nargs='?', help='the parameter of http request')
#
# args = parser.parse_args()
cell = 13700002011
while cell <= 13700002222:
print cell
data = passport_api_request('/passport/user/resetPassword', {'cell': str(cell), 'password': '123456'}, debug=True)
# print data, cell
cell += 1
time.sleep(1)
if __name__ == '__main__':
sys.exit(main())
| [
"wesley4chen@gmail.com"
] | wesley4chen@gmail.com |
8f036d76f8a2b93c9255eac63f79f22c2a7bd076 | b50b0f73896c8fca9029ac775caf6357c27ee209 | /14.py | 058b9908cb358be99a47e72cda1927e73d42a665 | [] | no_license | meysam81/Euler-project | 05e12b485fd313e3445d9c78400c9e2dbad5b47d | a8a80ed1cecc062780c83821031718b19e5e5452 | refs/heads/master | 2021-05-04T20:31:42.548004 | 2020-05-06T16:52:35 | 2020-05-06T16:52:35 | 119,821,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | def collatzProblem(n):
#result = 0
#print(n)
#if(n == 1):
#return n
if(n % 2 == 0):
return(int(n / 2))
else:
return(int(3 * n + 1))
#return result
# Project Euler 14: among starting numbers below one million, find the one
# producing the longest Collatz chain, and print it.
longest = 0     # longest chain length (step count) seen so far
best_start = 0  # starting number that produced it
for start in range(2, 10**6):
    steps = 0
    n = start
    while n != 1:
        steps += 1
        if steps > longest:
            longest = steps
            best_start = start
        n = collatzProblem(n)
print(best_start)
"MeysamAzad81@yahoo.com"
] | MeysamAzad81@yahoo.com |
ee67b8bc83d01f5f2c77dc3c9bbc160a0985cd0e | 37a34a0909ff18e4392a8b7cec89203d8ce3bca0 | /server.py | f97decb97ff34efba8a3029f66115241399dc992 | [] | no_license | chongchuanbing/api_demo | 906f87a1857482c8c9f9f3764f2ed166d68c81d5 | 64e0091a33893ec92ea07cec68cb69e30093619b | refs/heads/master | 2020-08-13T20:24:48.383223 | 2019-03-07T08:47:35 | 2019-03-07T08:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import os
import time
from flask import request, render_template
from flask import send_from_directory, redirect
from flask_cors import CORS
from app import init_app_br
from app.api.api_response import get_json_data
from app.api.api_base import get_docs
from app.app import create_app
from app.config import in_product
from app.utils import logger, cache_utils
from db_base import db
app = create_app()
CORS(app, supports_credentials=True)
# doc.init(app)
logger.init(app)
db.init_app(app)
init_app_br(app)
@app.route('/test/cache', methods=['GET', 'POST', 'DELETE', 'PUT'])
def cache_test():
    """Demo endpoint: serve a timestamp string cached for five minutes."""
    cached = cache_utils.get('testCache')
    if cached:
        return cached
    # Cache miss: build the payload and store it with a 5-minute TTL.
    cached = 'test page, curr time = ' + str(time.ctime())
    cache_utils.save('testCache', cached, timeout=5 * 60)
    return cached
@app.before_request
def call_before_request():
    """Flask before-request hook: log each request's path and JSON payload.

    Outside production the info is also echoed to stdout; CORS preflight
    (OPTIONS) requests are excluded from the API log.
    """
    if not in_product():
        print('Request path: {}, params: {}'.format(request.path, get_json_data()))
        # print('request.headers : ', request.headers)
    if request.method != 'OPTIONS':
        logger.api_logger.info('Request path: %s, params: %s', request.path, get_json_data())
@app.route('/api-json', methods=['GET'])
def api_doc_json():
    """Serve the machine-readable API documentation as JSON."""
    return get_docs()
@app.route('/api', methods=['GET'])
def api_doc():
    """Render the human-readable API documentation page."""
    return render_template('api_doc.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=12345)
| [
"496662960@qq.com"
] | 496662960@qq.com |
01f2b39e906fa6896ddad81b11c800af607781d7 | 652121d51e6ff25aa5b1ad6df2be7eb341683c35 | /examples/mouse_and_key_modifiers.py | 6d28b7351cebc4f69acb343c35eb8233fa6877a3 | [] | no_license | jgalaz84/eman2 | be93624f1c261048170b85416e517e5813992501 | 6d3a1249ed590bbc92e25fb0fc319e3ce17deb65 | refs/heads/master | 2020-04-25T18:15:55.870663 | 2015-06-05T20:21:44 | 2015-06-05T20:21:44 | 36,952,784 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | from PyQt4 import QtCore, QtGui
class MouseAndKeyModifiers(QtGui.QWidget):
    """Small PyQt4 demo widget that reports which mouse button was pressed and
    which keyboard modifiers were held, into a read-only text browser."""
    def __init__(self, parent = None):
        QtGui.QWidget.__init__(self, parent)
        layout = QtGui.QVBoxLayout()
        label = QtGui.QLabel("Click here to test mouse buttons: Left, Right, Middle\nand keyboard modifiers: Ctrl, Alt, Shift, and Command (a Mac key)")
        self.text_browser = QtGui.QTextBrowser()
        layout.addWidget(label)
        layout.addWidget(self.text_browser)
        self.setLayout(layout)
    def mousePressEvent(self, event):
        """Qt event handler: list every pressed button and active modifier."""
        self.text_browser.clear()
        self.text_browser.append("Mouse press info...")
        if event.buttons()&QtCore.Qt.LeftButton:
            self.text_browser.append("Left Button")
        if event.buttons()&QtCore.Qt.MidButton:
            self.text_browser.append("Middle Button")
        if event.buttons()&QtCore.Qt.RightButton:
            self.text_browser.append("Right Button")
        if event.modifiers()&QtCore.Qt.ShiftModifier:
            self.text_browser.append("Shift Modifier")
        if event.modifiers()&QtCore.Qt.ControlModifier:
            # Apple/Command key on a Mac... NOT CONTROL KEY ON A MAC!
            self.text_browser.append("Control Modifier")
        if event.modifiers()&QtCore.Qt.AltModifier:
            # Alt/Option key on a Mac. An EMAN convention is that Alt+Left click works like a middle click.
            self.text_browser.append("Alt Modifier")
        if event.modifiers()&QtCore.Qt.MetaModifier:
            # Control Key on a Mac. A Mac convention is that Ctrl+Left Click works like a right click.
            self.text_browser.append("Meta Modifier")
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
window = MouseAndKeyModifiers()
window.show()
sys.exit(app.exec_()) | [
"jgalaz@gmail.com"
] | jgalaz@gmail.com |
780f6f6cd4ffb052da87bd6b0e91520bf61b9560 | ee9552aed56ca61bd2f2a4000ad1338b7f62744b | /restwork/gs14/gs14/settings.py | 16ae6319381fbf5750ea4c63fb8388d76a60bb5e | [] | no_license | satishraut/geeks-repo | a9c41ac578772a57d8835d83f54c2e38e1f174ac | ad3560efeb25c74b3a5bcec9097ed64ec1419b40 | refs/heads/main | 2023-01-13T22:40:41.576912 | 2020-11-16T11:30:31 | 2020-11-16T11:30:31 | 310,078,162 | 0 | 0 | null | 2020-11-16T11:30:32 | 2020-11-04T17:55:56 | Python | UTF-8 | Python | false | false | 3,091 | py | """
Django settings for gs14 project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jwcn&mo8b_r=)if7)&yg8kkp*i+ntz!#+jkm4t=1g5i-+1&fdv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'apiapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs14.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gs14.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"satish.raut777@gmail.com"
] | satish.raut777@gmail.com |
799da5f90332c8958a0a8e05156cccd1664b11fc | 2acc7b60a1f2352dac778ca3e418793011bce53a | /LAB3/Entropy/Entity.py | b5749aefbe3b5cf23eea516dd5ede2b025084cbc | [] | no_license | mlynarzsrem/TI | c3379725199f2627cf50b71a88bc2bf7382327b3 | f5c9eeb1dae9f11c3bbf7232d1895f9e7332b608 | refs/heads/master | 2021-01-25T13:29:17.831394 | 2018-05-10T14:31:21 | 2018-05-10T14:31:21 | 123,579,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from collections import Counter
class Entity:
    """A graph node wrapping an object, with parent/child links and an
    occurrence counter. Passing None as parent or child means "no link"."""

    def __init__(self, object, parent, child):
        self.object = object
        self.objectCount = 1
        self.parents = [] if parent is None else [parent]
        self.children = [] if child is None else [child]

    def getChildrenNumber(self):
        """Number of child links (duplicates counted)."""
        return len(self.children)

    def getParentsNumber(self):
        """Number of parent links (duplicates counted)."""
        return len(self.parents)

    def getChildrenCounter(self):
        """Counter mapping each child to its occurrence count."""
        return Counter(self.children)

    def getParentsCounter(self):
        """Counter mapping each parent to its occurrence count."""
        return Counter(self.parents)

    def incrementCounter(self):
        """Record another occurrence of the wrapped object."""
        self.objectCount += 1

    def getCount(self):
        return self.objectCount

    def addChild(self, child):
        self.children.append(child)

    def addParent(self, parent):
        self.parents.append(parent)
"mlynarzsrem@gmail.com"
] | mlynarzsrem@gmail.com |
2a423cedae0d542fded7abe5a56e9e6177b69fb6 | 9ab60b9797cc2de229b840b8eeca4c1404076adb | /src/GL/texture.py | ca725a9c2be163fb596566a894d286805178daf6 | [
"MIT"
] | permissive | ChsHub/GLImageViewer | 2acf016fed5c19dbae416a41953ae345a2c27e23 | cb838215c43c8f6ad1470b369c74a5a7dea32ef6 | refs/heads/master | 2021-07-13T05:58:40.060239 | 2020-06-05T18:46:14 | 2020-06-05T18:46:14 | 157,926,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | from logging import info
import OpenGL.GL as gl
import numpy
from PIL import Image
from numpy.lib.arraypad import np
def get_texture_image(path):
    """Load an image as RGB pixel data for texturing.

    Returns (pixels, quad_corners, width, height) where pixels is a flat list
    of (r, g, b) tuples and quad_corners are the four texture-quad vertices.
    """
    with Image.open(path) as img:
        width, height = img.size
        pixels = list(img.convert('RGB').getdata())
    corners = [(0.0, 0.0), (width, 0.0), (0.0, height), (width, height)]
    return pixels, corners, width, height
def bind_texture(texture_image, height, width):
    """Create, configure and upload a 2D RGB texture from a flat pixel list.

    :param texture_image: iterable of (r, g, b) byte tuples, row-major,
        length width * height (as produced by get_texture_image).
    :param height: image height in pixels.
    :param width: image width in pixels.
    :return: the generated GL texture id (previously created but discarded,
        leaving callers unable to rebind or delete the texture).
    """
    texture = gl.glGenTextures(1, 0)
    info(gl.glGetError())
    pixels = numpy.array(texture_image, dtype=np.uint8)
    # Bug fix: ndarray.reshape returns a NEW array; the original call
    # discarded the result, so the reshape was a silent no-op.
    pixels = pixels.reshape((height, width, 3))
    gl.glBindTexture(gl.GL_TEXTURE_2D, texture)
    # Mirror at the edges; linear min/mag filtering.
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_MIRRORED_REPEAT)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_MIRRORED_REPEAT)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB8, width, height, 0, gl.GL_RGB,
                    gl.GL_UNSIGNED_BYTE, pixels)
    gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
    info(gl.glGetError())
    return texture
| [
"christian1193@web.de"
] | christian1193@web.de |
ec2f456a167ccd3e44915be5628c8c39cde79351 | e3c425f4841b5d3c140ecd5c0aede930788e9c78 | /exposing/_version.py | 6a27987a8cba79e77e7ec06fe0349b417f6ae225 | [] | no_license | w4k2/exposing | f250e491ddd8e1a47e6b3706a50dfccd3da59cf0 | 6abbced18aa567ed45426ba915f3b56e7aeca028 | refs/heads/master | 2021-04-03T05:23:53.394528 | 2019-02-27T14:19:16 | 2019-02-27T14:19:16 | 124,964,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | """
``exposing``
"""
__version__ = '0.2.2'
| [
"pawel@ksienie.com"
] | pawel@ksienie.com |
a0a21d1a4fa5f9ed0ec4fdf03246a0292c5fdad2 | 1c1c0fe586b93bcae85522f8479efb37773b8770 | /memoapp/urls.py | e7ed57b937158bb77c4322bd92ed2c34b3d0f6ce | [] | no_license | crypturgus/memolink | 2b6c04ee17de966af4eaf6f5fdc43bf08774a906 | 43173ce22ffec285dc9045de2f8021278bc95d54 | refs/heads/master | 2021-01-24T07:23:29.188555 | 2017-06-04T21:47:43 | 2017-06-04T21:47:43 | 93,341,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.conf.urls import url
from memoapp import views
urlpatterns = [
url(r'^$', views.index, name='index'),
] | [
"mkukawski@gmial.com"
] | mkukawski@gmial.com |
3a88ef40ce18dfd5247af9fa7e6469e54982eef1 | ed1b6ce64a787c0b5b80c3f68576d0f3c396bc55 | /peples_heigth/solution.py | e3e9b8477e234b9dd55c3ab9faa01d21c05d7454 | [
"MIT"
] | permissive | andriiglukhyi/leetcode | 23481c236356baa9260eb7a4c24f5a1d5d9d2dc1 | 22be8c8417b28b2888be5aee82ccfe47f57f1945 | refs/heads/master | 2021-07-24T18:46:38.680457 | 2018-10-21T00:59:06 | 2018-10-21T00:59:06 | 135,516,915 | 1 | 0 | MIT | 2018-10-21T00:59:07 | 2018-05-31T01:44:57 | Python | UTF-8 | Python | false | false | 335 | py | class Solution:
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key = lambda person: [-person[0],person[1]])
newlist = []
for person in people:
newlist.insert(person[1],person)
return newlist | [
"andriigluxii@gmail.com"
] | andriigluxii@gmail.com |
f3ef8db2828fa6cac30cafcf42c0a836c21234ae | e3b0113a913daf5884d4f4ae2cbc17796785ad5a | /orders/migrations/0011_auto_20191030_1714.py | c0432f18e308accfe091bf0a77c9a19f54020310 | [] | no_license | meemeee/pizza | 2023068bc487cb9fbdfab280ab7ddfe128a6db7b | 739a7227c8fcd0d10eb210df26d9f7525d9b5ab9 | refs/heads/master | 2022-04-20T09:09:22.894284 | 2020-04-22T09:21:14 | 2020-04-22T09:21:14 | 218,509,060 | 0 | 0 | null | 2020-02-17T16:45:31 | 2019-10-30T11:11:36 | JavaScript | UTF-8 | Python | false | false | 961 | py | # Generated by Django 2.2.6 on 2019-10-30 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0010_auto_20191030_1709'),
]
operations = [
migrations.AlterField(
model_name='orderdinnerplatters',
name='price',
field=models.FloatField(),
),
migrations.AlterField(
model_name='orderpasta',
name='price',
field=models.FloatField(),
),
migrations.AlterField(
model_name='orderpizza',
name='price',
field=models.FloatField(),
),
migrations.AlterField(
model_name='ordersalads',
name='price',
field=models.FloatField(),
),
migrations.AlterField(
model_name='ordersubs',
name='price',
field=models.FloatField(),
),
]
| [
"lethitramy27@gmail.com"
] | lethitramy27@gmail.com |
c9bd9882b8c70a69e85ab9473681185ba3564970 | 0b909f1657083407bfedb9742770ae2e31211f5e | /LSTMIMG/Base_Model.py | bd077351c290d34e5ec6e66006fa731b3535d813 | [] | no_license | jwonged/Visual-Question-Answering | 4ee3eef84f6d638699ae71bab21b3dac594ae13e | 9f4e1b2c8bc91ca801747beceadca14f37752640 | refs/heads/master | 2021-09-14T15:14:35.725572 | 2018-05-15T13:10:24 | 2018-05-15T13:10:24 | 107,781,120 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,979 | py | '''
Created on 31 Mar 2018
@author: jwong
'''
import csv
import json
import os
import pickle
import time
import os
import math
from model_utils import generateForSubmission
from vqaTools.vqaInternal import VQA
from vqaTools.vqaEval import VQAEval
import numpy as np
import tensorflow as tf
class BaseModel(object):
'''
Base Model for LSTMIMG
'''
def __init__(self, config):
self.config = config
tf.set_random_seed(self.config.randomSeed)
self.classToAnsMap = config.classToAnsMap
self.sess = None
self.saver = None
self.f1 = None
self.f2 = None
print(self._getDescription(config))
self.vqa = None
def _logToCSV(self, nEpoch='', qn='', pred='', lab='', predClass='', labClass='',
correct='', img_id='', qn_id=''):
self.predFile.writerow([nEpoch, qn, pred, lab, predClass, labClass,
correct, img_id, qn_id])
#to be abstract
def comment(self):
return ''
def _getDescription(self, config):
info = 'model: {}, classes: {}, batchSize: {}, \
dropout: {}, optimizer: {}, lr: {}, decay: {}, \
clip: {}, shuffle: {}, trainEmbeddings: {}, LSTM_units: {}, \
usePretrainedEmbeddings: {}, LSTMType: {}, elMult: {}, imgModel: {}, \
seed:{}, '.format(
config.modelStruct, config.nOutClasses, config.batch_size,
config.dropoutVal, config.modelOptimizer, config.learningRate,
config.learningRateDecay, config.max_gradient_norm, config.shuffle,
config.trainEmbeddings, config.LSTM_num_units, config.usePretrainedEmbeddings,
config.LSTMType, config.elMult, config.imgModel, config.randomSeed)
return info + 'fc: 2 layers (1000)' + self.comment()
def _addOptimizer(self):
#training optimizer
with tf.variable_scope("train_step"):
if self.config.modelOptimizer == 'adam':
print('Using adam optimizer')
optimizer = tf.train.AdamOptimizer(self.lr)
elif self.config.modelOptimizer == 'adagrad':
print('Using adagrad optimizer')
optimizer = tf.train.AdagradOptimizer(self.lr)
else:
print('Using grad desc optimizer')
optimizer = tf.train.GradientDescentOptimizer(self.lr)
if self.config.max_gradient_norm > 0: # gradient clipping if clip is positive
grads, vs = zip(*optimizer.compute_gradients(self.loss))
grads, gnorm = tf.clip_by_global_norm(grads, self.config.max_gradient_norm)
self.train_op = optimizer.apply_gradients(zip(grads, vs), name='trainModel')
else:
self.train_op = optimizer.minimize(self.loss, name='trainModel')
def _initSession(self):
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.merged = tf.summary.merge_all()
self.tb_writer = tf.summary.FileWriter(self.config.saveModelPath + 'tensorboard', self.sess.graph)
print('Completed Model Construction')
def train(self, trainReader, valReader, logFile):
if not self.config.debugMode:
if not os.path.exists(self.config.saveModelPath):
os.makedirs(self.config.saveModelPath)
print('Starting model training')
self.f1 = open(logFile, 'wb')
self.logFile = csv.writer(self.f1)
self.logFile.writerow(['Attention model, ', self._getDescription(self.config)])
self.logFile.writerow([
'Epoch', 'Val score', 'Train score', 'Train correct',
'Train predictions', 'Val correct', 'Val predictions', 'vqaAcc'])
self.vqa = VQA(self.config.valAnnotFileUnresolved, self.config.originalValQns)
startTime = time.time()
#self.add_summary()
highestScore = 0
nEpochWithoutImprovement = 0
for nEpoch in range(self.config.nTrainEpochs):
msg = 'Epoch {} \n'.format(nEpoch)
print(msg)
score = self._run_epoch(trainReader, valReader, nEpoch)
if nEpoch > self.config.decayAfterEpoch:
self.config.learningRate *= self.config.learningRateDecay
# early stopping and saving best parameters
if score >= highestScore:
nEpochWithoutImprovement = 0
self._saveModel()
highestScore = score
else:
nEpochWithoutImprovement += 1
if nEpochWithoutImprovement >= self.config.nEpochsWithoutImprov:
self.logFile.writerow([
'Early stopping at epoch {} with {} epochs without improvement'.format(
nEpoch+1, nEpochWithoutImprovement)])
break
timeTakenMsg = 'time taken: {}'.format(time.time() - startTime)
print(timeTakenMsg)
self.logFile.writerow([timeTakenMsg])
def _run_epoch(self, trainReader, valReader, nEpoch):
'''
Runs 1 epoch and returns val score
'''
# Potentially add progbar here
batch_size = self.config.batch_size
nBatches = trainReader.datasetSize / batch_size
correct_predictions, total_predictions = 0., 0.
train_losses = []
self.runVal(valReader, nEpoch)
for i, (qnAsWordIDsBatch, seqLens, img_vecs, labels, _, _, _) in enumerate(
trainReader.getNextBatch(batch_size)):
feed = {
self.word_ids : qnAsWordIDsBatch,
self.sequence_lengths : seqLens,
self.img_vecs : img_vecs,
self.labels : labels,
self.lr : self.config.learningRate,
self.dropout : self.config.dropoutVal
}
_, loss, labels_pred, summary = self.sess.run(
[self.train_op, self.loss, self.labels_pred, self.merged], feed_dict=feed)
train_losses.append(loss)
for lab, labPred in zip(labels, labels_pred):
if lab==labPred:
correct_predictions += 1
total_predictions += 1
#log to csv
#self.predFile.writerow([qn, self.classToAnsMap[labPred], self.classToAnsMap[lab], labPred, lab, lab==labPred])
#self.predFile.write('Qn:{}, lab:{}, pred:{}\n'.format(qn, self.classToAnsMap[lab], self.classToAnsMap[labPred]))
if (i%10==0):
self.tb_writer.add_summary(summary, global_step=nBatches*nEpoch + i)
'''valAcc, valCorrect, valTotalPreds = self.runVal(valReader, nEpoch)
resMsg = 'Epoch {0}, batch {1}: val Score={2:>6.1%}, trainAcc={3:>6.1%}\n'.format(
nEpoch, i, valAcc, correct_predictions/total_predictions if correct_predictions > 0 else 0 )
self.logFile.write(resMsg)
print(resMsg)'''
epochScore, valCorrect, valTotalPreds, vqaAcc, val_loss = self.runVal(valReader, nEpoch)
trainScore = correct_predictions/total_predictions if correct_predictions > 0 else 0
train_loss = np.mean(train_losses)
#logging
epMsg = 'Epoch {}: val Score={:>6.2%}, val Loss={}, train Score={:>6.2%}, train loss={}'.format(
nEpoch, epochScore, val_loss, trainScore, train_loss)
print(epMsg)
print('vqaAcc: {}'.format(vqaAcc))
self.logFile.writerow([nEpoch, epochScore, trainScore, correct_predictions,
total_predictions, valCorrect, valTotalPreds, vqaAcc, train_loss, val_loss])
return epochScore
def runVal(self, valReader, nEpoch, is_training=True):
"""Evaluates performance on val set
Args:
valReader:
Returns:
metrics:
"""
accuracies, res, val_losses = [], [], []
correct_predictions, total_predictions = 0., 0.
for qnAsWordIDsBatch, seqLens, img_vecs, labels, rawQns, img_ids, qn_ids in \
valReader.getNextBatch(self.config.batch_size):
feed = {
self.word_ids : qnAsWordIDsBatch,
self.sequence_lengths : seqLens,
self.img_vecs : img_vecs,
self.labels : labels,
self.dropout : 1.0
}
val_loss, labels_pred = self.sess.run([self.loss, self.labels_pred], feed_dict=feed)
for lab, labPred, qn ,img_id, qn_id in zip(
labels, labels_pred, rawQns, img_ids, qn_ids):
if (lab==labPred):
correct_predictions += 1
total_predictions += 1
accuracies.append(lab==labPred)
currentPred = {}
currentPred['question_id'] = qn_id
currentPred['answer'] = self.classToAnsMap[labPred]
res.append(currentPred)
if not math.isnan(val_loss):
val_losses.append(val_loss)
epoch_valLoss = np.mean(val_losses)
valAcc = np.mean(accuracies)
vqaRes = self.vqa.loadRes(res, self.config.originalValQns)
vqaEval = VQAEval(self.vqa, vqaRes, n=2)
vqaEval.evaluate()
return valAcc, correct_predictions, total_predictions, vqaEval.accuracy['overall'], epoch_valLoss
def _saveModel(self):
self.saver.save(self.sess, self.config.saveModelFile)
def loadTrainedModel(self, restoreModel, restoreModelPath):
print('Restoring model from: {}'.format(restoreModel))
tf.reset_default_graph()
self.sess = tf.Session()
self.saver = saver = tf.train.import_meta_graph(restoreModel)
saver.restore(self.sess, tf.train.latest_checkpoint(restoreModelPath))
graph = tf.get_default_graph()
self.labels_pred = graph.get_tensor_by_name('labels_pred:0')
self.accuracy = graph.get_tensor_by_name('accuracy:0')
self.word_ids = graph.get_tensor_by_name('word_ids:0')
self.img_vecs = graph.get_tensor_by_name('img_vecs:0')
self.sequence_lengths = graph.get_tensor_by_name('sequence_lengths:0')
self.labels = graph.get_tensor_by_name('labels:0')
self.dropout = graph.get_tensor_by_name('dropout:0')
self.topK = graph.get_tensor_by_name('topK:0')
self.saver = tf.train.Saver()
return graph
def runPredict(self, valReader, predfile):
"""Evaluates performance on internal valtest set
Args:
valReader:
Returns:
metrics:
"""
batch_size = self.config.batch_size
print('Predictions will be logged in {}'.format(predfile))
self.f2 = open(predfile, 'wb')
self.predFile = csv.writer(self.f2)
self._logToCSV('Epoch','Question', 'Prediction', 'Label', 'Pred Class',
'label class', 'Correct?', 'img id', 'qn_id')
accuracies = []
correct_predictions, total_predictions = 0., 0.
results = []
for nBatch, (qnAsWordIDsBatch, seqLens, img_vecs, labels, rawQns, img_ids, qn_ids) \
in enumerate(valReader.getNextBatch(batch_size)):
feed = {
self.word_ids : qnAsWordIDsBatch,
self.sequence_lengths : seqLens,
self.img_vecs : img_vecs,
self.dropout : 1.0
}
labels_pred = self.sess.run(self.labels_pred, feed_dict=feed)
for lab, labPred, qn, img_id, qn_id in zip(
labels, labels_pred, rawQns, img_ids, qn_ids):
if (lab==labPred):
correct_predictions += 1
total_predictions += 1
accuracies.append(lab==labPred)
self._logToCSV(nEpoch='', qn=qn,
pred=self.classToAnsMap[labPred],
lab=self.classToAnsMap[lab],
predClass=labPred, labClass=lab,
correct=lab==labPred, img_id=img_id, qn_id=qn_id)
currentPred = {}
currentPred['question_id'] = qn_id
currentPred['answer'] = self.classToAnsMap[labPred]
results.append(currentPred)
valAcc = np.mean(accuracies)
print('ValAcc: {:>6.2%}, total_preds: {}'.format(valAcc, total_predictions))
return results, valAcc
def runTest(self, testReader, jsonOutputFile):
'''For producing official test results for submission to server
'''
print('Starting test run...')
allQnIds, allPreds = [], []
for qnAsWordIDsBatch, seqLens, img_vecs, _, _, qn_ids \
in testReader.getNextBatch(self.config.batch_size):
feed = {
self.word_ids : qnAsWordIDsBatch,
self.sequence_lengths : seqLens,
self.img_vecs : img_vecs,
self.dropout : 1.0
}
labels_pred = self.sess.run(self.labels_pred, feed_dict=feed)
for labPred, qn_id in zip(labels_pred, qn_ids):
allQnIds.append(qn_id)
allPreds.append(self.classToAnsMap[labPred])
print('Total predictions: {}'.format(len(allPreds)))
generateForSubmission(allQnIds, allPreds, jsonOutputFile)
def destruct(self):
self.sess.close()
| [
"dsjw2@cam.ac.uk"
] | dsjw2@cam.ac.uk |
f524991a8cd90641162df26314d6222d274db957 | 0274a35ad1054ea4f0036c6c8651c7826876f77f | /testData/wrappers_storage2/bio/bismark/custom_wr1/wrapper.py | 395d82e4e9f33f685dc1b449361a71de44f90b75 | [
"MIT"
] | permissive | JetBrains-Research/snakecharm | 32e280f1d389c1fed04bfe19004a1b8275e7ae4c | f3c09f1a48f94063c20a4aa151399a15fc62f4a4 | refs/heads/master | 2023-09-01T02:44:03.341042 | 2023-08-29T08:12:39 | 2023-08-29T08:12:39 | 165,259,818 | 64 | 10 | MIT | 2023-04-20T17:14:57 | 2019-01-11T14:49:20 | Python | UTF-8 | Python | false | false | 55 | py | output_dir = snakemake.output.get("custom_html", None)
| [
"noreply@github.com"
] | JetBrains-Research.noreply@github.com |
d371f0c79b6ac549d7b5d9d34d43e21684e18b86 | c9cfcd8d85121fba6608557ab74e33b574e1f39d | /ultrachronic/__init__.py | 424030c0ba7bdb5fb9a2cf78ab5c565f0cdc39a7 | [
"MIT"
] | permissive | yoavram/ultrachronic | b588c8e7f291cf9697e42868fa53e3c70c7edd37 | eabd55c6c97da606688a73244faee7572ae381d0 | refs/heads/master | 2021-01-12T08:12:00.023379 | 2018-12-20T08:09:51 | 2018-12-20T08:09:51 | 76,501,608 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from .ultrachronic import jsonify_result, repeat
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| [
"yoav@yoavram.com"
] | yoav@yoavram.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.