blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
84d1b66a1d65710dbf72630462b771d0caabbd2d | 5a1e5603a42ff27e648fad307d60957cb95f0185 | /dask/dataframe/tests/test_csv.py | 5e6550be7b23c583897a4f98ca267e45c276d05a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | jseabold/dask | 1937931e7951f776b253432f6b5beedee90892a0 | f6332dec1ead4034540bc2c3c1010a9783099752 | refs/heads/master | 2021-01-23T04:23:10.852740 | 2016-04-29T00:14:34 | 2016-04-29T00:14:34 | 57,442,996 | 0 | 0 | null | 2016-04-30T13:29:31 | 2016-04-30T13:29:30 | Python | UTF-8 | Python | false | false | 6,358 | py | from __future__ import print_function, division, absolute_import
from io import BytesIO
import pytest
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')
from toolz import partition_all, valmap, partial
from dask import compute
from dask.async import get_sync
from dask.dataframe.csv import read_csv_from_bytes, bytes_read_csv, read_csv
from dask.dataframe.utils import eq
from dask.utils import filetexts, filetext
# Force the synchronous scheduler so test runs are deterministic and
# single-threaded (easier to debug on failure).
compute = partial(compute, get=get_sync)

# In-memory CSV fixtures keyed by filename: a normal file, a header-only
# (empty) file, and another normal file, so globbing also exercises the
# empty-file path.
files = {'2014-01-01.csv': (b'name,amount,id\n'
                            b'Alice,100,1\n'
                            b'Bob,200,2\n'
                            b'Charlie,300,3\n'),
         '2014-01-02.csv': (b'name,amount,id\n'),
         '2014-01-03.csv': (b'name,amount,id\n'
                            b'Dennis,400,4\n'
                            b'Edith,500,5\n'
                            b'Frank,600,6\n')}

# Shared header line (b'name,amount,id\n') reused by the tests below.
header = files['2014-01-01.csv'].split(b'\n')[0] + b'\n'

# Reference result: all fixture files parsed by pandas directly and
# concatenated in sorted filename order.
expected = pd.concat([pd.read_csv(BytesIO(files[k])) for k in sorted(files)])
def test_bytes_read_csv():
    """bytes_read_csv parses raw CSV bytes into a pandas DataFrame."""
    raw = files['2014-01-01.csv']
    frame = bytes_read_csv(raw, b'', {})
    assert ['name', 'amount', 'id'] == list(frame.columns)
    assert frame.shape[0] == 3
    assert frame.id.sum() == 6
def test_bytes_read_csv_kwargs():
    """Keyword arguments (here usecols) are forwarded to pandas.read_csv."""
    frame = bytes_read_csv(files['2014-01-01.csv'], b'',
                           {'usecols': ['name', 'id']})
    assert ['name', 'id'] == list(frame.columns)
def test_bytes_read_csv_dtype_coercion():
    """An explicit dtype mapping coerces the parsed column types."""
    frame = bytes_read_csv(files['2014-01-01.csv'], b'', {},
                           {'amount': 'float'})
    assert frame.amount.dtype == 'float'
def test_bytes_read_csv_with_header():
    """A header passed separately is prepended before the body is parsed."""
    head_line, body = files['2014-01-01.csv'].split(b'\n', 1)
    frame = bytes_read_csv(body, head_line + b'\n', {})
    assert ['name', 'amount', 'id'] == list(frame.columns)
    assert frame.id.sum() == 6
    assert len(frame) == 3
def test_read_csv_simple():
    """read_csv_from_bytes builds either a dask collection or raw values."""
    # One single-block "file" per fixture, in sorted filename order.
    blocks = [[files[k]] for k in sorted(files)]
    kwargs = {}
    # Sample head frame used to infer column names / dtypes.
    head = bytes_read_csv(files['2014-01-01.csv'], b'', {})

    # collection=True -> a dask DataFrame over all blocks.
    df = read_csv_from_bytes(blocks, header, head, kwargs, collection=True)
    assert isinstance(df, dd.DataFrame)
    assert list(df.columns) == ['name', 'amount', 'id']

    # collection=False -> a list of delayed-like values, one per file.
    values = read_csv_from_bytes(blocks, header, head, kwargs,
                                 collection=False)
    assert isinstance(values, list)
    assert len(values) == 3
    assert all(hasattr(item, 'dask') for item in values)

    # Sum spans all three files; computed with the synchronous scheduler.
    result = df.amount.sum().compute(get=get_sync)
    assert result == (100 + 200 + 300 + 400 + 500 + 600)
def test_kwargs():
    """Reader kwargs propagate through collection creation and compute."""
    kwargs = {'usecols': ['name', 'id']}
    blocks = [[files[k]] for k in sorted(files)]
    head = bytes_read_csv(files['2014-01-01.csv'], b'', kwargs)

    df = read_csv_from_bytes(blocks, header, head, kwargs, collection=True)
    assert list(df.columns) == ['name', 'id']

    computed = df.compute()
    assert (computed.columns == df.columns).all()
def test_blocked():
    """Files split into several byte blocks still parse correctly."""
    blocks = []
    for k in sorted(files):
        b = files[k]
        lines = b.split(b'\n')
        # Re-join every two lines into one chunk, so each file becomes
        # multiple partial blocks (the header only lives in the first one).
        blocks.append([b'\n'.join(bs) for bs in partition_all(2, lines)])

    df = read_csv_from_bytes(blocks, header, expected.head(), {})
    # Row order is preserved across blocks; only the index needs resetting.
    eq(df.compute().reset_index(drop=True),
       expected.reset_index(drop=True), check_dtype=False)

    # Same exercise restricted to a column subset via usecols.
    expected2 = expected[['name', 'id']]
    df = read_csv_from_bytes(blocks, header, expected2.head(),
                             {'usecols': ['name', 'id']})
    eq(df.compute().reset_index(drop=True),
       expected2.reset_index(drop=True), check_dtype=False)
def test_enforce_dtypes():
    """enforce_dtypes coerces every block to the head frame's dtypes."""
    # NOTE(review): b'2.2.0' looks like a typo for b'2,2.0', but either way
    # it yields blocks whose inferred dtypes differ, which is the point here.
    blocks = [[b'aa,bb\n1,1.0\n2.2.0', b'10,20\n30,40'],
              [b'aa,bb\n1,1.0\n2.2.0', b'10,20\n30,40']]
    head = pd.read_csv(BytesIO(blocks[0][0]), header=0)
    dfs = read_csv_from_bytes(blocks, b'aa,bb\n', head, {},
                              enforce_dtypes=True, collection=False)
    dfs = compute(*dfs)
    # Every resulting partition must share the sampled head's dtypes.
    assert all(df.dtypes.to_dict() == head.dtypes.to_dict() for df in dfs)
def test_read_csv_files():
    """read_csv accepts both glob patterns and single filenames."""
    with filetexts(files, mode='b'):
        result = read_csv('2014-01-*.csv')
        eq(result, expected, check_dtype=False)

        single = '2014-01-01.csv'
        result = read_csv(single)
        eq(result, pd.read_csv(BytesIO(files[single])), check_dtype=False)
from dask.bytes.compression import compress, files as cfiles, seekable_files

# Parameter grid: every compression format with the default blocksize, plus
# the seekable formats with a tiny explicit blocksize to force splitting.
fmt_bs = [(fmt, None) for fmt in cfiles] + [(fmt, 10) for fmt in seekable_files]


@pytest.mark.parametrize('fmt,blocksize', fmt_bs)
def test_read_csv_compression(fmt, blocksize):
    """Compressed files round-trip through read_csv for every format."""
    files2 = valmap(compress[fmt], files)
    with filetexts(files2, mode='b'):
        df = read_csv('2014-01-*.csv', compression=fmt, blocksize=blocksize)
        eq(df.compute(get=get_sync).reset_index(drop=True),
           expected.reset_index(drop=True), check_dtype=False)
def test_warn_non_seekable_files(capsys):
    """Non-seekable compression with a blocksize emits a stderr warning."""
    files2 = valmap(compress['gzip'], files)
    with filetexts(files2, mode='b'):
        # gzip cannot be split at a blocksize: expect one partition per file
        # and a warning on stderr suggesting blocksize=None.
        df = read_csv('2014-01-*.csv', compression='gzip')
        assert df.npartitions == 3
        out, err = capsys.readouterr()
        assert 'gzip' in err
        assert 'blocksize=None' in err

        # Explicit blocksize=None silences the warning entirely.
        df = read_csv('2014-01-*.csv', compression='gzip', blocksize=None)
        out, err = capsys.readouterr()
        assert not err and not out

        # Unknown compression names are rejected outright.
        with pytest.raises(NotImplementedError):
            df = read_csv('2014-01-*.csv', compression='foo')
def test_windows_line_terminator():
    """CRLF line terminators are honoured across block boundaries."""
    text = 'a,b\r\n1,2\r\n2,3\r\n3,4\r\n4,5\r\n5,6\r\n6,7'
    with filetext(text) as fn:
        frame = read_csv(fn, blocksize=5, lineterminator='\r\n')
        assert frame.a.sum().compute() == 21
        assert frame.b.sum().compute() == 27
def test_late_dtypes():
    """Dtype mismatches discovered after sampling raise a helpful error.

    Replaces the try/except/``assert False`` anti-pattern with
    ``pytest.raises`` so a missing exception fails with a clear message.
    """
    text = 'a,b\n1,2\n2,3\n3,4\n4,5\n5.5,6\n6,7.5'
    with filetext(text) as fn:
        # Sampling only the first 10 bytes infers integer columns, but later
        # blocks contain floats, so computing must fail with a message that
        # names the offending column and its required dtype.
        df = read_csv(fn, blocksize=5, sample=10)
        with pytest.raises(TypeError) as exc_info:
            df.b.sum().compute()
        assert ("'b': float" in str(exc_info.value) or
                "'a': float" in str(exc_info.value))

        # Supplying the dtypes up front avoids the failure entirely.
        df = read_csv(fn, blocksize=5, sample=10,
                      dtype={'a': float, 'b': float})
        assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5.5 + 6
        assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7.5
| [
"mrocklin@gmail.com"
] | mrocklin@gmail.com |
8c69b04818eb1c529b6ad11ac1a9de153b213ba5 | 68747ba592c252c952823ff4973c9508b7c8c5e9 | /Ensemble/BeamVelocity.py | dfcd6b48965024dc062ad756bb54688fbce1a739 | [] | no_license | ricorx7/rti_python-1 | 50ce01e7acf60ad6d57c26cfe5d79ecd1fc84563 | 384edef9c14ae5296d7e123eec473b29905a8a58 | refs/heads/master | 2023-02-01T04:33:48.585793 | 2020-12-16T23:25:22 | 2020-12-16T23:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,649 | py | from rti_python.Ensemble.Ensemble import Ensemble
import logging
class BeamVelocity:
    """
    Beam Velocity DataSet.

    Holds the raw (along-beam) water velocities for one ensemble as a
    [bin x beam] matrix of floats, in m/s.
    """

    def __init__(self, num_elements, element_multiplier):
        """
        Initialize the velocity matrix filled with bad-velocity markers.

        Fix over the previous version: each cell was seeded with a
        one-element list ([BadVelocity]) and the loop variable was shadowed;
        decode(), encode(), encode_csv() and pd0_mm_per_sec() all treat
        cells as plain floats, so the scalar marker is stored instead.

        :param num_elements: Number of bins.
        :param element_multiplier: Number of beams.
        """
        self.ds_type = 10                  # RTB dataset type (float data)
        self.num_elements = num_elements
        self.element_multiplier = element_multiplier
        self.image = 0
        self.name_len = 8
        self.Name = "E000001\0"

        # [bin][beam] matrix of floats, pre-filled with the bad-velocity
        # marker so an un-decoded object is still consistent for encode().
        self.Velocities = []
        for _ in range(num_elements):
            row = []
            for _ in range(element_multiplier):
                row.append(Ensemble().BadVelocity)
            self.Velocities.append(row)

    def decode(self, data):
        """
        Decode the dataset bytearray and populate the velocity matrix.

        The payload is beam-major: all bins of beam 0, then all bins of
        beam 1, and so on.

        :param data: Bytearray for the dataset.
        """
        packet_pointer = Ensemble.GetBaseDataSize(self.name_len)

        for beam in range(self.element_multiplier):
            for bin_num in range(self.num_elements):
                self.Velocities[bin_num][beam] = Ensemble.GetFloat(packet_pointer, Ensemble().BytesInFloat, data)
                packet_pointer += Ensemble().BytesInFloat

        logging.debug(self.Velocities)

    def encode(self):
        """
        Encode the dataset into RTB format.

        :return: List of bytes: header followed by the beam-major floats.
        """
        result = []

        # Generate the dataset header.
        result += Ensemble.generate_header(self.ds_type,
                                           self.num_elements,
                                           self.element_multiplier,
                                           self.image,
                                           self.name_len,
                                           self.Name)

        # Add the data beam-major, mirroring decode().
        for beam in range(self.element_multiplier):
            for bin_num in range(self.num_elements):
                val = self.Velocities[bin_num][beam]
                result += Ensemble.float_to_bytes(val)

        return result

    def encode_csv(self, dt, ss_code, ss_config, blank, bin_size):
        """
        Encode the velocities into CSV lines, one per (bin, beam) cell.

        :param dt: Datetime object.
        :param ss_code: Subsystem code.
        :param ss_config: Subsystem configuration.
        :param blank: Blank or first bin position in meters.
        :param bin_size: Bin size in meters.
        :return: List of CSV lines.
        """
        str_result = []
        for beam in range(self.element_multiplier):
            for bin_num in range(self.num_elements):
                # Get the value for this (bin, beam) cell.
                val = self.Velocities[bin_num][beam]

                # Create the CSV line for this cell.
                str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BEAM_VEL, ss_code, ss_config, bin_num, beam, blank, bin_size, val))

        return str_result

    def pd0_mm_per_sec(self, pd0_beam_num: int):
        """
        Convert the Beam Velocity from m/s to mm/s (integers) and remap the
        beam order to PD0 convention.

        RTB and PD0 do not share the same beam order:
        RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1

        :param pd0_beam_num: PD0 beam number.
        :type pd0_beam_num: Integer
        :return: All velocities for the given PD0 beam, in mm/s, remapped
                 for PD0 beam ordering.
        :rtype: List
        """
        # Remap the PD0 beam number to the RTB beam index (order 3,2,0,1).
        rti_beam_num = 0
        if self.element_multiplier == 1:        # Vertical beam
            rti_beam_num = 0
        elif pd0_beam_num == 0:
            rti_beam_num = 2
        elif pd0_beam_num == 1:
            rti_beam_num = 3
        elif pd0_beam_num == 2:
            rti_beam_num = 1
        elif pd0_beam_num == 3:
            rti_beam_num = 0

        # Replace the RTB BAD_VELOCITY (88.888) with PD0 BAD_VELOCITY (-32768).
        pd0_vel_data = []
        for bin_idx in range(self.num_elements):
            if Ensemble.is_bad_velocity(self.Velocities[bin_idx][rti_beam_num]):
                pd0_vel_data.append(-32768)
            else:
                # Convert to mm/s and round to an integer.
                pd0_vel_data.append(round(self.Velocities[bin_idx][rti_beam_num] * 1000.0))

        return pd0_vel_data
| [
"rcastelo@rowetechinc.com"
] | rcastelo@rowetechinc.com |
79b239a1f80ab8b8d1039c3f79e33ea2a56b2005 | 8740987d164ceec34801cfad3914b6fe52adb691 | /imagen.py | 9e6cfce19410453c2aad42323c8ea44c5d9912c1 | [] | no_license | Dwight-tech/webapp-petshop | 6ad02e1e67c36d313e5c58eaf562eeb63fb5bf70 | b743d984c43243a7bdce2160994dfa0562cab34c | refs/heads/master | 2022-12-23T10:26:07.034565 | 2020-10-02T01:44:43 | 2020-10-02T01:44:43 | 286,870,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from PIL import Image
# Load a local image (requires Face.jpg next to this script).
im = Image.open("Face.jpg")
#im.rotate(180).show()
import requests
# Fetch the global COVID-19 summary and print the parsed JSON payload.
# NOTE(review): no timeout and no status-code check — requests.get can hang
# indefinitely and r.json() raises on a non-JSON error body; confirm intent.
url = "https://api.covid19api.com/summary"
r = requests.get(url)
print(r.json())
"dwight3103@hotmail"
] | dwight3103@hotmail |
b35b6265d2f87725bbf04c7a1a015b18573508d8 | db7b618e7d9f7c2af001678e7bc7dd703cb86e61 | /TelegramBot/settings.py | 5bcca8b1e223f39bd5a3ac86c3f826827b3c5348 | [] | no_license | abdullakn/impress.ai | 6f24403b70130d8b6440ceab22931cd1cdcb9aeb | c9033970d33304a306fd6dd5e8cc9c1e39ddf1d8 | refs/heads/master | 2023-08-10T18:07:19.462283 | 2021-09-29T17:27:52 | 2021-09-29T17:27:52 | 411,642,025 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,388 | py | """
Django settings for TelegramBot project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (e.g. config('SECRET_KEY')) before deploying.
SECRET_KEY = '$iep-wj!47lovz7ui4i27t7e5c8d9o$pnmw#@l27sb_t-c5pig'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Telegram bot credentials: the token comes from the environment/.env file
# via python-decouple, so it is not hard-coded here.
TOKEN = config('TOKEN')

TELEGRAM = {
    'bot_token': TOKEN,
    'channel_name': 'Impress',
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'telegramApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TelegramBot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TelegramBot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): the database password is committed in plain text; move it to
# the environment (config('DB_PASSWORD')) like TOKEN above. PORT is omitted,
# so the PostgreSQL default (5432) is used.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME':'impressDB',
        'USER':'postgres',
        'PASSWORD':'Abdulla@123',
        'HOST':'localhost'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
| [
"abdudebanjazz@gmail.com"
] | abdudebanjazz@gmail.com |
cd3db4fc88cd3b0d60cfe5f2b5a34e40bff7b7a5 | 2a745cecba949014db4ba29b88598f0204c25832 | /kafka-producer.py | 7578403442df5fc54a498f50f74a8efbb4b8d8b1 | [] | no_license | alexandrubordei/python-kafka-connectors | 58219cf9b0e4a93642bc00c527538c7e3e5dbd0c | 62fda0cabcb8fcaedb6169ecfa3dcf4648854030 | refs/heads/master | 2021-01-19T11:42:25.748091 | 2016-06-13T13:18:33 | 2016-06-13T13:18:33 | 61,037,725 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | #!/usr/bin/python
from kafka import KafkaProducer
# Sample clickstream event (Avro-style JSON). Named `message` rather than
# `json` so it does not shadow the standard-library json module.
message = '{"user_action_id":{"long":1346801},"user_id":{"long":243008914},"customer_id":{"long":0},"session_id":{"string":"2icprcma5qp6ch52lk6sbm0ag7"},"remote_addr":{"string":"78.96.2.37"},"forwarded_for":{"string":""},"php_self":{"string":"/search-tools/suggest/products/masini%20de/0"},"keywords":{"string":""},"action":{"string":"ignore_Browsing ProductListing Search suggestProducts"},"category_id":{"long":0},"widget_page_id":{"int":0},"brand_id":{"long":0},"products_id":{"long":0},"time":{"long":1446425827000},"data":{"long":1446422400000},"ora":{"long":25027000},"referer":{"string":"http://m.emag.ro/resigilate/telefoane-mobile-accesorii/listall?ref=ps&emag_click_id=d2a1a979295cae63902266599533373b"},"referer_section":{"string":""},"referer_site":{"string":"m.emag.ro"},"user_agent":{"string":"Mozilla/5.0 (Linux; Android 4.4.4; SM-G530FZ Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/33.0.0.0 Mobile Safari/537.36"},"browser_name":{"string":"Chrome"},"browser_version":{"string":"33"},"operating_system":{"string":"AndroidOS"},"device":{"string":"undefined"},"device_type":{"string":"phone"},"click_to":{"long":3168216612},"link_type":{"int":0},"link_id":{"int":0},"response_code":{"int":200},"id_abonat":{"long":0},"timp_generare":{"string":"1.7870"},"cache_age":{"long":0},"ipREGION":{"string":"ALBA"},"ipCITY":{"string":"BLAJ"},"selectedRegion":{"string":""},"selectedCity":{"string":""},"spider_detection_status":{"int":0},"app_esi_call":{"boolean":false},"hostname":{"string":"m.emag.ro"},"lb":{"string":"lb1.emag.ro"},"ab_option":{"string":""},"set_cookie":{"int":0},"user_remember":{"string":"empty"},"products_status":{"int":0},"info_id":{"int":0},"partner_cookie":{"string":"-"}}'

# Bootstrap against all three brokers so startup survives one being down.
producer = KafkaProducer(bootstrap_servers='instance-18171.bigstep.io:9092,instance-18169.bigstep.io:9092,instance-18170.bigstep.io:9092')

# send() is asynchronous (fire-and-forget into the client's buffer).
for _ in range(100000000):
    producer.send('clickstreamjson', message.encode('utf-8'))

# Block until all buffered records are delivered; without this the process
# can exit with messages still sitting in the client's send buffer.
producer.flush()
| [
"root@instance-18168.bigstep.io"
] | root@instance-18168.bigstep.io |
4a28f0deec15c745ccc4aa180c151746c3d8af36 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20181001/load_balancer.py | 0a7b74a91059e38d8ca28b8ffa74b27e5b17a0d9 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,538 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['LoadBalancer']
class LoadBalancer(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_address_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]]] = None,
id: Optional[pulumi.Input[str]] = None,
inbound_nat_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]]] = None,
inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]]] = None,
load_balancer_name: Optional[pulumi.Input[str]] = None,
load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
outbound_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleArgs']]]]] = None,
probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_guid: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['LoadBalancerSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
LoadBalancer resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]] backend_address_pools: Collection of backend address pools used by a load balancer
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]] frontend_ip_configurations: Object representing the frontend IPs to be used for the load balancer
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]] inbound_nat_pools: Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]] inbound_nat_rules: Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
:param pulumi.Input[str] load_balancer_name: The name of the load balancer.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]] load_balancing_rules: Object collection representing the load balancing rules Gets the provisioning
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleArgs']]]] outbound_rules: The outbound rules.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]] probes: Collection of probe objects used in the load balancer
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_guid: The resource GUID property of the load balancer resource.
:param pulumi.Input[pulumi.InputType['LoadBalancerSkuArgs']] sku: The load balancer SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['backend_address_pools'] = backend_address_pools
__props__['etag'] = etag
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['id'] = id
__props__['inbound_nat_pools'] = inbound_nat_pools
__props__['inbound_nat_rules'] = inbound_nat_rules
if load_balancer_name is None:
raise TypeError("Missing required property 'load_balancer_name'")
__props__['load_balancer_name'] = load_balancer_name
__props__['load_balancing_rules'] = load_balancing_rules
__props__['location'] = location
__props__['outbound_rules'] = outbound_rules
__props__['probes'] = probes
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['resource_guid'] = resource_guid
__props__['sku'] = sku
__props__['tags'] = tags
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150615:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160330:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20161201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170301:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191101:LoadBalancer"), 
pulumi.Alias(type_="azure-nextgen:network/v20191201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200301:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200501:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200601:LoadBalancer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(LoadBalancer, __self__).__init__(
'azure-nextgen:network/v20181001:LoadBalancer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'LoadBalancer':
"""
Get an existing LoadBalancer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return LoadBalancer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> pulumi.Output[Optional[Sequence['outputs.BackendAddressPoolResponse']]]:
"""
Collection of backend address pools used by a load balancer
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FrontendIPConfigurationResponse']]]:
"""
Object representing the frontend IPs to be used for the load balancer
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="inboundNatPools")
def inbound_nat_pools(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatPoolResponse']]]:
"""
Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_pools")
@property
@pulumi.getter(name="inboundNatRules")
def inbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatRuleResponse']]]:
"""
Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_rules")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> pulumi.Output[Optional[Sequence['outputs.LoadBalancingRuleResponse']]]:
"""
Object collection representing the load balancing rules Gets the provisioning
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundRules")
def outbound_rules(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundRuleResponse']]]:
"""
The outbound rules.
"""
return pulumi.get(self, "outbound_rules")
@property
@pulumi.getter
def probes(self) -> pulumi.Output[Optional[Sequence['outputs.ProbeResponse']]]:
"""
Collection of probe objects used in the load balancer
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[Optional[str]]:
"""
The resource GUID property of the load balancer resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.LoadBalancerSkuResponse']]:
"""
The load balancer SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
862b3e9fdfaed87b52ccbd8596cebe7a097c2f6b | 386a7aa0f17a2c37ba6ecf29d7799b48a45c239e | /url_shortener/settings.py | 7c06dc33e283cc5aa735eb5d840f4fbb8ea92fbf | [] | no_license | NNApy/URL_SHORTENER | 7d5691f9f86e53976321bc7a4f93d51a69c42869 | be65bd53c08780cc772d4db0cce00f90cff9005a | refs/heads/master | 2020-02-26T14:54:41.583366 | 2017-02-15T09:44:41 | 2017-02-15T09:44:41 | 63,497,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,252 | py | """
Django settings for url_shortener project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lix(lf06)3f15$!&u8l*h=wh$^#1f_@yf_a*lj8w&a-uj-^6$k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_shortener',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'url_shortener.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'url_shortener.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"phantom.kot@gmail.com"
] | phantom.kot@gmail.com |
3b7e7606078cd3ed5a6bce29f7891a86294ca375 | fb4473a0c00d89f55fb2d07bad3d62302a715f5b | /sidpy/__version__.py | 2fde28695a3068688f07069ca37f51f4ac4cf919 | [
"MIT"
] | permissive | ziatdinovmax/sidpy | b700535014b78eeddf3aff4b57f5110e220d9a2e | 299147bfc22741b5170aa00e92b34159dfc910c5 | refs/heads/master | 2022-12-19T07:28:39.509116 | 2020-10-03T00:31:20 | 2020-10-03T00:31:20 | 292,929,545 | 0 | 0 | MIT | 2020-09-04T19:23:14 | 2020-09-04T19:23:13 | null | UTF-8 | Python | false | false | 47 | py | version = '0.0.3'
time = '2020-09-18 10:00:25'
| [
"14300780+ssomnath@users.noreply.github.com"
] | 14300780+ssomnath@users.noreply.github.com |
afaf8972538982cfcdf75ab95b849d5df7333bae | fa988a0e218e16bddc6c151c288f3157a613f6f0 | /files/vanillaserver.py | ed0de91299bae7c2714e3ce81a4eb91803334f30 | [] | no_license | flownandez/insta-netflix-geni | 213f7be369b2ab49cad6ddcd8ee5f13961288b31 | 1763b10cc71d46d6f6c8d7e01018ba235f222aaa | refs/heads/master | 2016-09-11T02:42:39.137027 | 2014-12-14T22:37:51 | 2014-12-14T22:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | import socket
import sys
HOST = '146.148.44.124' # Symbolic name meaning all available interfaces
PORT = 8888 # Arbitrary non-privileged port
# Datagram (udp) socket
try :
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print 'Socket created'
except socket.error, msg :
print 'Failed to create socket. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
# Bind socket to local host and port
try:
s.bind((HOST, PORT))
except socket.error , msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
#now keep talking with the client
while 1:
# receive data from client (data, addr)
d = s.recvfrom(1024)
data = d[0]
addr = d[1]
if not data:
break
reply = 'OK...' + data
s.sendto(reply , addr)
print 'Message[' + addr[0] + ':' + str(addr[1]) + '] - ' + data.strip()
s.close() | [
"chris2fernandez@gmail.com"
] | chris2fernandez@gmail.com |
64ed459566160d5fa1f62b23b2ec567cb184a6dc | d47b404bb9b245d4d0cabf8496062f3a40e21cd2 | /vehicle/migrations/0001_initial.py | b9c7b7e97314cb1e3e745bf14a4103fd725091d9 | [] | no_license | czach81/nex-tech-vehicles | 7fd8ed9e48d1e8fae5a01a244336b13ae8679725 | b88633d7ef7d16519fcf76847411110c95e35b9a | refs/heads/master | 2021-01-26T07:20:01.660572 | 2020-02-26T21:47:47 | 2020-02-26T21:47:47 | 243,362,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # Generated by Django 3.0.3 on 2020-02-25 22:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vehicle_number', models.IntegerField(max_length=10)),
('description', models.CharField(max_length=40)),
],
),
]
| [
"christopherpzach@gmail.com"
] | christopherpzach@gmail.com |
891589871bcea932e2b54b737c1ef4421d7ab8ef | 6ab0962d7caba66e0b47e9feb99773e53f894e5d | /PyBank/main.py | 796cd43eece80fe2c33fde3213699eaaf93ea8f5 | [] | no_license | Goat7/python-challenge | a78158b8ca9991b55290911c3d3938712f11c69a | 5bbdada52e19cdf22627d04b813f82632d1eeadb | refs/heads/master | 2020-03-20T02:30:28.805790 | 2018-06-15T00:48:23 | 2018-06-15T00:48:23 | 137,115,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,599 | py | #import os and write csv path
from statistics import mean
import os
input_file = input("Enter the name of the CSV file(s) you will like to process one at a time? ")
csvpath = os.path.join(input_file)
#csvpath = os.path.join("budget_data_1.csv")
#Improved Reading using CSV module
import csv
with open(csvpath, newline='') as csvfile:
# CSV reader delimiter and varaiable
csv_reader = csv.reader(csvfile, delimiter=',')
next(csv_reader,None)
csvlist = list(csv_reader)
#list creation, places to store csv "rows" (They are columns!)
dates = []
revenues = []
#run for loop for every row
for x in csvlist:
dates.append(x[0])
revenues.append(int(x[1]))
#create a list for revenue change
revchange = []
#run loop through revenues list to find the change revenues from month
revchange = [revenues[i+1] - revenues[i] for i in range(len(revenues) -1)]
#variables
max_change = max(revchange)
big_loss = min(revchange)
avg_change = mean(revchange)
total_month = len(dates)
max_month = None
loss_month = None
initial_val = None
for row in csvlist:
if initial_val is None:
initial_val = int(row[1])
continue
if int(row[1]) - initial_val == big_loss:
loss_month = row[0]
initial_val = int(row[1])
initial_val2 = None
for row in csvlist:
if initial_val2 is None:
initial_val2 = int(row[1])
continue
if abs(int(row[1]) - initial_val2) == max_change:
max_month = row[0]
initial_val2 = int(row[1])
print("Financial Analysis")
print("-----------------------------------------------------------------------------")
print(f"The financial analysis occured over {total_month} months")
print(f"The average revenue change was ${avg_change}")
print(f"The maximum revenue gain was ${max_change} and occured on {max_month}")
print(f"The biggest revenue loss was ${big_loss} and occured on {loss_month}")
txt_file = open("Company_Financial_Analysis_Budget_1.txt", "w")
txt_file.write("Company Financial Analysis Budget 1 \n")
txt_file.write("-----------------------------------------------------------------------------\n")
txt_file.write(f"Total Months: {total_month}\n")
#txt_file.write(f"Total Revenue: ${rev_total}\n")
txt_file.write(f"Average Revenue Change: ${avg_change}\n")
txt_file.write(f"Maximum Revenue Gain: ${max_change} in {max_month}\n")
txt_file.write(f"Largest Revenue Loss: ${big_loss} in {loss_month}\n")
txt_file.close() | [
"chuckmchls@gmail.com"
] | chuckmchls@gmail.com |
c6562204befd9fd8bdfb3879021fa41efa1e8045 | 2aa037df57789a0150f4eff884e358a1b81784f3 | /Lib/faster_rcnn_config.py | e410a0c823fd97e490a4e57c6793ec79cca79d2e | [] | no_license | NKCSRzChen/supernova | b497a1f3fc4e21994e617ab4af04b2af2bda9b25 | a7d364d2110d7b2905d3acb41b5a596fc56ed723 | refs/heads/master | 2020-05-05T06:56:43.580060 | 2019-04-06T09:24:30 | 2019-04-06T09:24:30 | 179,807,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,983 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 1 20:47:15 2017
@author: Kevin Liang (Modifications)
"""
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Faster R-CNN config system.
This file specifies default config options for Faster R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Examples of YAML cfg files are located in Models/cfgs.
"""
import os
import os.path as osp
import numpy as np
from distutils import spawn
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from faster_rcnn_config import cfg
cfg = __C
###############################################################################
# Network Architecture
###############################################################################
# Classes: The types of objects the algorithm is trying to find
# First class must be __background__
__C.CLASSES = ['__background__']
__C.NUM_CLASSES = 1
# RPN Anchor Box Scales: Anchor boxes will have dimensions scales*16*ratios in image space
__C.RPN_ANCHOR_SCALES = [8, 16, 32]
# RPN CNN parameters
__C.RPN_OUTPUT_CHANNELS = [512]
__C.RPN_FILTER_SIZES = [3]
# Fast R-CNN Fully Connected Layer hidden unit number
__C.FRCNN_FC_HIDDEN = [1024, 1024]
# Fast R-CNN dropout keep rate
__C.FRCNN_DROPOUT_KEEP_RATE = 0.5
###############################################################################
# Training options
###############################################################################
__C.TRAIN = edict()
# Learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Learning rate decay factor
__C.TRAIN.LEARNING_RATE_DECAY = 0.5
# Number of epochs before decaying learning rate
__C.TRAIN.LEARNING_RATE_DECAY_RATE = 10
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
#__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
#__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.0
# Use horizontally-flipped images during training?
__C.TRAIN.USE_HORZ_FLIPPED = True
# Use vertically-flipped images during training?
__C.TRAIN.USE_VERT_FLIPPED = False
# Train bounding-box refinement in Fast R-CNN
__C.TRAIN.BBOX_REFINE = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
#__C.TRAIN.BBOX_THRESH = 0.5
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
#__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
#__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
#__C.TRAIN.HAS_RPN = True # Default: False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Relative weight of RPN bounding box loss
__C.TRAIN.RPN_BBOX_LAMBDA = 10.0
# Relative weight of Fast RCNN bounding box loss
__C.TRAIN.FRCNN_BBOX_LAMBDA = 1.0
###############################################################################
# Testing options
###############################################################################
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
#__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
#__C.TEST.MAX_SIZE = 1000
# Test using bounding-box refinement in Fast R-CNN
# Note: Should not be on if TRAIN.BBOX_REFINE is not also True
__C.TEST.BBOX_REFINE = True
# Propose boxes
__C.TEST.HAS_RPN = True
# NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# NMS overlap threshold used post-refinement (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Evaluate with test ground truth (Turn off for deployment, when you don't have gt info)
__C.TEST.GROUNDTRUTH = True
# Plot ground truth boxes on output images. (Turn off if gt boxes are creating too much clutter)
__C.TEST.PLOT_GROUNDTRUTH = True
# Output image colormap (cmap argument to matplotlib.pyplot.imshow())
# Default of 'jet' is standard RGB
__C.TEST.CMAP = 'jet'
###############################################################################
# MISC
###############################################################################
# Relative location of data files
__C.DATA_DIRECTORY = '../Data/'
# Relative location of where of logging directory
__C.SAVE_DIRECTORY = '../Logs/'
# Model directory under logging directory, where 'Model[n]' folder is created
__C.MODEL_DIRECTORY = 'FRCNN/'
# TF Slim restore file for resnet50
__C.RESTORE_SLIM_FILE = '../Data/'
# How much of GPU memory to use (TensorFlow tries to take up entire GPU by default)
__C.VRAM = 0.8
# Image file format ('.png', '.jpg')
__C.IMAGE_FORMAT = '.png'
# Number of bits representing the image
__C.IMAGE_BITDEPTH = 8
# If dataset consists of natural images, subtract pixel means
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.NATURAL_IMAGE = True
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# How often to save TensorFlow checkpoint of model parameters (epochs)
__C.CHECKPOINT_RATE = 1
# How often to evaluate on the validation set (epochs)
__C.VALID_RATE = 1
# How often to show training losses (iterations)
__C.DISPLAY_RATE = 250
# Include objects labeled as "difficult" (PASCAL VOC)
__C.USE_DIFFICULT = False
###############################################################################
###############################################################################
if spawn.find_executable("nvcc"):
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
else:
__C.USE_GPU_NMS = False
def get_output_dir(imdb, weights_filename):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if weights_filename is not None:
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert d.has_key(subkey)
d = d[subkey]
subkey = key_list[-1]
assert d.has_key(subkey)
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value | [
"247218118@qq.com"
] | 247218118@qq.com |
e099d73b9060d41c51d0db01d454d211d3b761c0 | 9b36fc8d0b599b2d59772e7bac8697ed47a4ba85 | /manageFunc27.py | 2d1d8ae1106b241ccb428989c7830b0777ae8c51 | [] | no_license | WilliamHutcherson/PMSoftware | 0b5bac187633bd8c0c5e98e08f80f8e4c644a761 | 94f616e465ade1b93bb03f19848168d8aa3f5f60 | refs/heads/master | 2021-01-19T10:45:13.160758 | 2016-08-16T03:17:23 | 2016-08-16T03:17:23 | 65,783,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | import sqlite3
conn = sqlite3.connect('projectTasks.db')
c = conn.cursor()
def createTable():
c.execute("DROP TABLE IF EXISTS Tasks")
c.execute("CREATE TABLE IF NOT EXISTS Tasks (ID INTEGER PRIMARY KEY AUTOINCREMENT, Description TEXT, Owner TEXT, Status TEXT, DateComplete TEXT)")
def insertTask(Description):
c.execute("INSERT INTO Tasks (Description, Owner, Status, DateComplete) VALUES ('{}', 'None', 'None', 'None')".format(Description))
conn.commit()
def updateOwner(id, owner):
sql = "UPDATE Tasks SET Owner = '{}' WHERE ID = {}".format(owner, id)
c.execute(sql)
conn.commit()
def completeTask(date, id):
sql = 'UPDATE Tasks SET Status = "Done", DateComplete = "{}" WHERE ID = {}'.format(date, id)
c.execute(sql)
conn.commit()
readCompleted()
def deleteTask(id):
sql = "DELETE FROM Tasks WHERE ID = {}".format(id)
c.execute(sql)
conn.commit()
def getTaskID(desc):
sql = 'SELECT ID FROM Tasks WHERE Description = "{}"'.format(desc)
c.execute(sql)
i = c.fetchall()
for a in i:
return int(a)
def readNewTasks():
sql = 'SELECT ID, Description FROM Tasks WHERE Owner = "None"'
c.execute(sql)
t = c.fetchall()
newList = []
for rec in t:
newList.append(rec)
return newList
def readOwnedTasks():
sql = 'SELECT ID, Description, Owner FROM Tasks WHERE Owner != "None" AND DateComplete = "None"'
c.execute(sql)
o = c.fetchall()
ownedList = []
for rec in o:
ownedList.append(rec)
return ownedList
def readCompleted():
sql = 'SELECT Description, Owner, DateComplete FROM Tasks WHERE Status = "Done"'
d = c.execute(sql)
finishedList = []
for rec in d:
finishedList.append(rec)
return finishedList
def readAll():
sql = "SELECT * FROM Tasks"
c.execute(sql)
testing = c.fetchall()
print testing
| [
"noreply@github.com"
] | WilliamHutcherson.noreply@github.com |
aea279f30496d21667f68e1ca63aad3d2a87d4ad | 410b40634fcfbf9ccfe7277c9f4993479e07407c | /tests/test_core.py | a4bc795ead784b99dd88a89ab4a94c804621e282 | [
"Apache-2.0"
] | permissive | pbittyscu/serene-python-client | a9cb22576a9698b30ddf389271e3aca6a74a70ba | 2fd7634b26677fb30a866c5900cae6ebd848a9a8 | refs/heads/master | 2023-01-07T06:03:37.620918 | 2022-12-29T22:11:36 | 2022-12-29T22:11:36 | 226,217,078 | 0 | 0 | NOASSERTION | 2022-12-29T22:11:37 | 2019-12-06T00:58:34 | Python | UTF-8 | Python | false | false | 2,462 | py | """
Copyright (C) 2017 Data61 CSIRO
Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
Tests the core module
"""
import unittest2 as unittest
import serene
import os
from .utils import TestWithServer
class TestSerene(TestWithServer):
"""
Tests the SemanticModeller class
"""
def __init__(self, method_name="runTest"):
super().__init__(method_name)
path = os.path.join(os.path.dirname(__file__), "resources")
self._test_owl = os.path.join(path, 'owl', 'dataintegration_report_ontology.ttl')
self._business_file = os.path.join(path, 'data', 'businessInfo.csv')
self._cities_file = os.path.join(path, 'data', 'getCities.csv')
self._employee_file = os.path.join(path, 'data', 'getEmployees.csv')
self._postal_file = os.path.join(path, 'data', 'postalCodeLookup.csv')
self._addresses_file = os.path.join(path, 'data', 'EmployeeAddressesSingleName.csv')
self._map_file = os.path.join(path, 'data', 'example-map-file.csv')
self._link_file = os.path.join(path, 'data', 'example-link-file.csv')
def setUp(self):
self._clear_storage()
def tearDown(self):
self._clear_storage()
def _clear_storage(self):
"""Removes all server elements"""
for o in self._serene.octopii.items:
self._serene.octopii.remove(o)
for ssd in self._serene.ssds.items:
self._serene.ssds.remove(ssd)
for ds in self._serene.datasets.items:
self._serene.datasets.remove(ds)
for on in self._serene.ontologies.items:
self._serene.ontologies.remove(on)
def test_load(self):
"""
Tests the loading mechanism
:return:
"""
ds, on, ssds = self._serene.load(self._test_owl,
[
self._business_file,
self._cities_file,
self._employee_file,
self._postal_file,
self._addresses_file
],
self._map_file,
self._link_file)
self.assertEqual(len(ds), 5)
self.assertEqual(len(on), 1)
self.assertEqual(len(ssds), 5)
| [
"alex.collins@nicta.com.au"
] | alex.collins@nicta.com.au |
a5056688c64509504bfa9c7a6d05f17f4545a6f5 | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/graph_objs/isosurface/__init__.py | 641825ef79e0665708cf71c4b63a4d978f9ad605 | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 137,729 | py |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Surface(_BaseTraceHierarchyType):
# count
# -----
@property
def count(self):
"""
Sets the number of iso-surfaces between minimum and maximum
iso-values. By default this value is 2 meaning that only
minimum and maximum surfaces would be drawn.
The 'count' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self['count']
@count.setter
def count(self, val):
self['count'] = val
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the iso-surface. The default fill value
of the surface is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# pattern
# -------
@property
def pattern(self):
"""
Sets the surface pattern of the iso-surface 3-D sections. The
default pattern of the surface is `all` meaning that the rest
of surface elements would be shaded. The check options (either
1 or 2) could be used to draw half of the squares on the
surface. Using various combinations of capital `A`, `B`, `C`,
`D` and `E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of interest.
The 'pattern' property is a flaglist and may be specified
as a string containing:
- Any combination of ['A', 'B', 'C', 'D', 'E'] joined with '+' characters
(e.g. 'A+B')
OR exactly one of ['all', 'odd', 'even'] (e.g. 'even')
Returns
-------
Any
"""
return self['pattern']
@pattern.setter
def pattern(self, val):
self['pattern'] = val
# show
# ----
@property
def show(self):
"""
Hides/displays surfaces between minimum and maximum iso-values.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['show']
@show.setter
def show(self, val):
self['show'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
count
Sets the number of iso-surfaces between minimum and
maximum iso-values. By default this value is 2 meaning
that only minimum and maximum surfaces would be drawn.
fill
Sets the fill ratio of the iso-surface. The default
fill value of the surface is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
pattern
Sets the surface pattern of the iso-surface 3-D
sections. The default pattern of the surface is `all`
meaning that the rest of surface elements would be
shaded. The check options (either 1 or 2) could be used
to draw half of the squares on the surface. Using
various combinations of capital `A`, `B`, `C`, `D` and
`E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of
interest.
show
Hides/displays surfaces between minimum and maximum
iso-values.
"""
def __init__(
self,
arg=None,
count=None,
fill=None,
pattern=None,
show=None,
**kwargs
):
"""
Construct a new Surface object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.Surface
count
Sets the number of iso-surfaces between minimum and
maximum iso-values. By default this value is 2 meaning
that only minimum and maximum surfaces would be drawn.
fill
Sets the fill ratio of the iso-surface. The default
fill value of the surface is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
pattern
Sets the surface pattern of the iso-surface 3-D
sections. The default pattern of the surface is `all`
meaning that the rest of surface elements would be
shaded. The check options (either 1 or 2) could be used
to draw half of the squares on the surface. Using
various combinations of capital `A`, `B`, `C`, `D` and
`E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of
interest.
show
Hides/displays surfaces between minimum and maximum
iso-values.
Returns
-------
Surface
"""
super(Surface, self).__init__('surface')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.Surface
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.Surface"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface import (surface as v_surface)
# Initialize validators
# ---------------------
self._validators['count'] = v_surface.CountValidator()
self._validators['fill'] = v_surface.FillValidator()
self._validators['pattern'] = v_surface.PatternValidator()
self._validators['show'] = v_surface.ShowValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('count', None)
self['count'] = count if count is not None else _v
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('pattern', None)
self['pattern'] = pattern if pattern is not None else _v
_v = arg.pop('show', None)
self['show'] = show if show is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Maximum number of points retained on the plot from an incoming
        stream; e.g. with `maxpoints` set to 50 only the newest 50
        points stay visible.
    
        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self['maxpoints']

    @maxpoints.setter
    def maxpoints(self, value):
        self['maxpoints'] = value

    # token
    # -----
    @property
    def token(self):
        """
        Stream id number that links a data trace on a plot with a
        stream. See https://plot.ly/settings for more details.
    
        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self['token']

    @token.setter
    def token(self, value):
        self['token'] = value

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'isosurface'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://plot.ly/settings for more
            details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.isosurface.Stream
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://plot.ly/settings for more
            details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__('stream')

        # Normalize ``arg`` into a private dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Stream 
constructor must be a dict or 
an instance of plotly.graph_objs.isosurface.Stream"""
            )

        # Honor skip_invalid while the properties below are assigned.
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Register one validator per declared property.
        from plotly.validators.isosurface import (stream as v_stream)
        self._validators['maxpoints'] = v_stream.MaxpointsValidator()
        self._validators['token'] = v_stream.TokenValidator()

        # Explicit keyword arguments win over values carried in ``arg``.
        for _key, _provided in (('maxpoints', maxpoints), ('token', token)):
            _fallback = arg.pop(_key, None)
            self[_key] = _provided if _provided is not None else _fallback

        # Whatever remains (plus unrecognized kwargs) goes through the
        # generic handler on the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Restore strict validation once construction completes.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Spaceframe(_BaseTraceHierarchyType):

    # fill
    # ----
    @property
    def fill(self):
        """
        Fill ratio of the `spaceframe` elements. The default of 0.15
        means only 15% of the area of every tetra face is shaded; a
        larger `fill` yields stronger elements, and a value of 1 gives
        entirely closed areas.
    
        The 'fill' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self['fill']

    @fill.setter
    def fill(self, value):
        self['fill'] = value

    # show
    # ----
    @property
    def show(self):
        """
        Displays/hides tetrahedron shapes between the minimum and
        maximum iso-values. Often useful when caps or surfaces are
        disabled or filled with values less than 1.
    
        The 'show' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self['show']

    @show.setter
    def show(self, value):
        self['show'] = value

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'isosurface'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        fill
            Sets the fill ratio of the `spaceframe` elements. The
            default fill value is 0.15 meaning that only 15% of the
            area of every faces of tetras would be shaded. Applying
            a greater `fill` ratio would allow the creation of
            stronger elements or could be sued to have entirely
            closed areas (in case of using 1).
        show
            Displays/hides tetrahedron shapes between minimum and
            maximum iso-values. Often useful when either caps or
            surfaces are disabled or filled with values less than
            1.
        """

    def __init__(self, arg=None, fill=None, show=None, **kwargs):
        """
        Construct a new Spaceframe object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.isosurface.Spaceframe
        fill
            Sets the fill ratio of the `spaceframe` elements. The
            default fill value is 0.15 meaning that only 15% of the
            area of every faces of tetras would be shaded. Applying
            a greater `fill` ratio would allow the creation of
            stronger elements or could be sued to have entirely
            closed areas (in case of using 1).
        show
            Displays/hides tetrahedron shapes between minimum and
            maximum iso-values. Often useful when either caps or
            surfaces are disabled or filled with values less than
            1.

        Returns
        -------
        Spaceframe
        """
        super(Spaceframe, self).__init__('spaceframe')

        # Normalize ``arg`` into a private dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Spaceframe 
constructor must be a dict or 
an instance of plotly.graph_objs.isosurface.Spaceframe"""
            )

        # Honor skip_invalid while the properties below are assigned.
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Register one validator per declared property.
        from plotly.validators.isosurface import (spaceframe as v_spaceframe)
        self._validators['fill'] = v_spaceframe.FillValidator()
        self._validators['show'] = v_spaceframe.ShowValidator()

        # Explicit keyword arguments win over values carried in ``arg``.
        for _key, _provided in (('fill', fill), ('show', show)):
            _fallback = arg.pop(_key, None)
            self[_key] = _provided if _provided is not None else _fallback

        # Whatever remains (plus unrecognized kwargs) goes through the
        # generic handler on the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Restore strict validation once construction completes.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Slices(_BaseTraceHierarchyType):

    # x
    # -
    @property
    def x(self):
        """
        Slice-plane settings for the x dimension.
    
        The 'x' property is an instance of X
        that may be specified as:
          - An instance of plotly.graph_objs.isosurface.slices.X
          - A dict of string/value properties that will be passed
            to the X constructor
    
            Supported dict properties:
                
                fill
                    Sets the fill ratio of the `slices` (default 1,
                    fully shaded); a ratio below one opens gaps
                    parallel to the edges.
                locations
                    Specifies the location(s) of slices on the
                    axis. When not specified slices would be
                    created for all points of the axis x except
                    start and end.
                locationssrc
                    Sets the source reference on plot.ly for
                    locations .
                show
                    Determines whether or not slice planes about
                    the x dimension are drawn.

        Returns
        -------
        plotly.graph_objs.isosurface.slices.X
        """
        return self['x']

    @x.setter
    def x(self, value):
        self['x'] = value

    # y
    # -
    @property
    def y(self):
        """
        Slice-plane settings for the y dimension.
    
        The 'y' property is an instance of Y
        that may be specified as:
          - An instance of plotly.graph_objs.isosurface.slices.Y
          - A dict of string/value properties that will be passed
            to the Y constructor
    
            Supported dict properties:
                
                fill
                    Sets the fill ratio of the `slices` (default 1,
                    fully shaded); a ratio below one opens gaps
                    parallel to the edges.
                locations
                    Specifies the location(s) of slices on the
                    axis. When not specified slices would be
                    created for all points of the axis y except
                    start and end.
                locationssrc
                    Sets the source reference on plot.ly for
                    locations .
                show
                    Determines whether or not slice planes about
                    the y dimension are drawn.

        Returns
        -------
        plotly.graph_objs.isosurface.slices.Y
        """
        return self['y']

    @y.setter
    def y(self, value):
        self['y'] = value

    # z
    # -
    @property
    def z(self):
        """
        Slice-plane settings for the z dimension.
    
        The 'z' property is an instance of Z
        that may be specified as:
          - An instance of plotly.graph_objs.isosurface.slices.Z
          - A dict of string/value properties that will be passed
            to the Z constructor
    
            Supported dict properties:
                
                fill
                    Sets the fill ratio of the `slices` (default 1,
                    fully shaded); a ratio below one opens gaps
                    parallel to the edges.
                locations
                    Specifies the location(s) of slices on the
                    axis. When not specified slices would be
                    created for all points of the axis z except
                    start and end.
                locationssrc
                    Sets the source reference on plot.ly for
                    locations .
                show
                    Determines whether or not slice planes about
                    the z dimension are drawn.

        Returns
        -------
        plotly.graph_objs.isosurface.slices.Z
        """
        return self['z']

    @z.setter
    def z(self, value):
        self['z'] = value

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'isosurface'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        x
            plotly.graph_objs.isosurface.slices.X instance or dict
            with compatible properties
        y
            plotly.graph_objs.isosurface.slices.Y instance or dict
            with compatible properties
        z
            plotly.graph_objs.isosurface.slices.Z instance or dict
            with compatible properties
        """

    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Slices object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.isosurface.Slices
        x
            plotly.graph_objs.isosurface.slices.X instance or dict
            with compatible properties
        y
            plotly.graph_objs.isosurface.slices.Y instance or dict
            with compatible properties
        z
            plotly.graph_objs.isosurface.slices.Z instance or dict
            with compatible properties

        Returns
        -------
        Slices
        """
        super(Slices, self).__init__('slices')

        # Normalize ``arg`` into a private dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Slices 
constructor must be a dict or 
an instance of plotly.graph_objs.isosurface.Slices"""
            )

        # Honor skip_invalid while the properties below are assigned.
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Register one validator per declared property.
        from plotly.validators.isosurface import (slices as v_slices)
        self._validators['x'] = v_slices.XValidator()
        self._validators['y'] = v_slices.YValidator()
        self._validators['z'] = v_slices.ZValidator()

        # Explicit keyword arguments win over values carried in ``arg``.
        for _key, _provided in (('x', x), ('y', y), ('z', z)):
            _fallback = arg.pop(_key, None)
            self[_key] = _provided if _provided is not None else _fallback

        # Whatever remains (plus unrecognized kwargs) goes through the
        # generic handler on the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Restore strict validation once construction completes.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Lightposition(_BaseTraceHierarchyType):

    # x
    # -
    @property
    def x(self):
        """
        X coordinate of the light source for each vertex.
    
        The 'x' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]

        Returns
        -------
        int|float
        """
        return self['x']

    @x.setter
    def x(self, value):
        self['x'] = value

    # y
    # -
    @property
    def y(self):
        """
        Y coordinate of the light source for each vertex.
    
        The 'y' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]

        Returns
        -------
        int|float
        """
        return self['y']

    @y.setter
    def y(self, value):
        self['y'] = value

    # z
    # -
    @property
    def z(self):
        """
        Z coordinate of the light source for each vertex.
    
        The 'z' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]

        Returns
        -------
        int|float
        """
        return self['z']

    @z.setter
    def z(self, value):
        self['z'] = value

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'isosurface'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        x
            Numeric vector, representing the X coordinate for each
            vertex.
        y
            Numeric vector, representing the Y coordinate for each
            vertex.
        z
            Numeric vector, representing the Z coordinate for each
            vertex.
        """

    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Lightposition object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            plotly.graph_objs.isosurface.Lightposition
        x
            Numeric vector, representing the X coordinate for each
            vertex.
        y
            Numeric vector, representing the Y coordinate for each
            vertex.
        z
            Numeric vector, representing the Z coordinate for each
            vertex.

        Returns
        -------
        Lightposition
        """
        super(Lightposition, self).__init__('lightposition')

        # Normalize ``arg`` into a private dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Lightposition 
constructor must be a dict or 
an instance of plotly.graph_objs.isosurface.Lightposition"""
            )

        # Honor skip_invalid while the properties below are assigned.
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Register one validator per declared property.
        from plotly.validators.isosurface import lightposition as v_lightposition
        self._validators['x'] = v_lightposition.XValidator()
        self._validators['y'] = v_lightposition.YValidator()
        self._validators['z'] = v_lightposition.ZValidator()

        # Explicit keyword arguments win over values carried in ``arg``.
        for _key, _provided in (('x', x), ('y', y), ('z', z)):
            _fallback = arg.pop(_key, None)
            self[_key] = _provided if _provided is not None else _fallback

        # Whatever remains (plus unrecognized kwargs) goes through the
        # generic handler on the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Restore strict validation once construction completes.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Lighting(_BaseTraceHierarchyType):

    # ambient
    # -------
    @property
    def ambient(self):
        """
        Ambient light raises overall color visibility but can wash out
        the image.
    
        The 'ambient' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self['ambient']

    @ambient.setter
    def ambient(self, value):
        self['ambient'] = value

    # diffuse
    # -------
    @property
    def diffuse(self):
        """
        Extent to which incident rays are reflected in a range of
        angles.
    
        The 'diffuse' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self['diffuse']

    @diffuse.setter
    def diffuse(self, value):
        self['diffuse'] = value

    # facenormalsepsilon
    # ------------------
    @property
    def facenormalsepsilon(self):
        """
        Epsilon used in face-normal calculation to avoid math issues
        arising from degenerate geometry.
    
        The 'facenormalsepsilon' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self['facenormalsepsilon']

    @facenormalsepsilon.setter
    def facenormalsepsilon(self, value):
        self['facenormalsepsilon'] = value

    # fresnel
    # -------
    @property
    def fresnel(self):
        """
        Reflectance as a dependency of the viewing angle; e.g. paper
        is reflective when viewed from its edge (almost 90 degrees),
        causing shine.
    
        The 'fresnel' property is a number and may be specified as:
          - An int or float in the interval [0, 5]

        Returns
        -------
        int|float
        """
        return self['fresnel']

    @fresnel.setter
    def fresnel(self, value):
        self['fresnel'] = value

    # roughness
    # ---------
    @property
    def roughness(self):
        """
        Alters specular reflection; the rougher the surface, the wider
        and less contrasty the shine.
    
        The 'roughness' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self['roughness']

    @roughness.setter
    def roughness(self, value):
        self['roughness'] = value

    # specular
    # --------
    @property
    def specular(self):
        """
        Level at which incident rays are reflected in a single
        direction, causing shine.
    
        The 'specular' property is a number and may be specified as:
          - An int or float in the interval [0, 2]

        Returns
        -------
        int|float
        """
        return self['specular']

    @specular.setter
    def specular(self, value):
        self['specular'] = value

    # vertexnormalsepsilon
    # --------------------
    @property
    def vertexnormalsepsilon(self):
        """
        Epsilon used in vertex-normal calculation to avoid math issues
        arising from degenerate geometry.
    
        The 'vertexnormalsepsilon' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self['vertexnormalsepsilon']

    @vertexnormalsepsilon.setter
    def vertexnormalsepsilon(self, value):
        self['vertexnormalsepsilon'] = value

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'isosurface'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        ambient
            Ambient light increases overall color visibility but
            can wash out the image.
        diffuse
            Represents the extent that incident rays are reflected
            in a range of angles.
        facenormalsepsilon
            Epsilon for face normals calculation avoids math issues
            arising from degenerate geometry.
        fresnel
            Represents the reflectance as a dependency of the
            viewing angle; e.g. paper is reflective when viewing it
            from the edge of the paper (almost 90 degrees), causing
            shine.
        roughness
            Alters specular reflection; the rougher the surface,
            the wider and less contrasty the shine.
        specular
            Represents the level that incident rays are reflected
            in a single direction, causing shine.
        vertexnormalsepsilon
            Epsilon for vertex normals calculation avoids math
            issues arising from degenerate geometry.
        """

    def __init__(
        self,
        arg=None,
        ambient=None,
        diffuse=None,
        facenormalsepsilon=None,
        fresnel=None,
        roughness=None,
        specular=None,
        vertexnormalsepsilon=None,
        **kwargs
    ):
        """
        Construct a new Lighting object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.isosurface.Lighting
        ambient
            Ambient light increases overall color visibility but
            can wash out the image.
        diffuse
            Represents the extent that incident rays are reflected
            in a range of angles.
        facenormalsepsilon
            Epsilon for face normals calculation avoids math issues
            arising from degenerate geometry.
        fresnel
            Represents the reflectance as a dependency of the
            viewing angle; e.g. paper is reflective when viewing it
            from the edge of the paper (almost 90 degrees), causing
            shine.
        roughness
            Alters specular reflection; the rougher the surface,
            the wider and less contrasty the shine.
        specular
            Represents the level that incident rays are reflected
            in a single direction, causing shine.
        vertexnormalsepsilon
            Epsilon for vertex normals calculation avoids math
            issues arising from degenerate geometry.

        Returns
        -------
        Lighting
        """
        super(Lighting, self).__init__('lighting')

        # Normalize ``arg`` into a private dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Lighting 
constructor must be a dict or 
an instance of plotly.graph_objs.isosurface.Lighting"""
            )

        # Honor skip_invalid while the properties below are assigned.
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Register one validator per declared property.
        from plotly.validators.isosurface import (lighting as v_lighting)
        self._validators['ambient'] = v_lighting.AmbientValidator()
        self._validators['diffuse'] = v_lighting.DiffuseValidator()
        self._validators['facenormalsepsilon'] = (
            v_lighting.FacenormalsepsilonValidator()
        )
        self._validators['fresnel'] = v_lighting.FresnelValidator()
        self._validators['roughness'] = v_lighting.RoughnessValidator()
        self._validators['specular'] = v_lighting.SpecularValidator()
        self._validators['vertexnormalsepsilon'] = (
            v_lighting.VertexnormalsepsilonValidator()
        )

        # Explicit keyword arguments win over values carried in ``arg``.
        _pairs = (
            ('ambient', ambient),
            ('diffuse', diffuse),
            ('facenormalsepsilon', facenormalsepsilon),
            ('fresnel', fresnel),
            ('roughness', roughness),
            ('specular', specular),
            ('vertexnormalsepsilon', vertexnormalsepsilon),
        )
        for _key, _provided in _pairs:
            _fallback = arg.pop(_key, None)
            self[_key] = _provided if _provided is not None else _fallback

        # Whatever remains (plus unrecognized kwargs) goes through the
        # generic handler on the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Restore strict validation once construction completes.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['align']
@align.setter
def align(self, val):
self['align'] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['alignsrc']
@alignsrc.setter
def alignsrc(self, val):
self['alignsrc'] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bgcolorsrc']
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self['bgcolorsrc'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bordercolorsrc']
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self['bordercolorsrc'] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.isosurface.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.isosurface.hoverlabel.Font
"""
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self['namelength']
@namelength.setter
def namelength(self, val):
self['namelength'] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['namelengthsrc']
@namelengthsrc.setter
def namelengthsrc(self, val):
self['namelengthsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.Hoverlabel
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__('hoverlabel')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface import (hoverlabel as v_hoverlabel)
# Initialize validators
# ---------------------
self._validators['align'] = v_hoverlabel.AlignValidator()
self._validators['alignsrc'] = v_hoverlabel.AlignsrcValidator()
self._validators['bgcolor'] = v_hoverlabel.BgcolorValidator()
self._validators['bgcolorsrc'] = v_hoverlabel.BgcolorsrcValidator()
self._validators['bordercolor'] = v_hoverlabel.BordercolorValidator()
self._validators['bordercolorsrc'
] = v_hoverlabel.BordercolorsrcValidator()
self._validators['font'] = v_hoverlabel.FontValidator()
self._validators['namelength'] = v_hoverlabel.NamelengthValidator()
self._validators['namelengthsrc'
] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('align', None)
self['align'] = align if align is not None else _v
_v = arg.pop('alignsrc', None)
self['alignsrc'] = alignsrc if alignsrc is not None else _v
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bgcolorsrc', None)
self['bgcolorsrc'] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('bordercolorsrc', None)
self['bordercolorsrc'
] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop('font', None)
self['font'] = font if font is not None else _v
_v = arg.pop('namelength', None)
self['namelength'] = namelength if namelength is not None else _v
_v = arg.pop('namelengthsrc', None)
self['namelengthsrc'
] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Contour(_BaseTraceHierarchyType):

    # color
    # -----
    @property
    def color(self):
        """
        Sets the color of the contour lines.

        The 'color' property is a color: a hex string, an rgb/rgba,
        hsl/hsla or hsv/hsva string, or any named CSS color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # show
    # ----
    @property
    def show(self):
        """
        Sets whether or not dynamic contours are shown on hover.

        The 'show' property must be a bool (True or False).

        Returns
        -------
        bool
        """
        return self["show"]

    @show.setter
    def show(self, val):
        self["show"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the width of the contour lines.

        The 'width' property is an int or float in the interval [1, 16].

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of this object's parent within the figure hierarchy.
        return "isosurface"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Runtime string surfaced in help output; content kept verbatim.
        return """\
        color
            Sets the color of the contour lines.
        show
            Sets whether or not dynamic contours are shown on hover
        width
            Sets the width of the contour lines.
        """

    def __init__(self, arg=None, color=None, show=None, width=None, **kwargs):
        """
        Construct a new Contour object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.isosurface.Contour
        color
            Color of the contour lines.
        show
            Whether dynamic contours are shown on hover.
        width
            Width of the contour lines.

        Returns
        -------
        Contour
        """
        super(Contour, self).__init__('contour')

        # Normalize `arg` into a private dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Contour
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.Contour"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import validators
        # -----------------
        from plotly.validators.isosurface import (contour as v_contour)

        # Initialize validators
        # ---------------------
        self._validators['color'] = v_contour.ColorValidator()
        self._validators['show'] = v_contour.ShowValidator()
        self._validators['width'] = v_contour.WidthValidator()

        # Populate the data dict; explicit constructor arguments take
        # precedence over values carried in `arg`.
        for _name, _value in (
            ('color', color), ('show', show), ('width', width)
        ):
            _popped = arg.pop(_name, None)
            self[_name] = _value if _value is not None else _popped

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['borderwidth']
@borderwidth.setter
def borderwidth(self, val):
self['borderwidth'] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self['dtick']
@dtick.setter
def dtick(self, val):
self['dtick'] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self['exponentformat']
@exponentformat.setter
def exponentformat(self, val):
self['exponentformat'] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['len']
@len.setter
def len(self, val):
self['len'] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self['lenmode']
@lenmode.setter
def lenmode(self, val):
self['lenmode'] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self['nticks']
@nticks.setter
def nticks(self, val):
self['nticks'] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['outlinecolor']
@outlinecolor.setter
def outlinecolor(self, val):
self['outlinecolor'] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['outlinewidth']
@outlinewidth.setter
def outlinewidth(self, val):
self['outlinewidth'] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['separatethousands']
@separatethousands.setter
def separatethousands(self, val):
self['separatethousands'] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self['showexponent']
@showexponent.setter
def showexponent(self, val):
self['showexponent'] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showticklabels']
@showticklabels.setter
def showticklabels(self, val):
self['showticklabels'] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self['showtickprefix']
@showtickprefix.setter
def showtickprefix(self, val):
self['showtickprefix'] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self['showticksuffix']
@showticksuffix.setter
def showticksuffix(self, val):
self['showticksuffix'] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['thickness']
@thickness.setter
def thickness(self, val):
self['thickness'] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self['thicknessmode']
@thicknessmode.setter
def thicknessmode(self, val):
self['thicknessmode'] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self['tick0']
@tick0.setter
def tick0(self, val):
self['tick0'] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self['tickangle']
@tickangle.setter
def tickangle(self, val):
self['tickangle'] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['tickcolor']
@tickcolor.setter
def tickcolor(self, val):
self['tickcolor'] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of plotly.graph_objs.isosurface.colorbar.Tickfont
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.isosurface.colorbar.Tickfont
"""
return self['tickfont']
@tickfont.setter
def tickfont(self, val):
self['tickfont'] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-format/blob/master/READM
E.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one item to
d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['tickformat']
@tickformat.setter
def tickformat(self, val):
self['tickformat'] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.isosurface.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.isosurface.colorbar.Tickformatstop]
"""
return self['tickformatstops']
@tickformatstops.setter
def tickformatstops(self, val):
self['tickformatstops'] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.isosurface.col
orbar.tickformatstopdefaults), sets the default property values
to use for elements of isosurface.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of plotly.graph_objs.isosurface.colorbar.Tickformatstop
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.isosurface.colorbar.Tickformatstop
"""
return self['tickformatstopdefaults']
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self['tickformatstopdefaults'] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['ticklen']
@ticklen.setter
def ticklen(self, val):
self['ticklen'] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self['tickmode']
@tickmode.setter
def tickmode(self, val):
self['tickmode'] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['tickprefix']
@tickprefix.setter
def tickprefix(self, val):
self['tickprefix'] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self['ticks']
@ticks.setter
def ticks(self, val):
self['ticks'] = val
    # ticksuffix
    # ----------
    @property
    def ticksuffix(self):
        """
        Sets a tick label suffix.

        The 'ticksuffix' property is a string; a number is converted to
        its string form.

        Returns
        -------
        str
        """
        return self['ticksuffix']

    @ticksuffix.setter
    def ticksuffix(self, val):
        self['ticksuffix'] = val
    # ticktext
    # --------
    @property
    def ticktext(self):
        """
        Sets the text displayed at the tick positions given by
        `tickvals`; only has an effect when `tickmode` is "array".

        The 'ticktext' property is an array that may be specified as a
        tuple, list, numpy array, or pandas Series.

        Returns
        -------
        numpy.ndarray
        """
        return self['ticktext']

    @ticktext.setter
    def ticktext(self, val):
        self['ticktext'] = val
    # ticktextsrc
    # -----------
    @property
    def ticktextsrc(self):
        """
        Sets the source reference on plot.ly for ticktext.

        The 'ticktextsrc' property must be specified as a string or as
        a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self['ticktextsrc']

    @ticktextsrc.setter
    def ticktextsrc(self, val):
        self['ticktextsrc'] = val
    # tickvals
    # --------
    @property
    def tickvals(self):
        """
        Sets the values at which ticks on this axis appear; only has an
        effect when `tickmode` is "array". Used with `ticktext`.

        The 'tickvals' property is an array that may be specified as a
        tuple, list, numpy array, or pandas Series.

        Returns
        -------
        numpy.ndarray
        """
        return self['tickvals']

    @tickvals.setter
    def tickvals(self, val):
        self['tickvals'] = val
    # tickvalssrc
    # -----------
    @property
    def tickvalssrc(self):
        """
        Sets the source reference on plot.ly for tickvals.

        The 'tickvalssrc' property must be specified as a string or as
        a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self['tickvalssrc']

    @tickvalssrc.setter
    def tickvalssrc(self, val):
        self['tickvalssrc'] = val
    # tickwidth
    # ---------
    @property
    def tickwidth(self):
        """
        Sets the tick width (in px).

        The 'tickwidth' property is a number (int or float) in the
        interval [0, inf].

        Returns
        -------
        int|float
        """
        return self['tickwidth']

    @tickwidth.setter
    def tickwidth(self, val):
        self['tickwidth'] = val
    # title
    # -----
    @property
    def title(self):
        """
        This color bar's title settings.

        The 'title' property is an instance of Title that may be
        specified as a plotly.graph_objs.isosurface.colorbar.Title
        instance, or as a dict of string/value properties passed to the
        Title constructor.

        Supported dict properties: ``font`` (the title font; replaces
        the now-deprecated `titlefont` attribute), ``side`` (the
        title's location relative to the color bar; replaces the
        now-deprecated `titleside` attribute) and ``text`` (the title
        itself; replaces the deprecated practice of assigning a plain
        string to `title`).

        Returns
        -------
        plotly.graph_objs.isosurface.colorbar.Title
        """
        return self['title']

    @title.setter
    def title(self, val):
        self['title'] = val
    # titlefont
    # ---------
    @property
    def titlefont(self):
        """
        Deprecated: Please use isosurface.colorbar.title.font instead.

        Sets this color bar's title font. The 'font' property is an
        instance of Font that may be specified as a
        plotly.graph_objs.isosurface.colorbar.title.Font instance, or
        as a dict of properties passed to the Font constructor.

        Supported dict properties: ``color``; ``family`` (HTML font
        family applied by the web browser — multiple comma-separated
        families state a preference order, and the plotly image service
        supports only a fixed set including "Arial", "Courier New",
        "Open Sans", "Times New Roman", etc.); ``size``.

        Returns
        -------

        """
        return self['titlefont']

    @titlefont.setter
    def titlefont(self, val):
        self['titlefont'] = val
    # titleside
    # ---------
    @property
    def titleside(self):
        """
        Deprecated: Please use isosurface.colorbar.title.side instead.

        Determines the location of the color bar's title with respect
        to the color bar.

        The 'side' property is an enumeration; one of
        ['right', 'top', 'bottom'].

        Returns
        -------

        """
        return self['titleside']

    @titleside.setter
    def titleside(self, val):
        self['titleside'] = val
    # x
    # -
    @property
    def x(self):
        """
        Sets the x position of the color bar (in plot fraction).

        The 'x' property is a number (int or float) in the interval
        [-2, 3].

        Returns
        -------
        int|float
        """
        return self['x']

    @x.setter
    def x(self, val):
        self['x'] = val
    # xanchor
    # -------
    @property
    def xanchor(self):
        """
        Sets this color bar's horizontal position anchor, binding the
        `x` position to the "left", "center" or "right" of the color
        bar.

        The 'xanchor' property is an enumeration; one of
        ['left', 'center', 'right'].

        Returns
        -------
        Any
        """
        return self['xanchor']

    @xanchor.setter
    def xanchor(self, val):
        self['xanchor'] = val
    # xpad
    # ----
    @property
    def xpad(self):
        """
        Sets the amount of padding (in px) along the x direction.

        The 'xpad' property is a number (int or float) in the interval
        [0, inf].

        Returns
        -------
        int|float
        """
        return self['xpad']

    @xpad.setter
    def xpad(self, val):
        self['xpad'] = val
    # y
    # -
    @property
    def y(self):
        """
        Sets the y position of the color bar (in plot fraction).

        The 'y' property is a number (int or float) in the interval
        [-2, 3].

        Returns
        -------
        int|float
        """
        return self['y']

    @y.setter
    def y(self, val):
        self['y'] = val
    # yanchor
    # -------
    @property
    def yanchor(self):
        """
        Sets this color bar's vertical position anchor, binding the `y`
        position to the "top", "middle" or "bottom" of the color bar.

        The 'yanchor' property is an enumeration; one of
        ['top', 'middle', 'bottom'].

        Returns
        -------
        Any
        """
        return self['yanchor']

    @yanchor.setter
    def yanchor(self, val):
        self['yanchor'] = val
    # ypad
    # ----
    @property
    def ypad(self):
        """
        Sets the amount of padding (in px) along the y direction.

        The 'ypad' property is a number (int or float) in the interval
        [0, inf].

        Returns
        -------
        int|float
        """
        return self['ypad']

    @ypad.setter
    def ypad(self, val):
        self['ypad'] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of this object's parent within the figure hierarchy.
        return 'isosurface'
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        """Return the multi-line, plain-text description block for this
        type's settable properties. NOTE: the returned string is
        runtime data (it is what callers receive), not a docstring."""
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see: https://github.com/d3/d3-form
            at/blob/master/README.md#locale_format And for dates
            see: https://github.com/d3/d3-time-
            format/blob/master/README.md#locale_format We add one
            item to d3's date formatter: "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            plotly.graph_objs.isosurface.colorbar.Tickformatstop
            instance or dict with compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.isosur
            face.colorbar.tickformatstopdefaults), sets the default
            property values to use for elements of
            isosurface.colorbar.tickformatstops
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on plot.ly for ticktext .
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on plot.ly for tickvals .
        tickwidth
            Sets the tick width (in px).
        title
            plotly.graph_objs.isosurface.colorbar.Title instance or
            dict with compatible properties
        titlefont
            Deprecated: Please use isosurface.colorbar.title.font
            instead. Sets this color bar's title font. Note that
            the title's font used to be set by the now deprecated
            `titlefont` attribute.
        titleside
            Deprecated: Please use isosurface.colorbar.title.side
            instead. Determines the location of color bar's title
            with respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        x
            Sets the x position of the color bar (in plot
            fraction).
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar.
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction).
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar.
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
_mapped_properties = {
'titlefont': ('title', 'font'),
'titleside': ('title', 'side')
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.ColorBar
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
plotly.graph_objs.isosurface.colorbar.Tickformatstop
instance or dict with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.isosur
face.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
isosurface.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objs.isosurface.colorbar.Title instance or
dict with compatible properties
titlefont
Deprecated: Please use isosurface.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use isosurface.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__('colorbar')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.ColorBar
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.ColorBar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface import (colorbar as v_colorbar)
# Initialize validators
# ---------------------
self._validators['bgcolor'] = v_colorbar.BgcolorValidator()
self._validators['bordercolor'] = v_colorbar.BordercolorValidator()
self._validators['borderwidth'] = v_colorbar.BorderwidthValidator()
self._validators['dtick'] = v_colorbar.DtickValidator()
self._validators['exponentformat'
] = v_colorbar.ExponentformatValidator()
self._validators['len'] = v_colorbar.LenValidator()
self._validators['lenmode'] = v_colorbar.LenmodeValidator()
self._validators['nticks'] = v_colorbar.NticksValidator()
self._validators['outlinecolor'] = v_colorbar.OutlinecolorValidator()
self._validators['outlinewidth'] = v_colorbar.OutlinewidthValidator()
self._validators['separatethousands'
] = v_colorbar.SeparatethousandsValidator()
self._validators['showexponent'] = v_colorbar.ShowexponentValidator()
self._validators['showticklabels'
] = v_colorbar.ShowticklabelsValidator()
self._validators['showtickprefix'
] = v_colorbar.ShowtickprefixValidator()
self._validators['showticksuffix'
] = v_colorbar.ShowticksuffixValidator()
self._validators['thickness'] = v_colorbar.ThicknessValidator()
self._validators['thicknessmode'] = v_colorbar.ThicknessmodeValidator()
self._validators['tick0'] = v_colorbar.Tick0Validator()
self._validators['tickangle'] = v_colorbar.TickangleValidator()
self._validators['tickcolor'] = v_colorbar.TickcolorValidator()
self._validators['tickfont'] = v_colorbar.TickfontValidator()
self._validators['tickformat'] = v_colorbar.TickformatValidator()
self._validators['tickformatstops'
] = v_colorbar.TickformatstopsValidator()
self._validators['tickformatstopdefaults'
] = v_colorbar.TickformatstopValidator()
self._validators['ticklen'] = v_colorbar.TicklenValidator()
self._validators['tickmode'] = v_colorbar.TickmodeValidator()
self._validators['tickprefix'] = v_colorbar.TickprefixValidator()
self._validators['ticks'] = v_colorbar.TicksValidator()
self._validators['ticksuffix'] = v_colorbar.TicksuffixValidator()
self._validators['ticktext'] = v_colorbar.TicktextValidator()
self._validators['ticktextsrc'] = v_colorbar.TicktextsrcValidator()
self._validators['tickvals'] = v_colorbar.TickvalsValidator()
self._validators['tickvalssrc'] = v_colorbar.TickvalssrcValidator()
self._validators['tickwidth'] = v_colorbar.TickwidthValidator()
self._validators['title'] = v_colorbar.TitleValidator()
self._validators['x'] = v_colorbar.XValidator()
self._validators['xanchor'] = v_colorbar.XanchorValidator()
self._validators['xpad'] = v_colorbar.XpadValidator()
self._validators['y'] = v_colorbar.YValidator()
self._validators['yanchor'] = v_colorbar.YanchorValidator()
self._validators['ypad'] = v_colorbar.YpadValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('borderwidth', None)
self['borderwidth'] = borderwidth if borderwidth is not None else _v
_v = arg.pop('dtick', None)
self['dtick'] = dtick if dtick is not None else _v
_v = arg.pop('exponentformat', None)
self['exponentformat'
] = exponentformat if exponentformat is not None else _v
_v = arg.pop('len', None)
self['len'] = len if len is not None else _v
_v = arg.pop('lenmode', None)
self['lenmode'] = lenmode if lenmode is not None else _v
_v = arg.pop('nticks', None)
self['nticks'] = nticks if nticks is not None else _v
_v = arg.pop('outlinecolor', None)
self['outlinecolor'] = outlinecolor if outlinecolor is not None else _v
_v = arg.pop('outlinewidth', None)
self['outlinewidth'] = outlinewidth if outlinewidth is not None else _v
_v = arg.pop('separatethousands', None)
self['separatethousands'
] = separatethousands if separatethousands is not None else _v
_v = arg.pop('showexponent', None)
self['showexponent'] = showexponent if showexponent is not None else _v
_v = arg.pop('showticklabels', None)
self['showticklabels'
] = showticklabels if showticklabels is not None else _v
_v = arg.pop('showtickprefix', None)
self['showtickprefix'
] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop('showticksuffix', None)
self['showticksuffix'
] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop('thickness', None)
self['thickness'] = thickness if thickness is not None else _v
_v = arg.pop('thicknessmode', None)
self['thicknessmode'
] = thicknessmode if thicknessmode is not None else _v
_v = arg.pop('tick0', None)
self['tick0'] = tick0 if tick0 is not None else _v
_v = arg.pop('tickangle', None)
self['tickangle'] = tickangle if tickangle is not None else _v
_v = arg.pop('tickcolor', None)
self['tickcolor'] = tickcolor if tickcolor is not None else _v
_v = arg.pop('tickfont', None)
self['tickfont'] = tickfont if tickfont is not None else _v
_v = arg.pop('tickformat', None)
self['tickformat'] = tickformat if tickformat is not None else _v
_v = arg.pop('tickformatstops', None)
self['tickformatstops'
] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop('tickformatstopdefaults', None)
self[
'tickformatstopdefaults'
] = tickformatstopdefaults if tickformatstopdefaults is not None else _v
_v = arg.pop('ticklen', None)
self['ticklen'] = ticklen if ticklen is not None else _v
_v = arg.pop('tickmode', None)
self['tickmode'] = tickmode if tickmode is not None else _v
_v = arg.pop('tickprefix', None)
self['tickprefix'] = tickprefix if tickprefix is not None else _v
_v = arg.pop('ticks', None)
self['ticks'] = ticks if ticks is not None else _v
_v = arg.pop('ticksuffix', None)
self['ticksuffix'] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop('ticktext', None)
self['ticktext'] = ticktext if ticktext is not None else _v
_v = arg.pop('ticktextsrc', None)
self['ticktextsrc'] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop('tickvals', None)
self['tickvals'] = tickvals if tickvals is not None else _v
_v = arg.pop('tickvalssrc', None)
self['tickvalssrc'] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop('tickwidth', None)
self['tickwidth'] = tickwidth if tickwidth is not None else _v
_v = arg.pop('title', None)
self['title'] = title if title is not None else _v
_v = arg.pop('titlefont', None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self['titlefont'] = _v
_v = arg.pop('titleside', None)
_v = titleside if titleside is not None else _v
if _v is not None:
self['titleside'] = _v
_v = arg.pop('x', None)
self['x'] = x if x is not None else _v
_v = arg.pop('xanchor', None)
self['xanchor'] = xanchor if xanchor is not None else _v
_v = arg.pop('xpad', None)
self['xpad'] = xpad if xpad is not None else _v
_v = arg.pop('y', None)
self['y'] = y if y is not None else _v
_v = arg.pop('yanchor', None)
self['yanchor'] = yanchor if yanchor is not None else _v
_v = arg.pop('ypad', None)
self['ypad'] = ypad if ypad is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Caps(_BaseTraceHierarchyType):
    # x
    # -
    @property
    def x(self):
        """
        The 'x' property is an instance of X that may be specified as a
        plotly.graph_objs.isosurface.caps.X instance, or as a dict of
        string/value properties passed to the X constructor.

        Supported dict properties: ``fill`` (fill ratio of the caps;
        the default of 1 shades them entirely, while values below 1
        open gaps parallel to the edges) and ``show``.

        Returns
        -------
        plotly.graph_objs.isosurface.caps.X
        """
        return self['x']

    @x.setter
    def x(self, val):
        self['x'] = val
    # y
    # -
    @property
    def y(self):
        """
        The 'y' property is an instance of Y that may be specified as a
        plotly.graph_objs.isosurface.caps.Y instance, or as a dict of
        string/value properties passed to the Y constructor.

        Supported dict properties: ``fill`` (fill ratio of the caps;
        the default of 1 shades them entirely, while values below 1
        open gaps parallel to the edges) and ``show``.

        Returns
        -------
        plotly.graph_objs.isosurface.caps.Y
        """
        return self['y']

    @y.setter
    def y(self, val):
        self['y'] = val
    # z
    # -
    @property
    def z(self):
        """
        The 'z' property is an instance of Z that may be specified as a
        plotly.graph_objs.isosurface.caps.Z instance, or as a dict of
        string/value properties passed to the Z constructor.

        Supported dict properties: ``fill`` (fill ratio of the caps;
        the default of 1 shades them entirely, while values below 1
        open gaps parallel to the edges) and ``show``.

        Returns
        -------
        plotly.graph_objs.isosurface.caps.Z
        """
        return self['z']

    @z.setter
    def z(self, val):
        self['z'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
plotly.graph_objs.isosurface.caps.X instance or dict
with compatible properties
y
plotly.graph_objs.isosurface.caps.Y instance or dict
with compatible properties
z
plotly.graph_objs.isosurface.caps.Z instance or dict
with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Caps object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.Caps
x
plotly.graph_objs.isosurface.caps.X instance or dict
with compatible properties
y
plotly.graph_objs.isosurface.caps.Y instance or dict
with compatible properties
z
plotly.graph_objs.isosurface.caps.Z instance or dict
with compatible properties
Returns
-------
Caps
"""
super(Caps, self).__init__('caps')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.Caps
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.Caps"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface import (caps as v_caps)
# Initialize validators
# ---------------------
self._validators['x'] = v_caps.XValidator()
self._validators['y'] = v_caps.YValidator()
self._validators['z'] = v_caps.ZValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('x', None)
self['x'] = x if x is not None else _v
_v = arg.pop('y', None)
self['y'] = y if y is not None else _v
_v = arg.pop('z', None)
self['z'] = z if z is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.isosurface import slices
from plotly.graph_objs.isosurface import hoverlabel
from plotly.graph_objs.isosurface import colorbar
from plotly.graph_objs.isosurface import caps
| [
"noreply@github.com"
] | Jonathan-MW.noreply@github.com |
8887cbd8ae69a61cb532730f2785e1bd435b4545 | 2d5e61e316543b1d483b5a8d9dfad6855f3628b8 | /MLCodes/Qianfeng.20171210/BI/python_01.py | 31d6ae21e68b3dc8b0ee54982ca77c21f520dbf6 | [
"CC-BY-4.0"
] | permissive | qianrenjian/MachineLearning-7 | 95faa679c5794411a99ed5402893b54ebf967e6f | 238e695c4c83af09dad5b7c2acce12ac7f55ee5f | refs/heads/master | 2021-10-07T20:35:37.860909 | 2018-12-05T07:17:59 | 2018-12-05T07:17:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
# 引用必要的库
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# 4.线图
"""
注意,这里我们只指定了y轴数据,x轴的都是matplotlib自动生成的
"""
linear_data = np.arange(1, 9)
quadratic_data = linear_data ** 2
plt.figure()
plt.plot(linear_data, '-o', quadratic_data, '-o')
# plt.show()
plt.show()
plt.plot([22, 44, 66], '--r')
plt.show()
"""
1.添加坐标轴标签及图例
2.填充两个line间的区域
"""
# 添加坐标轴标签及图例
plt.xlabel('x data')
plt.ylabel('y data')
plt.title('Line Chart Title')
plt.legend(['legend1', 'legend2', 'legend3'])
plt.show()
# 填充两个line间的区域
plt.gca().fill_between(range(len(linear_data)),
linear_data, quadratic_data,
facecolor='green',
alpha=0.25)
"""
fill_between是2条线之间的区域
x坐标是len(linear_data) , y坐标是linear_data, quadratic_data,
facecolor是2条线之间的颜色
alpha = 0.25透明度
"""
plt.show()
# 柱状图
plt.figure()
x_vals = list(range(len(linear_data)))
# x坐标是range(len(linear_data))生成的个数[0~7]
# y轴是np.arange(1, 9)的值
plt.bar(x_vals, linear_data, width=0.3)
# width = 0.5 1:连起来了
plt.show()
# plt.show()
# group bar chart
"""
同一副图中添加新的柱状图
注意,为了不覆盖第一个柱状图,需要对x轴做偏移
"""
x_vals2 = [item + 0.3 for item in x_vals]
plt.bar(x_vals2, quadratic_data, width=0.3)
plt.show()
# stack bar chart
plt.figure()
x_vals = list(range(len(linear_data)))
plt.bar(x_vals, linear_data, width=0.3)
# 横坐标是【0~7】,y轴的数据是matplotlib生成的
plt.bar(x_vals, quadratic_data, width=0.3, bottom=linear_data)
plt.show()
# 横向柱状图
plt.figure()
x_vals = list(range(len(linear_data)))
plt.barh(x_vals, linear_data, height=0.3)
# 水平方向指定宽度,横向方向的话指定高度
plt.barh(x_vals, quadratic_data, height=0.3, left=linear_data)
# 纵向叠加的话,放在左边
plt.show() | [
"shonsy1999@gmail.com"
] | shonsy1999@gmail.com |
e4542b8b5cbb4bef96ff785702f56111f3fe58f4 | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_internal/commands/help.py | 386278cf5f1461bd429c082c29df90b3b868a0cf | [] | no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | from __future__ import absolute_import
from pip._internal.basecommand import SUCCESS, Command
from pip._internal.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = "help"
usage = """
%prog <command>"""
summary = "Show help for commands."
ignore_require_venv = True
def run(self, options, args):
from pip._internal.commands import commands_dict, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(" - ".join(msg))
command = commands_dict[cmd_name]()
command.parser.print_help()
return SUCCESS
| [
"jan@multiply.ai"
] | jan@multiply.ai |
8825be2df6f21c80b55d7b6603c20ee0ec1bb009 | 4715fbe97b060345b71ac54cbf7e2e0d7ee62716 | /transmission/check_dropped_bits.py | c12eb0a3721c482f371395791aeb87bc2b1779e1 | [] | no_license | tiffong/SSLP | 00bf79353da85ca6e59c972d4925943849997800 | 1be715a36ade954b1ab5057560b3aa60ae679805 | refs/heads/master | 2020-03-27T02:30:39.054029 | 2018-09-05T00:19:11 | 2018-09-05T00:19:11 | 145,795,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | #used to format text block received from Serial transmission from RPI
#text block should have all headers included
#USAGE: python format_serial.py > edited.txt
from decimal import Decimal
import numpy as np
with open('Serial.txt', 'r') as f:
#with open('test2.txt', 'r') as f:
raw_img = f.readlines()
array = raw_img[0].split(".") #split by decimal
leading_removed = [s.lstrip("0") for s in array] #remove leading 0
leading_removed.pop() #remove new line character at end
#for serial
final_array = map(int, leading_removed) #convert to integer
#print(final_array)
#for testing
#raw_img = map(lambda s: s.strip(), raw_img)
#raw_img = map(float, raw_img)
#raw_img = map(int, raw_img)
#print(raw_img)
length = np.shape(final_array)[0]
#print(length)
sixty = np.arange(300,360)
#print(sixty)
headers = list() #contains indexes of all headers
#add these headers to a list
for header in range(300,360):
headers.append(final_array.index(header))
#print(headers)
#print(len(headers))
#put differences between consecutive headers in a list
differences = list()
for i in range(0,59):
differences.append(headers[i+1] - headers[i])
differences.append(length - headers[59])
#print(differences)
#print(len(differences))
#testing program by changing differences
# differences[0] = 231
# differences[9] = 239
# differences[10] = 240
#print(differences)
buf_size = 241
for i in range(0,60):
if differences[i] != buf_size:
print(i+300)
print(buf_size - differences[i])
| [
"tiffanyong96@yahoo.com"
] | tiffanyong96@yahoo.com |
ac620e83745a21e4d70d63b7b8d004828f651dd3 | 989611a3adf9410049ea21f685546695d424ce25 | /cygwin64/lib/python2.7/site-packages/chainer/functions/connection/dilated_convolution_2d.py | c5188ef0b1dac2e18ced07340c84e41af93e8c49 | [] | no_license | bopopescu/stdenv_cygwin | d58c42cea3a34f8e0c574539c7cbd4a6a0b2e412 | 2e31d41299bd0798cd12a3a04c77220fd9b2fc3e | refs/heads/master | 2021-09-22T13:27:54.120263 | 2018-06-01T05:30:03 | 2018-06-01T05:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,542 | py | import numpy
from six import moves
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
from chainer import variable
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cuda.cudnn
_fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
_bwd_filter_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT
_bwd_data_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class DilatedConvolution2DFunction(function.Function):
def __init__(self, stride=1, pad=0, dilate=1, cover_all=False,
requires_x_grad=True):
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.dy, self.dx = _pair(dilate)
self.cover_all = cover_all
self.requires_x_grad = requires_x_grad
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 4,
x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, numpy.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
kh, kw = W.shape[2:]
self.col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = numpy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
return numpy.rollaxis(y, 3, 1),
def forward_gpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, cuda.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
out_c, _, kh, kw = W.shape
n, c, h, w = x.shape
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph,
cover_all=self.cover_all, d=self.dy)
out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw,
cover_all=self.cover_all, d=self.dx)
y = cuda.cupy.zeros((n, out_c, out_h, out_w), dtype=x.dtype)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros((n, c, h + 2 * self.ph, w + 2 * self.pw),
dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
handle = cudnn.get_handle()
xji_desc = cudnn.create_tensor_descriptor(xji)
y_desc = cudnn.create_tensor_descriptor(y)
self.filter_desc = cudnn.create_filter_descriptor(Wji)
self.conv_desc = cudnn.create_convolution_descriptor(
(0, 0), (self.sy, self.sx), xji.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo = libcudnn.getConvolutionForwardAlgorithm(
handle, xji_desc.value, self.filter_desc.value,
self.conv_desc.value, y_desc.value, _fwd_pref,
workspace_size)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
libcudnn.convolutionForward(
handle, one.data, xji_desc.value, xji.data.ptr,
self.filter_desc.value, Wji.data.ptr,
self.conv_desc.value, algo, workspace.data.ptr,
workspace_size, one.data, y_desc.value, y.data.ptr)
if b is not None:
b = cuda.cupy.ascontiguousarray(b)
self.bias_desc = cudnn.create_tensor_descriptor(
b[None, :, None, None])
cudnn.add_tensor(
handle, one.data, self.bias_desc.value, b.data.ptr,
one.data, y_desc.value, y.data.ptr)
else:
# Implementation using im2col
self.col = conv.im2col_gpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = cuda.cupy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype,
copy=False)
# TODO(beam2d): Support unshared bias
if b is not None:
y += b
y = cuda.cupy.rollaxis(y, 3, 1)
return y,
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = numpy.tensordot(W, gy, (0, 1)).astype(x.dtype, copy=False)
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def backward_gpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
_, out_c, out_h, out_w = gy.shape
n, c, h, w = x.shape
kh, kw = W.shape[2:]
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
gW = cuda.cupy.empty_like(W)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros(
(n, c, h + 2 * self.ph, w + 2 * self.pw), dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
out_sh = out_h + (out_h - 1) * (self.sy - 1)
out_sw = out_w + (out_w - 1) * (self.sx - 1)
gy_ph = (h + dkh - out_sh - 1) / 2
gy_pw = (w + dkw - out_sw - 1) / 2
pad_gy = cuda.cupy.zeros(
(n, out_c, h + dkh - 1, w + dkw - 1), dtype=x.dtype)
pad_gy[:, :,
gy_ph:gy_ph + out_sh:self.sy,
gy_pw:gy_pw + out_sw:self.sx] = gy
gx = None
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
gyji = cuda.cupy.ascontiguousarray(
pad_gy[:, :,
j * self.dy:j * self.dy + h,
i * self.dx:i * self.dx + w])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, -1::-1, -1::-1][:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
x = cuda.cupy.ascontiguousarray(x)
gy = cuda.cupy.ascontiguousarray(gy)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
xji_desc = cudnn.create_tensor_descriptor(xji)
gy_desc = cudnn.create_tensor_descriptor(gy)
gyji_desc = cudnn.create_tensor_descriptor(gyji)
conv_desc_data = cudnn.create_convolution_descriptor(
(0, 0), (1, 1), xji.dtype)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
if self.requires_x_grad:
gx = cuda.cupy.zeros_like(x)
gWji = cuda.cupy.empty((out_c, c, 1, 1), dtype=W.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo_filter = (
libcudnn.getConvolutionBackwardFilterAlgorithm(
handle, xji_desc.value, gy_desc.value,
self.conv_desc.value,
self.filter_desc.value,
_bwd_filter_pref, workspace_size))
algo_data = (
libcudnn.getConvolutionBackwardDataAlgorithm(
handle, self.filter_desc.value,
gyji_desc.value, conv_desc_data.value,
x_desc.value, _bwd_data_pref,
workspace_size))
libcudnn.convolutionBackwardFilter_v3(
handle, one.data, xji_desc.value, xji.data.ptr,
gy_desc.value, gy.data.ptr, self.conv_desc.value,
algo_filter, workspace.data.ptr, workspace_size,
zero.data, self.filter_desc.value, gWji.data.ptr)
if self.requires_x_grad:
libcudnn.convolutionBackwardData_v3(
handle, one.data, self.filter_desc.value,
Wji.data.ptr, gyji_desc.value,
gyji.data.ptr, conv_desc_data.value,
algo_data, workspace.data.ptr, workspace_size,
one.data, x_desc.value, gx.data.ptr)
gW[:, :, j:j + 1, i:i + 1] = gWji
if b is not None:
gb = cuda.cupy.empty_like(b)
libcudnn.convolutionBackwardBias(
handle, one.data, gy_desc.value, gy.data.ptr,
zero.data, self.bias_desc.value, gb.data.ptr)
else:
gW = cuda.cupy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype,
copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = cuda.cupy.tensordot(W, gy, (0, 1)).astype(x.dtype,
copy=False)
gcol = cuda.cupy.rollaxis(gcol, 3)
gx = conv.col2im_gpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is not None:
gb = gy.sum(axis=(0, 2, 3))
if b is None:
return gx, gW
else:
return gx, gW, gb
def dilated_convolution_2d(x, W, b=None, stride=1, pad=0, dilate=1,
cover_all=False):
"""Two-dimensional dilated convolution function.
This is an implementation of two-dimensional dilated convolution
in ConvNets.
It takes three variables: the input image ``x``, the filter weight ``W``,
and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output,
respectively.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
W (~chainer.Variable): Weight variable of shape
:math:`(c_O, c_I, k_H, k_W)`.
b (~chainer.Variable): Bias variable of length :math:`c_O` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
Returns:
~chainer.Variable: Output variable.
The two-dimensional dilated convolution function is defined as follows.
Then the ``DilatedConvolution2D`` function computes correlations
between filters and patches of size :math:`(k_H, k_W)` in ``x``.
Patches here are extracted at intervals of the dilation factor.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at intervals of the dilation factor and at positions
shifted by multiples of ``stride`` from the first position ``-pad`` for
each spatial axis. The right-most (or bottom-most) patches do not run over
the padded spatial size.
Let :math:`(s_Y, s_X)` be the stride of filter application,
:math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
the dilation factor of filter application. Then, the output size
:math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
.. seealso:: :class:`DilatedConvolution2D`
"""
requires_x_grad = isinstance(x, variable.Variable) and x.requires_grad
func = DilatedConvolution2DFunction(stride, pad, dilate, cover_all,
requires_x_grad)
if b is None:
return func(x, W)
else:
return func(x, W, b)
| [
"tokutaro@gmail.com"
] | tokutaro@gmail.com |
a1428e4eefbf42eb2695f06f650ab13a06bc4d22 | ea47779be5916366cbfc8318dac657a6a276eb7d | /cs231n/vis_utils.py | 9f6f4d62827b2b14e791480201bda50db658e29b | [] | no_license | rfrancisr/cs231n_assignment1 | 1cac8444fa91e0e9589c7258af96f020e59f29d9 | fe08b01f47c77dabfe626c1633ecd714f758b39e | refs/heads/master | 2021-01-22T06:23:20.768951 | 2017-05-26T21:14:30 | 2017-05-26T21:14:30 | 92,548,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,984 | py | #from past.builtins import xrange
from math import sqrt, ceil
import numpy as np
def visualize_grid(Xs, ubound=255.0, padding=1):
"""
Reshape a 4D tensor of image data to a grid for easy visualization.
Inputs:
- Xs: Data of shape (N, H, W, C)
- ubound: Output grid will have values scaled to the range [0, ubound]
- padding: The number of blank pixels between elements of the grid
"""
(N, H, W, C) = Xs.shape
grid_size = int(ceil(sqrt(N)))
grid_height = H * grid_size + padding * (grid_size - 1)
grid_width = W * grid_size + padding * (grid_size - 1)
grid = np.zeros((grid_height, grid_width, C))
next_idx = 0
y0, y1 = 0, H
for y in xrange(grid_size):
x0, x1 = 0, W
for x in xrange(grid_size):
if next_idx < N:
img = Xs[next_idx]
low, high = np.min(img), np.max(img)
grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
# grid[y0:y1, x0:x1] = Xs[next_idx]
next_idx += 1
x0 += W + padding
x1 += W + padding
y0 += H + padding
y1 += H + padding
# grid_max = np.max(grid)
# grid_min = np.min(grid)
# grid = ubound * (grid - grid_min) / (grid_max - grid_min)
return grid
def vis_grid(Xs):
""" visualize a grid of images """
(N, H, W, C) = Xs.shape
A = int(ceil(sqrt(N)))
G = np.ones((A*H+A, A*W+A, C), Xs.dtype)
G *= np.min(Xs)
n = 0
for y in range(A):
for x in range(A):
if n < N:
G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = Xs[n,:,:,:]
n += 1
# normalize to [0,1]
maxg = G.max()
ming = G.min()
G = (G - ming)/(maxg-ming)
return G
def vis_nn(rows):
""" visualize array of arrays of images """
N = len(rows)
D = len(rows[0])
H,W,C = rows[0][0].shape
Xs = rows[0][0]
G = np.ones((N*H+N, D*W+D, C), Xs.dtype)
for y in range(N):
for x in range(D):
G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = rows[y][x]
# normalize to [0,1]
maxg = G.max()
ming = G.min()
G = (G - ming)/(maxg-ming)
return G
| [
"francis.r.ratsimbazafy@vanderbilt.edu"
] | francis.r.ratsimbazafy@vanderbilt.edu |
e569e80b4bbd1901ffe3be5dea6ab1da2fb06801 | 0b45ec591750e5b19b25c6cba402cef96fc67f5a | /ele/ele/wsgi.py | d6ace0a72516842294d0d04a7c2ccf8e01b89534 | [] | no_license | WangErGou/ele_interview | 4353b4057d346c4d38326936983808836480af2d | 14a0e94f9090074a07746720a789b843198395dc | refs/heads/master | 2021-01-10T14:33:00.318300 | 2016-02-29T14:56:50 | 2016-02-29T14:56:50 | 52,785,010 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for ele project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ele.settings")
application = get_wsgi_application()
| [
"wangyanjie_pwd@163.com"
] | wangyanjie_pwd@163.com |
3fecce31d145d2ab317f39dba9bf792930cf205f | 8223bd2598ea21d2b79e2b827436845891111dbe | /middle_store_03/zwb_book/views.py | 0d0e0c5a8e97ce8f32a40c86e7a6dcc6dd1cf638 | [] | no_license | zwb22/django_base | 00b4d6013f9d6415bfb644023f01d494f8c01e78 | c6730da675a6138497295e5a7ad4fdace044c772 | refs/heads/master | 2023-08-05T23:52:02.017367 | 2021-10-06T14:40:03 | 2021-10-06T14:40:03 | 412,833,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | from django.http import HttpResponse
from django.shortcuts import render
from zwb_book.models import BookInfo
def create_book(p_request):
p_book_info = BookInfo.objects.create(
book_name='zwb1',
pub_date='1996-09-12',
read_count=100,
)
return HttpResponse('0dw')
| [
"374457565@qq.com"
] | 374457565@qq.com |
629acf07b4351309d1d0571b0836c8efc2bb058a | 8bde6ba3d0f196666deb6e866165ebe14fc04aed | /article/admin.py | 39f9ff38743cc5350978f4ccd5e3bbaa84d749f0 | [] | no_license | sunyunzhuo/BlogDemo | 6e17d60d49cbf743441c0d9d31a316ec43b71947 | 2042a7f3048b8440addc92dc7da0baf255e5d451 | refs/heads/master | 2021-04-25T05:34:12.086170 | 2017-12-11T17:10:34 | 2017-12-11T17:10:34 | 113,886,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | from django.contrib import admin
# Register your models here.
from .models import Article, Category, Tag
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'created_time', 'modified_time', 'category', 'author']
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category)
admin.site.register(Tag)
| [
"594301777@qq.com"
] | 594301777@qq.com |
351f880928bc18b1a8b741ea485a2de9570df434 | 04161d452aaaacee847c8e2efe581baa9a06176a | /cards/views.py | ccb42c8e4944b91ae0321e57333c0844c712907e | [] | no_license | 18mward/MediaTracker | 1e90dae1d4dae839a1f6c4b7aff8e4623062c67c | fde5d12c07476984ab9d1376f62d1597d661227c | refs/heads/master | 2023-01-08T08:29:07.725545 | 2020-11-09T03:45:08 | 2020-11-09T03:45:08 | 302,162,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView
from .models import Card
from django.urls import reverse_lazy
class CardListView(LoginRequiredMixin, ListView):
model = Card
template_name = 'card_list.html'
login_url = 'login'
class MovieListView(LoginRequiredMixin, ListView):
model = Card
template_name = 'movie_list.html'
login_url = 'login'
class SeriesListView(LoginRequiredMixin, ListView):
model = Card
template_name = 'series_list.html'
login_url = 'login'
class WatchedListView(LoginRequiredMixin, ListView):
model = Card
template_name = 'watched_list.html'
login_url = 'login'
class CardDetailView(LoginRequiredMixin, DetailView):
model = Card
template_name = 'card_detial.html'
login_url = 'login'
| [
"matthew3.ward@umconnect.umt.edu"
] | matthew3.ward@umconnect.umt.edu |
ebd8ec9f0147117b70ed5717177f3771723b112d | 14f7e8be66488e3b7fb6db6ef48bb245d5850b96 | /users/admin.py | f4a665334a847213b87c41def5346b9fd0c712aa | [] | no_license | DavyBeer/PaperTradingApplication | 6e44e69f0660dee25e47cfd47f4d9b287916c510 | 1ff0c7b37b295ab85e063af797e52d04a85ba0bf | refs/heads/main | 2022-12-26T04:24:11.949295 | 2020-10-12T05:07:11 | 2020-10-12T05:07:11 | 301,445,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = ['username', 'age', 'email', 'is_staff', ]
admin.site.register(CustomUser, CustomUserAdmin) | [
"steven1126@swbell.net"
] | steven1126@swbell.net |
2e603c2a7a66a964f1225b68ee468389f736c54f | 12cb1e0b81bf9d9a82dadc959ad206b01aea3cb0 | /data_processing/data_processing_ui/plot_config_ui.py | ff73abcbf235cecb6388929c6618060092c78ec5 | [
"MIT"
] | permissive | MadSciSoCool/PCI6289UiTaskConfig | df6f17dba7ece7c1ca7d02ca8d6c6ed8d77fca13 | 39052774ae0f519f3a9a50546ff0d9cf9081adb0 | refs/heads/master | 2020-04-12T11:04:54.450208 | 2019-06-28T12:48:18 | 2019-06-28T12:48:18 | 162,449,152 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | from PyQt5.QtWidgets import QGroupBox, QGridLayout, QCheckBox, QLabel, QRadioButton
from .input_widget import IntegerInputWidget
class PlotConfigUI(QGroupBox):
def __init__(self):
super().__init__("Plot Configurations")
self.initUI()
self.organization = "single_spectrum"
def initUI(self):
layout = QGridLayout()
self.auto = QCheckBox("Auto", self)
self.auto.setChecked(True)
self.left = IntegerInputWidget(self, "X axis left", 0, "Hz", 0, 250000, 1)
self.right = IntegerInputWidget(self, "X axis right", 0, "Hz", 0, 250000, 1)
self.left.communication.is_set.connect(self.check_validity)
self.right.communication.is_set.connect(self.check_validity)
single_spectrum = QRadioButton("Single Spectrum", self)
single_spectrum.setChecked(True)
single_spectrum.clicked.connect(self.single_spectrum_clicked)
contrast_periods = QRadioButton("Contrasting Periods", self)
contrast_periods.clicked.connect(self.contrast_periods_clicked)
contrast_channels = QRadioButton("Contrasting Channels", self)
contrast_channels.clicked.connect(self.contrast_channels_clicked)
layout.addWidget(QLabel("Spectrum Display Settings", self), 0, 0)
layout.addWidget(self.auto, 1, 0)
layout.addWidget(self.left, 1, 1)
layout.addWidget(self.right, 1, 2)
layout.addWidget(QLabel("Organize the spectrums by:"), 2, 0)
layout.addWidget(single_spectrum, 3, 0)
layout.addWidget(contrast_periods, 3, 1)
layout.addWidget(contrast_channels, 3, 2)
self.setLayout(layout)
self.show()
def single_spectrum_clicked(self):
self.organization = "single_spectrum"
def contrast_periods_clicked(self):
self.organization = "contrast_periods"
def contrast_channels_clicked(self):
self.organization = "contrast_channels"
def check_validity(self):
if self.right.value <= self.left.value:
high = self.left.value
low = self.right.value
self.left.set_value(low)
self.right.set_value(high)
| [
"expanjiahe@gmail.com"
] | expanjiahe@gmail.com |
6383c420b4d765598ded8fa8b7e09a41780ee859 | 5761eca23af5ad071a9b15e2052958f2c9de60c0 | /generated-stubs/allauth/socialaccount/providers/weixin/views.pyi | ab4087168efbf7f077d1dc53cf0dcb35eb434d7a | [] | no_license | d-kimuson/drf-iframe-token-example | 3ed68aa4463531f0bc416fa66d22ee2aaf72b199 | dd4a1ce8e38de9e2bf90455e3d0842a6760ce05b | refs/heads/master | 2023-03-16T13:52:45.596818 | 2021-03-09T22:09:49 | 2021-03-09T22:09:49 | 346,156,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | pyi | from .client import WeixinOAuth2Client as WeixinOAuth2Client
from .provider import WeixinProvider as WeixinProvider
from allauth.account import app_settings as app_settings
from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter as OAuth2Adapter, OAuth2CallbackView as OAuth2CallbackView, OAuth2LoginView as OAuth2LoginView
from allauth.utils import build_absolute_uri as build_absolute_uri
from typing import Any
class WeixinOAuth2Adapter(OAuth2Adapter):
provider_id: Any = ...
access_token_url: str = ...
profile_url: str = ...
@property
def authorize_url(self): ...
def complete_login(self, request: Any, app: Any, token: Any, **kwargs: Any): ...
class WeixinOAuth2ClientMixin:
def get_client(self, request: Any, app: Any): ...
class WeixinOAuth2LoginView(WeixinOAuth2ClientMixin, OAuth2LoginView): ...
class WeixinOAuth2CallbackView(WeixinOAuth2ClientMixin, OAuth2CallbackView): ...
oauth2_login: Any
oauth2_callback: Any
| [
"d-kimuson@gmail.com"
] | d-kimuson@gmail.com |
e197dfbf8faa71a3dcc6e6c779944012ad05b4b9 | 761f217364807e1cd536cc6eaeca8fb822934141 | /tests/song_test.py | 460b91a74b7ee4cfad5ffaba373cd15f27277291 | [] | no_license | duncancryan/karaoke | 74e02cfcc9f9fa7d9a4c3af3fc879fdbc6f6395c | 3fa48a578c87dae72bad767972f211d24763bfb2 | refs/heads/master | 2022-11-21T16:26:30.885952 | 2020-07-27T09:00:59 | 2020-07-27T09:00:59 | 282,766,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | import unittest
from classes.song import Song
class TestSong(unittest.TestCase):
def setUp(self):
self.song_01 = Song("Come on Eileen", "Dexys Midnight Runners")
self.song_02 = Song("Mr Blue Sky", "ELO")
self.song_03 = Song("Bat out of Hell", "Meatloaf")
self.song_04 = Song("You're so Vain", "Carly Simon")
self.song_05 = Song("Tubthumping", "Chumbawamba")
self.song_06 = Song("Accidentally in Love", "Counting Crows")
def test_song_has_name(self):
name = self.song_01.name
self.assertEqual("Come on Eileen", name)
def test_song_has_artist(self):
artist = self.song_02.artist
self.assertEqual("ELO", artist) | [
"duncancryan1@gmail.com"
] | duncancryan1@gmail.com |
5ac9b4d7308eaba4eff0b9657389f4c3652b5b94 | ebdeaa70f6e30abab03a1589bcdd56d1339151ef | /day14Python对象3/02-添加子类属性.py | e7ac08d531ca166a266198f0171a8931da24f600 | [] | no_license | gilgameshzzz/learn | 490d8eb408d064473fdbfa3f1f854c2f163a7ef6 | d476af77a6163ef4f273087582cbecd7f2ec15e6 | refs/heads/master | 2020-03-31T11:32:42.909453 | 2018-11-22T03:34:45 | 2018-11-22T03:34:45 | 152,181,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # Filename : 02-添加子类属性.py
# Date : 2018/8/2
"""
对象属性的继承:是通过继承init方法来继承的对象属性
给当前类添加对象属性:重写init方法,如果需要保留父类的对象属性,需要使用
super()去调用父类的init方法
多态:同一个事物有多种形态,子类继承父类的方法,可以对方法进行重写,
一个方法就有多种形态(多态的表现)
类的多态:继承产生多态
"""
class Person:
def __init__(self, name='', age=2):
self.name = name
self.age = age
class Staff(Person):
# init方法的参数:保证在创建对象的时候就可以给某些属性赋值
def __init__(self, name):
super().__init__(name)
self.salary = 0
if __name__ == '__main__':
s1 = Person()
s1.__init__('wd', 12)
print(s1.name, s1.age)
# 练习
"""
声明人类,有属性,名字、年龄、性别。身高
要求创建人的对象的时候可以给名字、性别、年龄赋初值
再创建学生类继承自人类,拥有人类的所有的属性,再添加学号、
成绩、电话属性
要求创建学生对象的时候可以给名字、年龄和电话赋初值
"""
class Human:
def __init__(self, name, age=0, sex='男'):
self.name = name
self.height = 0
self.age = age
self.sex = sex
class Student(Human):
def __init__(self, name, age, tel):
super().__init__(self, name, age)
self.score = 0
self.id_num = 0
self.tel = 13
| [
"619959856@qq.com"
] | 619959856@qq.com |
99af284f65cc44601dffa48fff1221c69d0cafdd | b402040954f13bbb8df5e2cb2f5ebafb4ed75f7a | /main/views.py | d7c0bd762af2b4961bf7af064383bbd60956c106 | [] | no_license | HalimaShanta/Shopping-Website | 6c744a6f219fa5744ffd3823742c852636b960ed | 2007cea9755b8f32493849bedd6af6c7cc9e91fd | refs/heads/main | 2023-08-04T23:12:24.547411 | 2021-09-09T21:10:37 | 2021-09-09T21:10:37 | 404,867,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,230 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import JsonResponse
from django.db.models import Avg
import json
import datetime
from .models import *
from .utils import cookieCart, cartData, guestOrder
# Create your views here.
def home(request):
data = cartData(request)
cartItems = data['cartItems']
review = Store.objects.all()
review_avg = review.aggregate(avg=Avg('rate'))
review_count = review.count()
print(review_avg)
img = Home.objects.all()
context = {'img':img, 'cartItems':cartItems,'review':review,'review_avg':review_avg,
'review_count':review_count}
return render(request, 'home.html', context)
def store(request,id):
product = Home.objects.filter(id=id)
productreview = Home.objects.get(id=id)
review = Store.objects.filter(product=productreview)
review_avg = review.aggregate(avg_rate=Avg('rate'))
review_count =review.count()
avg = review_avg['avg_rate']
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
# print(review_avg)
# print(review)
context = {'product':product,'review':review,'review_avg':review_avg,'review_count':review_count,'cartItems':cartItems,
'order':order,'items':items,'avg':review_avg}
return render(request, 'store.html', context)
def review(request):
if request.method == "GET":
prod_id = request.GET.get('prod_id')
product = Home.objects.get(id=prod_id)
comment = request.GET.get('comment')
rate = request.GET.get('rate')
# avg = request.GET.get('avg_rate')
avg_rate = request.GET.get('avg_rate')
user = request.user.customer
# reviews = Store.objects.filter(product=self).aggregate(average=Avg('rate'))
# rev = Store.objects.annonate(avg_rating=Avg('rate')).order_by('-avg_rating')
Store(user=user,product=product,comment=comment,rate=rate,avg=avg_rate).save()
return redirect('store',id=prod_id)
def fashion(request):
product = Home.objects.all()
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context={'product': product,'cartItems':cartItems,'order':order,'items':items}
return render(request, 'fashion.html',context)
def beauty(request):
product = Beauty.objects.all()
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context={'product': product,'cartItems':cartItems,'order':order,'items':items}
return render(request, 'beauty.html',context)
def accesories(request):
product = Access.objects.all()
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context={'product': product,'cartItems':cartItems,'order':order,'items':items}
return render(request, 'accesories.html',context)
def cart(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
# data = cookieCart(request)
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'cart.html', context)
def checkout(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'chekout.html', context)
def updateItem(request):
data = json.loads(request.body)
productId = data['productId']
action = data['action']
print('Action:', action)
print('productId:', productId)
customer = request.user.customer
product = Home.objects.get(id=productId)
order, created = Order.objects.get_or_create(customer=customer, complete=False)
orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)
if action == 'add':
orderItem.quantity = (orderItem.quantity + 1)
elif action == 'remove':
orderItem.quantity = (orderItem.quantity - 1)
orderItem.save()
if orderItem.quantity <= 0:
orderItem.delete()
return JsonResponse('Item was added', safe=False)
def processOrder(request):
transaction_id = datetime.datetime.now().timestamp()
data = json.loads(request.body)
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
else:
customer , order = guestOrder(request, data)
total = float(data['form']['total'])
order.transaction_id = transaction_id
if total == float(order.get_cart_total):
order.complete = True
order.save()
if order.shipping == True:
ShippingAddress.objects.create(
customer=customer,
order=order,
address=data['shipping']['address'],
city=data['shipping']['city'],
state=data['shipping']['state'],
zipcode=data['shipping']['zipcode'],
)
return JsonResponse('Payment Complete', safe=False) | [
"halimashanta@gmail.com"
] | halimashanta@gmail.com |
243f97c81a0f8ae108c5b7557264ce26e6162555 | fd3836a6f08c25a421e1c4bc22cd22670336792f | /blog/models.py | 4ee0bb0395901c1cfdc1a9bdbced5c9317c60559 | [] | no_license | artsemm/artsemblog | 9d31a91517863a4a871d5f3a6d4e5ed586e49a3f | d7c0687ae5bbe7b2dfdbb0afdfa179da43abd341 | refs/heads/master | 2022-12-11T00:45:24.408802 | 2020-09-14T07:18:27 | 2020-09-14T07:18:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from django.db import models
from django.urls import reverse
from taggit.managers import TaggableManager
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=255)
category = models.CharField(max_length=255)
body = RichTextUploadingField()
created_on = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
slug = models.SlugField(null=False, unique=True)
tags = TaggableManager()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("post_single", kwargs={'slug' : self.slug})
| [
"artyomsnow@gmail.com"
] | artyomsnow@gmail.com |
399a097469c714fa7e24cd73ba07c8856384fe38 | 6d1d909fba051e615b3c18baf30622c12e727a9f | /YoutubeDown.py | f11287973b16faff5a6808e0fb9de3829871d71e | [] | no_license | Elma-dev/YoutubeDown1 | f474aab194be199194f4f6beb534e3286151c93b | 0dec4d855f2773630e31941a6efce3f78aace3b3 | refs/heads/master | 2023-07-14T10:04:17.506027 | 2021-09-02T16:08:43 | 2021-09-02T16:08:43 | 402,473,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | def finish():
print("Download Finish.")
from pytube import YouTube
print("----------Download Youtube Videos----------",end="\n")
url=input("Enter Url Video:")
url=YouTube(url)
t=url.title
print(f'---------------//{t}//---------------')
type=input("Enter Type (video/audio):")
path=input("Enter saved Path(C:\....): ")
if type=="audio":
url.streams.get_audio_only("mp4").download(output_path=path)
url.register_on_complete_callback(print("Download Finish..."))
elif type=="video":
while True:
resolution=str(input("Enter resolution:"))
video=url.streams.filter(res=resolution+"p",type="video")
if len(video)!=0:
break
video[0].download(output_path=path)
print("Wait...")
url.register_on_complete_callback(print("Download Finish..."))
else:
print("Unknow Type!!")
| [
"elmajjodi.abdeljalil@gmail.com"
] | elmajjodi.abdeljalil@gmail.com |
33cd55806b05ab6854a02b54fdf4443fb469b298 | 15a8be522dbf52eb2ffff2e711cb2e3934691e27 | /Python/for_loops_exercises.py | 5f28e3c8eb59e167b1acafc4e329950e93247dbc | [] | no_license | BrookeKelly-coder/she_codes_python_ | 3598984253013391825e2a017ffa8b57b27fd5ac | eb69754fe68345cb8859995eb960b23bc10e0389 | refs/heads/main | 2023-06-30T14:22:00.934847 | 2021-08-07T07:10:44 | 2021-08-07T07:10:44 | 383,939,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | user_input = input("Please enter a number ")
for number in range(int(user_input)):
print(f"{user_input} * {number +1} = {int(user_input) * (number +1)}")
| [
"86648602+BrookeKelly-coder@users.noreply.github.com"
] | 86648602+BrookeKelly-coder@users.noreply.github.com |
0b9a5b6229bd58649e623ae40fa5521bcb5e90a1 | 96411232e8b78423cdade23c8eec05a4246caed4 | /levelupapi/models/game.py | 85796c0ed2c84c903d3db2a769020c1e8524fc32 | [] | no_license | DakotaLambert/levelUp | 9bee61352ebd0d6da7fada6c60ad1f5dc3864dc2 | 81f7a7422081a0815c4f659f1ab98c22581a282e | refs/heads/main | 2023-07-15T21:46:30.059287 | 2021-08-25T16:36:30 | 2021-08-25T16:36:30 | 392,073,546 | 0 | 0 | null | 2021-08-10T18:03:31 | 2021-08-02T19:40:48 | Python | UTF-8 | Python | false | false | 772 | py | from django.db import models
class Game(models.Model):
"""Game Model
Fields:
models (CharField): The name of the game
game_type (ForeignKey): The type of game
description (CharField): The description of the game
number_of_players (IntegerField): The max number of players of the game
maker (CharField): The company that made the game
"""
name = models.CharField(max_length=100)
game_type = models.ForeignKey("GameType", on_delete=models.CASCADE)
description = models.CharField(max_length=150)
number_of_players = models.IntegerField()
gamer = models.ForeignKey("Gamer", on_delete=models.CASCADE)
maker = models.CharField(max_length=50)
def __str__(self):
return self.name
| [
"dakotalambertbiz@gmail.com"
] | dakotalambertbiz@gmail.com |
d22558d47d0ab75d02028fd114e05c50d3c5dad2 | cacb324edc56d582e86c2e340a7ec33af69c08d8 | /SCRAPER/database_test.py | edbbb0cece9996a228b81bd9518061caabb4f87a | [] | no_license | twj25/FYP-Code | 2a84324f32eed483b8f08b64a66624bde2719b5b | a64e429c948ca1219fa25ce61e086747b18542d0 | refs/heads/master | 2023-04-21T20:44:36.522982 | 2021-05-07T13:59:03 | 2021-05-07T13:59:03 | 365,251,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import database_helper
con = database_helper.db_connect()
cur = con.cursor()
sample_id = database_helper.create_sample(con,1,2021,'5577','http://sirius.bu.edu/data/')
# cur.execute("SELECT id, place FROM places")
# results = cur.fetchall()
# for row in results:
# print(row)
| [
"tom.johnson22@btinternet.com"
] | tom.johnson22@btinternet.com |
aed4e08003da3e498eec93eece45b13402d3b72d | cfe048d0c8a8247c6fe9b5dbfaf0b170e26fb2e9 | /tests/test_format.py | 20ec78e45f9b2248f233ff4c39c927a16eb4e76a | [
"BSD-2-Clause"
] | permissive | Deepwalker/trafaret_schema | d7049d1b462619fb526970b88cc23443caf06c25 | c902440ef44fab2285df927450f49c5ec3eccc62 | refs/heads/master | 2020-12-30T16:02:45.302759 | 2018-07-05T10:39:59 | 2018-07-05T10:39:59 | 90,956,081 | 8 | 1 | BSD-2-Clause | 2018-12-22T15:47:44 | 2017-05-11T08:29:06 | Python | UTF-8 | Python | false | false | 2,790 | py | import datetime
import unittest
import pytest
import trafaret as t
import trafaret_schema
from trafaret_schema.format import (
parse_date,
parse_time,
)
def test_parse_date():
check = parse_date()
assert check('2012-01-01') == datetime.datetime(2012, 1, 1)
with pytest.raises(t.DataError):
check('not a date')
def test_parse_time():
parsed = parse_time('11:59')
assert parsed.tm_hour == 11
assert parsed.tm_min == 59
with pytest.raises(t.DataError):
parse_time('not a date')
class TestFormats(unittest.TestCase):
def test_email(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"email": {"format": "email"}
},
})
schema({'email': 'norfolk@inductries.co'})
def test_ipv4(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"ip": {"format": "ipv4"}
},
})
schema({'ip': '127.0.0.1'})
def test_ipv6(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"ip": {"format": "ipv6"}
},
})
schema({'ip': '::1'})
def test_datetime(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"datetime": {"format": "date-time"}
},
})
schema({'datetime': '2017-09-02T00:00:00.59Z'})
schema({'datetime': '2017-09-02T00:00:00.59+02:00'})
def test_date(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"datetime": {"format": "date"}
},
})
schema({'datetime': '2017-09-02'})
def test_phone(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"phone": {"format": "phone"}
},
})
schema({'phone': '+7 927 728 67 67'})
def test_time(self):
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"time": {"format": "time"}
},
})
schema({'time': '19:59'})
def test_reg_format(self):
register = trafaret_schema.Register()
register.reg_format('any_ip', t.IP)
schema = trafaret_schema.json_schema({
"type": "object",
"properties": {
"ip_addr": {"format": "any_ip"}
},
},
context=register,
)
schema({'ip_addr': '192.168.0.1'})
schema({'ip_addr': '::1'})
| [
"krivushinme@gmail.com"
] | krivushinme@gmail.com |
e118ca4b0b8f51c659d6486050b387d039295125 | ea1cb5891aa6cc4997c0fa842a317b5bfebb9f92 | /max-temperatures.py | 2046020d2178b4c55ae36b2990c2d1bfc90f7ced | [] | no_license | srjalan/SparkCourse | 4d4da2b207713c606df373711bb5ce32554e7fec | 6845b9e5d478ad50ee77ea14ba6ae89f882238cd | refs/heads/master | 2020-06-11T04:42:00.105913 | 2019-07-18T05:47:23 | 2019-07-18T05:47:23 | 193,852,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("MaxTemperatures")
sc = SparkContext(conf = conf)
def parseLine(line):
fields = line.split(',')
stationID = fields[0]
entryType = fields[2]
temperature = float(fields[3]) * 0.1 * (9.0 / 5.0) + 32.0
return (stationID, entryType, temperature)
lines = sc.textFile("data/1800.csv")
parsedLines = lines.map(parseLine)
minTemps = parsedLines.filter(lambda x: "TMAX" in x[1])
stationTemps = minTemps.map(lambda x: (x[0], x[2]))
minTemps = stationTemps.reduceByKey(lambda x, y: max(x,y))
results = minTemps.collect();
for result in results:
print(result[0] + "\t{:.2f}F".format(result[1]))
| [
"renjie.song@gatech.edu"
] | renjie.song@gatech.edu |
2d1003eb12e4578cbb09e2a2b23226e356bffd3e | 80c8d4e84f2ea188a375ff920a4adbd9edaed3a1 | /bigdata_study/pyflink1.x/pyflink_learn/examples/4_window/sink_monitor.py | f9435ee7aaed197828b8fafad6f66d9fa6cace97 | [
"MIT"
] | permissive | Birkid/penter | 3a4b67801d366db15ca887c31f545c8cda2b0766 | 0200f40c9d01a84c758ddcb6a9c84871d6f628c0 | refs/heads/master | 2023-08-22T14:05:43.106499 | 2021-10-20T07:10:10 | 2021-10-20T07:10:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | """
读取 kafka 的用户操作数据并打印
"""
from kafka import KafkaConsumer
from reprint import output
import json
topic = 'click_rank'
bootstrap_servers = ['localhost:9092']
group_id = 'group7'
consumer = KafkaConsumer(
topic, # topic的名称
group_id=group_id, # 指定此消费者实例属于的组名,可以不指定
bootstrap_servers=bootstrap_servers, # 指定kafka服务器
auto_offset_reset='latest', # 'smallest': 'earliest', 'largest': 'latest'
)
with output(output_type="list", initial_len=22, interval=0) as output_lines:
# 初始化打印行
output_lines[0] = '=== 男 ==='
output_lines[6] = '=== 女 ==='
for msg in consumer:
# 解析结果
data = json.loads(msg.value)
start_index = 1 if data['sex'] == '男' else 7
rank = json.loads('[' + data['top10'] + ']')
# 逐行打印
for i in range(5):
index = start_index + i
if i < len(rank):
name = list(rank[i].keys())[0]
value = list(rank[i].values())[0]
output_lines[index] = f'{name:6s} {value}'
else:
output_lines[index] = ''
| [
"350840291@qq.com"
] | 350840291@qq.com |
17fa411032a598da703a155c33a3b26331da15ed | 0a5e0cb2654407e89e2709a21b3dd8aa53cd1990 | /django-extjs-v1/settings.py | 7326ab9d9ea761e53fcbf7b99f74ff289c37c515 | [] | no_license | DarioGT/misEjemplos | 58ca669a0950c51815441f0fde3476fcab653ac2 | 40fce08fa3b44e943209dad8c790b72aad1a655d | refs/heads/master | 2021-01-13T02:44:30.955228 | 2013-08-16T02:30:43 | 2013-08-16T02:30:43 | 3,567,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,975 | py | # Django settings for PROTO project.
import os, sys
from django.conf.global_settings import STATIC_URL
PPATH = os.path.realpath(os.path.dirname(__file__)).replace('\\','/')
PPATHBASE = os.path.abspath(os.path.join( PPATH, os.pardir )).replace('\\','/')
# Para encontrar las globales
sys.path.append(PPATHBASE )
# Django settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Dario Gomez', 'dariogomezt@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': PPATH + '/db/django-skeleton.db',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
LANGUAGE_CODE = 'fr-CA'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
#DGT: Formateo de numeros
USE_THOUSAND_SEPARATOR = True
NUMBER_GROUPING = 1
#DECIMAL_SEPARATOR = '.'
#THOUSAND_SEPARATOR = ','
APPEND_SLASH = False
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
DATA_PATH = os.path.join(PPATH, 'data') # a central place where to store apps datas
HOST = 'http://www.djangoproject.com' # your real host
EMAIL_HOST = 'smtp.ulaval.ca'
#FORCE_LOGIN = False # first page is always login
#LOGIN_URL = '/apps/login'
# Django settings for easyintranet project.
MEDIA_ROOT = os.path.join(PPATH, 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/'
USE_DJANGO_JQUERY = True
# Additional locations of static files
STATIC_URL = '/static/'
STATIC_ROOT = PPATH + '/staticx'
STATICFILES_DIRS = (
PPATHBASE + '/ProtoLib/static',
PPATHBASE + '/ProtoLib/globale/admin/media',
PPATH + '/static',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
SECRET_KEY = "z7jc&(scfm-c5lt-h#(m*epqis54tc)lxm=g+&5+ud$3w783dx"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages"
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# 'core.middleware.AJAXSimpleExceptionResponse.AJAXSimpleExceptionResponse',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'apps.main',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
ROOT_URLCONF = 'urls'
DEFAULT_VIEW = 'core.views.default'
| [
"dario@mint"
] | dario@mint |
ac7005419abb334ce4cd3857d287ab213ec756ed | 9960a85fe06338773488ae72a737b9ae80a8f84f | /python/tut2.py | d047cf09fbc2053d331c62d20f7e8da3bf66779a | [] | no_license | xmms2/xmms2-tutorial | 11e74a482be721e9a7a12ffc07fc1c3c4bda4fc1 | 8ff8c66d1cc2a90d583dc2d96490e626792c486a | refs/heads/master | 2020-12-24T13:52:17.106928 | 2011-09-09T17:52:46 | 2011-09-10T01:12:10 | 7,924,221 | 6 | 1 | null | 2018-12-06T00:22:02 | 2013-01-30T21:25:37 | C | UTF-8 | Python | false | false | 1,951 | py | #!/usr/bin/env python
# XMMS2 - X Music Multiplexer System
# Copyright (C) 2003-2006 XMMS2 Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# This file is a part of the XMMS2 client tutorial #2
# Here we will learn to retrieve results from a command
import xmmsclient
import os
import sys
"""
The first part of this program is
commented in tut1.py See that one for
instructions
"""
xmms = xmmsclient.XMMS("tutorial2")
try:
xmms.connect(os.getenv("XMMS_PATH"))
except IOError, detail:
print "Connection failed:", detail
sys.exit(1)
"""
Now we send a command that will return
a result. Let's find out which entry
is currently playing.
Note that this program has be run while
xmms2 is playing something, otherwise
XMMS.playback_current_id will return 0.
"""
result = xmms.playback_current_id()
"""
We are still doing sync operations, wait for the
answer and block.
"""
result.wait()
"""
Also this time we need to check for errors.
Errors can occur on all commands, but not signals
and broadcasts. We will talk about these later.
"""
if result.iserror():
print "playback current id returns error, %s" % result.get_error()
"""
Let's retrieve the value from the XMMSResult object.
You don't have to know what type of value is returned
in response to which command - simply call
XMMSResult.value()
In this case XMMS.playback_current_id will return a UINT
"""
id = result.value()
"""Print the value"""
print "Currently playing id is %d" % id
| [
"eleusis@xmms.org"
] | eleusis@xmms.org |
25a39bfe0961decc5e8a5665dfe83a66b05dbd27 | 18430833920b3193d2f26ed526ca8f6d7e3df4c8 | /src/notifications/context_processors.py | f80de60ee43e53ffde101052edf945953ac0c19e | [
"MIT"
] | permissive | providenz/phase | ed8b48ea51d4b359f8012e603b328adf13d5e535 | b0c46a5468eda6d4eae7b2b959c6210c8d1bbc60 | refs/heads/master | 2021-01-17T06:56:07.842719 | 2016-06-28T11:17:53 | 2016-06-28T11:17:53 | 47,676,508 | 0 | 0 | null | 2015-12-09T07:45:19 | 2015-12-09T07:45:18 | null | UTF-8 | Python | false | false | 991 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.conf import settings
from notifications.models import Notification
def notifications(request):
"""Fetches data required to render navigation menu.
The main menu contains the list of user categories to choose.
Here is the query to fetch them.
"""
user = getattr(request, 'user')
context = {}
if not isinstance(user, AnonymousUser):
qs = Notification.objects \
.filter(user=user) \
.order_by('-created_on')
notifications = list(qs[0:settings.DISPLAY_NOTIFICATION_COUNT])
if len(notifications) > 0:
has_new_notifications = (not notifications[0].seen)
else:
has_new_notifications = False
context.update({
'notifications': notifications,
'has_new_notifications': has_new_notifications,
})
return context
| [
"thibault@miximum.fr"
] | thibault@miximum.fr |
e270360c2e7314eb2a69a82872043984e52ce1b4 | 70ba2c6f45bf036cf8e2860003ee03ef2de7842c | /apps/registro_hora_extra/models.py | c2e70f1ef58c47a87e4baec3d3f2f58225e2e7a5 | [] | no_license | Izaiasjun1Dev/gestao_rh | b99d0ba767ad136ba596c8da388ec184e19b5aae | 29830e5d7e1eed5eec93548ee31b19a4c6d62797 | refs/heads/master | 2022-01-26T00:57:10.561760 | 2019-07-31T17:56:25 | 2019-07-31T17:56:25 | 199,683,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from django.db import models
class Registro_Hora_Extra(models.Model):
motivo = models.CharField(max_length=100)
def __str__(self):
return self.motivo | [
"solucaoprogramer@gmail.com"
] | solucaoprogramer@gmail.com |
aa868482ce1ae081171bcade437cedd55fa9e419 | 94b23a4376941549e48df743b39221a5b8355c5f | /group_agnostic_fairness/data_utils/uci_adult_input.py | f4d7ab2303073b91a8795717c32dd14eac160444 | [
"Apache-2.0"
] | permissive | TomFrederik/fact-ai | 6585a34879341b838b1a21935beda6a2a234704c | 34e1889db83e70d5ddde590ae67836c97c99f904 | refs/heads/main | 2023-03-24T23:39:20.827844 | 2021-03-21T15:06:16 | 2021-03-21T15:08:02 | 326,638,629 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,913 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Data reader for UCI adult dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow.compat.v1 as tf
from tensorflow.contrib import lookup as contrib_lookup
# Keys for auxiliary entries added to the targets dictionary: inverse
# propensity score (IPS) example weights (with/without the label folded into
# the subgroup id) and, presumably, the subgroup id itself
# (SUBGROUP_TARGET_COLUMN_NAME is not referenced in this portion of the file
# — confirm against the rest of the module).
IPS_WITH_LABEL_TARGET_COLUMN_NAME = "IPS_example_weights_with_label"
IPS_WITHOUT_LABEL_TARGET_COLUMN_NAME = "IPS_example_weights_without_label"
SUBGROUP_TARGET_COLUMN_NAME = "subgroup"
class UCIAdultInput():
"""Data reader for UCI Adult dataset."""
def __init__(self,
dataset_base_dir,
train_file=None,
test_file=None):
"""Data reader for UCI Adult dataset.
Args:
dataset_base_dir: (string) directory path.
train_file: string list of training data paths.
test_file: string list of evaluation data paths.
dataset_base_sir must contain the following files in the dir:
- train.csv: comma separated training data without header.
Column order must match the order specified in self.feature_names.
- test.csv: comma separated training data without header.
Column order must match the order specified in self.feature_names.
- mean_std.json: json dictionary of the format feature_name: [mean, std]},
containing mean and std for numerical features. For example,
"hours-per-week": [40.437455852092995, 12.347428681731843],...}.
- vocabulary.json: json dictionary of the format {feature_name:
[feature_vocabulary]}, containing vocabulary for categorical features.
For example, {sex": ["Female", "Male"],...}.
- IPS_example_weights_with_label.json: json dictionary of the format
{subgroup_id : inverse_propensity_score,...}. For example,
{"0": 2.34, ...}.
- IPS_example_weights_without_label.json: json dictionary of the format
{subgroup_id : inverse_propensity_score,...}. For example,
{"0": 2.34, ...}.
"""
# pylint: disable=long-line,line-too-long
self._dataset_base_dir = dataset_base_dir
if train_file:
self._train_file = train_file
else:
self._train_file = ["{}/train.csv".format(self._dataset_base_dir)]
if test_file:
self._test_file = test_file
else:
self._test_file = ["{}/test.csv".format(self._dataset_base_dir)]
self._mean_std_file = "{}/mean_std.json".format(self._dataset_base_dir)
self._vocabulary_file = "{}/vocabulary.json".format(self._dataset_base_dir)
self._ips_with_label_file = "{}/IPS_example_weights_with_label.json".format(
self._dataset_base_dir)
self._ips_without_label_file = "{}/IPS_example_weights_without_label.json".format(self._dataset_base_dir)
# pylint: enable=long-line,line-too-long
self.feature_names = [
"age", "workclass", "fnlwgt", "education", "education-num",
"marital-status", "occupation", "relationship", "race", "sex",
"capital-gain", "capital-loss", "hours-per-week", "native-country",
"income"]
self.RECORD_DEFAULTS = [[0.0], ["?"], [0.0], ["?"], [0.0], ["?"], ["?"], # pylint: disable=invalid-name
["?"], ["?"], ["?"], [0.0], [0.0], [0.0], ["?"],
["?"]]
# Initializing variable names specific to UCI Adult dataset input_fn
self.target_column_name = "income"
self.target_column_positive_value = ">50K"
self.sensitive_column_names = ["sex", "race"]
self.sensitive_column_values = ["Female", "Black"]
self.weight_column_name = "instance_weight"
def get_input_fn(self, mode, batch_size=128):
"""Gets input_fn for UCI census income data.
Args:
mode: The execution mode, as defined in tf.estimator.ModeKeys.
batch_size: An integer specifying batch_size.
Returns:
An input_fn.
"""
def _input_fn():
"""Input_fn for the dataset."""
if mode == tf.estimator.ModeKeys.TRAIN:
filename_queue = tf.train.string_input_producer(self._train_file)
elif mode == tf.estimator.ModeKeys.EVAL:
filename_queue = tf.train.string_input_producer(self._test_file)
# Extracts basic features and targets from filename_queue
features, targets = self.extract_features_and_targets(
filename_queue, batch_size)
# Adds subgroup information to targets. Used to plot metrics.
targets = self._add_subgroups_to_targets(features, targets)
# Adds ips_example_weights to targets
targets = self._add_ips_example_weights_to_targets(targets)
# Unused in robust_learning models. Adding it for min-diff approaches.
# Adding instance weight to features.
features[self.weight_column_name] = tf.ones_like(
targets[self.target_column_name], dtype=tf.float32)
return features, targets
return _input_fn
  def extract_features_and_targets(self, filename_queue, batch_size):
    """Extracts features and targets from filename_queue.

    Reads CSV lines from the queue (skipping the header), decodes them with
    RECORD_DEFAULTS, binarizes the protected attributes, and batches the
    feature tensors.

    Args:
      filename_queue: Filename queue produced by tf.train.string_input_producer.
      batch_size: An integer specifying batch_size.

    Returns:
      A (features, targets) tuple. `features` maps feature name to a batched
      tensor; `targets` holds the binary income label (1.0 when income equals
      the positive value ">50K") reshaped to [-1, 1].
    """
    reader = tf.TextLineReader(skip_header_lines=1)
    _, value = reader.read(filename_queue)
    feature_list = tf.decode_csv(value, record_defaults=self.RECORD_DEFAULTS)

    # Setting features dictionary.
    features = dict(zip(self.feature_names, feature_list))
    features = self._binarize_protected_features(features)
    features = tf.train.batch(features, batch_size)

    # Setting targets dictionary. The label column is removed from features.
    targets = {}
    targets[self.target_column_name] = tf.reshape(
        tf.cast(
            tf.equal(
                features.pop(self.target_column_name),
                self.target_column_positive_value), tf.float32), [-1, 1])
    return features, targets
def _binarize_protected_features(self, features):
"""Processes protected features and binarize them."""
for sensitive_column_name, sensitive_column_value in zip(
self.sensitive_column_names, self.sensitive_column_values):
features[sensitive_column_name] = tf.cast(
tf.equal(
features.pop(sensitive_column_name), sensitive_column_value),
tf.float32)
return features
def _add_subgroups_to_targets(self, features, targets):
"""Adds subgroup information to targets dictionary."""
for sensitive_column_name in self.sensitive_column_names:
targets[sensitive_column_name] = tf.reshape(
tf.identity(features[sensitive_column_name]), [-1, 1])
return targets
  def _load_json_dict_into_hashtable(self, filename):
    """Load json dictionary into a HashTable.

    The JSON object is expected to map stringified integer keys to float
    values; digit-only keys are converted back to int while loading. The
    resulting table returns -1 for keys that are missing.
    """
    # NOTE: the opened file handle deliberately shadows the `filename` arg.
    with tf.gfile.Open(filename, "r") as filename:
      # pylint: disable=g-long-lambda
      temp_dict = json.load(
          filename,
          object_hook=lambda d:
          {int(k) if k.isdigit() else k: v for k, v in d.items()})
      # pylint: enable=g-long-lambda
    keys = list(temp_dict.keys())
    values = [temp_dict[k] for k in keys]
    feature_names_to_values = contrib_lookup.HashTable(
        contrib_lookup.KeyValueTensorInitializer(
            keys, values, key_dtype=tf.int64, value_dtype=tf.float32), -1)
    return feature_names_to_values
  def _add_ips_example_weights_to_targets(self, targets):
    """Add ips_example_weights to targets. Used in ips baseline model.

    Looks up precomputed inverse-propensity-score (IPS) weights for each
    example, keyed by an integer code of its (label, race, sex) indicators.
    """
    # Add subgroup information to targets. Tuple order is (label, race, sex):
    # sensitive_column_names is ["sex", "race"], so index 1 is race, 0 is sex.
    target_subgroups = (targets[self.target_column_name],
                        targets[self.sensitive_column_names[1]],
                        targets[self.sensitive_column_names[0]])

    # Subgroup id encodes (race, sex) as 2*race + sex, i.e. values 0..3.
    targets[SUBGROUP_TARGET_COLUMN_NAME] = tf.map_fn(
        lambda x: (2 * x[1]) + (1 * x[2]), target_subgroups, dtype=tf.float32)

    # Load precomputed IPS weights into a HashTable.
    ips_with_label_table = self._load_json_dict_into_hashtable(self._ips_with_label_file)  # pylint: disable=line-too-long
    ips_without_label_table = self._load_json_dict_into_hashtable(self._ips_without_label_file)  # pylint: disable=line-too-long

    # Adding IPS example weights to targets. The with-label key additionally
    # folds in the label: 4*label + 2*race + sex (values 0..7).
    # pylint: disable=g-long-lambda
    targets[IPS_WITH_LABEL_TARGET_COLUMN_NAME] = tf.map_fn(
        lambda x: ips_with_label_table.lookup(
            tf.cast((4 * x[0]) + (2 * x[1]) + (1 * x[2]), dtype=tf.int64)),
        target_subgroups,
        dtype=tf.float32)
    targets[IPS_WITHOUT_LABEL_TARGET_COLUMN_NAME] = tf.map_fn(
        lambda x: ips_without_label_table.lookup(
            tf.cast((2 * x[1]) + (1 * x[2]), dtype=tf.int64)),
        target_subgroups,
        dtype=tf.float32)
    # pylint: enable=g-long-lambda

    return targets
  def get_feature_columns(self,
                          embedding_dimension=0,
                          include_sensitive_columns=True):
    """Return feature columns and weight_column_name for census data.

    Categorical features are encoded as categorical columns with vocabulary list
    (given by vocabulary in vocabulary_file), and saved as either a
    embedding_column or indicator_column. All numerical columns are normalized
    (given by mean and std in mean_std_file).

    Args:
      embedding_dimension: (int) dimension of the embedding column. If set to 0
        a multi-hot representation using tf.feature_column.indicator_column is
        created. If not, a representation using
        tf.feature_column.embedding_column is created. Consider using
        embedding_column if the number of buckets (unique values) are large.
      include_sensitive_columns: boolean flag. If set, sensitive attributes are
        included in feature_columns.

    Returns:
      feature_columns: list of feature_columns.
      weight_column_name: (string) name of the weight column.
      sensitive_column_names: (list) names of the sensitive columns.
      target_column_name: (string) name of the target variable column.
    """
    # Load precomputed mean and standard deviation values for features.
    with tf.gfile.Open(self._mean_std_file, "r") as mean_std_file:
      mean_std_dict = json.load(mean_std_file)

    # Load per-column vocabularies for the categorical features.
    with tf.gfile.Open(self._vocabulary_file, "r") as vocabulary_file:
      vocab_dict = json.load(vocabulary_file)

    feature_columns = []
    for i in range(0, len(self.feature_names)):
      if (self.feature_names[i] in [
          self.weight_column_name, self.target_column_name
      ]):
        # Weight and target columns are not model inputs.
        continue
      elif self.feature_names[i] in self.sensitive_column_names:
        # Sensitive attributes were already binarized to 0/1 floats upstream.
        if include_sensitive_columns:
          feature_columns.append(
              tf.feature_column.numeric_column(self.feature_names[i]))
        else:
          continue
      elif self.RECORD_DEFAULTS[i][0] == "?":
        # A "?" record default marks a string-valued (categorical) column.
        sparse_column = tf.feature_column.categorical_column_with_vocabulary_list(
            self.feature_names[i], vocab_dict[self.feature_names[i]])
        if embedding_dimension > 0:
          feature_columns.append(
              tf.feature_column.embedding_column(sparse_column,
                                                 embedding_dimension))
        else:
          feature_columns.append(
              tf.feature_column.indicator_column(sparse_column))
      else:
        # Numeric column: standardize with the precomputed mean/std. The
        # lambda binds mean/std via defaults so each column keeps its own
        # values (avoids the late-binding closure pitfall).
        mean, std = mean_std_dict[self.feature_names[i]]
        feature_columns.append(
            tf.feature_column.numeric_column(
                self.feature_names[i],
                normalizer_fn=(lambda x, m=mean, s=std: (x - m) / s)))

    return feature_columns, self.weight_column_name, self.sensitive_column_names, self.target_column_name
| [
"erik.jenner99@gmail.com"
] | erik.jenner99@gmail.com |
17305d5e5936550c6ceb2609d0076d06a975bbfc | e6da5a3210800cdfde59f3bbb7986ff3fc878598 | /src/test/isolation/specs/referential-integrity.spec | df74d4704e3b342b5da9118425b67dfd99a5f6b8 | [
"BSD-3-Clause",
"PostgreSQL",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | Tencent/TBase | b66f13583ce6cd02ee5d453e2ce5a3a61e8b8f13 | 7cf7f8afbcab7290538ad5e65893561710be3dfa | refs/heads/master | 2023-09-04T03:27:38.289238 | 2023-03-09T12:02:41 | 2023-03-09T12:18:46 | 220,177,733 | 1,433 | 283 | NOASSERTION | 2023-07-31T07:31:58 | 2019-11-07T07:34:03 | C | UTF-8 | Python | false | false | 854 | spec | # Referential Integrity test
#
# The assumption here is that the application code issuing the SELECT
# to test for the presence or absence of a related record would do the
# right thing -- this script doesn't include that logic.
#
# Any overlap between the transactions must cause a serialization failure.
setup
{
CREATE TABLE a (i int PRIMARY KEY);
}
setup
{
CREATE TABLE b (a_id int);
}
setup
{
INSERT INTO a VALUES (1);
}
teardown
{
DROP TABLE a, b;
}
session "s1"
setup { BEGIN ISOLATION LEVEL REPEATABLE READ; }
step "rx1" { SELECT i FROM a WHERE i = 1; }
step "wy1" { INSERT INTO b VALUES (1); }
step "c1" { COMMIT; }
session "s2"
setup { BEGIN ISOLATION LEVEL REPEATABLE READ; }
step "rx2" { SELECT i FROM a WHERE i = 1; }
step "ry2" { SELECT a_id FROM b WHERE a_id = 1; }
step "wx2" { DELETE FROM a WHERE i = 1; }
step "c2" { COMMIT; }
| [
"chenzaini@sina.com"
] | chenzaini@sina.com |
1fca4aed26bc77632bf5e34189a59250e7abcc60 | a5eeb9754449cbf884fc3bc44fc2f6f842e94a96 | /dialogweb/urls.py | 6d3d707eb60a1e5563c36662fefa3f72efff8351 | [] | no_license | namanana1007/dialogtask | 038af0391495d25d31518fc2ae18f29fd07fab24 | 0605c63406c8b483683669288f954a58a1444fec | refs/heads/master | 2020-05-31T06:50:48.422412 | 2019-06-04T07:42:40 | 2019-06-04T07:42:40 | 190,151,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | """HelloWorld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import view
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^xkr$', view.dia_response),
url(r'^$', view.chushihua)
]
| [
"xkr654321@163.com"
] | xkr654321@163.com |
112ccc43f55508cf6ee3643d5088e775dfb0a6d6 | 37cad5adb9c46b3ca41b5220b768606172e7649a | /areaofcircle - Copy.py | 2896890254e425c8f86f264ed477011ad4549505 | [] | no_license | nidjaj/python-basic-codes | 7c65a82c5c259a97148e97872c9d8799756bf489 | 0a9a0384f88dcc039fd14d69bc43657a304e273c | refs/heads/master | 2020-06-03T15:42:12.175219 | 2019-07-09T18:44:56 | 2019-07-09T18:44:56 | 191,634,515 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # calculate area of circle
import math


def circle_area(radius):
    """Return the area of a circle with the given radius (pi * r**2)."""
    return math.pi * radius * radius


if __name__ == "__main__":
    # The original used the hard-coded constant 3.14 and converted the input
    # string to float twice; math.pi is exact to double precision.
    r = float(input("enter radius of circle"))
    print("area of circle is", circle_area(r))
| [
"noreply@github.com"
] | nidjaj.noreply@github.com |
db5fc913c50c24d9c3bb985ff8799f82103afce3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/454/usersdata/302/106280/submittedfiles/programa.py | ca76871b6b4cba21f2e253a7a5ef79930a322905 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | # -*- coding: utf-8 -*-
def ler_consultas(total, ler=input):
    """Read `total` integer queries (consultas) and return them as a list.

    Args:
        total: number of queries to read.
        ler: prompt/read function, injectable for testing (defaults to input).
    """
    consultas = []
    for i in range(total):
        # The original appended onto the int counter itself (AttributeError)
        # and had a broken '%' format string; build a real list and format
        # the prompt correctly.
        consultas.append(int(ler('Digite a consulta %d: ' % (i + 1))))
    return consultas


if __name__ == "__main__":
    c = int(input('Digite o número de consultas:'))
    print(ler_consultas(c))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
20a5737230bac56977780a12595c131b8523268d | 9fa8c280571c099c5264960ab2e93255d20b3186 | /system/scientist/panel/control/stop_criterion/view.py | 76a6fe614d545726fcac47b3131dbcdefb304689 | [
"MIT"
] | permissive | thuchula6792/AutoOED | 8dc97191a758200dbd39cd850309b0250ac77cdb | 272d88be7ab617a58d3f241d10f4f9fd17b91cbc | refs/heads/master | 2023-07-23T16:06:13.820272 | 2021-09-08T14:22:18 | 2021-09-08T14:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,417 | py | import tkinter as tk
from system.gui.widgets.factory import create_widget
class StopCriterionView:
def __init__(self, root_view):
self.root_view = root_view
self.window = create_widget('toplevel', master=self.root_view.root_view.root, title='Stopping Criterion')
self.widget = {
'var': {},
'entry': {},
}
frame_options = create_widget('frame', master=self.window, row=0, column=0, padx=0, pady=0)
self.name_options = {'time': 'Time', 'n_iter': 'Number of iterations', 'n_sample': 'Number of samples', 'hv': 'Hypervolume value', 'hv_conv': 'Hypervolume convergence'}
def check(var, entry):
if var.get() == 1:
entry.enable()
else:
entry.disable()
frame_time = create_widget('frame', master=frame_options, row=0, column=0)
self.widget['var']['time'] = tk.IntVar()
cb_time = tk.Checkbutton(master=frame_time, variable=self.widget['var']['time'], highlightthickness=0, bd=0)
cb_time.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_time, text=self.name_options['time'] + ': stop after').grid(row=0, column=1, sticky='W')
self.widget['entry']['time'] = create_widget('entry', master=frame_time, row=0, column=2, class_type='float',
required=True, valid_check=lambda x: x > 0, error_msg='time limit must be positive', pady=0)
tk.Label(master=frame_time, text='seconds').grid(row=0, column=3, sticky='W')
cb_time.configure(command=lambda: check(self.widget['var']['time'], self.widget['entry']['time']))
frame_n_iter = create_widget('frame', master=frame_options, row=1, column=0)
self.widget['var']['n_iter'] = tk.IntVar()
cb_n_iter = tk.Checkbutton(master=frame_n_iter, variable=self.widget['var']['n_iter'], highlightthickness=0, bd=0)
cb_n_iter.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_n_iter, text=self.name_options['n_iter'] + ': stop after').grid(row=0, column=1, sticky='W')
self.widget['entry']['n_iter'] = create_widget('entry', master=frame_n_iter, row=0, column=2, class_type='int',
required=True, valid_check=lambda x: x > 0, error_msg='number of iterations must be positive', pady=0)
tk.Label(master=frame_n_iter, text='iterations').grid(row=0, column=3, sticky='W')
cb_n_iter.configure(command=lambda: check(self.widget['var']['n_iter'], self.widget['entry']['n_iter']))
frame_n_sample = create_widget('frame', master=frame_options, row=2, column=0)
self.widget['var']['n_sample'] = tk.IntVar()
cb_n_sample = tk.Checkbutton(master=frame_n_sample, variable=self.widget['var']['n_sample'], highlightthickness=0, bd=0)
cb_n_sample.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_n_sample, text=self.name_options['n_sample'] + ': stop when number of samples reaches').grid(row=0, column=1, sticky='W')
self.widget['entry']['n_sample'] = create_widget('entry', master=frame_n_sample, row=0, column=2, class_type='int',
required=True, valid_check=lambda x: x > 0, error_msg='number of samples must be positive', pady=0)
cb_n_sample.configure(command=lambda: check(self.widget['var']['n_sample'], self.widget['entry']['n_sample']))
frame_hv = create_widget('frame', master=frame_options, row=3, column=0)
self.widget['var']['hv'] = tk.IntVar()
cb_hv = tk.Checkbutton(master=frame_hv, variable=self.widget['var']['hv'], highlightthickness=0, bd=0)
cb_hv.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_hv, text=self.name_options['hv'] + ': stop when hypervolume reaches').grid(row=0, column=1, sticky='W')
self.widget['entry']['hv'] = create_widget('entry', master=frame_hv, row=0, column=2, class_type='float',
required=True, valid_check=lambda x: x > 0, error_msg='hypervolume value must be positive', pady=0)
cb_hv.configure(command=lambda: check(self.widget['var']['hv'], self.widget['entry']['hv']))
frame_hv_conv = create_widget('frame', master=frame_options, row=4, column=0)
self.widget['var']['hv_conv'] = tk.IntVar()
cb_hv_conv = tk.Checkbutton(master=frame_hv_conv, variable=self.widget['var']['hv_conv'], highlightthickness=0, bd=0)
cb_hv_conv.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_hv_conv, text=self.name_options['hv_conv'] + ': stop when hypervolume stops to improve over past').grid(row=0, column=1, sticky='W')
self.widget['entry']['hv_conv'] = create_widget('entry', master=frame_hv_conv, row=0, column=2, class_type='int',
required=True, valid_check=lambda x: x > 0, error_msg='number of iterations must be positive', pady=0)
tk.Label(master=frame_hv_conv, text='iterations').grid(row=0, column=3, sticky='W')
cb_hv_conv.configure(command=lambda: check(self.widget['var']['hv_conv'], self.widget['entry']['hv_conv']))
for key in self.name_options:
self.widget['entry'][key].disable()
frame_action = create_widget('frame', master=self.window, row=1, column=0, pady=0, sticky=None)
self.widget['save'] = create_widget('button', master=frame_action, row=0, column=0, text='Save')
self.widget['cancel'] = create_widget('button', master=frame_action, row=0, column=1, text='Cancel') | [
"yunsheng@mit.edu"
] | yunsheng@mit.edu |
014d013189fab069557c491555f1d503d5982ddb | 9fcb65c090de07be1ef6da0e9571b98810139557 | /kronos/utils.py | e834d9046f9902edb33dc0d349e043e0ca783f13 | [
"MIT"
] | permissive | sebastiandev/kronos | 2d136a37ccdb3390eeda73fa63eee3e909073773 | 24ec4f9d45024fec487a0b51331162f6a6cf767c | refs/heads/master | 2020-08-08T02:02:44.141924 | 2019-11-28T12:51:24 | 2019-11-28T12:51:24 | 213,670,090 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py |
def serializable_dict(d):
    """Return a copy of ``d`` whose values are JSON-serializable.

    Decimals become strings, date/time objects become ISO-8601 strings,
    nested dicts are converted recursively, and lists/sets/tuples become
    lists of converted elements. Other values pass through unchanged; keys
    are never converted.
    """
    from datetime import date, datetime, time
    from decimal import Decimal

    def _coerce(value):
        if isinstance(value, Decimal):
            return str(value)
        if isinstance(value, (datetime, date, time)):
            return value.isoformat()
        if isinstance(value, dict):
            return serializable_dict(value)
        if isinstance(value, (list, set, tuple)):
            return [_coerce(item) for item in value]
        return value

    return {key: _coerce(value) for key, value in d.items()}
| [
"devsebas@gmail.com"
] | devsebas@gmail.com |
0e1c84c3ad5515132006c028d0ce7d87bdfbc4e2 | c8c77f6cc6c032daf179ea2138e4dda5473b426b | /pinpoint-email/pinpoint_send_email_message_email_api.py | c607d8762534d68c7b98210c7dd0bc37ba9ccd58 | [] | no_license | arunmastermind/AWS-examples-using-BOTO3 | b411a6c96011ab58a66952a53fa2938cb58d5135 | e8390094374c10902bab016a21caba75ea179b5a | refs/heads/master | 2020-09-30T13:34:33.657621 | 2019-12-11T12:37:44 | 2019-12-11T12:37:44 | 227,297,211 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,698 | py | import boto3
from botocore.exceptions import ClientError
# The AWS Region that you want to use to send the email.
AWS_REGION = "us-west-2"
# The "From" address. This address has to be verified in
# Amazon Pinpoint in the region you're using to send email.
SENDER = "Mary Major <sender@example.com>"
# The addresses on the "To" line. If your Amazon Pinpoint account is in
# the sandbox, these addresses also have to be verified.
TOADDRESSES = ["recipient@example.com"]
# CC and BCC addresses. If your account is in the sandbox, these
# addresses have to be verified.
CCADDRESSES = ["cc_recipient1@example.com", "cc_recipient2@example.com"]
BCCADDRESSES = ["bcc_recipient@example.com"]
# The configuration set that you want to use to send the email.
CONFIGURATION_SET = "ConfigSet"
# The subject line of the email.
SUBJECT = "Amazon Pinpoint Test (SDK for Python)"
# The body of the email for recipients whose email clients don't support HTML
# content.
BODY_TEXT = """Amazon Pinpoint Test (SDK for Python)
-------------------------------------
This email was sent with Amazon Pinpoint using the AWS SDK for Python.
For more information, see https:#aws.amazon.com/sdk-for-python/
"""
# The body of the email for recipients whose email clients can display HTML
# content.
BODY_HTML = """<html>
<head></head>
<body>
<h1>Amazon Pinpoint Test (SDK for Python)</h1>
<p>This email was sent with
<a href='https:#aws.amazon.com/pinpoint/'>Amazon Pinpoint</a> using the
<a href='https:#aws.amazon.com/sdk-for-python/'>
AWS SDK for Python</a>.</p>
</body>
</html>
"""
# The message tags that you want to apply to the email.
TAG0 = {'Name': 'key0', 'Value': 'value0'}
TAG1 = {'Name': 'key1', 'Value': 'value1'}
# The character encoding that you want to use for the subject line and message
# body of the email.
CHARSET = "UTF-8"
# Create a new Pinpoint resource and specify a region.
client = boto3.client('pinpoint-email', region_name=AWS_REGION)
# Send the email.
try:
# Create a request to send the email. The request contains all of the
# message attributes and content that were defined earlier.
response = client.send_email(
FromEmailAddress=SENDER,
# An object that contains all of the email addresses that you want to
# send the message to. You can send a message to up to 50 recipients in
# a single call to the API.
Destination={
'ToAddresses': TOADDRESSES,
'CcAddresses': CCADDRESSES,
'BccAddresses': BCCADDRESSES
},
# The body of the email message.
Content={
# Create a new Simple message. If you need to include attachments,
# you should send a RawMessage instead.
'Simple': {
'Subject': {
'Charset': CHARSET,
'Data': SUBJECT,
},
'Body': {
'Html': {
'Charset': CHARSET,
'Data': BODY_HTML
},
'Text': {
'Charset': CHARSET,
'Data': BODY_TEXT,
}
}
}
},
# The configuration set that you want to use when you send this message.
ConfigurationSetName=CONFIGURATION_SET,
EmailTags=[
TAG0,
TAG1
]
)
# Display an error if something goes wrong.
except ClientError as e:
print("The message wasn't sent. Error message: \"" + e.response['Error']['Message'] + "\"")
else:
print("Email sent!")
print("Message ID: " + response['MessageId']) | [
"arunmastermind.sci@gmail.com"
] | arunmastermind.sci@gmail.com |
89aadd7f9dd9e91da3e1da7db4d4e2395ffb8883 | 93b495b3624399c81b7edb39d1f6c5cebb2cd987 | /vyper/ast.py | 445bdf16b1ecb9ebfb855d44e98e25836353f5e9 | [
"Apache-2.0"
] | permissive | fubuloubu/vyper-redux | bf4b91d00290e5ed063ce74b44b740af6c3afae7 | a190c69083a968136ce10d1ceb68e42e41ff9de1 | refs/heads/master | 2020-12-20T16:29:44.390444 | 2020-01-25T07:53:23 | 2020-01-25T07:53:23 | 236,137,024 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,683 | py | import ast as py_ast
import inspect
import sys
from typing import (
Any,
Dict,
List,
Tuple,
Type,
Union,
)
import lark
import stringcase
class Ast(py_ast.AST):
    """Base class for all Vyper AST nodes; compatible with the stdlib `ast` API."""
    _fields = ()
class Module(Ast):
    """Root AST node: a source module's methods and contract variables."""

    _fields = ('methods',)

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        # Partition the converted children by node type; anything left over
        # indicates a construct the converter failed to handle.
        self.methods, children = split_ast(children, Method)
        self.variables, children = split_ast(children, Variable)
        assert len(children) == 0, f"Did not save everything: {children}"
class Method(Ast):
    """A contract method: decorators, name, parameters and body."""

    _fields = (
        'decorators',
        'name',
        'parameters',
        'body',
    )

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        # NOTE(review): the assert permits zero `decorators` subtrees but the
        # [0] index below would then raise IndexError — confirm the grammar
        # always emits a (possibly empty) decorators node.
        decorators_node, children = split_tree(children, "decorators")
        assert len(decorators_node) <= 1, "Should not have more than 1 set of decorators"
        self.decorators = decorators_node[0].children

        # `method_type` carries the method name and (optionally) parameters.
        method_type, children = split_tree(children, "method_type")
        assert len(method_type) == 1, "Should not have more than 1 method_type"
        method_type = convert_to_dict(method_type[0].children)
        self.name = method_type['NAME']
        self.parameters = method_type.get('parameters', None)

        body, children = split_tree(children, "body")
        assert len(body) == 1, "Should not have more than 1 body"
        self.body = body[0].children

        assert len(children) == 0, f"Did not save everything: {children}"
class Decorator(Ast):
    """A single method decorator; stores the decorator's name as `type`."""

    _fields = (
        'type',
    )

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        # Expect exactly one DECORATOR_NAME token.
        assert len(children) == 1
        assert children[0].type == 'DECORATOR_NAME'
        self.type = children[0].value
class Statement(Ast):
    """Marker base class for statement nodes."""
    pass
class PassStmt(Statement):
    """A `pass` statement; carries no data."""

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        pass  # NOTE: Check later for only statement in body
class ExprStmt(Statement):
    """An assignment statement: a target (`assignment`) and a value (`expression`)."""

    _fields = (
        'assignment',
        'expression',
    )

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        assert len(children) == 2
        self.assignment = children[0]
        self.expression = children[1]
class Var(Ast):
    """A variable reference; its type may be unknown until later analysis."""

    _fields = (
        'name',
        'type',
    )

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        properties = convert_to_dict(children)
        self.name = properties['NAME']
        self.type = properties.get('TYPE', None)  # NOTE: Do not know type yet if none
class Variable(Ast):
    """A contract storage variable declaration (optionally public)."""

    _fields = (
        'name',
        'type',
        'public',
    )

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        properties = convert_to_dict(children)
        # A `with_getter` wrapper marks the variable public (auto getter);
        # unwrap it to reach the actual declaration properties.
        if 'with_getter' in properties.keys():
            self.public = True
            properties = properties['with_getter']
        else:
            self.public = False
        self.name = properties['NAME']
        self.type = get_type(properties)
class Parameter(Variable):
    """A method parameter; structurally identical to a Variable declaration."""
    pass
class Attribute(Var):
    """An attribute access `var.property` (e.g. `self.balance`)."""

    _fields = (
        'var',
        'property',
    )

    def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
        # children[0] is the base variable node, children[1] the attribute name.
        assert len(children) == 2
        self.var = children[0]
        properties = convert_to_dict(children[1])
        self.property = properties['NAME']
def split_ast(
    nodes: List[Ast],
    ast_type: Type[Ast],
) -> Tuple[List[Ast], List[Ast]]:
    """Partition `nodes` into (instances of `ast_type`, everything else).

    The relative order of nodes is preserved within each group.
    """
    matching = []
    remaining = []
    for node in nodes:
        (matching if isinstance(node, ast_type) else remaining).append(node)
    return matching, remaining
def split_tree(
    nodes: List[lark.Tree],
    rule_type: str,
) -> Tuple[List[lark.Tree], List[lark.Tree]]:
    """Partition parse trees into (trees whose rule is `rule_type`, the rest).

    The relative order of trees is preserved within each group.
    """
    matching = []
    remaining = []
    for node in nodes:
        bucket = matching if node.data == rule_type else remaining
        bucket.append(node)
    return matching, remaining
def convert_to_dict(
    node: Union[List[Union[lark.Tree, lark.Token, Ast]], Union[lark.Tree, lark.Token, Ast]],
) -> Dict:
    """Recursively convert lark tokens/trees into plain dicts.

    Tokens become {TYPE: value}, trees become {rule: converted_children},
    and already-converted Ast nodes pass through unchanged (despite the
    Dict annotation). A list of children is merged into a single dict when
    every child yields a dict with unique keys; otherwise the list of
    per-child conversions is returned as-is.
    """
    if isinstance(node, lark.Token):
        return {node.type: node.value}
    elif isinstance(node, lark.Tree):
        return {node.data: convert_to_dict(node.children)}
    elif isinstance(node, list):
        obj = list()
        for n in node:
            attr = convert_to_dict(n)
            obj.append(attr)
        # Try to flatten [{k1: v1}, {k2: v2}, ...] into one dict.
        minified_obj = dict()
        for item in obj:
            if isinstance(item, dict) and all([k not in minified_obj.keys() for k in item.keys()]):
                minified_obj.update(item)
            else:
                return obj  # Give up and abort: duplicate keys or non-dict item
        return minified_obj
    elif isinstance(node, Ast):
        return node
    else:
        raise ValueError(f"Cannot convert {node}.")
def get_type(properties: Dict[str, Any]) -> str:
    """Extract the basic type name, unwrapping storage/abi_type/memory layers.

    Wrappers are checked in the original priority order and unwrapped
    recursively; raises ValueError when no known key is present.
    """
    for wrapper in ('storage', 'abi_type', 'memory'):
        if wrapper in properties.keys():
            return get_type(properties[wrapper])
    if 'BASIC_TYPE' in properties.keys():
        return properties['BASIC_TYPE']
    raise ValueError(f"Could not process {properties}.")
def _get_ast_classes():
    """Map class name -> class for every Ast subclass defined in this module."""
    ast_classes = dict()
    for name, member in inspect.getmembers(sys.modules[__name__], inspect.isclass):
        if issubclass(member, Ast):
            ast_classes[name] = member
    return ast_classes


AST_CLASSES = _get_ast_classes()
class AstConverter(lark.Transformer):
    """Lark Transformer that maps snake_case grammar rules to Ast classes.

    For every Ast subclass (CamelCase name) an attribute with the matching
    snake_case rule name is installed, so lark instantiates the right node
    class for each rule during transform().
    """

    def __init__(self, *args, **kwargs):
        for name, ast_class in _get_ast_classes().items():
            # NOTE: Convention is for classnames to be CamalCase,
            # but Lark rules are snake_case
            setattr(self, stringcase.snakecase(name), ast_class)
        super().__init__(*args, **kwargs)
class _CheckLarkConversionFailures(py_ast.NodeVisitor):
    """Visitor asserting that no raw lark objects survived AST conversion."""

    def visit(self, node):
        node_class = node.__class__.__name__
        # Any field (or list element) still holding a lark Tree/Token means
        # the converter missed a construct; fail loudly with its location.
        for member_name in node._fields:
            member = getattr(node, member_name)
            if isinstance(member, (lark.Tree, lark.Token)):
                raise ValueError(
                    f"Could not convert {member_name} in {node_class}: {member}"
                )
            if isinstance(member, list):
                for item in member:
                    if isinstance(item, (lark.Tree, lark.Token)):
                        raise ValueError(
                            f"Could not convert {member_name} in {node_class}: {item}"
                        )
        super().visit(node)
def ast_parse(parse_tree: lark.Tree) -> Ast:
    """Transform a lark parse tree into the Ast hierarchy and validate it.

    Raises ValueError (via the checker) if any lark node survives conversion.
    """
    converted = AstConverter().transform(parse_tree)
    _CheckLarkConversionFailures().visit(converted)
    return converted
| [
"fubuloubu@gmail.com"
] | fubuloubu@gmail.com |
76d531c0fb4fd4d379533d60ae77b3f0fcab52b5 | f8b04cc46a5d792e516e1627df78a5142879dd07 | /movie_project/app/home/forms.py | 5f67b3cd0f2a072d35471eca25ada4c23503a037 | [] | no_license | Meep-a/first | aa7611ce460498662cdb97671321345f47c615dd | 27fc94c7de28c8e1875cb6c9b31a061c6f11cd50 | refs/heads/master | 2020-07-05T07:23:20.064211 | 2019-09-28T15:24:41 | 2019-09-28T15:24:41 | 202,570,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,559 | py | # 表单
# coding:utf8
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, FileField, TextAreaField
from wtforms.validators import DataRequired, Email, Regexp, EqualTo, ValidationError
from app.models import User
class RegisterForm(FlaskForm):
    """User registration form.

    Fix: the per-field uniqueness validators below must be named
    `validate_<fieldname>` for WTForms to invoke them automatically; the
    original `validata_*` spelling left them as dead code, so duplicate
    nicknames/emails/phones were never rejected.
    """
    # Nickname
    name = StringField(
        label="昵称",
        validators=[
            DataRequired('请输入昵称!')
        ],
        description='昵称',
        render_kw={
            'id': "input_name",
            'class': "form-control input-lg",
            'placeholder': "请输入昵称!"
        }
    )
    # Email address
    email = StringField(
        label='邮箱',
        validators=[
            DataRequired('请输入邮箱!'),
            Email('邮箱格式不正确!')
        ],
        description='邮箱',
        render_kw={
            'id': "input_email",
            'class': "form-control input-lg",
            'placeholder': "请输入邮箱!"
        }
    )
    # Phone number (mainland-China mobile format)
    phone = StringField(
        label='手机',
        validators=[
            DataRequired('请输入手机!'),
            Regexp("1[3589]\d{9}", message="手机格式不正确!")
        ],
        description='手机',
        render_kw={
            'id': "input_phone",
            'class': "form-control input-lg",
            'placeholder': "请输入手机!"
        }
    )
    # Password
    pwd = PasswordField(
        label="密码",
        validators=[
            DataRequired('请输入密码!')
        ],
        description="密码",
        render_kw={
            'id': "input_password",
            'class': "form-control input-lg",
            'placeholder': "请输入密码!",
            # 'required': "required",
        }
    )
    # Password confirmation; must match `pwd`.
    repwd = PasswordField(
        label="密码",
        validators=[
            DataRequired('请输入密码!'),
            EqualTo('pwd', message="两次密码不一致!")
        ],
        description="密码",
        render_kw={
            'id': "input_repassword",
            'class': "form-control input-lg",
            'placeholder': "请输入密码!",
            # 'required': "required",
        }
    )
    submit = SubmitField(
        '注册',
        render_kw={
            "class": "btn btn-lg btn-success btn-block",
        }
    )

    # Nickname uniqueness check (WTForms hook: validate_<fieldname>).
    def validate_name(self, field):
        name = field.data
        user = User.query.filter_by(
            name=name
        ).count()
        if user == 1:
            raise ValidationError("昵称已存在!")

    # Email uniqueness check.
    def validate_email(self, field):
        email = field.data
        user = User.query.filter_by(
            email=email
        ).count()
        if user == 1:
            raise ValidationError("邮箱已存在!")

    # Phone uniqueness check.
    def validate_phone(self, field):
        phone = field.data
        user = User.query.filter_by(
            phone=phone
        ).count()
        if user == 1:
            raise ValidationError("手机已存在!")
class LoginForm(FlaskForm):
    """User login form; the account may be a nickname, phone number or email."""
    # Account (nickname / phone / email)
    name = StringField(
        label="账号/手机/邮箱",
        validators=[
            DataRequired('请输入账号!')
        ],
        description='账号',
        render_kw={
            'id': "input_name",
            'class': "form-control input-lg",
            'placeholder': "请输入账号!"
        }
    )
    # Password
    pwd = PasswordField(
        label="密码",
        validators=[
            DataRequired('请输入密码!'),
            # EqualTo('pwd', message="两次密码不一致!")
        ],
        description="密码",
        render_kw={
            'id': "input_repassword",
            'class': "form-control input-lg",
            'placeholder': "请输入密码!",
            # 'required': "required",
        }
    )
    submit = SubmitField(
        '登录',
        render_kw={
            "class": "btn btn-lg btn-success btn-block",
        }
    )
class UserdateilForm(FlaskForm):
    """User profile ("detail") editing form.

    NOTE(review): the class name is a typo for `UserDetailForm`; it is kept
    unchanged because views and templates may import it under this name.
    """
    # Nickname
    name = StringField(
        label="昵称",
        validators=[
            DataRequired('请输入昵称!')
        ],
        description='昵称',
        render_kw={
            'id': "input_name",
            'class': "form-control input-lg",
            'placeholder': "请输入昵称!"
        }
    )
    # Email address
    email = StringField(
        label='邮箱',
        validators=[
            DataRequired('请输入邮箱!'),
            Email('邮箱格式不正确!')
        ],
        description='邮箱',
        render_kw={
            'id': "input_email",
            'class': "form-control input-lg",
            'placeholder': "请输入邮箱!"
        }
    )
    # Phone number
    phone = StringField(
        label='手机',
        validators=[
            DataRequired('请输入手机!'),
            Regexp("1[3589]\d{9}", message="手机格式不正确!")
        ],
        description='手机',
        render_kw={
            'id': "input_phone",
            'class': "form-control input-lg",
            'placeholder': "请输入手机!"
        }
    )
    # Avatar image upload
    face = FileField(
        label='头像',
        validators=[
            DataRequired('请上传头像!'),
        ],
        description="头像"
    )
    # Profile bio / introduction
    info = TextAreaField(
        label="简介",
        validators=[
            DataRequired('请输入简介!')
        ],
        description="简介",
        render_kw={
            'class': "form-control",
            'rows': "10",
            'id': "input_info"
        }
    )
    submit = SubmitField(
        '保存修改',
        render_kw={
            "class": "btn btn-success",
        }
    )
class UpdatePwdForm(FlaskForm):
    """Password change form: old password plus the new password."""
    # Current password
    oldpwd = PasswordField(
        label="旧密码",
        validators=[
            DataRequired('请输入旧密码!'),
            # EqualTo('pwd', message="两次密码不一致!")
        ],
        description="旧密码",
        render_kw={
            'id': "input_oldpwd",
            'class': "form-control",
            'placeholder': "请输入旧密码!",
            # 'required': "required",
        }
    )
    # New password
    newpwd = PasswordField(
        label="新密码",
        validators=[
            DataRequired('请输入新密码!'),
            # EqualTo('pwd', message="两次密码不一致!")
        ],
        description="新密码",
        render_kw={
            'id': "input_newpwd",
            'class': "form-control",
            'placeholder': "请输入新密码!",
            # 'required': "required",
        }
    )
    submit = SubmitField(
        '修改密码',
        render_kw={
            "class": "btn btn-success",
        }
    )

    # # Old-password verification hook (disabled in the original source).
    # def validate_old_pwd(self, field):
    #     from flask import session
    #     old_pwd = field.data
    #     name = session["user"]
    #     user = User.query.filter_by(name=name).first()
    #     if not user.check_pwd(old_pwd):
    #         raise ValidationError("旧密码错误!",'err')
class CommentForm(FlaskForm):
    """Form for submitting a movie comment."""
    # Comment body
    comment = TextAreaField(
        label="提交评论",
        validators=[
            DataRequired('请输入评论!')
        ],
        description='评论',
        render_kw={
            'id': "input_content",
            'placeholder': "请输入评论!"
        }
    )
    submit = SubmitField(
        '提交评论',
        render_kw={
            "class": "btn btn-success",
        }
    )
"dengdota2@sina.com"
] | dengdota2@sina.com |
d5384eb6d5db932b918839c6357b689034129fe1 | 27467c4fa7f75c01019e667be730a41026b7cb7d | /news/apps.py | 792353b3147eb090379f2624ca2625f05ede1a64 | [] | no_license | ProgMagnus/News-site-django-prototype | 647dfa92ad1d606f766d2a22174d0a924d88245e | 1ef860397d7399290cb75a0cc10476f995b5b48e | refs/heads/master | 2023-07-17T20:20:05.120624 | 2021-08-20T20:46:05 | 2021-08-20T20:46:05 | 398,384,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.apps import AppConfig
class NewsConfig(AppConfig):
    # Django application configuration for the "news" app.
    default_auto_field = 'django.db.models.BigAutoField'  # PK type for models without an explicit id
    name = 'news'
    verbose_name = 'My News'  # human-readable label shown in the Django admin
| [
"teamofpandas@gmail.com"
] | teamofpandas@gmail.com |
2c9ec18ab1cc22af1abf040b041a1ca23db8dc47 | 5d056eba554c9c5d19687af8a95ff0db5b5f457b | /oneflow/python/test/tensor/test_tensor.py | a7a012dd1b5f99353e386841f18303c927b8de72 | [
"Apache-2.0"
] | permissive | wanghongsheng01/framework_cambricon | e96597f9b4ebfb6057bed0e13ce3a20a8baf07e2 | 187faaa2cb9ba995080ba22499b6219c2d36f0ac | refs/heads/master | 2023-07-02T04:03:18.827934 | 2021-07-26T08:56:01 | 2021-07-26T08:56:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,739 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import random
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
import oneflow.typing as oft
@flow.unittest.skip_unless_1n1d()
class TestTensor(flow.unittest.TestCase):
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_numpy_and_default_dtype(test_case):
shape = (2, 3, 4, 5)
tensor = flow.Tensor(*shape)
flow.nn.init.ones_(tensor)
test_case.assertTrue(tensor.dtype == flow.float32)
test_case.assertTrue(
np.array_equal(tensor.numpy(), np.ones(shape, dtype=np.float32))
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_copy_to_and_from_numpy(test_case):
np_arr = np.array([4, 6], dtype=np.float32)
tensor = flow.Tensor(np_arr, dtype=flow.float32)
test_case.assertTrue(np.array_equal(tensor.numpy(), np_arr))
test_case.assertEqual(np.float32, tensor.numpy().dtype)
np_arr = np.array([4, 6], dtype=np.int32)
tensor = flow.Tensor(np_arr, dtype=flow.int32)
test_case.assertTrue(np.array_equal(tensor.numpy(), np_arr))
test_case.assertEqual(np.int32, tensor.numpy().dtype)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_construct_from_numpy_or_list(test_case):
shape = (2, 3, 4, 5)
np_arr = np.random.rand(*shape).astype(np.float32)
tensor = flow.Tensor(np_arr)
test_case.assertTrue(np.array_equal(tensor.numpy(), np_arr))
# construct with contiguous numpy data
np_int_arr = np.random.randint(-100, high=100, size=shape, dtype=np.int32)
tensor = flow.Tensor(np_int_arr, dtype=flow.int32)
test_case.assertEqual(tensor.dtype, flow.int32)
test_case.assertTrue(np_arr.flags["C_CONTIGUOUS"])
test_case.assertTrue(np.array_equal(tensor.numpy(), np_int_arr))
# construct with not contiguous numpy data
np_arr = np.random.random((1, 256, 256, 3)).astype(np.float32)
np_arr = np_arr.transpose(0, 3, 1, 2)
tensor = flow.Tensor(np_arr)
test_case.assertFalse(np_arr.flags["C_CONTIGUOUS"])
test_case.assertTrue(np.array_equal(tensor.numpy(), np_arr))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_construct_from_another_tensor(test_case):
shape = (2, 3, 4, 5)
np_arr = np.random.rand(*shape).astype(np.float32)
tensor = flow.Tensor(np_arr)
output = flow.Tensor(tensor)
test_case.assertEqual(output.dtype, flow.float32)
test_case.assertTrue(np.array_equal(output.numpy(), np_arr))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_init_methods(test_case):
# test float dtype init
shape = (2, 3, 4, 5)
x = flow.Tensor(*shape)
np_ones = np.ones(x.shape)
np_zeros = np.zeros(x.shape)
random_fill_val = random.uniform(-100.0, 100.0)
x.fill_(random_fill_val)
test_case.assertTrue(np.allclose(x.numpy(), random_fill_val * np_ones))
flow.nn.init.ones_(x)
test_case.assertTrue(np.array_equal(x.numpy(), np_ones))
flow.nn.init.zeros_(x)
test_case.assertTrue(np.array_equal(x.numpy(), np_zeros))
flow.nn.init.constant_(x, random_fill_val)
test_case.assertTrue(np.allclose(x.numpy(), random_fill_val * np_ones))
z = flow.Tensor(5, 4, 3, 2)
flow.nn.init.kaiming_normal_(z, a=0.1, mode="fan_out", nonlinearity="relu")
flow.nn.init.kaiming_uniform_(z)
flow.nn.init.xavier_normal_(z)
flow.nn.init.xavier_uniform_(z)
# test int dtype init
x = flow.Tensor(*shape, dtype=flow.int32)
np_ones = np.ones(x.shape, dtype=np.int32)
np_zeros = np.zeros(x.shape, dtype=np.int32)
random_fill_val = random.randint(-100, 100)
x.fill_(random_fill_val)
test_case.assertTrue(np.allclose(x.numpy(), random_fill_val * np_ones))
flow.nn.init.ones_(x)
test_case.assertTrue(np.array_equal(x.numpy(), np_ones))
flow.nn.init.zeros_(x)
test_case.assertTrue(np.array_equal(x.numpy(), np_zeros))
flow.nn.init.constant_(x, random_fill_val)
test_case.assertTrue(np.allclose(x.numpy(), random_fill_val * np_ones))
test_case.assertEqual(flow.nn.init.calculate_gain("conv2d"), 1)
test_case.assertEqual(flow.nn.init.calculate_gain("tanh"), 5.0 / 3)
@unittest.skipIf(
True, "consistent_tensor doesn't work right now",
)
def test_creating_consistent_tensor(test_case):
shape = (2, 3)
x = flow.Tensor(*shape, placement=flow.placement("gpu", ["0:0"], None))
x.set_placement(flow.placement("cpu", ["0:0"], None))
x.set_is_consistent(True)
test_case.assertTrue(not x.is_cuda)
x.determine()
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_device(test_case):
shape = (2, 3, 4, 5)
x = flow.Tensor(*shape)
test_case.assertTrue(not x.is_cuda)
x = flow.Tensor(*shape, device=flow.device("cuda"))
test_case.assertTrue(x.is_cuda)
x = flow.Tensor(*shape, device=flow.device("cpu"))
test_case.assertTrue(not x.is_cuda)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_autograd_related_methods(test_case):
shape = (2, 3, 4, 5)
x = flow.Tensor(*shape)
y = flow.Tensor(*shape, requires_grad=True)
x.fill_(1.0)
y.fill_(2.0)
z = x + y
test_case.assertFalse(x.requires_grad)
test_case.assertTrue(x.is_leaf)
test_case.assertTrue(y.requires_grad)
test_case.assertTrue(y.is_leaf)
test_case.assertTrue(z.requires_grad)
test_case.assertFalse(z.is_leaf)
with flow.no_grad():
m = x + y
test_case.assertTrue(m.is_leaf)
test_case.assertFalse(m.requires_grad)
m.requires_grad = True
v = flow.Tensor(*shape, requires_grad=True)
z.retain_grad()
w = v + z
grad = flow.Tensor(*shape)
grad.fill_(1.0)
w.backward(gradient=grad, retain_graph=True)
test_case.assertNotEqual(v.grad, None)
test_case.assertNotEqual(y.grad, None)
test_case.assertNotEqual(z.grad, None)
test_case.assertIsNone(x.grad)
w.backward(gradient=grad, retain_graph=True)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_register_hook(test_case):
shape = (2, 3)
x = flow.Tensor(*shape, requires_grad=True)
x.register_hook(lambda grad: grad * 2 + 1)
y = x.sum() + (x * 2).sum()
y.backward()
test_case.assertTrue(np.array_equal(x.grad.numpy(), np.ones(shape) * 7))
x = flow.Tensor(*shape, requires_grad=True)
new_grad = flow.Tensor([[1, 2, 3], [4, 5, 6]])
x.register_hook(lambda _: new_grad)
y = x.sum() + (x * 2).sum()
y.backward()
test_case.assertTrue(np.array_equal(x.grad.numpy(), new_grad.numpy()))
grad_nonlocal = None
def assign_nonlocal_variable_and_return_none(grad):
nonlocal grad_nonlocal
grad_nonlocal = grad
x = flow.Tensor(*shape, requires_grad=True)
new_grad = flow.Tensor([[1, 2, 3], [4, 5, 6]])
x.register_hook(assign_nonlocal_variable_and_return_none)
y = x.sum() + (x * 2).sum()
y.backward()
test_case.assertTrue(np.array_equal(grad_nonlocal.numpy(), np.ones(shape) * 3))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_user_defined_data(test_case):
list_data = [5, 5]
tuple_data = (5, 5)
numpy_data = np.array((5, 5))
x = flow.Tensor(list_data)
y = flow.Tensor(tuple_data)
z = flow.Tensor(numpy_data)
test_case.assertTrue(np.array_equal(x.numpy(), 5 * np.ones(x.shape)))
test_case.assertTrue(np.array_equal(y.numpy(), 5 * np.ones(y.shape)))
test_case.assertTrue(np.array_equal(z.numpy(), 5 * np.ones(z.shape)))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_mirrored_tensor_and_op(test_case):
x1 = flow.Tensor([[1.0, 2.0]])
test_case.assertEqual(x1.dtype, flow.float32)
test_case.assertEqual(x1.shape, flow.Size((1, 2)))
x2 = flow.Tensor([[1.0], [2.0]])
# TODO(Liang Depeng): change to MatMul module
op = (
flow.builtin_op("matmul")
.Input("a")
.Input("b")
.Attr("transpose_a", False)
.Attr("transpose_b", False)
.Attr("alpha", float(1.0))
.Output("out")
.Build()
)
y = op(x1, x2)[0]
test_case.assertTrue(
np.array_equal(y.numpy(), np.array([[5.0]], dtype=np.float32))
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_to_list(test_case):
list_data = [[1.0, 3.0], [5.0, 6.0]]
input = flow.Tensor(list_data)
test_case.assertEqual(list_data, input.tolist())
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_nelement(test_case):
shape = (2, 3, 4)
input = flow.Tensor(*shape)
test_case.assertEqual(input.nelement(), 24)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_numel(test_case):
shape = (2, 3, 4, 5)
input = flow.Tensor(*shape)
test_case.assertEqual(input.numel(), 120)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_print(test_case):
shape = (2, 3, 4, 5)
input = flow.Tensor(*shape)
input_str = str(input)
test_case.assertTrue(input_str.startswith("tensor("))
test_case.assertTrue("device=" not in input_str)
gpu_input = flow.Tensor(*shape, device="cuda")
gpu_input_str = str(gpu_input)
test_case.assertTrue("device=" in gpu_input_str)
test_case.assertTrue("cuda:0" in gpu_input_str)
requires_grad_input = flow.Tensor(*shape, requires_grad=True)
requires_grad_input_str = str(requires_grad_input)
test_case.assertTrue("requires_grad=" in requires_grad_input_str)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_indexing(test_case):
class SliceExtracter:
def __getitem__(self, key):
return key
se = SliceExtracter()
def compare_getitem_with_numpy(tensor, slices):
np_arr = tensor.numpy()
test_case.assertTrue(np.array_equal(np_arr[slices], tensor[slices].numpy()))
def compare_setitem_with_numpy(tensor, slices, value):
np_arr = tensor.numpy()
if isinstance(value, flow.Tensor):
np_value = value.numpy()
else:
np_value = value
np_arr[slices] = np_value
tensor[slices] = value
test_case.assertTrue(np.array_equal(np_arr, tensor.numpy()))
x = flow.Tensor(5, 5)
v = flow.Tensor([[0, 1, 2, 3, 4]])
compare_getitem_with_numpy(x, se[-4:-1:2])
compare_getitem_with_numpy(x, se[-1:])
compare_setitem_with_numpy(x, se[-1:], v)
compare_setitem_with_numpy(x, se[2::2], 2)
x = flow.Tensor(2, 3, 4)
v = flow.Tensor(3)
compare_setitem_with_numpy(x, se[:, :, 2], v)
x = flow.Tensor(2, 3, 4)
compare_setitem_with_numpy(x, se[1, :, 2], v)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_div(test_case):
x = flow.Tensor(np.random.randn(1, 1))
y = flow.Tensor(np.random.randn(2, 3))
of_out = x / y
np_out = np.divide(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = x / 3
np_out = np.divide(x.numpy(), 3)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = 3 / x
np_out = np.divide(3, x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(1))
of_out = 3 / x
np_out = np.divide(3, x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_mul(test_case):
x = flow.Tensor(np.random.randn(1, 1))
y = flow.Tensor(np.random.randn(2, 3))
of_out = x * y
np_out = np.multiply(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = x * 3
np_out = np.multiply(x.numpy(), 3)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = 3 * x
np_out = np.multiply(3, x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_add_tensor_method(test_case):
x = flow.Tensor(np.random.randn(1, 1))
y = flow.Tensor(np.random.randn(2, 3))
of_out = x + y
np_out = np.add(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = x + 3
np_out = np.add(x.numpy(), 3)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = 3 + x
np_out = np.add(3, x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_sub_tensor_method(test_case):
x = flow.Tensor(np.random.randn(1, 1))
y = flow.Tensor(np.random.randn(2, 3))
of_out = x - y
np_out = np.subtract(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = x - 3
np_out = np.subtract(x.numpy(), 3)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
x = flow.Tensor(np.random.randn(2, 3))
of_out = 3 - x
np_out = np.subtract(3, x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_sum(test_case):
input = flow.Tensor(np.random.randn(4, 5, 6), dtype=flow.float32)
of_out = input.sum(dim=(2, 1))
np_out = np.sum(input.numpy(), axis=(2, 1))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_asinh(test_case):
input = flow.Tensor(np.random.randn(4, 5, 6), dtype=flow.float32)
of_out = input.asinh()
np_out = np.arcsinh(input.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_arcsinh(test_case):
input = flow.Tensor(np.random.randn(4, 5, 6), dtype=flow.float32)
of_out = input.arcsinh()
np_out = np.arcsinh(input.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_asin(test_case):
input = flow.Tensor(np.random.random((4, 5, 6)) - 0.5, dtype=flow.float32)
of_out = input.asin()
np_out = np.arcsin(input.numpy())
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_arcsin(test_case):
input = flow.Tensor(np.random.random((4, 5, 6)) - 0.5, dtype=flow.float32)
of_out = input.arcsin()
np_out = np.arcsin(input.numpy())
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_mean(test_case):
input = flow.Tensor(np.random.randn(2, 3), dtype=flow.float32)
of_out = input.mean(dim=0)
np_out = np.mean(input.numpy(), axis=0)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_neg(test_case):
input = flow.Tensor(np.random.randn(2, 3), dtype=flow.float32)
of_out = -input
np_out = -input.numpy()
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_negative(test_case):
input = flow.Tensor(np.random.randn(2, 3), dtype=flow.float32)
of_out = input.negative()
np_out = -input.numpy()
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_greater(test_case):
input1 = flow.Tensor(
np.array([1, 1, 4]).astype(np.float32), dtype=flow.float32,
)
input2 = flow.Tensor(
np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32,
)
of_out = input1.gt(input2)
np_out = np.greater(input1.numpy(), input2.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_less(test_case):
input1 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
input2 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
of_out = input1.lt(input2)
np_out = np.less(input1.numpy(), input2.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_slice(test_case):
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
input = flow.Tensor(x)
test_case.assertTrue(np.allclose(input[0].numpy(), x[0], 1e-5, 1e-5))
test_case.assertTrue(np.allclose(input[1].numpy(), x[1], 1e-5, 1e-5))
test_case.assertTrue(np.allclose(input[0, :].numpy(), x[0, :], 1e-5, 1e-5))
test_case.assertTrue(
np.allclose(input[0, :, 0:2].numpy(), x[0, :, 0:2], 1e-5, 1e-5)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_logical_slice_assign(test_case):
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
input = flow.Tensor(x)
input[:, 0] = 3.1415926
x[:, 0] = 3.1415926
test_case.assertTrue(np.allclose(input.numpy(), x, 1e-5, 1e-5))
input[:, 1:2] = 1
x[:, 1:2] = 1
test_case.assertTrue(np.allclose(input.numpy(), x, 1e-5, 1e-5))
input[:] = 1.234
x[:] = 1.234
test_case.assertTrue(np.allclose(input.numpy(), x, 1e-5, 1e-5))
input[0] = 0
x[0] = 0
test_case.assertTrue(np.allclose(input.numpy(), x, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_zeros_(test_case):
shape = (2, 3)
x = flow.Tensor(np.random.randn(*shape), dtype=flow.float32)
x.zeros_()
test_case.assertTrue(np.array_equal(x.numpy(), np.zeros(shape)))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_construct_small_tensor(test_case):
shape = (2, 3, 4, 5)
np_arr = np.random.rand(*shape).astype(np.float32)
tensor = flow.tensor(np_arr)
test_case.assertTrue(np.array_equal(tensor.numpy(), np_arr))
test_case.assertEqual(tensor.dtype, flow.float32)
np_int_arr = np.random.randint(-100, high=100, size=shape, dtype=np.int32)
tensor = flow.tensor(np_int_arr, dtype=flow.int32)
test_case.assertEqual(tensor.dtype, flow.int32)
list_data = [[1, 2.0], [5, 3]]
tensor = flow.tensor(list_data)
test_case.assertEqual(tensor.dtype, flow.float32)
test_case.assertTrue(
np.allclose(tensor.numpy(), np.array(list_data), 1e-4, 1e-4)
)
tuple_data = ((1, 2, 5), (4, 3, 10))
tensor = flow.tensor(tuple_data)
test_case.assertEqual(tensor.dtype, flow.int64)
test_case.assertTrue(np.array_equal(tensor.numpy(), np.array(tuple_data)))
scalar = 5.5
tensor = flow.tensor(scalar)
test_case.assertEqual(tensor.dtype, flow.float32)
test_case.assertTrue(np.allclose(tensor.numpy(), np.array(scalar), 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_floor(test_case):
input = flow.Tensor(np.random.randn(4, 5, 6), dtype=flow.float32)
of_out = input.floor()
np_out = np.floor(input.numpy())
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_round(test_case):
shape = (2, 3)
np_input = np.random.randn(*shape)
of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = flow.round(of_input)
np_out = np.round(np_input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(of_input.grad.numpy(), np.zeros(shape), 1e-4, 1e-4)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_where(test_case):
x = flow.Tensor(
np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]),
dtype=flow.float32,
)
y = flow.Tensor(np.ones(shape=(3, 2)), dtype=flow.float32)
condition = flow.Tensor(np.array([[0, 1], [1, 0], [1, 0]]), dtype=flow.int32)
of_out = condition.where(x, y)
np_out = np.array([[1.0000, 0.3139], [0.3898, 1.0000], [0.0478, 1.0000]])
test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_equal(test_case):
arr1 = np.random.randint(1, 10, size=(2, 3, 4, 5))
arr2 = np.random.randint(1, 10, size=(2, 3, 4, 5))
input = flow.Tensor(arr1, dtype=flow.float32)
other = flow.Tensor(arr2, dtype=flow.float32)
of_out = input.eq(other)
np_out = np.equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
    @unittest.skipIf(
        not flow.unittest.env.eager_execution_enabled(),
        "numpy doesn't work in lazy mode",
    )
    # NOTE(review): the leading underscore means unittest never discovers this
    # method; it is a parameterized helper that only runs if called explicitly
    # with a shape and device.  Confirm whether it was meant to be wired into
    # a real test_* method.
    def _test_tensor_atan(test_case, shape, device):
        # Forward: Tensor.atan must match numpy arctan elementwise.
        np_input = np.random.randn(*shape)
        of_input = flow.Tensor(
            np_input, dtype=flow.float32, device=flow.device(device), requires_grad=True
        )
        of_out = of_input.atan()
        np_out = np.arctan(np_input)
        test_case.assertTrue(
            np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
        )
        # Backward: d/dx atan(x) = 1 / (1 + x^2).
        of_out = of_out.sum()
        of_out.backward()
        np_out_grad = 1 / (1 + np_input ** 2)
        test_case.assertTrue(
            np.allclose(of_input.grad.numpy(), np_out_grad, 1e-5, 1e-5, equal_nan=True)
        )
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def _test_tensor_arctan(test_case, shape, device):
np_input = np.random.randn(*shape)
of_input = flow.Tensor(
np_input, dtype=flow.float32, device=flow.device(device), requires_grad=True
)
of_out = of_input.arctan()
np_out = np.arctan(np_input)
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
of_out = of_out.sum()
of_out.backward()
np_out_grad = 1 / (1 + np_input ** 2)
test_case.assertTrue(
np.allclose(of_input.grad.numpy(), np_out_grad, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_detach(test_case):
shape = (2, 3, 4, 5)
x = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, requires_grad=True,
)
test_case.assertTrue(np.allclose(x.detach().numpy(), x.numpy(), 1e-4, 1e-4))
test_case.assertEqual(x.detach().requires_grad, False)
y = x * 2
z = y.detach()
test_case.assertEqual(z.is_leaf, True)
test_case.assertEqual(z.grad_fn, None)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_clamp_(test_case):
input = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
of_out = input.clamp(0.1, 0.5)
np_out = np.clip(input.numpy(), 0.1, 0.5)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_clip_(test_case):
input = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
of_out = input.clip(0.1, 0.5)
np_out = np.clip(input.numpy(), 0.1, 0.5)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
    @unittest.skipIf(
        not flow.unittest.env.eager_execution_enabled(),
        "numpy doesn't work in lazy mode",
    )
    # NOTE(review): the leading underscore means unittest never discovers or
    # runs this method, so the decorator and assertions are currently dead
    # code — rename to test_cast_tensor_function if it should execute.
    def _test_cast_tensor_function(test_case):
        # Tensor.cast to int8 should match numpy's astype conversion.
        shape = (2, 3, 4, 5)
        np_arr = np.random.randn(*shape).astype(np.float32)
        input = flow.Tensor(np_arr, dtype=flow.float32)
        output = input.cast(flow.int8)
        np_out = np_arr.astype(np.int8)
        test_case.assertTrue(np.array_equal(output.numpy(), np_out))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def _test_sin_tensor_function(test_case, shape, device):
input = flow.Tensor(np.random.randn(2, 3, 4, 5))
of_out = input.sin()
np_out = np.sin(input.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_cos_tensor_function(test_case):
arr = np.random.randn(2, 3, 4, 5)
input = flow.Tensor(arr, dtype=flow.float32)
np_out = np.cos(arr)
of_out = input.cos()
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_std_tensor_function(test_case):
np_arr = np.random.randn(9, 8, 7, 6)
input = flow.Tensor(np_arr)
of_out = input.std(dim=1, keepdim=False)
np_out = np.std(np_arr, axis=1)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_sqrt_tensor_function(test_case):
input_arr = np.random.rand(1, 6, 3, 8)
np_out = np.sqrt(input_arr)
x = flow.Tensor(input_arr)
of_out = x.sqrt()
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_rsqrt_tensor_function(test_case):
np_arr = np.random.rand(3, 2, 5, 7)
np_out = 1 / np.sqrt(np_arr)
x = flow.Tensor(np_arr)
of_out = flow.rsqrt(input=x)
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_square_tensor_function(test_case):
np_arr = np.random.randn(2, 7, 7, 3)
np_out = np.square(np_arr)
x = flow.Tensor(np_arr)
of_out = x.square()
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_addmm_(test_case):
input = flow.Tensor(np.random.randn(2, 6), dtype=flow.float32)
mat1 = flow.Tensor(np.random.randn(2, 3), dtype=flow.float32)
mat2 = flow.Tensor(np.random.randn(3, 6), dtype=flow.float32)
of_out = input.addmm(mat1, mat2, alpha=1, beta=2)
np_out = np.add(2 * input.numpy(), 1 * np.matmul(mat1.numpy(), mat2.numpy()))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_norm_tensor_function(test_case):
input = flow.Tensor(
np.array([[-4.0, -3.0, -2.0], [-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]),
dtype=flow.float32,
)
of_out_1 = input.norm("fro")
np_out_1 = np.linalg.norm(input.numpy(), "fro")
of_out_2 = input.norm(2, dim=1)
np_out_2 = np.linalg.norm(input.numpy(), ord=2, axis=1)
of_out_3 = input.norm(float("inf"), dim=0, keepdim=True)
np_out_3 = np.linalg.norm(
input.numpy(), ord=float("inf"), axis=0, keepdims=True
)
test_case.assertTrue(np.allclose(of_out_1.numpy(), np_out_1, 1e-5, 1e-5))
test_case.assertTrue(np.allclose(of_out_2.numpy(), np_out_2, 1e-5, 1e-5))
test_case.assertTrue(np.allclose(of_out_3.numpy(), np_out_3, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_pow_tensor_function(test_case):
input = flow.Tensor(np.array([1, 2, 3, 4, 5, 6]), dtype=flow.float32)
of_out = input.pow(2.1)
np_out = np.power(input.numpy(), 2.1)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out_magic = input ** 2.1
test_case.assertTrue(np.allclose(of_out_magic.numpy(), np_out, 1e-5, 1e-5))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_atanh(test_case):
np_input = np.random.random((2, 3)) - 0.5
of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.atanh()
np_out = np.arctanh(np_input)
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4, equal_nan=True)
)
of_out = of_out.sum()
of_out.backward()
np_out_grad = 1.0 / (1.0 - np.square(np_input))
test_case.assertTrue(
np.allclose(of_input.grad.numpy(), np_out_grad, 1e-4, 1e-4, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_arctanh(test_case):
np_input = np.random.random((2, 3)) - 0.5
of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.arctanh()
np_out = np.arctanh(np_input)
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4, equal_nan=True)
)
of_out = of_out.sum()
of_out.backward()
np_out_grad = 1.0 / (1.0 - np.square(np_input))
test_case.assertTrue(
np.allclose(of_input.grad.numpy(), np_out_grad, 1e-4, 1e-4, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_tan(test_case):
np_input = np.random.random((2, 3)) - 0.5
of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.tan()
np_out = np.tan(np_input)
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4, equal_nan=True)
)
of_out = of_out.sum()
of_out.backward()
np_out_grad = 1 + np.square(np_out)
test_case.assertTrue(
np.allclose(of_input.grad.numpy(), np_out_grad, 1e-4, 1e-4, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_acos(test_case):
input = flow.Tensor(np.random.rand(8, 11, 9, 7) - 0.5, requires_grad=True,)
of_out = input.acos()
np_out = np.arccos(input.numpy())
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)
)
of_out = of_out.sum()
of_out.backward()
np_grad = -1.0 / np.sqrt(1 - np.square(input.numpy()))
test_case.assertTrue(
np.allclose(input.grad.numpy(), np_grad, 1e-4, 1e-4, equal_nan=True)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_ceil(test_case):
x = flow.Tensor(np.random.randn(2, 3), requires_grad=True)
of_out = x.ceil()
np_out = np.ceil(x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(x.grad.numpy(), np.zeros((2, 3)), 1e-4, 1e-4))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_expm1(test_case):
x = flow.Tensor(np.random.randn(2, 3), requires_grad=True)
of_out = x.expm1()
np_out = np.expm1(x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(x.grad.numpy(), np.exp(x.numpy()), 1e-4, 1e-4))
    @unittest.skipIf(
        not flow.unittest.env.eager_execution_enabled(),
        "numpy doesn't work in lazy mode",
    )
    def test_tensor_mish(test_case):
        def np_mish(x):
            # mish(x) = x * tanh(softplus(x)); with f = 1 + e^x this equals
            # x * (f^2 - 1) / (f^2 + 1). Returns [forward value, gradient].
            f = 1 + np.exp(x)
            y = x * ((f * f - 1) / (f * f + 1))
            y_grad = (f * f - 1) / (f * f + 1) + x * (4 * f * (f - 1)) / (
                (f * f + 1) * (f * f + 1)
            )
            return [y, y_grad]
        np_input = np.random.randn(2, 4, 5, 6,)
        of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
        of_out = of_input.mish()
        np_out, np_grad = np_mish(np_input)
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
        of_out = of_out.sum()
        of_out.backward()
        test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5))
    @unittest.skipIf(
        not flow.unittest.env.eager_execution_enabled(),
        "numpy doesn't work in lazy mode",
    )
    def test_tensor_triu(test_case):
        def np_triu(x, diagonal):
            # Reference: keep the upper-triangular part; the gradient is 1
            # on kept entries and 0 on zeroed ones.
            y = np.triu(x, diagonal)
            y_grad = np.triu(np.ones_like(x), diagonal)
            return [y, y_grad]
        # Cover one offset above and one below the main diagonal.
        diagonal_list = [2, -1]
        for diagonal in diagonal_list:
            np_input = np.random.randn(2, 4, 6)
            of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
            of_out = of_input.triu(diagonal)
            np_out, np_grad = np_triu(np_input, diagonal)
            test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
            of_out = of_out.sum()
            of_out.backward()
            test_case.assertTrue(
                np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5)
            )
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | wanghongsheng01.noreply@github.com |
890fd3b5525d78b3bddbc5f55ff21303da111d0b | b4f203f487c0425fc2996079829715f48f06689b | /test2.py | 80d5b14539e4dddde2c4233542974efd336b1d4f | [] | no_license | seunggue/AI-school-project | 1232a6bb91a5e492289dcff1dec3c12d747e53a1 | ceaad7808a86b09062203845cab603fe71ea15c5 | refs/heads/master | 2022-12-12T08:37:51.067589 | 2020-08-30T16:33:08 | 2020-08-30T16:33:08 | 290,980,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | a = '12345'
print(a[:-3]) | [
"seungue1687@gmail.com"
] | seungue1687@gmail.com |
29f903ec82ceab6e23c6cd6d965b31ac87b9b753 | 31efbaab86fa00aec8be4539c10941cc825b1523 | /code/week2/08_for/8_3.py | d4119c522889f9653afd781d212a3e64b928fefd | [] | no_license | Aang1993/python_mathesis_course | 7eed0478e6ad023fd6b9a0da7a5701b6d696a68f | be5901d46230d803373577f46ea0dfff22512a14 | refs/heads/master | 2023-06-04T14:19:27.486748 | 2021-06-17T17:18:30 | 2021-06-17T17:18:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # mathesis.cup.gr
# N. Αβούρης: Εισαγωγή στην Python
# Μάθημα 8. Δομή for
# Άσκηση 8.3
#Να δημιουργήσετε μια λίστα με μέλη
#τα γράμματα μιας συμβολοσειράς
st = "καλή σας μέρα αρχόντες"
li = []
for ch in st:
if ch.isalpha():
li.append(ch)
print(li)
| [
"noreply@github.com"
] | Aang1993.noreply@github.com |
adcc7a4f456face62e0edc4a15503cb7ef48c86e | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/XjShRightSide/YW_GGQQ_QLFXJHA_086.py | 6a72b553056082e63709d5549fef6af05775698c | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/service")
from OptMainService import *
from OptQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/mysql")
from Opt_SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryOrderErrorMsg import queryOrderErrorMsg
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_GGQQ_QLFXJHA_086(xtp_test_case):
    # Option test case: sell-to-close (holder closing a long position) with a
    # limit-price order, funds check where available funds exactly equal the
    # fees minus the order amount.
    def setUp(self):
        # Reset the fund assets for this case, restart the SH test env, and
        # re-login so the session picks up the fresh state.
        sql_transfer = Opt_SqlData_Transfer()
        sql_transfer.transfer_fund_asset('YW_GGQQ_QLFXJHA_086')
        clear_data_and_restart_sh()
        Api.trade.Logout()
        Api.trade.Login()
    def test_YW_GGQQ_QLFXJHA_086(self):
        title = '卖平(权利方平仓):限价-验资(可用资金刚好)(下单金额<费用&&可用资金=(费用-下单金额))'
        # Expected values for this test case.
        # Expected states include: initial, unfilled, partly filled, fully
        # filled, partly cancelled (reported), partly cancelled, reported
        # pending cancel, cancelled, invalid, cancel-invalid, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not change.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('10001032', '1', '*', '1', '0', '*', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            logger.error('查询结果为False,错误原因: {0}'.format(
                json.dumps(rs['测试错误原因'], encoding='UTF-8', ensure_ascii=False)))
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_OPTION'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_CLOSE'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': stkparm['涨停价'],
                'quantity': 2
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            if rs['用例测试结果']:
                logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
            else:
                logger.warning('执行结果为{0},{1},{2}'.format(
                    str(rs['用例测试结果']), str(rs['用例错误源']),
                    json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
            self.assertEqual(rs['用例测试结果'], True)  # 4
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
a8d1a0375bce03ca9605bc955a1bef231c78a0bd | 0e638cd11c1ac64dcd1672936a1b0d7d545ee29f | /src/required_arg_after_optional_arg.py | f25f359628beedb42bfa80b56273c8056eba4778 | [] | no_license | simon-ritchie/python-novice-book | 2557d397a8f6025b63f3173c24bd4dcdb48aef8c | 1492adf603ba4dd1e9fadb48b74e49887c917dc6 | refs/heads/master | 2020-12-26T16:47:36.945581 | 2020-08-04T12:16:10 | 2020-08-04T12:16:10 | 237,569,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | def multiply_x_by_y(x=10, y):
multiplied_value = x * y
print(multiplied_value) | [
"antisocial.sid2@gmail.com"
] | antisocial.sid2@gmail.com |
8fd75ccc449a7c28615a4d003821d04a5a247f6c | 5f27addee7c9bf2708b28c1ea129e27df4f0d670 | /class-07/demo/testing-io/tests/test_guesser.py | 5cf24766498e9706476b33b87448b8466855367e | [] | no_license | abdalazzezzalsalahat/amman-python-401d3 | 04862498a95cac2ad686de70f9fc1a8dd0aaf7e3 | 2f6fbecd110ad06094768b63eabfa70294d2b919 | refs/heads/main | 2023-06-10T18:12:17.369983 | 2021-07-02T21:48:48 | 2021-07-02T21:48:48 | 363,807,895 | 0 | 1 | null | 2021-07-02T21:48:49 | 2021-05-03T03:37:34 | Jupyter Notebook | UTF-8 | Python | false | false | 3,465 | py | import builtins
import pytest
from testing_io.guesser import Guesser
def test_help():
    """Guesser.help() must print exactly the expected banner."""
    def fake_print(*args, **kwargs):
        assert args[0] == "Instantiate a Guesser then guess things"
    # Patch the builtin, exercise help(), then restore the original.
    original_print = builtins.print
    builtins.print = fake_print
    Guesser().help()
    builtins.print = original_print
@pytest.mark.skip
def test_beginners_luck():
    """Guess the colour right on the first try: the greeting print must come
    first, and every later print must be the win message. (Skipped.)"""
    counter = 0
    def mock_print(*args, **kwargs):
        # First print is the greeting; any subsequent one is the win line.
        nonlocal counter
        if counter == 0:
            expected = "Step right up and guess my favorite color!"
        else:
            expected = "You got it!"
        counter += 1
        # Echo through the real print for debugging visibility.
        old_print("\n*********")
        old_print(*args, **kwargs)
        old_print("*********")
        actual = args[0]  # first positional arg is the printed message
        assert actual == expected
    def mock_input(*args, **kwargs):
        # Always answer the winning colour.
        return "green"
    old_print = builtins.print
    builtins.print = mock_print
    old_input = builtins.input
    builtins.input = mock_input
    guesser = Guesser()
    guesser.guess_fave_color()
    builtins.print = old_print
    builtins.input = old_input
@pytest.mark.skip
def test_third_times_the_charm():
    """Two wrong guesses, then the right one. (Currently skipped.)"""
    expected_prints = [
        "Step right up and guess my favorite color!",
        "Nope, that's not it.",
        "Nope, that's not it.",
        "You got it!",
    ]
    expected_prompts = ["What is your guess?"] * 3
    scripted_answers = ["red", "blue", "green"]
    session = IOTester(expected_prints, expected_prompts, scripted_answers)
    Guesser().guess_fave_color()
    session.exit()
@pytest.mark.skip
def test_guess_number():
    """Scripted run of guess_random_number. (Currently skipped.)"""
    IOTesterWithNums.test(
        ["I am thinking of number between 1 and 5", "Nope, try again"],
        ["What number am I thinking of?"],
        ["1"],
    )
class IOTester():
    """Scripted stdin/stdout test double.

    Patches ``builtins.print`` and ``builtins.input`` so code under test
    consumes the queued ``prints``/``prompts``/``responses`` in order.
    Call :meth:`exit` to restore the real builtins and assert every queued
    item was consumed.

    Bug fixed: ``__init__`` previously assigned the mocks to throwaway
    local variables (``print = self.mock_print``), so the builtins were
    never actually patched and ``exit`` never restored them — the
    commented-out lines showed the intended behaviour.
    """
    def __init__(self, prints, prompts, responses):
        self.prints = prints
        self.prompts = prompts
        self.responses = responses
        # Swap the real builtins for the mocks; restored in exit().
        self.old_print = builtins.print
        self.old_input = builtins.input
        builtins.print = self.mock_print
        builtins.input = self.mock_input
    def mock_print(self, *args, **kwargs):
        # Each print() call must match the next expected line.
        expected = self.prints.pop(0)
        actual = args[0]
        assert actual == expected
    def mock_input(self, *args, **kwargs):
        # Each input() prompt must match the next expected prompt;
        # the canned response is returned to the caller.
        expected = self.prompts.pop(0)
        actual = args[0]
        assert expected == actual
        return self.responses.pop(0)
    def exit(self):
        # Restore the real builtins and verify everything was consumed.
        builtins.print = self.old_print
        builtins.input = self.old_input
        assert self.prints == []
        assert self.prompts == []
        assert self.responses == []
class IOTesterWithNums(IOTester):
    """IOTester variant whose canned responses can also feed a mocked RNG."""
    def mock_get_random_num(self):
        # Pops from the same response queue as mock_input.
        # NOTE(review): nothing here patches this onto Guesser — confirm
        # where it is wired in before relying on it.
        return self.responses.pop(0)
    @staticmethod
    def test(prints, prompts, responses):
        # Convenience driver: run guess_random_number() against a scripted
        # I/O session and verify everything was consumed.
        guesser = Guesser()
        tester = IOTesterWithNums(prints, prompts, responses)
        guesser.guess_random_number()
        tester.exit()
| [
"dario.thornhill@gmail.com"
] | dario.thornhill@gmail.com |
6cd63baf5527151bb24780b8cd093d74cd6727e3 | 70f44c7e293a2f4a6ea7c1432410bb89bfd93591 | /main.py | 8cb07e58aa82fadb2ba022d2da5d6c861f1f959c | [] | no_license | leetschau/fuel-efficiency-analysis | 2d7785167100efe324e569de6f5929b86e21e1c8 | d2249d8dcb07c4704b801f2c664a3637445c7f77 | refs/heads/master | 2021-01-22T21:18:03.565016 | 2017-09-07T07:33:09 | 2017-09-07T07:33:09 | 100,671,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,046 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 16 12:13:11 2017
@author: leo
This script is the implementation of chapter 7 "Driving Visual Analyses with Automobile Data"
of "Practical Data Science Cookbook" by Tony Ojeda, etc.
"""
#%% Section Analyzing automobile fuel efficiency over time with Python
# -------------------------------------------
import pandas as pd
import numpy as np
# NOTE: the bare expressions below are intended for interactive,
# cell-by-cell execution (the #%% markers are Spyder/IPython cells),
# not a plain `python` run.
vehicles = pd.read_csv("vehicles.csv")
column_names = vehicles.columns.values
column_names[[22, 23, 70, 71, 72, 73]]
len(vehicles)
len(vehicles.columns)
vehicles.columns
len(pd.unique(vehicles.year))
min(vehicles.year)
max(vehicles.year)
pd.value_counts(vehicles.fuelType1)
pd.value_counts(vehicles.trany)
# First character of the transmission string distinguishes Auto/Manual.
vehicles["trany2"] = vehicles.trany.str[0]
pd.value_counts(vehicles.trany2)
#%% step 1 ~ 4 on Page 202
from ggplot import ggplot, aes, geom_point, xlab, ylab, ggtitle
grouped = vehicles.groupby("year")
averaged = grouped['comb08', 'highway08', 'city08'].agg([np.mean])
averaged.columns = ['comb08_mean', 'highway08_mean', 'city08_mean']
averaged['year'] = averaged.index
print(ggplot(averaged, aes('year', 'comb08_mean')) +
      geom_point(color='steelblue') +
      xlab('Year') +
      ylab('Average MPG') +
      ggtitle('All cars'))
#%% step 5
# Keep plain-gasoline, non-hybrid vehicles only.
criteria1 = vehicles.fuelType1.isin(['Regular Gasoline', 'Prenium Gasoline', 'Midgrade Gasoline'])
criteria2 = vehicles.fuelType2.isnull()
criteria3 = vehicles.atvType != 'Hybrid'
vehicles_non_hybrid = vehicles[criteria1 & criteria2 & criteria3]
len(vehicles_non_hybrid)
#%% step 6
grouped = vehicles_non_hybrid.groupby(['year'])
averaged = grouped['comb08'].agg([np.mean])
print(averaged)
#%% step 7 ~ 9
# Drop rows without displacement/MPG and coerce both to float for plotting.
pd.unique(vehicles_non_hybrid.displ)
criteria = vehicles_non_hybrid.displ.notnull()
vehicles_non_hybrid = vehicles_non_hybrid[criteria]
vehicles_non_hybrid.displ = vehicles_non_hybrid.displ.astype('float')
criteria = vehicles_non_hybrid.comb08.notnull()
vehicles_non_hybrid = vehicles_non_hybrid[criteria]
vehicles_non_hybrid.comb08 = vehicles_non_hybrid.comb08.astype('float')
print(ggplot(vehicles_non_hybrid, aes('displ', 'comb08')) +
      geom_point(color='steelblue') +
      xlab('Engine Displacement') +
      ylab('Average MPG') +
      ggtitle('Gasoline cars'))
#%% step 10
grouped_by_year = vehicles_non_hybrid.groupby(['year'])
avg_grouped_by_year = grouped_by_year['displ', 'comb08'].agg([np.mean])
#%% step 11
avg_grouped_by_year['year'] = avg_grouped_by_year.index
melted_avg_grouped_by_year = pd.melt(avg_grouped_by_year, id_vars='year')
from ggplot import facet_wrap
p = ggplot(aes(x='year', y='value', color='variable_0'), data=melted_avg_grouped_by_year)
p + geom_point() + facet_wrap('variable_0')
#%% Section Investigating the makes and models of automobiles with Python
# ------ step 1, 2 ------------------
pd.unique(vehicles_non_hybrid.cylinders)
vehicles_non_hybrid.cylinders = vehicles_non_hybrid.cylinders.astype('float')
pd.unique(vehicles_non_hybrid.cylinders)
vehicles_non_hybrid_4 = vehicles_non_hybrid[(vehicles_non_hybrid.cylinders == 4.0)]
#%% step 3
import matplotlib.pyplot as plt
# NOTE(review): "%matplotlib inline" is an IPython magic and a SyntaxError
# in a plain .py run — this cell must be executed inside IPython.
%matplotlib inline
grouped_by_year_4_cylinder = vehicles_non_hybrid_4.groupby(['year']).make.nunique()
fig = grouped_by_year_4_cylinder.plot()
fig.set_xlabel('Year')
fig.set_ylabel('Number of 4-Cylinder Makes')
print(fig)
#%% step 4
# Intersection of the makes that appear in every year's 4-cylinder data.
grouped_by_year_4_cylinder = vehicles_non_hybrid_4.groupby(['year'])
unique_makes = []
for name, group in grouped_by_year_4_cylinder:
    unique_makes.append(set(pd.unique(group['make'])))
# reduce is a builtin on Python 2 (the shebang pins python2); Python 3
# would need functools.reduce here.
unique_makes = reduce(set.intersection, unique_makes)
print(unique_makes)
#%% step 5, 6, 7
boolean_mask = []
for index, row in vehicles_non_hybrid_4.iterrows():
    make = row['make']
    boolean_mask.append(make in unique_makes)
df_common_makes = vehicles_non_hybrid_4[boolean_mask]
df_common_makes_grouped = df_common_makes.groupby(['year', 'make']).agg(np.mean).reset_index()
from ggplot import geom_line
ggplot(aes(x='year', y='comb08'), data = df_common_makes_grouped) + geom_line() + facet_wrap('make')
| [
"leetschau@gmail.com"
] | leetschau@gmail.com |
150ca0fe6a8507a3b8e281a64d08bea75404782f | 998c0b261c73bf27f059fa8604859548b6153257 | /smartPython/moveController.py | 252ce31f3bcd788fea43d2f8703e62f2b0840145 | [] | no_license | kwarnello/Smart-Python | 1ee4a87fddf5b856083c8110d88757bea30c9ed2 | 558be0528b434a3dcd46228a30ad23d0c99a3f07 | refs/heads/master | 2021-07-14T04:40:12.564507 | 2021-03-05T07:47:16 | 2021-03-05T07:47:16 | 237,638,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | '''
Created on 2 lut 2020
@author: Warus
'''
import keyboard
class Controller(object):
    """Binds the WASD keys to snake movement for the owning game object."""
    def __init__(self, main):
        """Store the game reference and register the movement hotkeys."""
        self.main = main
        self.gameStartsInitialize()
    def gameStartsInitialize(self):
        """Initialize keyboard inputs after "New game" button is pressed."""
        bindings = (
            ('w', (0, -1)),
            ('s', (0, 1)),
            ('a', (-1, 0)),
            ('d', (1, 0)),
        )
        for key, velocity in bindings:
            keyboard.add_hotkey(key, self.main.snake.changeVelocity, args=velocity)
| [
"kwarnello@gmail.com"
] | kwarnello@gmail.com |
d8eeec8f2b5d006a2340b0046d783276cfbedc89 | 7d60f4855b0363d3546fac49bf3579533c316ea9 | /portal/model/dbperson.py | b1254517051190d75afd90ed60174cbe36b70971 | [] | no_license | slimjim777/Portal | 41baa28f8b575cfc75f1778ebac0fccf17be26bd | b08c095ead11b594f9c279a15ddfeb515b528674 | refs/heads/master | 2020-04-24T19:27:25.206112 | 2015-01-22T20:38:34 | 2015-01-22T20:38:34 | 13,887,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,045 | py | import datetime
from flask import session
from portal.model.models import Database
from portal import app
class Person(Database):
    def family_upsert(self, record):
        """
        Update or insert the provided family record.

        Matches on ``externalid``: tries an UPDATE first and falls back to an
        INSERT when no row was affected. Returns False when the record has no
        ``familyid`` key, True otherwise.
        """
        if 'familyid' not in record:
            return False
        # Try updating the record and get the rowcount to see if it worked
        sql_update = """
            UPDATE family
            SET name=%(name)s, tagnumber=%(tagnumber)s, territory=%(territory)s
            WHERE externalid=%(externalid)s"""
        self.cursor.execute(sql_update, record)
        if self.cursor.rowcount > 0:
            # Updated an existing record
            self.sqlconn.commit()
        else:
            # Create a new family record
            sql_insert = """
                INSERT INTO family
                VALUES (DEFAULT,%(name)s,%(tagnumber)s,%(territory)s,%(externalid)s
                )"""
            self.cursor.execute(sql_insert, record)
            self.sqlconn.commit()
        return True
def update(self, record):
"""
Update a person record using key-value.
"""
valid_fields = ['partner', 'key_leader']
sql_update = "UPDATE person SET "
sql_set = []
for f in valid_fields:
if f in record:
sql_set.append(f + '=%(' + f + ')s')
sql_where = " WHERE externalid=%(externalid)s"
self.cursor.execute(sql_update + ','.join(sql_set) + sql_where, record)
self.sqlconn.commit()
return {'response': 'Success'}
    def person_upsert(self, record):
        """
        Update or insert the provided person record.

        Matches on ``externalid``; tries an UPDATE first and INSERTs when no
        row was touched. Also rewrites group memberships when the record has
        a ``team_serving`` key. Returns False when ``personid`` is missing,
        True otherwise.
        """
        if 'personid' not in record:
            return False
        # Try updating the record and get the rowcount to see if it worked
        sql_update = """
            UPDATE person
            SET name=%(name)s, family_tag=%(family_tag)s, tagnumber=%(tagnumber)s,
            type=%(type)s, kids_group=%(kids_group)s, kids_team=%(kids_team)s,
            school_year=%(school_year)s, dob=%(dob)s, medical_info=%(medical_info)s,
            medical_notes=%(medical_notes)s,territory=%(territory)s,
            firstname=%(firstname)s, gender=%(gender)s,
            marital_status=%(marital_status)s, lifegroup=%(lifegroup)s, address1=%(address1)s,
            address2=%(address2)s, city=%(city)s, postcode=%(postcode)s, country=%(country)s,
            home_phone=%(home_phone)s, mobile_phone=%(mobile_phone)s, email=%(email)s,
            baptised=%(baptised)s, salvation=%(salvation)s, partner=%(partner)s,
            key_leader=%(key_leader)s
            WHERE externalid=%(externalid)s
            """
        self.cursor.execute(sql_update, record)
        if self.cursor.rowcount > 0:
            # Updated an existing record
            self.sqlconn.commit()
        else:
            # Create a new person record (field order must match the table)
            sql_insert = """
                INSERT INTO person
                VALUES (DEFAULT,%(name)s,%(family_tag)s,%(tagnumber)s,%(type)s,
                %(kids_group)s,%(kids_team)s,%(school_year)s,%(dob)s,
                %(medical_info)s,%(medical_notes)s,%(territory)s,%(firstname)s,%(gender)s,
                %(marital_status)s,%(lifegroup)s,%(address1)s,%(address2)s,%(city)s,
                %(postcode)s,%(country)s,%(home_phone)s,%(mobile_phone)s,%(email)s,
                %(baptised)s,%(salvation)s,%(partner)s,%(key_leader)s,%(externalid)s
                )"""
            self.cursor.execute(sql_insert, record)
            self.sqlconn.commit()
        # Update the group memberships
        if 'team_serving' in record:
            self.group_membership_update(record['personid'], record['team_serving'])
        return True
    def group_membership_update(self, personid, group_names):
        """
        Update the group membership for the person by deleting the existing ones
        and adding the current memberships.
        """
        # Delete existing memberships
        sql_delete = 'delete from membership where personid=%s'
        self.cursor.execute(sql_delete, (personid,))
        # Add the current memberships
        if len(group_names) > 0:
            # Group names are stored with '&' doubled to '&&' in the groups
            # table, so escape before matching on name.
            names = tuple(x.replace('&', '&&') for x in group_names)
            sql_insert = """
                insert into membership (personid,groupsid)
                select %s, groupsid from groups where name in %s
                """
            self.cursor.execute(sql_insert, (personid, names,))
        self.sqlconn.commit()
    def people_in_groups(self, groups, fields=[]):
        """
        Return people who belong to any of the named groups — optionally also
        anyone with one of the boolean ``fields`` columns set — limited to the
        territories the current session may access.

        NOTE(review): the mutable default ``fields=[]`` is shared between
        calls; safe while never mutated, but worth confirming. An empty
        ``groups`` list would produce ``in ()`` which is invalid SQL.
        """
        where = []
        for f in fields:
            where.append('%s=true' % f)
        # Group names store '&' doubled as '&&'; escape to match.
        names = tuple(x.replace('&', '&&') for x in groups)
        territories = tuple(x for x in session['access'])
        if len(where) == 0:
            sql = """
            select personid, name, email, home_phone, mobile_phone from person
            where personid in
                (select personid from membership m
                inner join groups g on m.groupsid=g.groupsid
                where g.name in %s)
            and territory in %s
            order by name
            """
        else:
            sql = """
            select personid, name, email, home_phone, mobile_phone from person
            where (personid in
                (select personid from membership m
                inner join groups g on m.groupsid=g.groupsid
                where g.name in %s) or """ + ' or '.join(where) + """
            ) and territory in %s
            order by name
            """
        people = []
        self.cursor.execute(sql, (names, territories,))
        rows = self.cursor.fetchall()
        if not rows:
            return []
        for r in rows:
            people.append(dict(r))
        return people
    def people_in_filter(self, fields):
        """
        Return people with any of the boolean ``fields`` columns set, limited
        to the session's accessible territories; everyone in those territories
        when ``fields`` is empty.
        """
        where = []
        for f in fields:
            where.append('%s=true' % f)
        territories = tuple(x for x in session['access'])
        if len(where) > 0:
            sql = """
            select personid, name, email, home_phone, mobile_phone from person
            where """ + ' or '.join(where) + """
            and territory in %s
            order by name
            """
        else:
            sql = """
            select personid, name, email, home_phone, mobile_phone from person
            where territory in %s
            order by name
            """
        people = []
        self.cursor.execute(sql, (territories,))
        rows = self.cursor.fetchall()
        if not rows:
            return []
        for r in rows:
            people.append(dict(r))
        return people
    def membership_update(self, personid, action, membership):
        """
        Add or remove a single group membership for a person.

        ``action`` is 'add' or 'remove'; ``membership`` is the group code
        ('&' is escaped to '&&' to match how codes are stored). Returns a
        status dict; on 'add' it includes the new membershipid.
        """
        params = {
            'personid': personid,
            'code': membership.replace('&', '&&'),
        }
        if action == 'remove':
            sql = """
                delete from membership
                where groupsid in (select groupsid from groups where code=%(code)s)
                and personid=%(personid)s
                """
        elif action == 'add':
            sql = """
                insert into membership (personid, groupsid)
                (select %(personid)s,groupsid from groups where code=%(code)s)
                RETURNING membershipid
                """
        else:
            return {'response': 'Failed', 'error': 'Invalid action'}
        self.cursor.execute(sql, params)
        self.sqlconn.commit()
        if action == 'add':
            membership_id = self.cursor.fetchone()[0]
        else:
            membership_id = None
        return {'response': 'Success', 'membershipid': membership_id}
    def groups_upsert(self, record):
        """
        Update or insert the team serving (groups) record.

        Matches on ``code``; UPDATE first, INSERT when nothing was changed.
        Returns False when ``groupsid`` is absent, True otherwise.
        """
        if 'groupsid' not in record:
            return False
        # Try updating the record and get the rowcount to see if it worked
        sql_update = """
            UPDATE groups
            SET name=%(name)s
            WHERE code=%(code)s
            """
        self.cursor.execute(sql_update, record)
        if self.cursor.rowcount > 0:
            # Updated an existing record
            self.sqlconn.commit()
        else:
            # Create a new groups record
            sql_insert = """
                INSERT INTO groups (name, code)
                VALUES (%(name)s,%(code)s)
                """
            self.cursor.execute(sql_insert, record)
            self.sqlconn.commit()
        return True
def groups_sync_deletion(self, group_ids):
"""
Use the list of current group Ids to remove extra ones in the portal database.
This needs to be done to sync deletions.
"""
groups = tuple(x for x in group_ids)
sql = "delete from groups where groupsid not in %s"
self.cursor.execute(sql, (groups,))
self.sqlconn.commit()
def family(self, family_number, event_id):
"""
Get the check to see if any children are signed-in for this tag using the local database.
"""
# Get the family record for details
family_record = self._family(family_number)
# Check for event registrations for this date
registered = self._reg_list(family_number, event_id)
family_record.update(registered)
return family_record
    def _reg_list(self, family_number, event_id):
        """Return today's registrations for the family, split into
        signed-in and signed-out person summaries."""
        today = datetime.date.today().isoformat()
        sql = """select p.*, r.status from registration r
                inner join person p on person_tag=tagnumber
                where r.family_tag=%s and r.eventid=%s and r.event_date=%s"""
        self.cursor.execute(sql, (family_number, event_id, today,))
        signed_in = []
        signed_out = []
        for p in self.cursor:
            person = {
                'name': p['name'],
                'personid': p['personid'],
                'tagnumber': p['tagnumber'],
                'parentid': p['family_tag'],
            }
            # Any other status is ignored.
            if p['status'] == 'Signed-In':
                signed_in.append(person)
            elif p['status'] == 'Signed-Out':
                signed_out.append(person)
        return {'signed_in': signed_in, 'signed_out': signed_out}
def family_tag(self, familyid):
"""
Get the family tag from the local database using the family ID.
"""
sql = "SELECT tagnumber FROM family WHERE familyid=%s"
self.cursor.execute(sql, (familyid,))
f = self.cursor.fetchone()
if not f:
return 0
else:
return f.get('tagnumber', 0)
def person(self, tag_number, details=None):
"""
Get the person's details from the tag number.
This can fetch full details for a person as well.
"""
return self._person(tag_number=tag_number, details=details)
def sign_in(self, family_number, people, event_id):
"""
Sign-in a set of kids using a family tag and a list of Person tag numbers.
"""
return self._register(family_number, people, event_id, 'Signed-In', 'In Progress')
def sign_out(self, family_number, people, event_id):
"""
Sign-out a set of kids using a family tag and a list of Person tag numbers.
"""
return self._register(family_number, people, event_id, 'Signed-Out', 'Won')
    def registrations(self, event_id, today_only=True):
        """
        Get the registrations for the event from the local database.

        When ``today_only`` is False the full history is returned — for one
        event when ``event_id`` > 0, otherwise for every event. Each record
        merges the registration stage/date with the person's full details.
        """
        today = datetime.date.today().isoformat()
        if today_only:
            self.cursor.execute("select r.*,e.name \"event_name\" from registration r inner join event e on r.eventid=e.eventid where r.eventid=%s and event_date=%s", (event_id, today,))
        else:
            if event_id > 0:
                self.cursor.execute("select r.*,e.name \"event_name\" from registration r inner join event e on r.eventid=e.eventid where r.eventid=%s order by event_date desc", (event_id,))
            else:
                self.cursor.execute("select r.*,e.name \"event_name\" from registration r inner join event e on r.eventid=e.eventid order by event_date desc")
        rows = self.cursor.fetchall()
        if not rows:
            return []
        records = []
        for o in rows:
            record = {
                'stage': o['status'],
                'event_date': o['event_date'].strftime('%Y-%m-%d'),
                'event_name': o['event_name']
            }
            # Lookup the Person for full details
            p = self._person(tag_number=o['person_tag'], details=True)
            record.update(p)
            records.append(record)
        return records
def scan(self, tag):
"""
Get the details of the person/family from the tag.
"""
prefix = tag[0:1]
tag_number = tag[1:]
if prefix == 'F':
record = self._family(tag_number)
elif prefix == 'C' or prefix == 'L':
record = self._person(tag_number=tag_number, details=True)
else:
record = {'error': 'The format of the tag appears to be invalid.'}
return record
    def find(self, search, from_person='', limit=30):
        """
        Search for people by name (case-insensitive substring match), paged
        alphabetically from ``from_person``, within accessible territories.
        """
        ids = tuple(x for x in session['access'])
        sql = """select * from person where name ilike '%%'||%s||'%%' and territory in %s
                 and name >= %s
                 order by name
                 limit %s
              """
        self.cursor.execute(sql, (search, ids, from_person, limit,))
        rows = self.cursor.fetchall()
        if not rows:
            return []
        return rows
def find_by_tag(self, tag, child=True):
"""
Find people using the tag number.
"""
sql = "select * from person where "
if child:
sql += "tagnumber=%s"
else:
sql += "family_tag=%s"
self.cursor.execute(sql, (tag,))
rows = self.cursor.fetchall()
if not rows:
return []
return rows
def get(self, personid):
"""
Get a person by their ID.
"""
self.cursor.execute("select * from person where personid=%s", (personid,))
row = self.cursor.fetchone()
if row:
p = dict(row)
return p
else:
return row
    def group_membership(self, personid):
        """
        Get the group membership and the groups the person is not part of.

        Returns {'team_serving': [...], 'team_serving_not': [...]} with the
        stored '&&' unescaped back to '&' in group names.
        """
        # Get the groups the person is in
        sql = """
            select g.name, g.code from membership m
            inner join groups g on m.groupsid=g.groupsid
            where personid=%s
            order by g.name
            """
        self.cursor.execute(sql, (personid,))
        groups = []
        for g in self.cursor.fetchall():
            g['name'] = g['name'].replace('&&', '&')
            groups.append(dict(g))
        # Get the groups the person is not in
        sql = """
            select g.name, g.code from groups g
            where g.groupsid not in
                (select m.groupsid from membership m where m.groupsid=g.groupsid
                and m.personid=%s )
            order by g.name
            """
        self.cursor.execute(sql, (personid,))
        groups_not = []
        for g in self.cursor.fetchall():
            g['name'] = g['name'].replace('&&', '&')
            groups_not.append(dict(g))
        return {'team_serving': groups, 'team_serving_not': groups_not}
    def _register(self, family_number, people, event_id, stage, status):
        """Upsert today's registration row for each person tag, setting the
        given ``stage``.

        NOTE(review): ``status`` is never used here — it appears to mirror a
        CRM opportunity status; confirm before removing it.
        """
        today = datetime.date.today().isoformat()
        for p in people:
            # Check if the registration (Opportunity) record exists
            sql = "select * from registration where person_tag=%s and family_tag=%s and eventid=%s and event_date=%s"
            self.cursor.execute(sql, (p, family_number, event_id, today,))
            row = self.cursor.fetchone()
            if row:
                # Update the existing record
                self.cursor.execute("update registration set status=%s where registrationid=%s", (stage, row['registrationid'],))
                self.sqlconn.commit()
            else:
                # Add a new registration for the person
                sql = "INSERT INTO registration (person_tag, family_tag, eventid, event_date, status) VALUES (%s,%s,%s,%s,%s)"
                self.cursor.execute(sql, (p, family_number, event_id, today, stage,))
                self.sqlconn.commit()
        return {"result": "success"}
    def _family(self, family_number):
        """
        Get family details from the local database.

        Returns the parent name, tag and the list of Kidswork children, or
        an {'error': ...} dict when the tag is unknown.
        """
        self.cursor.execute("SELECT * FROM family WHERE tagnumber=%s", (family_number,))
        row = self.cursor.fetchone()
        if row:
            # Get the children for the parent
            self.cursor.execute("SELECT * FROM person WHERE family_tag=%s and territory='Kidswork'", (family_number,))
            children = []
            for c in self.cursor:
                child = {
                    'name': c['name'],
                    'personid': c['personid'],
                    'tagnumber': c['tagnumber'],
                    'group': c['kids_group'],
                    'school_year': c['school_year'],
                }
                children.append(child)
            # Format the family record
            record = {
                'tagnumber': row['tagnumber'],
                'parent_name': row['name'],
                'children': children,
            }
        else:
            record = {'error': 'Cannot find Family record for the parent tag ' + family_number}
        return record
    def _registrations(self, reg_list):
        """Split a CRM-style registration result into signed-in/out people.

        NOTE(review): this mixes dict-style membership (``'records' in
        reg_list``) with attribute access (``reg_list.records``, ``reg.stage``)
        — it appears to target a CRM response object rather than the local
        rows; confirm it is still used before relying on it.
        """
        signed_in = []
        signed_out = []
        if 'records' in reg_list:
            # Yes: get the names of the kids
            for reg in reg_list.records:
                # Get the name/status of each person
                if reg.stage == 'Signed-In':
                    signed_in.append(self._person(person_id=reg.primarypersonid))
                elif reg.stage == 'Signed-Out':
                    signed_out.append(self._person(person_id=reg.primarypersonid))
        return {'signed_in': signed_in, 'signed_out': signed_out}
    def _person(self, person_id=None, tag_number=None, details=None):
        """
        Fetch one person by id or by Kidswork tag number.

        Returns a summary dict (name/personid/tagnumber/parentid); when
        ``details`` is truthy it also merges parent name, dob, groups and
        medical info. Returns {'error': ...} when nothing matches or when
        neither key is supplied.
        """
        if person_id:
            self.cursor.execute("SELECT * FROM person WHERE personid=%s", (person_id,))
        elif tag_number:
            self.cursor.execute("SELECT * FROM person WHERE tagnumber=%s and territory='Kidswork'", (tag_number,))
        else:
            return {'error': 'Person ID or Tag Number must be supplied for Person search.'}
        p = self.cursor.fetchone()
        if not p:
            return {'error': 'No records found for the tag.'}
        record = {
            'name': p['name'],
            'personid': p['personid'],
            'tagnumber': p['tagnumber'],
            'parentid': p['family_tag'],
        }
        # Deal with multi-select lists (comma-separated in the column)
        if p['medical_info']:
            medical_info = p['medical_info'].split(',')
        else:
            medical_info = []
        # Add full details of the person, if requested.
        if details:
            f = self._family(p['family_tag'])
            record.update({
                'parent': f.get('parent_name', ''),
                'dob': p['dob'] and p['dob'].strftime('%d/%m/%Y') or '',
                'group': p['kids_group'] or '',
                'team': p['kids_team'] or '',
                'school_year': p['school_year'],
                'medical_info': medical_info,
                'medical_notes': p['medical_notes'] or '',
                'family_tag': p['family_tag'],
            })
        return record
    def registrations_sync(self, from_date=None):
        """
        Fetch registrations for pushing to the CRM.

        Returns up to 500 registration rows (joined with family, person and
        event) modified on or after ``from_date`` (all rows when omitted).
        """
        if not from_date:
            from_date = '1980-01-01 00:00:00'
        # Get the registrations from the local db
        sql = """
            select f.familyid, p.personid, p.externalid contactid, r.*, e.* from registration r
            inner join family f on f.tagnumber=family_tag
            inner join person p on p.tagnumber=person_tag
            inner join event e on e.eventid = r.eventid
            where last_modified >= %s
            order by event_date desc limit 500
            """
        self.cursor.execute(sql, (from_date,))
        rows = self.cursor.fetchall()
        return rows
def registrations_chart(self, from_date=None, event_id=0):
"""
Get the registrations for the Kidswork chart.
"""
if not from_date:
from_date = '1980-01-01 00:00:00'
# Get the registrations from the local db
sql = """
select f.familyid, p.personid, p.externalid contactid, r.*, e.*, p.tagnumber, p.kids_group, p.school_year, f.name parent, p.name person_name
from registration r
inner join family f on f.tagnumber=family_tag
inner join person p on p.tagnumber=person_tag
inner join event e on e.eventid = r.eventid
where event_date>=%s
"""
if int(event_id) > 0:
sql += " and r.eventid=%s"
sql += "order by event_date desc limit 500"
if int(event_id) > 0:
self.cursor.execute(sql, (from_date, event_id,))
else:
self.cursor.execute(sql, (from_date,))
rows = self.cursor.fetchall()
return rows
def registrations_calc(self, event_id=0):
"""
Pivot the registration data so that it can be used for charts.
"""
event_id = int(event_id)
# Get the totals by event
event_summary = self._event_summary(event_id)
event_group = self._event_group(event_id)
# Convert the report totals into dictionary for simpler conversion to Google Chart format
events = {}
keys = {}
for e in event_summary:
keys[e['name']] = e['name']
if events.get(e['event_date']):
if events[e['event_date']].get(e['name']):
events[e['event_date']][e['name']] += e['count']
else:
events[e['event_date']][e['name']] = e['count']
else:
events[e['event_date']] = {
e['name']: e['count']
}
# Calculate the events total column
for e in event_summary:
events[e['event_date']]['Total'] = events[e['event_date']].get('Total', 0) + e['count']
for e in event_group:
kids_group = e['kids_group'] or ''
keys[e['kids_group']] = kids_group
if events.get(e['event_date']):
if events[e['event_date']].get(kids_group):
events[e['event_date']][kids_group] += e['count']
else:
events[e['event_date']][kids_group] = e['count']
else:
events[e['event_date']] = {
e[kids_group]: e['count']
}
return events, keys
    def _event_summary(self, event_id=0):
        """Per-event registration counts grouped by event date and name.

        Returns a list of rows (possibly empty); each row carries
        event_date, name and count(*).
        """
        # Get the totals by event
        ev_sql = """select r.event_date, e.name, count(*) from registration r
            inner join event e on e.eventid=r.eventid
            group by r.event_date, r.eventid, e.name
        """
        if event_id > 0:
            # NOTE(review): filtering with HAVING on a grouped column works in
            # MySQL, but a WHERE clause would be the conventional choice.
            ev_sql += " having r.eventid=%s order by r.event_date"
            self.cursor.execute(ev_sql, (event_id,))
        else:
            ev_sql += " order by r.event_date"
            self.cursor.execute(ev_sql)
        event_summary = self.cursor.fetchall()
        # Normalise a falsy result (e.g. None) to an empty list.
        if not event_summary:
            event_summary = []
        return event_summary
    def _event_group(self, event_id=0):
        """Per-kids-group registration counts by event date.

        Missing group names are normalised to '-' before returning.
        """
        # Get the totals by kids group
        ev_sql = """select r.event_date, p.kids_group, count(*) from registration r
            inner join person p on p.tagnumber=r.person_tag
            group by r.event_date, p.kids_group, r.eventid
        """
        if int(event_id) > 0:
            # NOTE(review): HAVING used as a filter here, same caveat as
            # _event_summary -- MySQL-specific behaviour.
            ev_sql += " having r.eventid=%s order by r.event_date"
            self.cursor.execute(ev_sql, (event_id,))
        else:
            ev_sql += " order by r.event_date"
            self.cursor.execute(ev_sql)
        event_summary = self.cursor.fetchall()
        if not event_summary:
            event_summary = []
        events = []
        # Replace empty/None group names with '-' (mutates the fetched rows
        # in place; `events` ends up as the same rows, normalised).
        for e in event_summary:
            if not e['kids_group']:
                e['kids_group'] = '-'
            events.append(e)
        return events
| [
"james.jesudason@gmail.com"
] | james.jesudason@gmail.com |
aea5006c1c62d85c9324800cb824eaecd11410ea | 90b77573a20caedbd31eaf8c3802ea42224d4d38 | /day05/ex07/forms.py | f4d0e094fc2d7147084f81b153bab2c80e86ffa8 | [] | no_license | hekang42/pythondjango_piscine | 813e200478244522fb71c92b37301123ff9b6882 | 7b7ec7c14b607c441e559da1fdbceb0a4a9d30c8 | refs/heads/master | 2023-08-14T21:24:55.006949 | 2021-10-07T07:27:11 | 2021-10-07T07:27:11 | 370,340,692 | 1 | 0 | null | 2021-10-07T07:27:12 | 2021-05-24T12:15:17 | Python | UTF-8 | Python | false | false | 311 | py | from django import forms
class UpdateForm(forms.Form):
    """Form whose 'titles' choices are supplied per-request at construction."""

    # Choices start empty; they are injected via __init__ below.
    titles = forms.ChoiceField(choices=(), required=True)
    texts = forms.CharField(required=True)

    def __init__(self, choices, *args, **kwargs):
        """``choices`` is an iterable of (value, label) pairs for 'titles'."""
        super(UpdateForm, self).__init__(*args, **kwargs)
        self.fields['titles'].choices = choices
"hekang@c4r5s6.42seoul.kr"
] | hekang@c4r5s6.42seoul.kr |
7124f2a9f68d5d9087a9d1222997500c8be8d396 | 8e8d8317df9faa2a64936467ceada871158b459a | /python入门/_09_面向对象特性/_15_类方法.py | 9e0663cf43b5ddbaa36bde92044829be8b046432 | [] | no_license | ccuwxy/learn_python_code | 8d5d27deefd7212665f1bdb24708e376ebe0ed9e | 47e84fb8ae9fcc1c6bc5fc02fee51cbca127968e | refs/heads/master | 2023-04-28T20:11:32.702882 | 2021-05-11T14:28:30 | 2021-05-11T14:28:30 | 358,096,174 | 1 | 0 | null | 2021-04-25T03:58:27 | 2021-04-15T01:56:32 | Python | UTF-8 | Python | false | false | 312 | py | class Tool(object):
count = 0
@classmethod
def show_tool_count(cls):
print("工具对象的数量 %d" % cls.count)
def __init__(self, name):
self.name = name
Tool.count += 1
tool1 = Tool("斧头")
tool2 = Tool("榔头")
tool3 = Tool("水桶")
Tool.show_tool_count()
| [
"wxy990212@126.com"
] | wxy990212@126.com |
2d5241ff37c81e87fe5dde76480448e82b1a8bf5 | 2bacd64bd2679bbcc19379947a7285e7ecba35c6 | /1-notebook-examples/keras-udemy-course/ann_class2/mxnet_example.py | 9ea745fce7cb6e4c583659c52a0dfbfe86e6fcb1 | [
"MIT"
] | permissive | vicb1/deep-learning | cc6b6d50ae5083c89f22512663d06b777ff8d881 | 23d6ef672ef0b3d13cea6a99984bbc299d620a73 | refs/heads/master | 2022-12-12T15:56:55.565836 | 2020-03-06T01:55:55 | 2020-03-06T01:55:55 | 230,293,726 | 0 | 0 | MIT | 2022-12-08T05:27:43 | 2019-12-26T16:23:18 | Jupyter Notebook | UTF-8 | Python | false | false | 2,621 | py | # https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow
# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow
# Trains a 2-hidden-layer MLP on MNIST-style data with Apache MXNet and
# reports test accuracy.  `util` is a course-local helper module.
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future

# installation is just one line:
# https://mxnet.incubator.apache.org/get_started/install.html
#
# Mac:
# pip install mxnet
#
# Linux (GPU):
# pip install mxnet-cu80
#
# Windows (a little more involved):
# https://mxnet.incubator.apache.org/get_started/windows_setup.html
import mxnet as mx
import numpy as np
import matplotlib.pyplot as plt
from util import get_normalized_data, y2indicator

# get the data, same as Theano + Tensorflow examples
# no need to split now, the fit() function will do it
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()

# get shapes
N, D = Xtrain.shape
K = len(set(Ytrain))

# training config
batch_size = 32
epochs = 15

# convert the data into a format appropriate for input into mxnet
train_iterator = mx.io.NDArrayIter(
    Xtrain,
    Ytrain,
    batch_size,
    shuffle=True
)
test_iterator = mx.io.NDArrayIter(Xtest, Ytest, batch_size)

# define a placeholder to represent the inputs
data = mx.sym.var('data')

# define the model architecture: D -> 500 -> 300 -> K with ReLU activations
a1 = mx.sym.FullyConnected(data=data, num_hidden=500)
z1 = mx.sym.Activation(data=a1, act_type="relu")
a2 = mx.sym.FullyConnected(data=z1, num_hidden = 300)
z2 = mx.sym.Activation(data=a2, act_type="relu")
a3 = mx.sym.FullyConnected(data=z2, num_hidden=K)
y = mx.sym.SoftmaxOutput(data=a3, name='softmax')

# train it

# required in order for progress to be printed
import logging
logging.getLogger().setLevel(logging.DEBUG)

# use mx.gpu() if you have gpu
model = mx.mod.Module(symbol=y, context=mx.cpu())
model.fit(
    train_iterator,  # train data
    eval_data=test_iterator,  # validation data
    optimizer=mx.optimizer.Adam(),
    eval_metric='acc',  # report accuracy during training
    batch_end_callback = mx.callback.Speedometer(batch_size, 100), # output progress for each 100 data batches
    num_epoch=epochs,
)
# no return value
# list of optimizers: https://mxnet.incubator.apache.org/api/python/optimization.html

# test it
# predict accuracy of mlp
acc = mx.metric.Accuracy()
model.score(test_iterator, acc)
print(acc)
print(acc.get())

# currently, there is no good way to plot the training loss / accuracy history
# https://github.com/apache/incubator-mxnet/issues/2873
#
# some have suggested parsing the logs
# https://github.com/apache/incubator-mxnet/blob/master/example/kaggle-ndsb1/training_curves.py
"vbajenaru@gmail.com"
] | vbajenaru@gmail.com |
1ba533ce7e9b7e2069995dcfa5cf5053a9b1d190 | aa349f9257e45933a682a38df9ef41e0ae28c730 | /find_second.py | c6d51c357f6bcf20b919310f8027d9e3bd76cbc9 | [] | no_license | lcbasu/Pyhthon-Search-Engine | 4f01d417e6e69268584ac7303db7f67df68959a8 | 988b36dea82f2c125b820562ad29b1bd35166b40 | refs/heads/master | 2021-05-27T09:45:45.082514 | 2014-03-28T10:53:44 | 2014-03-28T10:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | def find_second(search_string,target_string):
first_occur = search_string.find(target_string)
second_occur = search_string.find(target_string,first_occur+1)
return second_occur
search_string = '<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8" /><meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"><title>iFest 2012</title><link rel="stylesheet" type="text/css" href="css/style_makeover.css" /><p class="copyright"><span>© IEEE IIT Roorkee | <a href="http://ifest1.ieeeiitr.com/ifest_2011.php">iFest 2011</a></span></div></body></html><span>© IEEE IIT Roorkee | <a href="http://ifest2.ieeeiitr.com/ifest_2011.php">iFest 2011</a></span></div></body></html><span>© IEEE IIT Roorkee | <a href="http://ifest3.ieeeiitr.com/ifest_2011.php">iFest 2011</a></span></div></body></html><span>© IEEE IIT Roorkee | <a href="http://ifest4.ieeeiitr.com/ifest_2011.php">iFest 2011</a></span></div></body></html>'
target_string = 'ifest'
print find_second(search_string,target_string) | [
"lokesh.basu@gmail.com"
] | lokesh.basu@gmail.com |
fd84e7fc567df5271c49d18a9b47958ec6a061d6 | 6435a3b29ca60c0d45f741a07c6d8ac486d8fbeb | /freeze.py | 211149ab2b3515e3468acc5290de241b746bf0aa | [
"MIT"
] | permissive | pyskell/whatcanbitcoindo | b408d0e977fddc1a3c2dc796d527c11fefdfa6bd | 51ea38d5edb409709189daced073b4e336f06e9e | refs/heads/master | 2023-05-09T15:52:39.468558 | 2020-08-16T15:04:47 | 2020-08-16T15:04:47 | 264,560,135 | 2 | 0 | MIT | 2021-06-02T21:50:17 | 2020-05-17T01:21:50 | HTML | UTF-8 | Python | false | false | 245 | py | #!/usr/bin/env python3
from flask_frozen import Freezer

from app import app

# Freeze the Flask app into static files under ./docs (the GitHub Pages
# convention) and leave the existing CNAME file in place.
app.config['FREEZER_DESTINATION'] = 'docs'
app.config['FREEZER_DESTINATION_IGNORE'] = ['CNAME']
freezer = Freezer(app)

if __name__ == '__main__':
    freezer.freeze()
"pyskell@users.noreply.github.com"
] | pyskell@users.noreply.github.com |
ccf06c41e6a3a1868b2cadf52a808e58e3d17238 | 5ab6b12f342a1d011a0ed90dc2c111de1fd250bd | /brdc_inventory/models/inherited_res_partner.py | 8f423ace62d24fab3bb92385a1b6487b3beab8ca | [] | no_license | ainkrad/modified-odoo-10 | 7afdddc2db8e15469a897bc637287ac0c6d62689 | e8b77b4cf270c9536dabeed793a78346f3553263 | refs/heads/master | 2020-03-27T18:57:21.281829 | 2018-09-24T02:57:53 | 2018-09-24T02:57:53 | 146,955,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from odoo import api, fields, models
class inherited_res_partner(models.Model):
    """Extend Odoo's res.partner with collector area assignments."""

    _inherit = 'res.partner'

    # agency_id = fields.Many2one('employee.hierarchy')

    # Ranz
    # Barangays this partner (collector) is assigned to cover.
    collector_area_id = fields.Many2many('config.barangay', string="Barangay assigned")
    # Ranz
| [
"ahyanfrancisco30@gmail.com"
] | ahyanfrancisco30@gmail.com |
2de41bec0678268d11fc70fbf12b8eaeb770d951 | 34efa4d0acbf8b13e710c5df5e2825a355072d39 | /ADT_3D_vectors.py | 3d87c9308939248b961a615f0ce884ada5451b0c | [] | no_license | kriskros341/concreteAbstractions | 2535a86130c4b21ea76864717aab3348f3b9438c | 1f71b5411df95470b750538c2416c1962e534387 | refs/heads/main | 2023-04-18T12:43:11.755047 | 2021-05-02T11:50:42 | 2021-05-02T11:50:42 | 334,690,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | class vector:
def __init__(self, x, y, z):
self.x_coord = x
self.y_coord = y
self.z_coord = z
def __add__(self, another_vector):
new_x = self.x_coord+another_vector.x_coord
new_y = self.y_coord+another_vector.y_coord
new_z = self.z_coord+another_vector.z_coord
return vector(new_x, new_y, new_z)
def distance(self, another_vector):
x = (self.x_coord-another_vector.x_coord)**2
y = (self.y_coord-another_vector.y_coord)**2
z = (self.z_coord-another_vector.z_coord)**2
return round(((x+y+z)**(1/2)),4)
def dot_prod(self):
return self.x_coord**2+self.y_coord**2+self.z_coord**2
def __str__(self):
return f"x: {self.x_coord}, y: {self.y_coord}, z: {self.z_coord}"
v1 = vector(1,2,1)
v2 = vector(2,1,4)
print(
(v1+v2).dot_prod() # LOL
)
print(v1.distance(v2))
| [
"krzysztofczuba884@gmail.com"
] | krzysztofczuba884@gmail.com |
bedcfd6082c49f5b3370197ed3b1f2b588641be4 | 3da7cb968fd408657bfeb8ca3b83209966060f58 | /TaskPython/run2.py | c9b35d708bf5d63a46cc4b681fd89fabc32a71a9 | [] | no_license | 5up3rc/AwvScan | af2dcb384d814d2eab6560a0cfab99d9c73ed940 | a5f279f39480b87b6823fe55bf7e68e5e5dcd1c4 | refs/heads/master | 2021-01-18T09:51:22.438968 | 2015-08-12T13:13:47 | 2015-08-12T13:13:47 | 46,076,852 | 2 | 1 | null | 2015-11-12T19:48:47 | 2015-11-12T19:48:47 | null | UTF-8 | Python | false | false | 1,913 | py | # coding=utf-8
import urllib,time,os,base64,json
import _winreg
wvs_path = ""
def get_html(url):
url=url.strip()
html=urllib.urlopen(url).read()
return html
def writefile(logname,cmd):
try:
fp = open(logname,'a')
fp.write(cmd+"\n")
fp.close()
except:
return False
def regedit(re_root,re_path,re_key):
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,re_path)
value,type = _winreg.QueryValueEx(key,re_key)
return value
except:
return False
def get_console(url):
now = time.strftime('%Y-%m-%d %X', time.localtime(time.time()))
date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
try:
a = get_html(url)
#print a
if len(a) > 50:
base = base64.b64decode(a)
#print base
json_arr = json.loads(base)
target_url = json_arr['target_url']
user = json_arr['siteuser']
pwd = json_arr['sitepwd']
scan_rule = json_arr['scan_rule']
hash = json_arr['hash']
print json_arr
console = '"%s\\wvs_console.exe" /Scan %s --HtmlAuthUser=%s --HtmlAuthPass=%s --EnablePortScanning=True /Verbose /ExportXML /SaveLogs /SaveFolder E:\\wwwroot\\report\\%s\\' %(wvs_path,target_url,user,pwd,hash)
#console = console + '\ndel %0'
scantime = time.strftime('%Y-%m-%d %X', time.localtime(time.time()))
print "%s\n%s\n" %(scantime,console)
writefile('bat\\%s.bat'%hash,console)
cmd = 'cmd /c bat\\%s.bat' %hash
print "%s\n%s\n%s\n" %(now,target_url,cmd)
os.system(cmd)
except Exception , e:
info = '%s\nError: %s' %(now,e)
writefile('logs\\%s-Error.log'%date,info)
print info
wvs_path = regedit(0,"SOFTWARE\Acunetix\WVS9","Path")
#exit()
url = 'http://10.118.44.8/scan/tasklist.php'
i = 0
while 1:
now = time.strftime('%Y-%m-%d %X', time.localtime(time.time()))
try:
a = get_console(url)
i +=1
time.sleep(5)
except Exception , e:
info = '%s\nError: %s' %(now,e)
writefile('Error.log',info)
print info
time.sleep(1) | [
"coolxia@foxmail.com"
] | coolxia@foxmail.com |
42510ad34d67ca4ee16a1543e88bd46fa32f9de3 | 4a06498a34de4a4800ecea582d489e28693deaf6 | /MarketPurchase.py | fa6b4317134d22aab6fd39d813733df21c5c6af1 | [] | no_license | Ale1503/Launchpad-Learning- | 81b4cc8cf5baef843537a7cdb56f8cd56f869078 | dff7d2e9feb45c348991e77387a140c3f109375e | refs/heads/main | 2023-08-11T02:26:19.412255 | 2021-10-04T04:27:40 | 2021-10-04T04:27:40 | 316,850,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | #Pulpería
# In-memory product catalogue: each entry has a Code, display Name and Price.
list_of_products = [
    { 'Code': '1000', 'Name': 'Coke', 'Price': 5 },
    { 'Code': '1001', 'Name': 'Tropical', 'Price': 5 },
    { 'Code': '3001', 'Name': 'Alcohol', 'Price': 6 },
    { 'Code': '3002', 'Name': 'Cacique', 'Price': 10},
    { 'Code': '1212', 'Name': 'Komplete', 'Price': 15}
]

#Shopping cart
# Flat list alternating identifier, price, identifier, price, ...
cart = []

#Facts about the customer
# NOTE(review): the collected values are plain locals and are discarded when
# the function returns -- nothing downstream can use them.
def customer_information():
    print("Hello I hope you are doing well. Please let me get your information in oder to procceed with the purchase ")
    customers_name = input("What is your name? ")
    payment_method = input("What is your payment method? (Debit/Cash)" )
    amount = int (input("How much money do you have? In dollars "))

#Choosing the products category
# NOTE(review): the prompt advertises capitals ([S]/[E]/[P]) but only
# lowercase input matches; any other input silently ends the interaction.
def selection_of_products():
    purchase_step = input("Write the initial: [S]earch Product, [E]nter product code, [P]ay ")
    if purchase_step == 's':
        search_for_name()
    if purchase_step == 'e':
        search_for_code()
    if purchase_step == 'p':
        print (cart)

#Search for name
def search_for_name():
    product_chosen = input("Choose the name of the product ")
    for product in list_of_products:
        if product_chosen == product['Name']:
            cart.append(product['Name']),cart.append(product['Price'])
            print("Item added"), print('These are your current products', cart)
    # Loop back to the menu -- mutual recursion drives the interaction.
    selection_of_products()

#Search for code
def search_for_code():
    product_chosen = input("Choose the code of the product ")
    for product in list_of_products:
        if product_chosen == product['Code']:
            cart.append(product['Code']),cart.append(product['Price'])
            print("Item added"), print('These are your current products', cart)
    selection_of_products()

customer_information()
selection_of_products()

## - Ask if the user wants to enter the product code or wants to search for a product.
## [S]earch Product, [E]nter product code, [P]ay
## -> For Search Product: Enter a string and show the user all the products that match that string (Code, Name and price).
## -> For Enter product code: get the product code, look for the product with that code and add it to the shopping cart if it exists.
## -> After adding a new product, show the current shopping cart and total amount due.
## -> Ask again the first step. Repeat steps above if needed. If pay is chosen, check if the user can afford it (including taxes).
## -> Show an error if the amount of money is less than the required. Show a Thank you message if the amount of money is ok. Show to the user if there is any money left.
## OPTIONAL: be able to remove products from the shopping cart: [S]earch Product, [E]nter product code, [R]emove product from shopping cart or [P]ay.
| [
"alejandrovs151@gmail.com"
] | alejandrovs151@gmail.com |
a89f0e94dccdc78c5c769c12c90d705a0cdb7516 | 906d5b4f21c390966985b40262fc0f0ff1326878 | /aoc18b.py | f088fca2687d30cb5330f8d0a842a01ab8cf3219 | [] | no_license | Yabk/AdventOfCode2017 | 428be1700119f22fc27ae92640629776e3b53f89 | a75b9140757c2705216727552b4b158c879224ef | refs/heads/master | 2021-09-01T06:33:59.518297 | 2017-12-25T11:10:06 | 2017-12-25T11:10:06 | 113,624,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | #!/usr/bin/python3
#99, 127, 0, 129
import fileinput
from queue import Queue
import threading
from time import sleep
print_lock = threading.Lock()
def main():
    """Read the duet program from stdin/file args, run both program copies
    in threads (AoC 2017 day 18 part 2) and print the send/receive totals.

    ``sent[1]`` is the puzzle answer: how many values program 1 sent.
    """
    instructions = []
    for line in fileinput.input():
        # A blank line terminates the program listing early.
        if line == '\n':
            break
        instructions.append(line.split())

    # Cross-wired queues: program 0 sends on q1/receives on q0 and vice versa.
    q0 = Queue()
    q1 = Queue()
    # Shared mutable state: which program is blocked on receive, plus
    # per-program send/receive counters.
    waiting = [False, False]
    sent = [0, 0]
    received = [0, 0]
    prog_a = threading.Thread(target=worker, args=(instructions, 0, q0, q1, waiting, sent, received))
    prog_b = threading.Thread(target=worker, args=(instructions, 1, q1, q0, waiting, sent, received))
    prog_a.start()
    prog_b.start()

    #for i in range(20):
    #    print(sent)
    #    print(received)
    #    print()
    #    sleep(5)

    prog_a.join()
    prog_b.join()
    print(sent)
    print(received)
def worker(instructions, id, receive, send, is_waiting, times_sent, times_received):
    """Execute one 'duet' program (AoC 2017 day 18, part 2).

    Runs *instructions* with register 'p' preset to *id*, exchanging values
    over the *receive*/*send* queues.  A ``False`` pushed onto a queue is the
    shutdown sentinel: it tells the peer to stop, and is used to break the
    mutual-receive deadlock that ends the puzzle.
    """
    i = 0                         # instruction pointer
    registers = {'p': id}
    while (i >= 0) and (i < len(instructions)):
        instruction = instructions[i][0]
        first = instructions[i][1]
        # Registers default to 0 on first mention.
        if first not in registers:
            registers[first] = 0

        # For two-operand instructions, resolve the second operand:
        # `value` holds a literal int, or None when it names a register.
        if len(instructions[i]) == 3:
            second = instructions[i][2]
            try:
                value = int(second)
            except ValueError:
                if second not in registers:
                    registers[second] = 0
                value = None

        if instruction == 'snd':
            #with print_lock:
            #    print("id: {} sending reg {} with value {}".format(id, first, registers[first]))
            send.put(registers[first])
            times_sent[id] += 1
        elif instruction == 'set':
            if value is not None:
                registers[first] = value
            else:
                registers[first] = registers[second]
        elif instruction == 'add':
            if value is not None:
                registers[first] += value
            else:
                registers[first] += registers[second]
        elif instruction == 'mul':
            if value is not None:
                registers[first] *= value
            else:
                registers[first] *= registers[second]
        elif instruction == 'mod':
            if value is not None:
                registers[first] %= value
            else:
                registers[first] %= registers[second]
        elif instruction == 'rcv':
            #with print_lock:
            #    print("id: {} receiving reg {}".format(id, first))
            # Deadlock detection: nothing to read, the peer is already
            # blocked, and nothing is in flight -> tell the peer to stop
            # (False sentinel) and exit this program.
            if receive.empty() and is_waiting[1-id] and send.empty():
                is_waiting[id] = True
                send.put(False)
                return
            is_waiting[id] = True
            got = receive.get()
            is_waiting[id] = False
            # A False sentinel from the peer means: shut down.
            if got is False:
                return
            registers[first] = got
            times_received[id] += 1
            #with print_lock:
            #    print("id: {} received reg {} value {}".format(id, first, got))
        elif (instruction == 'jgz') and ((representsInt(first) and (int(first) > 0)) or ((first in registers) and (registers[first] > 0))):
            #old = i
            # -1 compensates for the unconditional i += 1 below.
            if value is not None:
                i += value - 1
            else:
                i += registers[second] - 1
            #with print_lock:
            #    print("id: {} jumping from {} to {}".format(id, old, i+1))
        #sleep(1)
        i += 1
    # Program ran off the end: release a possibly-blocked peer.
    send.put(False)
def representsInt(s):
    """True when *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
if __name__ == '__main__':
main()
| [
"jakov.ivkovic@gmail.com"
] | jakov.ivkovic@gmail.com |
4e543f00e2d14fc40a0fe4bc585aa1c2dfa3cba2 | 017a0e2c26455cf8a7a120fb21de7c8acf8e34b6 | /412_fizz_buzz.py | f73c92b6bb8162beb64c6617f2329051744f8d4c | [] | no_license | csliubo/leetcode | 06ee285ac84657a6010f53a1555c05eb0e9075ab | cd9a4d261830180a59bc92cc706fe0dd2f1746f9 | refs/heads/master | 2021-01-19T04:38:17.386615 | 2020-03-03T07:12:00 | 2020-03-03T07:12:00 | 63,233,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | # -*- coding:utf-8 -*-
__author__ = [
'"liubo" <liubo.cs@hotmail.com>'
]
class Solution(object):
    def fizzBuzz(self, n):
        """Classic FizzBuzz: for 1..n return the number as a string, or
        "Fizz"/"Buzz"/"FizzBuzz" for multiples of 3/5/both.

        :type n: int
        :rtype: List[str]
        """
        out = []
        for num in range(1, n + 1):
            word = ""
            if num % 3 == 0:
                word += "Fizz"
            if num % 5 == 0:
                word += "Buzz"
            # Fall back to the plain number when neither divisor matched.
            out.append(word or str(num))
        return out
| [
"liubo.cs@hotmail.com"
] | liubo.cs@hotmail.com |
1475c2e69309824b15d81801250c9ed88512b87e | 66e0f8dc025fd46a593c1360ce26e85869225332 | /model/classifier/logestic_reg_classifier.py | bfb2425b1c933c13ea7c9bad1a5c501b8d93828e | [] | no_license | PppBr/Spooky | 5fb1f47c8c0cd004dfdabf8d5eb379dbdc86b490 | 467e709ad1371db85b54cc47391d8dbf6eba17f9 | refs/heads/master | 2020-09-24T11:51:47.520715 | 2019-12-04T01:47:31 | 2019-12-04T01:47:31 | 225,754,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,638 | py | import numpy as np
from config import Config
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
class LogesticRegClassifier():
    """Logistic-regression text classifier built on a feature extractor."""

    def __init__(self, multiclass_logloss, fe, evaluate = True):
        """Initialize a Logistic Regression classifier.

        Args:
            multiclass_logloss: the loss function used for evaluation
            fe: feature extractor used in this classifier
            evaluate: whether to do cross validation (T/F)
        """
        self.loss = multiclass_logloss
        self.evaluate = evaluate
        self.fe = fe
        self.meta_loaded = fe.meta

    def train(self, x, y):
        """Train the Logistic Regression classifier.

        Args:
            x: dataframe containing training info
            y: pandas Series / array-like of labels

        In evaluate mode, runs 5-fold stratified CV and prints per-fold
        reports plus the averaged logloss.  Otherwise fits on the full
        data and dumps the model to disk.
        """
        # Mapping authors to author ids
        lbl_enc = preprocessing.LabelEncoder()
        y = lbl_enc.fit_transform(y.values)

        # Create a Logistic Regression model
        clf = LogisticRegression(C=1.0)

        # Training with cross validation
        if self.evaluate:
            avgLoss = []
            avgProb = []
            skf = StratifiedKFold(n_splits=5, shuffle = True, random_state = 2018)
            for train_index, valid_index in skf.split(x, y):
                train_x, valid_x = x.iloc[train_index, :], x.iloc[valid_index, :]
                train_y, valid_y = y[train_index], y[valid_index]

                # Load features from feature extractor
                stat = self.fe.load_statistic_features(train_x, valid_x)
                if not self.meta_loaded:
                    train_x = stat["train_count"]
                    valid_x = stat["test_count"]
                else:
                    train_x = stat["train_tfidf_plus_meta"]
                    valid_x = stat["test_tfidf_plus_meta"]

                clf.fit(train_x, train_y)
                pred_y = clf.predict(valid_x)
                print("--------------------------------------------------------------")
                print(classification_report(valid_y,pred_y))

                # Predicting with probability
                predictions = clf.predict_proba(valid_x)
                avgProb.append(predictions)
                avgLoss.append(self.loss(valid_y, predictions))
                print("model: Logestic regression")
                print ("logloss: %0.3f " % self.loss(valid_y, predictions))

            print("\033[0;37;41mfinal overall logloss cv: %0.3f \033[0m" % np.mean(avgLoss))
            print("-------------------------------------------------------")
        else:
            # BUG FIX: `stat` was previously read before it was ever assigned
            # on this code path (NameError); compute the features here.
            stat = self.fe.load_statistic_features(x, x)
            if not self.meta_loaded:
                x = stat["train_count"]
            else:
                x = stat["train_tfidf_plus_meta"]

            # Training
            clf.fit(x, y)

            # Model dumping
            joblib.dump(clf, "./results/classifier_models/nbmc")

    def predict(self, x):
        """Classify *x* with the previously dumped model.

        Args:
            x: dataframe containing the samples to classify

        Return:
            per-class probability array (one row per sample).
        """
        clf = joblib.load("./results/classifier_models/nbmc")

        # Features loading
        stat = self.fe.load_statistic_features(x, x)
        if not self.meta_loaded:
            x = stat["test_count"]
        else:
            x = stat["test_tfidf_plus_meta"]

        # Predicting
        predictions = clf.predict_proba(x)
        return predictions
| [
"bolopeng1991@gmail.com"
] | bolopeng1991@gmail.com |
bef472a1d7fc8c8b51ea93d9df05c7b22106bbbe | dc121ef8cfb9c4a8659698d15dc6bd27236ff093 | /blog22-fft/blog22-image02.py | 95dc87162a07261192d7731b8f048bd92e3f37d4 | [] | no_license | WeiSen0011/ImageProcessing-Python | 29328179d26f16242953957ae0d5a190a5265387 | 85438d642cb6093a100dcd1b9146e784b43b2b53 | refs/heads/master | 2023-07-14T01:16:30.481792 | 2021-08-20T09:54:03 | 2021-08-20T09:54:03 | 400,161,692 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # -*- coding: utf-8 -*-
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# Read the image (flag 0 = grayscale)
img = cv.imread('lena.png', 0)

# Forward Fourier transform: shift the DC component to the centre and
# log-scale the magnitude so the spectrum is visible.
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
res = np.log(np.abs(fshift))

# Inverse Fourier transform: undo the shift, keep only the magnitude.
ishift = np.fft.ifftshift(fshift)
iimg = np.fft.ifft2(ishift)
iimg = np.abs(iimg)

# Show original, spectrum and reconstruction side by side.
plt.subplot(131), plt.imshow(img, 'gray'), plt.title('Original Image')
plt.axis('off')
plt.subplot(132), plt.imshow(res, 'gray'), plt.title('Fourier Image')
plt.axis('off')
plt.subplot(133), plt.imshow(iimg, 'gray'), plt.title('Inverse Fourier Image')
plt.axis('off')
plt.show()
| [
"noreply@github.com"
] | WeiSen0011.noreply@github.com |
797ced415fa7d934c058f3bcd3023a6af0a876c5 | c26431bfe6244776cd6f7a3baeb92a560ab8bba8 | /makeCorrectionsWorkspace_singletau_legacy_2018.py | 84072fcc7a4b49728acaff1f5e5294167588cdf6 | [] | no_license | KIT-CMS/LegacyCorrectionsWorkspace | 157e0db9890a13fc0767a8c51d13a237c8b44085 | 807f175392086c5810ec329fea5864292e2f3110 | refs/heads/master | 2021-09-11T07:34:38.963273 | 2020-12-23T14:32:28 | 2020-12-23T14:37:37 | 202,510,204 | 0 | 2 | null | 2020-12-23T14:37:38 | 2019-08-15T09:09:38 | Python | UTF-8 | Python | false | false | 3,382 | py | #!/usr/bin/env python
import ROOT
import imp
import json
from array import array
import numpy as np
wsptools = imp.load_source('wsptools', 'workspaceTools.py')
def GetFromTFile(str):
    """Return a clone of object 'file.root:objname' from a ROOT file.

    The clone survives the file being closed.
    NOTE(review): the parameter shadows the builtin ``str``.
    """
    f = ROOT.TFile(str.split(':')[0])
    obj = f.Get(str.split(':')[1]).Clone()
    f.Close()
    return obj
# Boilerplate
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch(ROOT.kTRUE)
# 'import' is a Python keyword, so expose RooWorkspace::import as .imp
ROOT.RooWorkspace.imp = getattr(ROOT.RooWorkspace, 'import')
ROOT.TH1.AddDirectory(0)

w = ROOT.RooWorkspace('w')

####################################################################################################
# Single tau trigger weights
####################################################################################################
loc = 'inputs/2018/singletau/DESY/'

# Wrap input histograms in workspace.
# One entry per (sample, trigger, decay mode, eta region) combination:
# (path-in-file, name-in-workspace).
histsToWrap = []
for sample in ["_mc", "_emb"]:
    for trg in ["singleANDdouble_t", "single_t", "double_t"]:
        for dm in ["1pr", "1pr1pi0", "3pr"]:
            for region in ["barrel", "endcap"]:
                histsToWrap.append(
                    (loc + 'eff_tauTriggers_2018_v3.root:trg_{trg}{sample}_{dm}_{reg}'.format(trg=trg,
                                                                                             sample=sample,
                                                                                             dm=dm,
                                                                                             reg=region),
                     't_trg_{trg}{sample}_{dm}_{reg}'.format(trg=trg,
                                                             sample=sample,
                                                             dm=dm,
                                                             reg=region)
                     )
                )

for task in histsToWrap:
    wsptools.SafeWrapHist(w, ['t_pt'],
                          GetFromTFile(task[0]), name=task[1])

# Build histograms inclusive in eta.
# Barrel covers |eta| <= 1.48, endcap 1.48 < |eta| < 2.1.
w.factory('expr::t_abs_eta("TMath::Abs(@0)", t_eta[0])')
for sample in ["_mc", "_emb"]:
    for trg in ["singleANDdouble_t", "single_t", "double_t"]:
        for dm in ["1pr", "1pr1pi0", "3pr"]:
            w.factory('expr::t_trg_{trg}{sample}_{dm}("(@0<=1.48)*@1+(@0>1.48)*(@0<2.1)*@2", t_abs_eta, t_trg_{trg}{sample}_{dm}_barrel, t_trg_{trg}{sample}_{dm}_endcap)'.format(trg=trg, sample=sample, dm=dm))

# Build histogram inclusive in eta and dm.
# Decay-mode dispatch: 0 -> 1pr, 1/2 -> 1pr1pi0, 10/11 -> 3pr.
for sample in ["_mc", "_emb"]:
    for trg in ["singleANDdouble_t", "single_t", "double_t"]:
        w.factory('expr::t_trg_{trg}{sample}("(@0==0)*(@1)+(@0==1)*(@2)+(@0==2)*(@2)+(@0==10)*(@3)+(@0==11)*(@3)", t_dm[0], t_trg_{trg}{sample}_1pr, t_trg_{trg}{sample}_1pr1pi0, t_trg_{trg}{sample}_3pr)'.format(sample=sample, trg=trg))

# Wrap histograms for single tau efficiencies measured in W* events.
histsToWrap = [
    (loc + 'SingleTauTriggerEff_MediumDeepTau2017v2p1_2018.root:MC', 't_trg_single_t_wstar_mc'),
    (loc + 'SingleTauTriggerEff_MediumDeepTau2017v2p1_2018.root:Data', 't_trg_single_t_wstar_data'),
    (loc + 'SingleTauTriggerEff_MediumDeepTau2017v2p1_2018.root:SF', 't_trg_single_t_wstar_ratio')
]
for task in histsToWrap:
    wsptools.SafeWrapHist(w, ['t_pt'],
                          GetFromTFile(task[0]), name=task[1])

w.Print()
w.writeToFile('output/htt_scalefactors_singletau_legacy_2018.root')
w.Delete()
w.Delete()
| [
"maxiburkart@gmail.com"
] | maxiburkart@gmail.com |
8f0559a6949b29f1325ea7e4b0952a514e72b342 | 2a17e6a5d78849469b2094ec11f8a51e86475128 | /DIU_HS/settings.py | 36d64d8a3fb733268c23cd3ad16ffc365d0de70c | [] | no_license | maxhasan882/DIU_HS | fbe25b5d22dded5171b7bd9c31a75c16f03a7f8a | cbffe3b3799e46afe492064ecb45b617e8ff536b | refs/heads/master | 2020-07-29T07:54:07.332060 | 2019-09-20T09:12:00 | 2019-09-20T09:12:00 | 209,721,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '-qqcz^f-332ox2t)s(b$d&slmg^c+q@m!--w*7_%w_pckp(gdq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'rest_framework',
'dblayer',
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DIU_HS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DIU_HS.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'diuhsDB',
'USER': 'postgres',
'PASSWORD': 'mithu1996',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
INTERNAL_IPS = [
'127.0.0.1',
] | [
"rhmithu50@gmail.com"
] | rhmithu50@gmail.com |
396039b4b69ec0abb1fed167cff3f0a55462a939 | 1c051bdd645ba9667320b47d18938f1fa1cbc183 | /design_patterns/decorator_exception_handler.py | 01d126a9529c5985056537a7ede30cff068ad46a | [] | no_license | shuvava/python_algorithms | 095eecadd7c093bdc289f93455ca7446d039996f | 8b3b1f146b7eac5dc15b16aaf837441069cf5989 | refs/heads/master | 2023-08-18T11:33:49.607395 | 2023-07-24T15:14:01 | 2023-07-24T15:14:01 | 165,668,551 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://habr.com/ru/post/474278/
usage
```py
class Math(object):
@property
def exception_handlers(self):
return {
ZeroDivisionError: lambda e: 'division on zero is forbidden'
}
@ProcessException(exception_handlers)
def divide(self, a, b):
return a // b
```
"""
from asyncio import QueueEmpty, QueueFull
from concurrent.futures import TimeoutError
from inspect import iscoroutinefunction
class ProcessException(object):
__slots__ = ('handlers',)
def __init__(self, custom_handlers=None):
if isinstance(custom_handlers, property):
custom_handlers = custom_handlers.__get__(self, self.__class__)
raise_exception = ProcessException.raise_exception
exclude = {
QueueEmpty: lambda e: None,
QueueFull: lambda e: None,
TimeoutError: lambda e: None
}
self.handlers = {
**exclude,
**(custom_handlers or {}),
Exception: raise_exception
}
def __call__(self, func):
handlers = self.handlers
if iscoroutinefunction(func):
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except Exception as e:
return handlers.get(e.__class__, handlers[Exception])(e)
else:
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
return handlers.get(e.__class__, handlers[Exception])(e)
return wrapper
@staticmethod
def raise_exception(e: Exception):
raise e
| [
"shuvava@users.noreply.github.com"
] | shuvava@users.noreply.github.com |
8311e5d8b1c97f90f09fbff5a087b36337ab95a6 | 647e218cc4db604355f122c80f3a20b67bfef1bb | /env/bin/django-admin | 996e007ef91d4c3bfe18cb81e47f172ad5cd7911 | [] | no_license | Shraddhasaini/Django | e7ebf6aff6acf5ccf9c5dd6212451bd5d57debc4 | 42d453cfb5e3934065f6b13dadbe0e99dc3bdb6c | refs/heads/master | 2022-12-04T04:59:25.930218 | 2020-07-12T04:09:03 | 2020-07-12T04:09:03 | 208,974,215 | 3 | 1 | null | 2022-11-17T05:34:05 | 2019-09-17T06:19:08 | Python | UTF-8 | Python | false | false | 290 | #!/home/shraddha/tutorial/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"shraddhasaini99@gmail.com"
] | shraddhasaini99@gmail.com | |
e62d6ee0063d55a34d7e42e6d6a8b94c20009a8b | ad011e22a78c39f592a40287694f47feb3411d58 | /year_III/JNP_II/thresholdalert/thresholdalert/jsonschema.py | a179d0ca8ae34da40b4b0f7e47e4c76f400b0f31 | [] | no_license | filip-plata/CS_Bachelor | 4401534aa8ed0ffc1d01c8e7c78f4fd7cdddc798 | 52cc5a810457807c25e3a9967497d1ccea9fb8e3 | refs/heads/master | 2020-12-04T12:13:27.403049 | 2020-01-04T18:14:33 | 2020-01-04T18:14:33 | 231,760,456 | 1 | 0 | null | 2020-01-04T15:52:30 | 2020-01-04T12:37:16 | HTML | UTF-8 | Python | false | false | 133 | py | tick_message_schema = {
"type": "object",
"properties": {
"url": {"type": "string"}
},
"required": ["url"]
}
| [
"filip.plata@outlook.com"
] | filip.plata@outlook.com |
20fe052d943272f47ff5e5867e8567c5cb741235 | e67bb08f59c7b3858d79230c12c298236228696b | /airflow/data_analysis/extraction_data/spiders/niumba/homes.py | 3a5b9fa27d77102f097f55dbda4a2a16cc09751b | [] | no_license | jesustorresdev/vituin-project | 78a82d1b6660492d0663da8dd23cab73be7b9819 | f67f508662cf935a1d2985f0f8e1c0674ec06102 | refs/heads/master | 2023-03-18T01:25:39.129219 | 2020-07-17T14:11:45 | 2020-07-17T14:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,558 | py | # -*- coding: UTF-8 -*-
"""Extracción de viviendas en Niumba
"""
__authors__ = 'Sergio Díaz, Jesús Torres'
__organization__ = 'Universidad de La Laguna'
__licensed__ = 'UNLICENSED'
__contact__ = "jmtorres@ull.edu.es, sdiazgon@ull.edu.es"
# __copyright__ = f"Copyright 2017-2018 {__organization__}"
from scrapy import Request
from extraction_data.urls import NiumbaURLs
from extraction_data.items import ListNiumbaHomeItem
from extraction_data.required_fields import ListNiumbaHomeRequiredFields
from extraction_data.scrapy_spider import ScrapySpider
from extraction_data.utils import split_try
ELASTICSEARCH_INDEX = 'niumba_homes'
ELASTICSEARCH_DOC_TYPE = 'unstructured'
#TODO use loaders
class NiumbaSpider(ScrapySpider):
name = "niumba_listHomes"
def __init__(self, place=''):
self.place = place
self.start_urls = NiumbaURLs(place)
self.first_searched = self.get_if_first_searched(ELASTICSEARCH_INDEX, ELASTICSEARCH_DOC_TYPE)
def parse(self, response):
current_page = self.xpath(response,"//div[@class ='pagination listView']/span[@class='hidden-xs']", type='text')
homes = self.xpath(response,"//div[@id='mainSrpResults']/div[contains(@class,'data-tracking-tree-N group')]", type='object',
stop_if_error=True)
for home in homes:
url=self.xpath(home, './', type='attribute', attribute='data-rental-unit-url')
id=self.xpath(home, './', type='attribute', attribute='id')[4:]
place = self.xpath(home,'.//p[@class="mobile shortBreadCrumb"]', type='text')
lat = self.xpath(home,'.//div[@class="map-container"]', type='attribute', attribute='data-lat')
lng = self.xpath(home,'.//div[@class="map-container"]', type='attribute', attribute='data-lng')
if url != 'https://www.niumba.com' and url:
request = Request(url, callback=self.parse_home)
request.meta['id'] = id
request.meta['place'] = place.strip()
request.meta['lat'] = lat
request.meta['lng'] = lng
print(' id=', id, ', url=', url, 'place=', place)
if self.first_searched:
yield request
elif not self.exist_item(ELASTICSEARCH_INDEX, url):
yield request
total_of_apartments = self.xpath(response, "//span[@class='data-tracking-tree-NG']", type='attribute',
attribute='data-tracking-tree').replace(',','')
print('')
print('')
print('')
print('current_page',current_page)
print('')
print('')
print('')
if float(current_page) < (float(total_of_apartments) / 50):
next_page_url = 'https://www.niumba.com'+ \
self.xpath(response, '//a[@class="next hidden-xs"]', type='attribute', attribute='href')
yield Request(
next_page_url, callback=self.parse
)
def parse_home(self, response):
item = ListNiumbaHomeItem()
title = self.xpath(response, "//meta[@property='og:title']", type='attribute', attribute='content')
title = title[:title.find('Alojamiento')-5]
description = self.xpath(response, "//meta[@name='description']", type='attribute', attribute='content')
price = self.xpath(response, "//div[@class='nonRap']/strong", type='text')[1:]
numberReviewsTripadvisor = split_try(self.xpath(response, "//span[@itemprop='reviewCount']", type='text'), 0)
if numberReviewsTripadvisor:
numberReviewsTripadvisor = split_try(numberReviewsTripadvisor, 0)
mainBubblesTripadvisor = self.xpath(response, "//meta[@itemprop='ratingValue']", type='attribute', attribute='content')
bathrooms = split_try(self.xpath(response, ".//i[@class='icon icon-bath']/..", type='text'), 0)
type_residence = self.xpath(response, "//i[@class='icon icon-house']/parent::li", type='text')
capacity = self.xpath(response, "//i[@class='icon icon-sleeps']/../span", type='text', pos_array=0)
min_stay = self.xpath(response, "//i[@class='icon icon-min-stay']/../span", type='text')
if min_stay:
try:
min_stay = int(min_stay)
except:
min_stay = 'Varía'.decode('UTF-8')
rooms = self.xpath(response, "//i[@class='icon icon-bed']/../span[1]", type='text')
tourist_license = self.xpath(response, "//p[@class='touristLicence']", type='text',pos_extract=1).strip()
attributes_response = self.xpath(response, "//dl[@class='group']", type='object', pos_array=0)
attributes_name = self.get_attributes_array_tripadvisor_rentals(self.xpath(attributes_response,".//dt", type='text', pos_extract=None))
attributes_value = self.get_attributes_array_tripadvisor_rentals(self.xpath(attributes_response,".//dd", type='text', pos_extract=None))
attributes = self.get_attribute_tripadvisor_rentals(attributes_name,attributes_value)
list_attributes = {'response_rate':'','years_advertising':'','last_update':'', 'average_response_time':''}
for key in attributes:
list_attributes.update({key:attributes[key]})
item['id'] = response.meta['id']
item['url'] = response.url
item['title'] = title
item['description'] = description
item['type_residence'] = type_residence
item['rooms'] = rooms
item['min_stay'] = min_stay
item['price'] = price
item['capacity'] = capacity
item['bathrooms'] = bathrooms
item['number_reviews_tripadvisor'] = numberReviewsTripadvisor
item['main_bubbles_tripadvisor'] = mainBubblesTripadvisor
item['tourist_license'] = tourist_license
item['average_response_time'] = list_attributes['average_response_time']
item['response_rate'] = list_attributes['response_rate']
item['years_advertising'] = list_attributes['years_advertising']
item['last_update'] = list_attributes['last_update']
item['lat'] = response.meta['lat']
item['lng'] = response.meta['lng']
item['place'] = response.meta['place']
item['place_searched'] = self.place
self.check_item(item, ListNiumbaHomeRequiredFields())
self.update_database(item, ELASTICSEARCH_INDEX, ELASTICSEARCH_DOC_TYPE, self.first_searched)
if self.first_searched:
self.first_searched = False
return item
| [
"vituin@ull.edu.es"
] | vituin@ull.edu.es |
2fd5e2233fe2d48b72531aea174a8ac70305cd77 | d61e5d9a63bb81bef9473c34ab8eb13757f78159 | /simplecloud/task/utils.py | 7a281cceed9acc95c5af79ff4e4786d0097ca29b | [
"Apache-2.0"
] | permissive | lzufalcon/simplecloud | 2c3bfbb60ce9b2adaf5baec942a7fdb267ccd521 | 6cee961ece603ee772db3b62c9995569df40db6b | refs/heads/master | 2020-05-27T12:23:46.329622 | 2013-08-11T09:08:32 | 2013-08-11T09:08:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | # -*- coding: utf-8 -*-
from flask import current_app, flash
from flask.ext.login import current_user
from .models import Task
from .constants import TASK_SUCCESS, TASK_FAILED, TASK_STATUS
from ..extensions import db
def old_log_task(name, status=TASK_SUCCESS):
current_app.logger.info("%s: status=%s" % (name, TASK_STATUS[status]))
task = Task(name=name, owner_id=current_user.id, status_code=status)
db.session.add(task)
db.session.commit()
def log_task(name, status=TASK_SUCCESS, message=None):
current_app.logger.info("%s: status=%s" % (name, TASK_STATUS[status]))
task = Task(name=name, owner_id=current_user.id, status_code=status)
db.session.add(task)
db.session.commit()
if status == TASK_SUCCESS and message:
flash(message, "success")
if status == TASK_FAILED and message:
flash(message, "error")
| [
"sjtushi@gmail.com"
] | sjtushi@gmail.com |
c7d5a68d39e961ee4c9ec10435be6e35e0e1993f | dae73129f2de86ecd7d75c269ce4864e6e3fc5ec | /core/core.py | 8f2f4db83825f7bb22f449481b1ba0b6380a18fb | [] | no_license | Vrbikk/pyvision | 97adce7302ac73914d549f3d8c1556137d298441 | 9e30d11dddecb1d9cb40f631f36e560440473cb5 | refs/heads/master | 2021-01-15T19:59:42.944924 | 2016-09-17T21:59:37 | 2016-09-17T21:59:37 | 68,334,475 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | import logging
from loader import *
from classes import nvr
from xml.dom import minidom
from tools import mail
import requests
import xml.etree.ElementTree as ET
from core import mining
logger = logging.getLogger("core")
def scan():
logger.info("Starting scan procedure")
nvr_list = loader.load()
for i in range(0, len(nvr_list)):
nvr_list[i].status = mining.get_device_status(nvr_list[i])
if nvr_list[i].status == "ONLINE":
nvr_list[i].uptime = mining.get_device_uptime(nvr_list[i])
logger.info('Finished scan procedure')
logger.info("Sending mail report")
mail.send_mail(nvr_list)
| [
"tondavrba@gmail.com"
] | tondavrba@gmail.com |
1a97333c01f4adb419bcb769aca382312cd6fb35 | c27dc28f5a644138deaabfcb51c4fdf18f406a1b | /report2csv.py | c12d9a40f4ed8d02dbf6c0583c2cff919d8adab9 | [] | no_license | chandlee/report2csv | caef74c64e2fd9f2774d9f65d8104237e70e55df | af0fbe399bd379fbc8821a3b2ed26cb4f40cf443 | refs/heads/master | 2021-01-18T08:37:07.402730 | 2014-04-22T20:00:22 | 2014-04-22T20:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,510 | py | #-------------------------------------------------------------------------------
# Name: report2csv
# Purpose:
#
# Author: krh5058
#
# Created: 22/04/2014
# Copyright: (c) krh5058 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import csv
import os
import re
import sys
def write_csv(file,list):
try:
file.writerow(list)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def main(argv):
try:
f = open('reports/140422.html', 'r')
csvfile = open('140422.csv', 'w', newline='')
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
try:
## filedir = os.path.join(os.path.dirname(__file__), 'reports/')
head = ['pi-id', 'project-id', 'billable-approved', 'billable-used', 'billable-remaining', 'in-kind-session-approved', 'in-kind-session-used', 'in-kind-session-remaining', 'in-kind-other-approved', 'in-kind-other-used', 'in-kind-other-remaining']
fwrite = csv.writer(csvfile, delimiter=',')
write_csv(fwrite,head)
projn = 0
storeswitch = False;
l=[]
for line in f:
## print(line)
if re.search(r'ProjectID',line):
projn = projn + 1
proj = re.search('\w{3}\d{1,4}_\w{3,4}',line)
if proj:
l.append(re.search('\w{3}\d{1,4}',proj.group(0)).group(0))
print(l[0])
l.append(proj.group(0))
print(l[1])
elif re.search(r'<TABLE',line):
if projn>0:
storeswitch = True;
write_l = l
print(storeswitch)
elif re.search(r'</TABLE>',line):
if storeswitch:
write_csv(fwrite,write_l)
l=[]
write_l=[]
storeswitch = False;
print(storeswitch)
else:
dat = re.search('[-]?\d+[.]{1}\d{2}',line)
if dat:
print(dat.group(0))
if storeswitch:
write_l.append(dat.group(0))
## if storeswitch:
## l.append(
print(projn, ' matches found.')
print('Done!')
finally:
f.close()
csvfile.close()
if __name__ == '__main__':
main(sys.argv)
| [
"ken.r.hwang@gmail.com"
] | ken.r.hwang@gmail.com |
a2663b753ceff0ae88b8d0cd9dff97879cc47b8e | 8bef00a7980955714ec7fe7ea3c6ab152d5a3d89 | /ERP_project/dashboard_auth/urls.py | 36fabfe63c942e2661a0b4cbde2d4a242535d214 | [] | no_license | debajyoti1998/django_ERP | 3cbd558538d1946ba526b44b586d522fb992ebd8 | 3e487d00e8cc985f97dca6e35243da45979d3437 | refs/heads/main | 2023-02-12T16:25:36.039141 | 2021-01-07T15:42:36 | 2021-01-07T15:42:36 | 320,846,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.dashboard, name='dashboard'),
path('login', views.login, name='login'),
path('signup', views.signup, name='signup'),
# path('products', views.products, name='products'),
path('logout', views.logout, name='logout'),
] | [
"debajyotipanda1998@gmail.com"
] | debajyotipanda1998@gmail.com |
f7aee6e9c0dfeb0f4fcc072449878a3ad954db47 | b2f0e4b1578956875d88f64ed0f67e3067659353 | /dnazip/__init__.py | bcfaac4900e4591f76da7e6caef06c00d1d61e38 | [] | no_license | Bartvelp/dnazip | 491cb92bc1a1384194e638969d5f573f7900cc05 | 0450c5dda00bde05ae6ecea7eca1695aad6b4cec | refs/heads/master | 2022-03-16T16:51:40.426100 | 2019-10-22T13:30:46 | 2019-10-22T13:30:46 | 213,034,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | from .compress import compress
from .decompress import decompress | [
"bart.grosman@wur.nl"
] | bart.grosman@wur.nl |
0508646f006714131ef5208da636c23d6fd84ed5 | d7b38f240337bfb4ddac589f2f9d75d6e7809503 | /ReadTeInfoData.py | 34cd0380d46fa264cca0c3359d8b63ebcf4c8601 | [] | no_license | lipiny/ao | 969bb17f0ec3e57a75bc7e1f85a5ed3695786cd5 | 8b0553c977304e583703f2dc00a5b5d534fac03d | refs/heads/master | 2020-06-17T06:12:00.047831 | 2017-01-23T06:26:34 | 2017-01-23T06:26:34 | 75,035,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,622 | py | # -*- coding: utf-8 -*-
import numpy as np
import os
import io
import random
#from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
#order = 'all' parameter:none
#order = 'continuous' parameter:begin, rescale_num
#order = 'backorder' parameter:rescale_num
#order = 'random' parameter:begin, end, rescale_num
def readfile_te_infodata(order, begin, end, rescale_num, filename_info, filename_data, training_term_list):
#---data read---
#----begin----
#===============================
#check file existance and read data
#===============================
if (os.path.exists(filename_info)) is False:
print('file %s not exist'%filename_info)
exit()
fileRead = io.open(filename_info, 'r', encoding='utf8')
lines_info = fileRead.readlines()
fileRead.close()
#===============================
#store titles and categories in two array and return
#===============================
total_art_num = len(lines_info)
if ( order == 'all' ):
art_num = total_art_num
sample_docs = []
for i in range(0, art_num):
sample_docs.append(i)
elif ( order == 'continuous' ):
art_num = rescale_num
sample_docs = []
for i in range(begin, begin + rescale_num + 1):
sample_docs.append(i)
elif ( order == 'backorder' ):
art_num = rescale_num
sample_docs = []
for i in range(total_art_num - rescale_num, total_art_num):
sample_docs.append(i)
# in mode random, the article No.end is not included.
elif ( order == 'random' ):
art_num = rescale_num
if ( end - begin + 1 >= rescale_num ):
sample_docs=random.sample(range(begin,end+1),rescale_num)
else:
print("error: wrong mode setting in <readfile_tr_infodata>!")
exit();
title=['' for i in range(0,art_num)]
categorie=['' for i in range(0,art_num)]
for i in range(0,art_num):
count = 0
while((lines_info[sample_docs[i]][count] == '\t') is False):
count = count + 1
title[i]=lines_info[sample_docs[i]][0:count]
categorie[i]=lines_info[sample_docs[i]][count+1:]
#---info read---
#----finish----
#---data read---
#----begin----
#===============================
#check file existance and read data
#===============================
if (os.path.exists(filename_data)) is False:
print('file %s not exist'%filename_data)
exit()
fileRead = io.open(filename_data, 'r', encoding='utf8')
lines_data = fileRead.readlines()
fileRead.close()
#===============================
#CSR format
#result indptr, indices, data
#===============================
indptr = np.array([0])
indices = np.array([])
data = np.array([])
for i in range(0,art_num):
index_1 = 0
index_2 = 0
judge = 0
num_words = 0
indptr_tmp = 0
indices_tmp = -1
data_tmp = -1
#---indptr
while((lines_data[sample_docs[i]][index_2] == ' ') is False):
index_2 = index_2 + 1
indptr_tmp = int(lines_data[sample_docs[i]][index_1:index_2])
num_words = int(indptr_tmp)
for j in range(0, num_words):
index_2 = index_2 + 1
index_1 = index_2
#---indices
while((lines_data[sample_docs[i]][index_2] == ':') is False):
index_2 = index_2 + 1
indices_tmp=int(lines_data[sample_docs[i]][index_1:index_2])
for k in range(0, len(training_term_list)):
if( indices_tmp == training_term_list[k] ):
judge = 1
indices = np.append(indices, [k])
break
if( judge == 0 ):
#print('judge = 0')
indptr_tmp = indptr_tmp - 1
if( j < num_words -1 ):
while((lines_data[sample_docs[i]][index_2] == ' ') is False):
index_2 = index_2 + 1
elif( judge == 1 ):
#print('judge = 1')
index_2 = index_2 + 1
index_1 = index_2
if(j < num_words - 1):
while((lines_data[sample_docs[i]][index_2] == ' ') is False):
index_2 = index_2 + 1
data_tmp=int(lines_data[sample_docs[i]][index_1:index_2])
elif( j == num_words - 1 ):
data_tmp=int(lines_data[sample_docs[i]][index_1:])
else:
print('error in [ReadTeInfoData.py] with j')
exit()
data = np.append(data, [data_tmp])
else:
print('error in [ReadTeInfoData.py] with judge')
exit()
judge = 0
indptr = np.append(indptr, [indptr[len(indptr)-1] + indptr_tmp])
if(i%20==0):
print('-- article %d in %d read finish'%(i, art_num))
#---info data---
#----finish----
#===============================
#compress to a CSC matrix
#===============================
term_list_CSC_matrix = csc_matrix((data, indices, indptr), shape=(len(training_term_list), art_num))
#print(type(term_list_CSC_matrix))
#term_list_CSR_matrix = term_list_CSC_matrix.tocsr()
#print(type(term_list_CSR_matrix))
return term_list_CSC_matrix, art_num, title, categorie
| [
"summermelody7@hotmail.com"
] | summermelody7@hotmail.com |
d6a2692395d973722c538c781b8fecfa4e62647b | c53fcab99e84ccfe6d9f1455e7471892fbd6661e | /kubeface/commands/copy.py | 63edb408d96fc9594aa6ee83d89f860d51b393cf | [
"Apache-2.0"
] | permissive | proj4spes/kubeface | 3af558ae05f1fd89b2d93e81ce479094ef3f4b8f | 443d7432e6d2f8e4d20b6326e98fabeec7ad68b6 | refs/heads/master | 2021-04-03T02:53:10.284569 | 2017-06-22T19:40:30 | 2017-06-22T19:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | '''
Copy files, including support for google storage buckets.
'''
import sys
import argparse
import logging
from .. import storage
from ..common import configure_logging
from .. import serialization
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("source")
parser.add_argument("destination")
parser.add_argument(
"--no-error",
action="store_true",
default=False,
help="")
parser.add_argument(
"--quiet",
action="store_true",
default=False,
help="")
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="")
parser.add_argument(
"--print-deserialized",
action="store_true",
default=False,
help="")
def run(argv=sys.argv[1:]):
args = parser.parse_args(argv)
configure_logging(args)
logging.info("Reading: %s" % args.source)
input_handle = storage.get(args.source)
if args.print_deserialized:
deserialized = serialization.load(input_handle)
input_handle.seek(0)
print(deserialized)
if args.destination == "-":
print(input_handle.read())
else:
logging.info("Writing: %s" % args.destination)
storage.put(args.destination, input_handle)
logging.info("Completed.")
| [
"timodonnell@gmail.com"
] | timodonnell@gmail.com |
843dc125233ca5108cf01dcae4e55797ec9ef84a | c942047b74e1aee3af44dbe1b860c991726a3019 | /tests/test_module.py | cf1bad011afd3d62be8b9f82d5eec14f38b75b57 | [] | no_license | inmanta/ubuntu | 96fe435b91c712990afa520217a9b4a7a401401d | c39e39adc9e9bb4092c557a00f69962ca65a0bdb | refs/heads/master | 2023-08-04T21:54:06.863774 | 2023-07-31T22:49:32 | 2023-07-31T22:49:32 | 68,010,270 | 0 | 0 | null | 2023-09-08T23:49:30 | 2016-09-12T13:14:57 | Python | UTF-8 | Python | false | false | 751 | py | """
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
from pytest_inmanta.plugin import Project
def test_module(project: Project) -> None:
project.compile("import ubuntu")
| [
"noreply@github.com"
] | inmanta.noreply@github.com |
46beb75230bc354dd6d57c72f5e1dc3e5608e6cf | 91849d362c15ffc221b8017454a4a9bd8292ab79 | /clean_data.py | 5546d7b3dd0b5404e4bd050211aa4446b34980ab | [] | no_license | kasturikundu/DA-BC-Project-One | 840f7537affd829c705811e11f8a15f4be3f26ce | ca96d11648e70c35f355b59c2b636ea64d7fa4f2 | refs/heads/master | 2021-02-28T02:58:45.469508 | 2020-03-10T22:33:24 | 2020-03-10T22:33:24 | 245,656,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | print("change 2")
print("change 3") | [
"kasturi.kundu@Kasturi-Kundus-MBP13R-2.local"
] | kasturi.kundu@Kasturi-Kundus-MBP13R-2.local |
4209e5499b98a104adc9693ae8356a5bc01c7ae4 | 30cf02eb3c15da89db2e6efd3d405e92d0c8df36 | /src/pyobo/sources/gwascentral_study.py | 0f00a9432c87ccdffe5a7ed6c2fc786e107b0af4 | [
"MIT"
] | permissive | shunsunsun/pyobo | f53e5e6a4bb0b3ea135312cd8a54c905a52bd754 | 407c8f15873eb84cb5351ccc6e6ae0e8e3add22a | refs/heads/master | 2023-04-04T01:13:16.456853 | 2021-04-05T15:57:33 | 2021-04-05T15:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | # -*- coding: utf-8 -*-
"""Converter for GWAS Central."""
import logging
import tarfile
from typing import Iterable
from xml.etree import ElementTree
from pyobo.struct import Obo, Reference, Term, has_part
from pyobo.utils.path import ensure_path
logger = logging.getLogger(__name__)
VERSION = 'jan2021'
URL = f'http://www.gwascentral.org/docs/GC_{VERSION}.tar.gz'
PREFIX = 'gwascentral.study'
def get_obo():
"""Get GWAS Central Studies as OBO."""
return Obo(
ontology=PREFIX,
name='GWAS Central Study',
iter_terms=iterate_terms,
iter_terms_kwargs=dict(version=VERSION),
data_version=VERSION,
typedefs=[has_part],
auto_generated_by=f'bio2obo:{PREFIX}',
)
def _get_term_from_tree(tree: ElementTree.ElementTree) -> Term:
name = tree.find('name').text
description = tree.find('description').text
if description:
description = description.strip().replace('\n', ' ')
identifier = tree.find('identifier').text
term = Term(
reference=Reference(PREFIX, identifier, name),
definition=description,
)
for experiment in tree.findall('experiments'):
experiment_name = experiment.find('name').text
experiment_id = experiment.find('identifier').text
term.append_relationship(has_part, Reference(
'gwascentral.experiment',
identifier=experiment_id,
name=experiment_name,
))
return term
def iterate_terms(version: str) -> Iterable[Term]:
"""Iterate over GWAS Central Study terms."""
path = ensure_path(PREFIX, url=URL, version=version)
with tarfile.open(path) as tar_file:
for tar_info in tar_file:
if not tar_info.path.endswith('.xml'):
continue
with tar_file.extractfile(tar_info) as file:
try:
tree = ElementTree.parse(file)
except ElementTree.ParseError:
logger.warning('malformed XML in %s', tar_info.path)
continue
yield _get_term_from_tree(tree)
if __name__ == '__main__':
get_obo().write_default()
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
48ac0153509338639aee71aa6570d6b64e685c44 | fd95b15568916e52f89fc7bedcb6d7819c251c71 | /br_test.py | 208e0771ea8c6acd4ad45d50c58a9bb5b5128de4 | [] | no_license | thedanlaabs/br_exercise | 1a9720e9b1363bc9646eaff29577ea20d0cd4e75 | f7e16e2be2439bcb4576e35da35bfa583d1b9fb0 | refs/heads/master | 2020-03-28T20:59:44.593804 | 2018-09-17T12:31:15 | 2018-09-17T12:31:15 | 149,120,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | """
Write a smoke test to verify the functionality of the B/R home page. You may use selenium web-driver to do this.
This doesn’t have to be exhaustive, but feel free to add additional ideas of what could be automated if you are
running out of time.
"""
"""
This test quickly opens the b/r homepage, checks the title, and checks that a link (NFL on the top bar) is working.
The next bits of functionality I'd check would be to confirm an image is loading properly, then see if I could find a
date on the page to confirm I wasn't looking at something cached. Lastly, I'd mess around with the Edit Teams
functionality, but would need to go through a dummy account creation for that.
The only gotcha in my setup is that the chromedriver needs to be sitting in a folder on C:/ (or change the location
in the code below).
"""
import br_page
from selenium import webdriver
driver = webdriver.Chrome("C:\\chromedriver_win32\\chromedriver.exe")
def test_BR():
br = br_page.MainPage(driver)
br.open_page()
assert br.is_title_good()
assert br.is_nfl_link_working()
driver.quit()
test_BR()
| [
"thedanlaabs@gmail.com"
] | thedanlaabs@gmail.com |
e9c5f30f1bc8ea3b6321a8daf805d87181566bb1 | e9ee565cfff9e6b2a1ea6f73368f4a8948274795 | /src/pybel/repository.py | 8fc43dbcd8c9585d14524792df384d3f7421bf00 | [
"MIT"
] | permissive | pybel/pybel | 7e79530b454e23ae48486a5c0e3207744b7fa139 | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | refs/heads/master | 2022-08-26T18:41:25.724850 | 2022-02-11T12:22:35 | 2022-02-11T12:22:35 | 68,376,693 | 133 | 40 | MIT | 2022-02-11T12:11:24 | 2016-09-16T12:09:49 | Python | UTF-8 | Python | false | false | 18,905 | py | # -*- coding: utf-8 -*-
"""Utilities for BEL repositories."""
import json
import logging
import os
import sys
import time
from dataclasses import dataclass, field
from itertools import chain
from typing import Any, Iterable, Mapping, Optional, Set, TextIO, Tuple, Union
import click
import pandas as pd
from tqdm.autonotebook import tqdm
from .cli import (
connection_option,
host_option,
password_option,
user_option,
verbose_option,
)
from .constants import CITATION
from .io import from_bel_script, to_bel_commons, to_indra_statements
from .io.api import dump, load
from .manager import Manager
from .manager.citation_utils import enrich_pubmed_citations
from .struct import BELGraph
from .struct.operations import union
from .version import get_version
__all__ = [
"BELMetadata",
"BELRepository",
"append_click_group",
]
# Module-level logger for repository compilation/export progress messages.
logger = logging.getLogger(__name__)
# Reusable click option for CLI commands that support keeping uploads private.
private_option = click.option("--private", is_flag=True)
#: Extra keyword arguments forwarded to the serializer for each JSON-based
#: output format so the cached files are pretty-printed and deterministic
#: (sorted keys make re-exports diff-friendly).
OUTPUT_KWARGS = {
    "nodelink.json": dict(indent=2, sort_keys=True),
    "cx.json": dict(indent=2, sort_keys=True),
    "jgif.json": dict(indent=2, sort_keys=True),
}
@dataclass
class BELMetadata:
    """A container for BEL document metadata.

    Each field is optional; only fields that are set to a non-empty value
    are copied onto a graph by :meth:`update`.
    """

    name: Optional[str] = None
    version: Optional[str] = None
    description: Optional[str] = None
    authors: Optional[str] = None
    contact: Optional[str] = None
    license: Optional[str] = None
    copyright: Optional[str] = None
    disclaimer: Optional[str] = None

    def new(self) -> BELGraph:
        """Create a fresh BEL graph pre-populated with this metadata."""
        graph = BELGraph()
        self.update(graph)
        return graph

    def update(self, graph: BELGraph) -> None:
        """Copy every non-empty metadata field onto the given graph.

        Note that the ``license`` field maps onto the graph's ``licenses``
        attribute.
        """
        assignments = [
            ("name", self.name),
            ("version", self.version),
            ("authors", self.authors),
            ("description", self.description),
            ("contact", self.contact),
            ("licenses", self.license),
            ("copyright", self.copyright),
            ("disclaimer", self.disclaimer),
        ]
        for attribute, value in assignments:
            if value:
                setattr(graph, attribute, value)
@dataclass
class BELRepository:
    """A container for a BEL repository."""
    #: The root directory that is recursively searched for ``*.bel`` documents.
    directory: str
    #: Where cached artifacts are written. Defaults to :data:`directory` when
    #: left as None (resolved in ``__post_init__``).
    output_directory: Optional[str] = None
    #: The file name stem used for the combined (global) graph cache.
    bel_cache_name: str = "_cache.bel"
    #: Optional metadata applied to the combined graph. TODO confirm where applied;
    #: usage is outside this chunk.
    metadata: Optional[BELMetadata] = None
    #: The serialization formats written for each compiled graph.
    formats: Tuple[str, ...] = ("pickle", "nodelink.json")
    #: Template for cache file names. Must include {file_name} and {extension}
    cache_fmt: str = "{file_name}.{extension}"
    #: Extension for the repository-wide summary file (joined via :data:`cache_fmt`).
    global_summary_ext: str = "summary.tsv"
    #: Extension for per-document compilation warnings files.
    warnings_ext: str = "warnings.tsv"
    #: Arguments passed to :func:`pybel.from_path` during compilation
    from_path_kwargs: Mapping[str, Any] = field(default_factory=dict)
    #: The location where the summary DataFrame will be output as a TSV.
    bel_summary_path: str = field(init=False)
def __post_init__(self) -> None: # noqa: D105
if self.output_directory is None:
self.output_directory = self.directory
self.bel_summary_path = self._build_cache_ext_path(
root=self.output_directory,
file_name=self.bel_cache_name,
extension=self.global_summary_ext.lstrip("."),
)
    def _get_global_cache_path_by_extension(self, extension: str) -> str:
        """Build the path to the combined-graph cache file with the given extension."""
        return self._build_cache_ext_path(self.output_directory, self.bel_cache_name, extension)
    def _build_warnings_path(self, root: str, file_name: str) -> str:
        """Build the path to the warnings TSV accompanying the given BEL document."""
        return self._build_cache_ext_path(root, file_name, self.warnings_ext.lstrip("."))
    def _build_summary_path(self, root: str, file_name: str) -> str:
        """Build the path to the JSON summary accompanying the given BEL document."""
        return self._build_cache_ext_path(root, file_name, "summary.json")
def _build_cache_ext_path(self, root: str, file_name: str, extension: str) -> str:
return os.path.join(
root,
self.cache_fmt.format(file_name=file_name, extension=extension.lstrip(".")),
)
    def walk(self) -> Iterable[Tuple[str, Iterable[str], Iterable[str]]]:
        """Recursively walk this directory.

        Yields the same ``(dirpath, dirnames, filenames)`` triples as :func:`os.walk`.
        """
        return os.walk(self.directory)
def iterate_bel(self) -> Iterable[Tuple[str, str]]:
"""Yield all paths to BEL documents."""
for root, _dirs, file_names in self.walk():
for file_name in sorted(file_names):
if not file_name.startswith("_") and file_name.endswith(".bel"):
yield root, file_name
    def clear_global_cache(self) -> None:
        """Remove every cached serialization of the combined repository graph."""
        self._remove_root_file_name(self.output_directory, self.bel_cache_name)
def clear_local_caches(self) -> None:
"""Clear all caches of BEL documents in the repository."""
for root, file_name in self.iterate_bel():
self._remove_root_file_name(root, file_name)
def clear_local_warned(self) -> None:
"""Clear caches for BEL documents with errors."""
for root, file_name in self.iterate_bel():
if self._has_warnings(root, file_name):
self._remove_root_file_name(root, file_name)
def _has_warnings(self, root: str, file_name: str) -> bool:
return os.path.exists(self._build_warnings_path(root, file_name))
def _remove_root_file_name(self, root: str, file_name: str) -> None:
for _, path in self._iterate_extension_path(root, file_name):
if os.path.exists(path):
os.remove(path)
def _iterate_extension_path(self, root: str, file_name: str) -> Iterable[Tuple[str, str]]:
for extension in self.formats:
yield extension, self._build_cache_ext_path(root, file_name, extension)
def _import_local(self, root: str, file_name: str) -> Optional[BELGraph]:
for _, path in self._iterate_extension_path(root, file_name):
if os.path.exists(path):
return load(path)
return None
    def _import_global(self) -> Optional[BELGraph]:
        """Load the repository-wide combined graph from cache, or ``None`` when uncached."""
        return self._import_local(self.output_directory, self.bel_cache_name)
    def _export_local(self, graph: BELGraph, root: str, file_name: str) -> None:
        """Write the compiled graph next to its source document.

        Emits one serialized file per format in :attr:`formats`, a JSON summary,
        and — when the graph has compiler warnings — a warnings TSV.
        """
        for extension, path in self._iterate_extension_path(root, file_name):
            kwargs = OUTPUT_KWARGS.get(extension, {})
            dump(graph, path, **kwargs)
        with open(self._build_summary_path(root, file_name), "w") as file:
            json.dump(graph.summarize.dict(), file, indent=2)
        if graph.warnings:
            logger.info(f" - {graph.number_of_warnings()} warnings")
            warnings_path = self._build_warnings_path(root, file_name)
            # One TSV row per compiler warning, in original order.
            warnings_df = pd.DataFrame(
                [
                    (
                        exc.line_number,
                        exc.position,
                        exc.line,
                        exc.__class__.__name__,
                        str(exc),
                    )
                    for _, exc, _ in graph.warnings
                ],
                columns=["Line Number", "Position", "Line", "Error", "Message"],
            )
            warnings_df.to_csv(warnings_path, sep="\t", index=False)
    def _export_global(self, graph: BELGraph) -> None:
        """Write the combined graph's artifacts to the global cache location."""
        self._export_local(graph, self.output_directory, self.bel_cache_name)
    def get_graph(
        self,
        manager: Optional[Manager] = None,
        use_cached: bool = True,
        use_tqdm: bool = False,
        tqdm_kwargs: Optional[Mapping[str, Any]] = None,
        from_path_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> BELGraph:
        """Get a combine graph.

        Returns the globally cached combined graph when ``use_cached`` is true
        and a cache exists; otherwise compiles all documents, takes their
        union, updates it with :attr:`metadata` (when given), writes the
        summary TSV and the global cache, and returns the result.
        """
        if use_cached:
            graph = self._import_global()
            if graph is not None:
                return graph
        graphs = self.get_graphs(
            manager=manager,
            use_tqdm=use_tqdm,
            tqdm_kwargs=tqdm_kwargs,
            from_path_kwargs=from_path_kwargs,
        )
        graph = union(graphs.values())
        if self.metadata is not None:
            self.metadata.update(graph)
        # Side effect: writes the per-document summary TSV to ``bel_summary_path``.
        self._get_summary_df_from_graphs(graphs)
        self._export_global(graph)
        return graph
def get_indra_statements(self, **kwargs):
"""Get INDRA statements for all graphs.
:rtype: List[indra.statements.Statement]
"""
return list(chain.from_iterable(to_indra_statements(graph) for graph in self.get_graphs(**kwargs).values()))
def get_graphs(
self,
manager: Optional[Manager] = None,
use_cached: bool = True,
use_tqdm: bool = False,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
from_path_kwargs: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, BELGraph]:
"""Get a mapping of all graphs' paths to their compiled BEL graphs."""
if manager is None:
manager = Manager()
paths = self.iterate_bel()
if use_tqdm:
paths = tqdm(list(paths), **(tqdm_kwargs or {}))
rv = {}
for root, file_name in paths:
path = os.path.join(root, file_name)
if use_cached:
graph = self._import_local(root, file_name)
if graph is not None:
rv[path] = graph
continue
_from_path_kwargs = from_path_kwargs or {}
_from_path_kwargs.update(self.from_path_kwargs)
try:
graph = rv[path] = from_bel_script(path, manager=manager, **_from_path_kwargs)
graph.path = os.path.relpath(os.path.join(root, file_name), self.directory)
except Exception as exc:
logger.warning(f"problem with {path}: {exc}")
continue
enrich_pubmed_citations(graph=graph, manager=manager)
self._export_local(graph, root, file_name)
return rv
    def get_summary_df(
        self,
        manager: Optional[Manager] = None,
        use_cached: bool = False,
        use_tqdm: bool = False,
        tqdm_kwargs: Optional[Mapping[str, Any]] = None,
        from_path_kwargs: Optional[Mapping[str, Any]] = None,
        save: Union[bool, str, TextIO] = True,
    ) -> pd.DataFrame:
        """Get a pandas DataFrame summarizing the contents of all graphs in the repository.

        Compiles (or reloads) every document via :meth:`get_graphs`, then builds
        one summary row per graph. ``save`` controls whether/where the TSV is
        written (see :meth:`_get_summary_df_from_graphs`).
        """
        graphs = self.get_graphs(
            manager=manager,
            use_cached=use_cached,
            use_tqdm=use_tqdm,
            tqdm_kwargs=tqdm_kwargs,
            from_path_kwargs=from_path_kwargs,
        )
        return self._get_summary_df_from_graphs(graphs, save=save)
def _get_summary_df_from_graphs(self, graphs, save: Union[str, bool, TextIO] = True):
summary_dicts = {
os.path.relpath(path, self.directory): graph.summarize.dict() for path, graph in graphs.items()
}
df = pd.DataFrame.from_dict(summary_dicts, orient="index")
if isinstance(save, str):
df.to_csv(save, sep="\t")
elif save:
df.to_csv(self.bel_summary_path, sep="\t")
return df
    def build_cli(self):  # noqa: D202
        """Build a command line interface rooted at this repository."""

        @click.group(help=f"Tools for the BEL repository at {self.directory} using PyBEL v{get_version()}")
        @click.pass_context
        def main(ctx):
            """Group the commands."""
            # Make this repository available to subcommands via ``@click.pass_obj``.
            ctx.obj = self

        append_click_group(main)
        return main
def get_extensions(self, root: str, file_name: str) -> Set[str]:
"""Get all compiled files for the given BEL."""
# TODO check that this is a valid BEL path!
return {extension for extension, path in self._iterate_extension_path(root, file_name) if os.path.exists(path)}
    def _get_global_caches(self):
        """Get the set of formats for which a global cache file exists."""
        return self.get_extensions(self.output_directory, self.bel_cache_name)
    def _iterate_citations(self, **kwargs) -> Iterable[Tuple[str, str]]:
        """List all citations in documents in this repository.

        Yields ``(namespace, identifier)`` pairs for every edge that carries a
        citation; duplicates are NOT removed here.
        """
        for _, _, data in self.get_graph(**kwargs).edges(data=True):
            citation = data.get(CITATION)
            if citation is not None:
                yield citation.namespace, citation.identifier
def _write_caches(bel_repository: BELRepository, root: str, file_name: str):
    """Echo a one-line, colored status for a single BEL document.

    Shows whether caches exist, whether compilation produced warnings, and
    (when a summary JSON exists) node/edge counts.
    """
    extensions = ", ".join(sorted(bel_repository.get_extensions(root, file_name)))
    has_warnings = os.path.exists(bel_repository._build_warnings_path(root, file_name))
    try:
        with open(bel_repository._build_summary_path(root, file_name)) as file:
            summary = json.load(file)
    except FileNotFoundError:
        # No summary yet; leave the counts out of the status line.
        summary = None
    # Red cross: cached but with warnings; green check: cached cleanly;
    # yellow question mark: not compiled yet.
    if extensions and has_warnings:
        s = click.style("✘️ ", fg="red")
    elif extensions and not has_warnings:
        s = click.style("✔︎ ", fg="green")
    else:
        s = click.style("? ", fg="yellow", bold=True)
    path = os.path.join(root, file_name)
    s += os.path.relpath(path, bel_repository.directory)
    if extensions:
        s += click.style(f" ({extensions})", fg="green")
    if summary:
        s += click.style(
            f' ({summary["Number of Nodes"]} nodes, {summary["Number of Edges"]} edges)',
            fg="blue",
        )
    click.echo(s)
def append_click_group(group: click.Group) -> None:  # noqa: D202, C901
    """Append the repository sub-commands to a :py:class:`click.Group`.

    Every command receives the :class:`BELRepository` through click's context
    object (``@click.pass_obj``), as set up by :meth:`BELRepository.build_cli`.
    """

    @group.command()
    @click.pass_obj
    def ls(bel_repository: BELRepository):
        """List the contents of the repository."""
        global_caches = bel_repository._get_global_caches()
        if global_caches:
            click.secho("Global Cache", fg="red", bold=True)
            _write_caches(
                bel_repository,
                bel_repository.output_directory,
                bel_repository.bel_cache_name,
            )
            click.secho("Local Caches", fg="red", bold=True)
        for root, file_name in bel_repository.iterate_bel():
            _write_caches(bel_repository, root, file_name)

    @group.command()
    @click.pass_obj
    def citations(repository: BELRepository):
        """List citations in the repository."""
        # De-duplicate, then sort numerically by the citation identifier.
        for database, reference in sorted(set(repository._iterate_citations(use_tqdm=True)), key=lambda x: int(x[1])):
            click.echo(f"{database}\t{reference}")

    @group.command()
    @host_option
    @user_option
    @password_option
    @click.option("-s", "--sleep", type=int, default=3, help="Seconds to sleep between sending")
    @private_option
    @click.pass_obj
    def upload_separate(
        repository: BELRepository,
        host: str,
        user: str,
        password: str,
        sleep: int,
        private: bool,
    ):
        """Upload all to BEL Commons."""
        it = tqdm(repository.get_graphs().items())
        for name, graph in it:
            res = to_bel_commons(graph, host=host, user=user, password=password, public=not private)
            res_json = res.json()
            task_id = res_json.get("task_id")
            if task_id is not None:
                it.write(f"task:{task_id} - {name}")
                it.write(f'see: {host.rstrip("/")}/api/task/{task_id}')
                # Throttle uploads so the server is not flooded.
                time.sleep(sleep)
            else:
                it.write(f"problem with {name}: {res_json}")

    @group.command()
    @host_option
    @user_option
    @password_option
    @private_option
    @click.pass_obj
    def upload_combine(repository: BELRepository, host: str, user: str, password: str, private: bool):
        """Upload the combine graph."""
        graph = repository.get_graph()
        res = to_bel_commons(graph, host=host, user=user, password=password, public=not private)
        res_json = res.json()
        task_id = res_json.get("task_id")
        if task_id is not None:
            click.echo(f"task:{task_id} - {graph}")
            click.echo(f'see: {host.rstrip("/")}/api/task/{task_id}')
        else:
            click.echo(f"problem with {graph.name}: {res_json}")

    @group.command()
    @click.confirmation_option()
    @click.pass_obj
    def uncache(bel_repository: BELRepository):
        """Clear the cached data for the repository."""
        bel_repository.clear_global_cache()
        bel_repository.clear_local_caches()

    @group.command()
    @click.confirmation_option()
    @click.pass_obj
    def uncache_global(bel_repository: BELRepository):
        """Clear the cached data for the repository."""
        bel_repository.clear_global_cache()

    @group.command()
    @click.confirmation_option()
    @click.pass_obj
    def uncache_local(bel_repository: BELRepository):
        """Clear the cached data for the repository."""
        bel_repository.clear_local_caches()

    @group.command()
    @click.confirmation_option()
    @click.pass_obj
    def uncache_warned(bel_repository: BELRepository):
        """Clear the cached data for the documents that have warnings."""
        bel_repository.clear_local_warned()

    @group.command()
    @connection_option
    @click.option("-r", "--reload", is_flag=True)
    @click.option("--no-tqdm", is_flag=True)
    @verbose_option
    @click.pass_obj
    def compile(bel_repository: BELRepository, connection: str, reload: bool, no_tqdm: bool):
        """Summarize the repository."""
        if reload:
            # Drop every cache so all documents are recompiled from source.
            bel_repository.clear_global_cache()
            bel_repository.clear_local_caches()
        manager = Manager(connection=connection)
        graph = bel_repository.get_graph(
            manager=manager,
            use_cached=(not reload),
            use_tqdm=(not no_tqdm),
            tqdm_kwargs=dict(
                desc="Loading BEL",
                leave=False,
            ),
            from_path_kwargs=dict(
                use_tqdm=(not no_tqdm),
                tqdm_kwargs=dict(
                    leave=False,
                ),
            ),
        )
        click.echo(graph.summarize.str())

    @group.command()
    @click.argument("file", type=click.File("w"))
    @click.pass_obj
    def html(bel_repository: BELRepository, file: TextIO):
        """Output an HTML summary."""
        graph = bel_repository.get_graph()
        try:
            from pybel_tools.assembler.html import to_html_file
        except ImportError:
            click.secho("pybel_tools.assembler.html is not available", fg="red")
            sys.exit(1)
        else:
            to_html_file(graph, file)
@click.group()
@click.version_option()
@click.option(
    "-d",
    "--directory",
    default=os.getcwd(),
    type=click.Path(file_okay=False, dir_okay=True, exists=True),
    help="Defaults to current working directory",
)
@click.pass_context
def main(ctx, directory: str):
    """Command line interface for bel-repository."""
    # Expose the repository to all subcommands via ``@click.pass_obj``.
    ctx.obj = BELRepository(directory=directory)
# Register all repository sub-commands on the top-level CLI group.
append_click_group(main)
if __name__ == "__main__":
    main()
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
4a4aedbcae688967b5b85e60e73a727908c934a5 | 4fc016459e4c78680c61488c771eb6b7eb20d5fe | /Python-Algorithms-DataStructure/src/leet/104_MaximumDepthofBinaryTree.py | f6cf6d7dcb193f73f277665a5f23cbafd59f85b0 | [] | no_license | coremedy/Python-Algorithms-DataStructure | 7c318de68fd9694377a0a4369d8dbeb49e1e17aa | 3873502679a5def6af4be03028542f07d059d1a9 | refs/heads/master | 2021-01-25T07:34:17.714241 | 2015-11-05T10:17:40 | 2015-11-05T10:17:40 | 27,949,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | '''
Created on 2015-08-02
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Compute the maximum depth of a binary tree (LeetCode 104)."""

    # @param {TreeNode} root
    # @return {integer}
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path."""
        return 0 if root is None else self.DFS(0, root)

    def DFS(self, depth, node):
        """Return the depth of the deepest node in the subtree at *node*,
        where *depth* is the number of ancestors already visited."""
        if node is None:
            return depth
        left_depth = self.DFS(depth + 1, node.left)
        right_depth = self.DFS(depth + 1, node.right)
        return left_depth if left_depth >= right_depth else right_depth
# LeetCode submission harness: nothing to run locally.
if __name__ == '__main__':
    pass
"coremedy@hotmail.com"
] | coremedy@hotmail.com |
6962525e8e33fe4066d13652ab2d270efb038cae | b9a28995df0bd5507c4a2be523606bcfce04db84 | /login/login.py | b24f5cefa2ee29d87df2d71f20e6f5bc6b2646e7 | [] | no_license | camikaze99/Messaging-Application | f12623269c707d4ad9a7127e105498def708a32d | b3c829d525166d980e24dd481680ac5fbb110298 | refs/heads/master | 2020-07-18T15:05:11.713160 | 2019-10-03T20:47:00 | 2019-10-03T20:47:00 | 206,266,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | from flask import Flask, render_template, redirect, url_for, request, session, flash
from functools import wraps
import os
loginpage = Flask(__name__)
loginpage.secret_key = os.urandom(24)
def login_required(f):
@wraps(f)
def wrap(*args,**kwargs):
if 'logged_in' in session:
return f(*args,**kwargs)
else:
return redirect(url_for('login'))
return wrap
@loginpage.route('/')
@login_required
def home():
    """Send authenticated visitors of the root URL to the login view."""
    return redirect(url_for("login"))
@loginpage.route('/index')
@login_required
def index():
    """Render the landing page for logged-in users."""
    return render_template('index.html')
@loginpage.route('/register')
def register():
    """Render the registration page (no authentication required)."""
    return render_template('register.html')
@loginpage.route('/termsandprivacy')
def termsandprivacy():
    """Render the terms-and-privacy page (no authentication required)."""
    return render_template('termsandprivacy.html')
@loginpage.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate POSTed credentials."""
    error = None
    # NOTE(review): credentials are hard-coded in plaintext; for anything
    # beyond a demo, move them to a user store with hashed passwords.
    usernames = ['admin', 'bob', 'kees', 'nick', 'james']
    passwords = ['admin10', 'bob10', 'kees10', 'nick10', 'james10']
    credentials = dict(zip(usernames, passwords))
    if request.method == 'POST':
        # Check the password that belongs to THIS username. The previous
        # independent membership tests accepted any valid username combined
        # with any valid password (e.g. 'bob' + 'admin10').
        if credentials.get(request.form['username']) == request.form['password']:
            session['logged_in'] = True
            return redirect(url_for('index'))
        else:
            error = 'Invalid credentials'
    return render_template('login.html', error=error)
@loginpage.route('/logout')
def logout():
    """Drop the session flag and return to the home page."""
    session.pop('logged_in', None)
    return redirect(url_for('home'))
if __name__ == '__main__':
    # Development server only; do not use debug=True in production.
    loginpage.run(debug=True)
"noreply@github.com"
] | camikaze99.noreply@github.com |
7526e1a07f83c8b237e6f892e95f0b2f235bb4b0 | 8fb5319079f3d9a5524a4fa44dc9fdeb4e578a33 | /Contours/counting_coins.py | 3d0b8461f126be4dabeaf660096bdf9d2180144c | [] | no_license | KhairulIzwan/Python-OpenCV-Basics | 1dc414a07d25b2800f3a6f4eb7edf375e891b92b | 2bcf3536c9d5225188dce7c081600459a7b1ebb0 | refs/heads/main | 2023-04-26T17:37:10.838035 | 2021-05-23T03:11:36 | 2021-05-23T03:11:36 | 369,949,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | #!/usr/bin/env python
import numpy as np
import argparse
import cv2
# use argparse to handle parsing our command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
help = "Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)
cv2.imshow("Image", image)
edged = cv2.Canny(blurred, 30, 150)
cv2.imshow("Edges", edged)
_, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print "I count %d coins in this image" % (len(cnts))
coins = image.copy()
cv2.drawContours(coins, cnts, -1, (0, 255, 0), 2)
cv2.imshow("Coins", coins)
cv2.waitKey(0)
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
# print "Coin #%d" % (i + 1)
coinBar = "Coin #%d" % (i + 1)
coin = image[y:y + h, x:x + w]
cv2.imshow(coinBar, coin)
mask = np.zeros(image.shape[:2], dtype = "uint8")
((centerX, centerY), radius) = cv2.minEnclosingCircle(c)
cv2.circle(mask, (int(centerX), int(centerY)), int(radius), 255, -1)
mask = mask[y:y + h, x:x + w]
cv2.imshow("Masked Coin", cv2.bitwise_and(coin, coin, mask=mask))
cv2.waitKey(0)
cv2.destroyWindow(coinBar)
| [
"wansnap@gmail.com"
] | wansnap@gmail.com |
f38e731a213eca630b28a42646f8fa716aa07cb0 | 06f51ad6b75c4325631a7e0017c95c0b59834283 | /Transformer/nmt1.py | 182b217258149a156b41c60b207a0dae2a49a653 | [] | no_license | drzqb/NLP | 3ce01a88e5d582391677a6c1a1b4217faeb494f9 | 69fe353aa3c4e7fcdc056bf3ac48691f4579f770 | refs/heads/master | 2021-08-01T01:53:58.204967 | 2021-07-29T17:13:58 | 2021-07-29T17:13:58 | 161,282,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,861 | py | '''
Transformer model for neural translation
'''
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt
import sys
import pickle
import os
from nltk.tokenize import WordPunctTokenizer
# Command-line hyperparameters for the Transformer NMT model (tf.flags).
tf.flags.DEFINE_integer('maxword', 30, 'max length of any sentences')
tf.flags.DEFINE_integer('block', 2, 'number of Encoder submodel')
tf.flags.DEFINE_integer('head', 8, 'number of multi_head attention')
tf.flags.DEFINE_string('model_save_path', 'model/', 'The path where model shall be saved')
tf.flags.DEFINE_integer('batch_size', 32, 'Batch size during training')
tf.flags.DEFINE_integer('epochs', 50000, 'Epochs during training')
tf.flags.DEFINE_float('lr', 0.0001, 'Initial learing rate')
tf.flags.DEFINE_integer('embedding_en_size', 512, 'Embedding size for english words')
tf.flags.DEFINE_integer('embedding_ch_size', 512, 'Embedding size for chinese words')
tf.flags.DEFINE_boolean('graph_write', True, 'whether the compute graph is written to logs file')
tf.flags.DEFINE_float('keep_prob', 0.5, 'The probility used to dropout')
tf.flags.DEFINE_string('mode', 'train0', 'The mode of train or predict as follows: '
                                         'train0: train first time or retrain'
                                         'train1: continue train'
                                         'predict: predict')
tf.flags.DEFINE_integer('per_save', 10, 'save model for every per_save')
FLAGS = tf.flags.FLAGS
def layer_norm(x, scale, bias, epsilon=1.0e-8):
    """Layer normalization over the last axis, with learnable scale and bias."""
    # Normalize each position to zero mean / unit variance along the feature axis.
    mu = tf.reduce_mean(x, axis=[-1], keepdims=True)
    variance = tf.reduce_mean(tf.square(x - mu), axis=[-1], keepdims=True)
    normalized = (x - mu) * tf.rsqrt(variance + epsilon)
    return scale * normalized + bias
def softmax(A, Mask):
    '''
    Masked softmax along the last axis.
    :param A: B*ML1*ML2 logits
    :param Mask: B*ML1*ML2 boolean; False entries get zero probability
    :return: C, normalized over the unmasked entries of the last axis
    '''
    exp_a = tf.exp(A)
    # Zero out masked positions before normalizing.
    masked = tf.where(Mask, exp_a, tf.zeros_like(exp_a))
    denom = tf.reduce_sum(masked, axis=-1, keepdims=True)
    return tf.div(masked, denom)
class Transformer():
'''
Transformer模型
go: start token
eos: end token
l_dict_en: number of word in english dictionary
l_dict_ch: number of word in chinese dictionary
config: parameters for shell
'''
    def __init__(self, go=0, eos=1, l_dict_en=1000, l_dict_ch=1000, config=FLAGS):
        # ids of the decoder's start (go) and end (eos) tokens
        self.go = go
        self.eos = eos
        # vocabulary sizes for the source (English) and target (Chinese) sides
        self.l_dict_en = l_dict_en
        self.l_dict_ch = l_dict_ch
        # tf.flags namespace holding all hyperparameters
        self.config = config
    # Build the seq2seq TensorFlow model
    def build_model(self):
        """Build the Transformer graph (TF1 static graph) and save it to disk.

        Constructs placeholders, encoder/decoder stacks for training, a greedy
        step-by-step inference decoder, the loss/accuracy ops, and then writes
        the initialized model to ``model_save_path``.
        """
        # --- Placeholders and attention masks -------------------------------
        with tf.name_scope('Input'):
            encoder_inputs = tf.placeholder(shape=[None, None], dtype=tf.int32, name='encoder_inputs')
            decoder_inputs = tf.placeholder(shape=[None, None], dtype=tf.int32, name='decoder_inputs')
            decoder_targets = tf.placeholder(shape=[None, None], dtype=tf.int32, name='decoder_targets')
            encoder_length = tf.placeholder(shape=[None], dtype=tf.int32, name='encoder_length')
            decoder_length = tf.placeholder(shape=[None], dtype=tf.int32, name='decoder_length')
            max_encoder_length = tf.reduce_max(encoder_length, name='max_encoder_length')
            max_decoder_length = tf.reduce_max(decoder_length, name='max_decoder_length')
            encoder_pos = tf.placeholder(tf.float32, [None, self.config.embedding_en_size], name='encoder_position')
            decoder_pos = tf.placeholder(tf.float32, [None, self.config.embedding_ch_size], name='decoder_position')
            keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            # Padding masks (token id 0 is padding), tiled per attention head.
            padding_mask_encoder = tf.tile(tf.expand_dims(tf.greater(encoder_inputs, 0), 1),
                                           [self.config.head, tf.shape(encoder_inputs)[1], 1])
            padding_mask_decoder = tf.tile(tf.expand_dims(tf.greater(decoder_inputs, 0), 1),
                                           [self.config.head, tf.shape(decoder_inputs)[1], 1])
            padding_mask_decoder_encoder = tf.tile(tf.expand_dims(tf.greater(encoder_inputs, 0), 1),
                                                   [self.config.head, tf.shape(decoder_inputs)[1], 1],
                                                   name='mask_decoder_encoder')
            sequence_mask_decoder = tf.sequence_mask(decoder_length, max_decoder_length)
        # --- Token embeddings ----------------------------------------------
        with tf.name_scope('Embedding'):
            embedding_matrix_en = tf.Variable(
                tf.random_uniform([self.l_dict_en, self.config.embedding_en_size], -1.0, 1.0),
                dtype=tf.float32, name='embedding_matrix_en')
            embedding_matrix_ch = tf.Variable(
                tf.random_uniform([self.l_dict_ch, self.config.embedding_ch_size], -1.0, 1.0),
                dtype=tf.float32, name='embedding_matrix_ch')
            encoder_embeded = tf.nn.embedding_lookup(embedding_matrix_en, encoder_inputs)
            decoder_embeded = tf.nn.embedding_lookup(embedding_matrix_ch, decoder_inputs)
        # --- Encoder stack: `block` layers of self-attention + feed-forward --
        with tf.variable_scope('encoder'):
            encoder_p = tf.nn.dropout(encoder_embeded + encoder_pos[:max_encoder_length], keep_prob)
            for block in range(self.config.block):
                with tf.variable_scope('selfattention' + str(block)):
                    WQ = tf.layers.Dense(self.config.embedding_en_size, use_bias=False, name='Q')
                    WK = tf.layers.Dense(self.config.embedding_en_size, use_bias=False, name='K')
                    WV = tf.layers.Dense(self.config.embedding_en_size, use_bias=False, name='V')
                    WO = tf.layers.Dense(self.config.embedding_en_size, use_bias=False, name='O')
                    # Multi-head: split the feature axis into `head` chunks stacked on batch.
                    Q = tf.concat(tf.split(WQ(encoder_p), self.config.head, axis=-1), axis=0)
                    K = tf.concat(tf.split(WK(encoder_p), self.config.head, axis=-1), axis=0)
                    V = tf.concat(tf.split(WV(encoder_p), self.config.head, axis=-1), axis=0)
                    QK = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) / tf.sqrt(
                        self.config.embedding_en_size / self.config.head)
                    Z_p = tf.nn.dropout(WO(tf.concat(
                        tf.split(tf.matmul(softmax(QK, padding_mask_encoder), V),
                                 self.config.head, axis=0), axis=-1)), keep_prob)
                    scale_rn = tf.get_variable('scale_rn',
                                               initializer=tf.ones([self.config.embedding_en_size], dtype=tf.float32))
                    bias_rn = tf.get_variable('bias_rn',
                                              initializer=tf.zeros([self.config.embedding_en_size], dtype=tf.float32))
                    # Residual connection + layer norm.
                    encoder_p = layer_norm(encoder_p + Z_p, scale_rn, bias_rn)
                with tf.variable_scope('feedforward' + str(block)):
                    ffrelu = tf.layers.Dense(4 * self.config.embedding_en_size, activation=tf.nn.relu, name='ffrelu')
                    ff = tf.layers.Dense(self.config.embedding_en_size, name='ff')
                    scale_ff = tf.get_variable('scale_ff',
                                               initializer=tf.ones([self.config.embedding_en_size], dtype=tf.float32))
                    bias_ff = tf.get_variable('bias_ff',
                                              initializer=tf.zeros([self.config.embedding_en_size], dtype=tf.float32))
                    encoder_p = layer_norm(ff(ffrelu(encoder_p)) + encoder_p, scale_ff, bias_ff)
        with tf.variable_scope('decoder'):
            # --- Decoder stack for training (teacher forcing) ----------------
            # for train
            with tf.variable_scope('decoder'):
                decoder_p = tf.nn.dropout(decoder_embeded + decoder_pos[:max_decoder_length], keep_prob)
                # Causal mask: each position may only attend to itself and earlier positions.
                future_mask = tf.tile(
                    tf.expand_dims(tf.sequence_mask(tf.range(1, limit=tf.shape(decoder_inputs)[1] + 1)), 0),
                    [tf.shape(decoder_inputs)[0] * self.config.head, 1, 1])
                future_mask_final = padding_mask_decoder & future_mask
                for block in range(self.config.block):
                    with tf.variable_scope('mask_selfattention' + str(block)):
                        WQ = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='Q')
                        WK = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='K')
                        WV = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='V')
                        WO = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='O')
                        Q = tf.concat(tf.split(WQ(decoder_p), self.config.head, axis=-1), axis=0)
                        K = tf.concat(tf.split(WK(decoder_p), self.config.head, axis=-1), axis=0)
                        V = tf.concat(tf.split(WV(decoder_p), self.config.head, axis=-1), axis=0)
                        QK = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) / tf.sqrt(
                            self.config.embedding_ch_size / self.config.head)
                        Z_p = tf.nn.dropout(WO(tf.concat(
                            tf.split(tf.matmul(softmax(QK, future_mask_final), V),
                                     self.config.head, axis=0), axis=-1)), keep_prob)
                        scale_rn = tf.get_variable('scale_rn', initializer=tf.ones([self.config.embedding_ch_size],
                                                                                   dtype=tf.float32))
                        bias_rn = tf.get_variable('bias_rn', initializer=tf.zeros([self.config.embedding_ch_size],
                                                                                  dtype=tf.float32))
                        decoder_p = layer_norm(decoder_p + Z_p, scale_rn, bias_rn)
                    with tf.variable_scope('encoder_decoder_attention' + str(block)):
                        # Cross-attention: queries from the decoder, keys/values from the encoder.
                        WQ = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='Q')
                        WK = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='K')
                        WV = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='V')
                        WO = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='O')
                        Q = tf.concat(tf.split(WQ(decoder_p), self.config.head, axis=-1), axis=0)
                        K = tf.concat(tf.split(WK(encoder_p), self.config.head, axis=-1), axis=0)
                        V = tf.concat(tf.split(WV(encoder_p), self.config.head, axis=-1), axis=0)
                        QK = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) / tf.sqrt(
                            self.config.embedding_ch_size / self.config.head)
                        Z_p = tf.nn.dropout(WO(tf.concat(
                            tf.split(
                                tf.matmul(softmax(QK, padding_mask_decoder_encoder), V),
                                self.config.head, axis=0), axis=-1)), keep_prob)
                        scale_rn = tf.get_variable('scale_rn', initializer=tf.ones([self.config.embedding_ch_size],
                                                                                   dtype=tf.float32))
                        bias_rn = tf.get_variable('bias_rn', initializer=tf.zeros([self.config.embedding_ch_size],
                                                                                  dtype=tf.float32))
                        decoder_p = layer_norm(decoder_p + Z_p, scale_rn, bias_rn)
                    with tf.variable_scope('feedforward' + str(block)):
                        ffrelu = tf.layers.Dense(4 * self.config.embedding_ch_size, activation=tf.nn.relu,
                                                 name='ffrelu')
                        ff = tf.layers.Dense(self.config.embedding_ch_size, name='ff')
                        scale_ff = tf.get_variable('scale_ff', initializer=tf.ones([self.config.embedding_ch_size],
                                                                                   dtype=tf.float32))
                        bias_ff = tf.get_variable('bias_ff', initializer=tf.zeros([self.config.embedding_ch_size],
                                                                                  dtype=tf.float32))
                        decoder_p = layer_norm(ff(ffrelu(decoder_p)) + decoder_p, scale_ff, bias_ff)
                output_layer = tf.layers.Dense(self.l_dict_ch, name='project')
                logits = output_layer(decoder_p)
            # --- Greedy inference decoder: re-runs the stack one step at a time,
            # reusing the training variables (tf.AUTO_REUSE). --------------------
            # for inference
            with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
                k = tf.constant(0, dtype=tf.int32)
                decoder_infer_inputs = self.go * tf.ones([tf.shape(decoder_length)[0], k + 1], dtype=tf.int32)
                decoder_infer_outputs = tf.zeros([tf.shape(decoder_length)[0], 0], dtype=tf.int32)

                def cond(k, decoder_infer_inputs, decoder_infer_outputs):
                    # Decode until the longest requested target length is reached.
                    return tf.less(k, max_decoder_length)

                def body(k, decoder_infer_inputs, decoder_infer_outputs):
                    decoder_infer = tf.nn.embedding_lookup(embedding_matrix_ch, decoder_infer_inputs) + decoder_pos[
                                                                                                        :(k + 1)]
                    padding_mask_decoder_infer = tf.tile(tf.expand_dims(tf.greater(decoder_infer_inputs, 0), 1),
                                                         [self.config.head, tf.shape(decoder_infer_inputs)[1], 1])
                    future_mask_infer = tf.tile(
                        tf.expand_dims(tf.sequence_mask(tf.range(1, limit=tf.shape(decoder_infer_inputs)[1] + 1)), 0),
                        [tf.shape(decoder_infer_inputs)[0] * self.config.head, 1, 1])
                    future_mask_final_infer = padding_mask_decoder_infer & future_mask_infer
                    padding_mask_decoder_encoder_infer = tf.tile(tf.expand_dims(tf.greater(encoder_inputs, 0), 1),
                                                                 [self.config.head, tf.shape(decoder_infer_inputs)[1],
                                                                  1],
                                                                 name='mask_decoder_encoder')
                    for block in range(self.config.block):
                        with tf.variable_scope('mask_selfattention' + str(block)):
                            WQ = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='Q')
                            WK = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='K')
                            WV = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='V')
                            WO = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='O')
                            Q = tf.concat(tf.split(WQ(decoder_infer), self.config.head, axis=-1), axis=0)
                            K = tf.concat(tf.split(WK(decoder_infer), self.config.head, axis=-1), axis=0)
                            V = tf.concat(tf.split(WV(decoder_infer), self.config.head, axis=-1), axis=0)
                            QK = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) / tf.sqrt(
                                self.config.embedding_ch_size / self.config.head)
                            # NOTE: no dropout at inference time.
                            Z_p = WO(tf.concat(
                                tf.split(
                                    tf.matmul(softmax(QK, future_mask_final_infer), V),
                                    self.config.head, axis=0), axis=-1))
                            scale_rn = tf.get_variable('scale_rn', initializer=tf.ones([self.config.embedding_ch_size],
                                                                                       dtype=tf.float32))
                            bias_rn = tf.get_variable('bias_rn', initializer=tf.zeros([self.config.embedding_ch_size],
                                                                                      dtype=tf.float32))
                            decoder_infer = layer_norm(decoder_infer + Z_p, scale_rn, bias_rn)
                        with tf.variable_scope('encoder_decoder_attention' + str(block)):
                            WQ = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='Q')
                            WK = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='K')
                            WV = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='V')
                            WO = tf.layers.Dense(self.config.embedding_ch_size, use_bias=False, name='O')
                            Q = tf.concat(tf.split(WQ(decoder_infer), self.config.head, axis=-1), axis=0)
                            K = tf.concat(tf.split(WK(encoder_p), self.config.head, axis=-1), axis=0)
                            V = tf.concat(tf.split(WV(encoder_p), self.config.head, axis=-1), axis=0)
                            QK = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) / tf.sqrt(
                                self.config.embedding_ch_size / self.config.head)
                            Z_p = WO(tf.concat(tf.split(
                                tf.matmul(softmax(QK, padding_mask_decoder_encoder_infer), V),
                                self.config.head, axis=0), axis=-1))
                            scale_rn = tf.get_variable('scale_rn', initializer=tf.ones([self.config.embedding_ch_size],
                                                                                       dtype=tf.float32))
                            bias_rn = tf.get_variable('bias_rn', initializer=tf.zeros([self.config.embedding_ch_size],
                                                                                      dtype=tf.float32))
                            decoder_infer = layer_norm(decoder_infer + Z_p, scale_rn, bias_rn)
                        with tf.variable_scope('feedforward' + str(block)):
                            ffrelu = tf.layers.Dense(4 * self.config.embedding_ch_size, activation=tf.nn.relu,
                                                     name='ffrelu')
                            ff = tf.layers.Dense(self.config.embedding_ch_size, name='ff')
                            scale_ff = tf.get_variable('scale_ff', initializer=tf.ones([self.config.embedding_ch_size],
                                                                                       dtype=tf.float32))
                            bias_ff = tf.get_variable('bias_ff', initializer=tf.zeros([self.config.embedding_ch_size],
                                                                                      dtype=tf.float32))
                            decoder_infer = layer_norm(ff(ffrelu(decoder_infer)) + decoder_infer, scale_ff, bias_ff)
                    output_layer = tf.layers.Dense(self.l_dict_ch, name='project')
                    infer_logits = output_layer(decoder_infer)
                    # Greedily take the most likely token at the newest position;
                    # zero it out past each sequence's requested length.
                    decoder_infer_outputs_tmp = tf.argmax(infer_logits[:, -1:], axis=-1, output_type=tf.int32)
                    decoder_infer_outputs_tmpf = tf.zeros_like(decoder_infer_outputs_tmp)
                    decoder_infer_outputs = tf.concat(
                        [decoder_infer_outputs, tf.where(sequence_mask_decoder[:, k:(k + 1)], decoder_infer_outputs_tmp,
                                                         decoder_infer_outputs_tmpf)], axis=-1)
                    # Feed the prediction back in, replacing EOS with padding.
                    decoder_infer_inputs_tmp = decoder_infer_outputs[:, -1:]
                    decoder_infer_inputs_tmpf = tf.zeros_like(decoder_infer_inputs_tmp)
                    eos_mask = tf.not_equal(decoder_infer_inputs_tmp, self.eos * tf.ones_like(decoder_infer_inputs_tmp))
                    decoder_infer_inputs = tf.concat(
                        [decoder_infer_inputs, tf.where(eos_mask, decoder_infer_inputs_tmp, decoder_infer_inputs_tmpf)],
                        axis=-1)
                    return tf.add(k, 1), decoder_infer_inputs, decoder_infer_outputs

                _, _, decoder_infer_outputs = tf.while_loop(cond, body,
                                                            [k, decoder_infer_inputs, decoder_infer_outputs],
                                                            shape_invariants=[k.get_shape(),
                                                                              tf.TensorShape(
                                                                                  [decoder_length.get_shape().as_list()[
                                                                                       0], None]),
                                                                              tf.TensorShape(
                                                                                  [decoder_length.get_shape().as_list()[
                                                                                       0], None])
                                                                              ])
        # --- Loss, accuracy, and optimizer (padding positions excluded) -------
        with tf.name_scope('Loss'):
            prediction = tf.argmax(logits, axis=-1, output_type=tf.int32)
            predictionf = tf.zeros_like(prediction)
            prediction = tf.where(sequence_mask_decoder, prediction, predictionf)
            prediction = tf.identity(prediction, name='prediction')
            accuracy = tf.cast(tf.equal(prediction, decoder_targets), tf.float32)
            accuracyf = tf.zeros_like(accuracy)
            accuracy = tf.where(sequence_mask_decoder, accuracy, accuracyf)
            accuracy = tf.reduce_sum(accuracy) / tf.cast(tf.reduce_sum(decoder_length), tf.float32)
            accuracy = tf.identity(accuracy, name='accuracy')
            cost = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=decoder_targets, logits=logits)
            costf = tf.zeros_like(cost)
            loss = tf.reduce_mean(tf.div(tf.reduce_sum(tf.where(sequence_mask_decoder, cost, costf), axis=-1),
                                         tf.cast(decoder_length, tf.float32)), name='loss')
            optimizer = tf.train.AdamOptimizer(learning_rate=self.config.lr)
            train_op = optimizer.minimize(loss, name='train_op')
            prediction_infer = tf.identity(decoder_infer_outputs, name='prediction_infer')
        # --- Optionally dump the graph for TensorBoard, then save the model ---
        if FLAGS.graph_write:
            writer = tf.summary.FileWriter('logs', graph=tf.get_default_graph(), filename_suffix='nmt5')
            writer.flush()
            writer.close()
            print('Graph saved successfully!')
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        # Report the total trainable-parameter count for sanity checking.
        number_trainable_variables = 0
        variable_names = [v.name for v in tf.trainable_variables()]
        values = sess.run(variable_names)
        for k, v in zip(variable_names, values):
            print("Variable: ", k)
            print("Shape: ", v.shape)
            number_trainable_variables += np.prod([s for s in v.shape])
        print('Total number of parameters: %d' % number_trainable_variables)
        saver = tf.train.Saver(max_to_keep=1)
        saver.save(sess, FLAGS.model_save_path + 'nmt5')
        sess.close()
        print('Model saved successfully!')
def train(self, entext2id, entext2id_length, en_dict, en_reverse_dict, chtext2id_input, chtext2id_input_length,
          ch_dict, ch_reverse_dict, chtext2id_target):
    """Restore the saved Transformer graph and train it on the parallel corpus.

    Args:
        entext2id: English source sentences as lists of word ids.
        entext2id_length: length of each English sentence.
        en_dict / en_reverse_dict: English word->id and id->word mappings.
        chtext2id_input: Chinese decoder-input sentences as id lists.
        chtext2id_input_length: length of each decoder-input sentence.
        ch_dict / ch_reverse_dict: Chinese word->id and id->word mappings.
        chtext2id_target: Chinese decoder-target sentences as id lists.

    Side effects: prints per-batch progress and demo translations to stdout,
    periodically saves the checkpoint, and writes a loss-curve PNG at the end.
    """
    # Restore the graph definition and weights saved by build_model().
    sess = tf.Session()
    new_saver = tf.train.import_meta_graph(self.config.model_save_path + 'nmt5.meta')
    new_saver.restore(sess, self.config.model_save_path + 'nmt5')
    graph = tf.get_default_graph()
    # Look up placeholders and ops by name from the restored graph.
    encoder_inputs = graph.get_operation_by_name('Input/encoder_inputs').outputs[0]
    decoder_inputs = graph.get_operation_by_name('Input/decoder_inputs').outputs[0]
    decoder_targets = graph.get_operation_by_name('Input/decoder_targets').outputs[0]
    keep_prob = graph.get_operation_by_name('Input/keep_prob').outputs[0]
    encoder_length = graph.get_operation_by_name('Input/encoder_length').outputs[0]
    decoder_length = graph.get_operation_by_name('Input/decoder_length').outputs[0]
    encoder_pos = graph.get_operation_by_name('Input/encoder_position').outputs[0]
    decoder_pos = graph.get_operation_by_name('Input/decoder_position').outputs[0]
    loss = graph.get_tensor_by_name('Loss/loss:0')
    train_op = graph.get_operation_by_name('Loss/train_op')
    prediction = graph.get_tensor_by_name('Loss/prediction:0')
    accuracy = graph.get_tensor_by_name('Loss/accuracy:0')
    prediction_infer = graph.get_tensor_by_name('Loss/prediction_infer:0')
    # Fixed English sentences used to print sample translations during training.
    test = [
        "China practices ethnic regional autonomy .",
        'Great achievements have been gained in judicial work .',
        'The fact that human rights in China have improved is beyond all dispute .',
        "China is a developing country with a great diversity of religious beliefs .",
        'In new China , people enjoy human rights .'
    ]
    test_strip = [test[i].lower().split() for i in range(len(test))]
    test_len = [len(test_strip[i]) for i in range(len(test))]
    print(test_len)
    # Map demo words to ids, falling back to <UNK> for out-of-vocabulary words.
    test2id = []
    for i in range(len(test)):
        tmp = []
        for word in test_strip[i]:
            tmp.append(en_dict[word] if word in en_dict.keys() else en_dict['<UNK>'])
        test2id.append(tmp)
    # Right-pad the demo sentences with <PAD> to a common length.
    max_test_len = np.max(test_len)
    for i in range(len(test_len)):
        if test_len[i] < max_test_len:
            test2id[i] += [en_dict['<PAD>']] * (max_test_len - test_len[i])
    test_encoder_input = test2id
    test_encoder_input_length = test_len
    # Fixed per-sentence decode-length budgets for the demo translations.
    test_decoder_inputs_length = [15, 12, 18, 10, 15]
    print(test_encoder_input)
    m_samples = len(entext2id)
    total_batch = m_samples // self.config.batch_size
    # Sinusoidal positional encodings: sin on even dims, cos on odd dims.
    pos_encoder = np.array(
        [[position / np.power(10000.0, 2.0 * (i // 2) / self.config.embedding_en_size) for i in
          range(self.config.embedding_en_size)]
         for position in range(self.config.maxword)])
    pos_encoder[:, 0::2] = np.sin(pos_encoder[:, 0::2])
    pos_encoder[:, 1::2] = np.cos(pos_encoder[:, 1::2])
    pos_decoder = np.array(
        [[position / np.power(10000.0, 2.0 * (i // 2) / self.config.embedding_ch_size) for i in
          range(self.config.embedding_ch_size)]
         for position in range(self.config.maxword)])
    pos_decoder[:, 0::2] = np.sin(pos_decoder[:, 0::2])
    pos_decoder[:, 1::2] = np.cos(pos_decoder[:, 1::2])
    loss_ = []  # per-epoch mean loss
    acc_ = []   # per-epoch mean accuracy
    for epoch in range(1, self.config.epochs + 1):
        loss_epoch = 0.0
        acc_epoch = 0.0
        for batch in range(total_batch):
            # Slice the current mini-batch of sources/targets and their lengths.
            x_input_batch = entext2id[batch * self.config.batch_size:(batch + 1) * self.config.batch_size]
            y_input_batch = chtext2id_input[batch * self.config.batch_size:(batch + 1) * self.config.batch_size]
            y_target_batch = chtext2id_target[batch * self.config.batch_size:(batch + 1) * self.config.batch_size]
            x_input_batch_length = entext2id_length[
                batch * self.config.batch_size:(batch + 1) * self.config.batch_size]
            y_input_batch_length = chtext2id_input_length[
                batch * self.config.batch_size:(batch + 1) * self.config.batch_size]
            # Pad every sequence in the batch to the batch's longest length.
            x_input_batch = self.padding(x_input_batch, x_input_batch_length, en_dict['<PAD>'])
            y_input_batch = self.padding(y_input_batch, y_input_batch_length, ch_dict['<PAD>'])
            y_target_batch = self.padding(y_target_batch, y_input_batch_length, ch_dict['<PAD>'])
            feed_dict = {
                encoder_inputs: x_input_batch,
                decoder_inputs: y_input_batch,
                decoder_targets: y_target_batch,
                encoder_length: x_input_batch_length,
                decoder_length: y_input_batch_length,
                encoder_pos: pos_encoder,
                decoder_pos: pos_decoder,
                keep_prob: self.config.keep_prob
            }
            # One optimization step; also fetch batch loss/accuracy for logging.
            prediction_, acc_batch, loss_batch, _ = sess.run([prediction, accuracy, loss, train_op],
                                                             feed_dict=feed_dict)
            sys.stdout.write('>> %d/%d | %d/%d loss:%.9f acc:%.2f%%\n' % (
                epoch, self.config.epochs, batch + 1, total_batch, loss_batch, 100.0 * acc_batch))
            sys.stdout.flush()
            # Decode the fixed demo sentences (dropout disabled: keep_prob=1.0).
            prediction_infer_ = sess.run(prediction_infer, feed_dict={encoder_inputs: test_encoder_input,
                                                                     encoder_length: test_encoder_input_length,
                                                                     decoder_length: test_decoder_inputs_length,
                                                                     encoder_pos: pos_encoder,
                                                                     decoder_pos: pos_decoder,
                                                                     keep_prob: 1.0})
            for i_test in range(len(test)):
                # Convert predicted ids back to Chinese words, stopping at <EOS>.
                tmp = []
                for idx in prediction_infer_[i_test]:
                    if idx == ch_dict['<EOS>']:
                        break
                    tmp.append(ch_reverse_dict[idx])
                sys.stdout.write('English: %s\n' % (test[i_test]))
                sys.stdout.write('Chinese: %s\n\n' % (''.join(tmp)))
            sys.stdout.write(
                '-------------------------------------------------------------------------------------------------\n')
            sys.stdout.flush()
            loss_epoch += loss_batch
            acc_epoch += acc_batch
        loss_.append(loss_epoch / total_batch)
        acc_.append(acc_epoch / total_batch)
        # ANSI escape codes highlight the per-epoch summary in red.
        print('\033[1;31;40m')
        print('>> %d/%d | Loss:%.9f Acc:%.2f%%\n' % (epoch, self.config.epochs, loss_[-1], 100. * acc_[-1]))
        print('\033[0m')
        # Reshuffle the corpus (inputs, targets and lengths together) each epoch.
        r = np.random.permutation(m_samples)
        entext2id = self.rearrange(entext2id, r)
        chtext2id_input = self.rearrange(chtext2id_input, r)
        chtext2id_target = self.rearrange(chtext2id_target, r)
        entext2id_length = self.rearrange(entext2id_length, r)
        chtext2id_input_length = self.rearrange(chtext2id_input_length, r)
        if epoch % self.config.per_save == 0:
            new_saver.save(sess, self.config.model_save_path + 'nmt5')
            print('Model saved successfully!')
    # Plot and save the training-loss curve once all epochs finish.
    fig = plt.figure(figsize=(10, 8))
    plt.plot(loss_)
    plt.savefig(self.config.model_save_path + 'Transformer_Loss.png', bbox_inches='tight')
    plt.close(fig)
def predict(self, en_dict, ch_dict, ch_reverse_dict):
    """Interactive loop: read English sentences from stdin and print the
    model's Chinese translation until the user enters 'q'.

    Args:
        en_dict: English word->id mapping (unknown words map to <UNK>).
        ch_dict: Chinese word->id mapping (used for the <EOS> id).
        ch_reverse_dict: Chinese id->word mapping for decoding output ids.
    """
    # Restore the trained graph and weights from the checkpoint.
    sess = tf.Session()
    tf.train.import_meta_graph(self.config.model_save_path + 'nmt5.meta').restore(sess,
                                                                                  self.config.model_save_path + 'nmt5')
    graph = tf.get_default_graph()
    # Only the inference-side placeholders and output tensor are needed here.
    encoder_inputs = graph.get_operation_by_name('Input/encoder_inputs').outputs[0]
    keep_prob = graph.get_operation_by_name('Input/keep_prob').outputs[0]
    encoder_length = graph.get_operation_by_name('Input/encoder_length').outputs[0]
    decoder_length = graph.get_operation_by_name('Input/decoder_length').outputs[0]
    encoder_pos = graph.get_operation_by_name('Input/encoder_position').outputs[0]
    decoder_pos = graph.get_operation_by_name('Input/decoder_position').outputs[0]
    prediction_infer = graph.get_tensor_by_name('Loss/prediction_infer:0')
    # Sinusoidal positional encodings, same formula as in train().
    pos_encoder = np.array(
        [[position / np.power(10000.0, 2.0 * (i // 2) / self.config.embedding_en_size) for i in
          range(self.config.embedding_en_size)]
         for position in range(self.config.maxword)])
    pos_encoder[:, 0::2] = np.sin(pos_encoder[:, 0::2])
    pos_encoder[:, 1::2] = np.cos(pos_encoder[:, 1::2])
    pos_decoder = np.array(
        [[position / np.power(10000.0, 2.0 * (i // 2) / self.config.embedding_ch_size) for i in
          range(self.config.embedding_ch_size)]
         for position in range(self.config.maxword)])
    pos_decoder[:, 0::2] = np.sin(pos_decoder[:, 0::2])
    pos_decoder[:, 1::2] = np.cos(pos_decoder[:, 1::2])
    # ANSI escape code: print the session in red until the closing reset below.
    print('\033[1;31;40m')
    while 1:
        test = input('Please enter English sentence or q to quit\n')
        if test.lower() == 'q':
            break
        # NOTE(review): whatever was typed at the first prompt (other than 'q')
        # is discarded and the user is asked again here — confirm this
        # two-prompt flow is intentional.
        test = input('English sentence: ')
        # Tokenize, lowercase, and map words to ids (<UNK> for OOV words).
        test_strip = [WordPunctTokenizer().tokenize(test.lower().strip(' '))]
        test_len = [len(test_strip[0])]
        test2id = []
        tmp = []
        for word in test_strip[0]:
            tmp.append(en_dict[word] if word in en_dict.keys() else en_dict['<UNK>'])
        test2id.append(tmp)
        test_encoder_input = test2id
        test_encoder_length = test_len
        test_decoder_length = [30]  # fixed decode-length budget for one sentence
        prediction_infer_ = sess.run(prediction_infer, feed_dict={encoder_inputs: test_encoder_input,
                                                                  encoder_length: test_encoder_length,
                                                                  decoder_length: test_decoder_length,
                                                                  encoder_pos: pos_encoder,
                                                                  decoder_pos: pos_decoder,
                                                                  keep_prob: 1.0})
        # Convert predicted ids back to Chinese words, stopping at <EOS>.
        tmp = []
        for idx in prediction_infer_[0]:
            if idx == ch_dict['<EOS>']:
                break
            tmp.append(ch_reverse_dict[idx])
        sys.stdout.write('\n')
        sys.stdout.write('English: %s\n' % (test))
        sys.stdout.write('Chinese: %s\n\n' % (''.join(tmp)))
        sys.stdout.flush()
    print('\033[0m')
def padding(self, x, l, padding_id):
    """Right-pad every sequence in x with padding_id up to the longest length.

    Args:
        x: list of id sequences (lists).
        l: list of the corresponding sequence lengths.
        padding_id: id used as filler.

    Returns:
        A new list of padded sequences; inputs are not mutated.
    """
    longest = max(l)
    padded = []
    for seq, length in zip(x, l):
        padded.append(seq + [padding_id] * (longest - length))
    return padded
def rearrange(self, x, r):
    """Return the elements of x reordered by the index sequence r."""
    reordered = []
    for index in r:
        reordered.append(x[index])
    return reordered
def load_dict():
    """Load the four pickled vocabulary mappings from the data/ directory.

    Returns:
        Tuple (en_dict, en_reverse_dict, ch_dict, ch_reverse_dict).
    """
    vocabularies = []
    for path in ('data/en_dict.txt', 'data/en_reverse_dict.txt',
                 'data/ch_dict.txt', 'data/ch_reverse_dict.txt'):
        with open(path, 'rb') as f:
            vocabularies.append(pickle.load(f))
    return tuple(vocabularies)
def load_train_data():
    """Load the pickled parallel training corpora from the data/ directory.

    Returns:
        Tuple (entext2id, chtext2id_input, chtext2id_target).
    """
    corpora = []
    for path in ('data/entext2id.txt', 'data/chtext2id_input.txt',
                 'data/chtext2id_target.txt'):
        with open(path, 'rb') as f:
            corpora.append(pickle.load(f))
    return tuple(corpora)
def main(unused_argv):
    """Entry point: build, train, or run the translator depending on FLAGS.mode.

    Modes:
        'train0'  - create the save directory, build the graph, then train.
        'train1'  - continue training an existing checkpoint.
        'predict' - interactive translation using the saved checkpoint.

    Fix: the original duplicated the entire data-loading/training sequence in
    both the 'train0' and 'train1' branches; the shared path is now written once.
    """
    en_dict, en_reverse_dict, ch_dict, ch_reverse_dict = load_dict()
    transformer = Transformer(go=ch_dict['<GO>'], eos=ch_dict['<EOS>'],
                              l_dict_en=len(en_dict), l_dict_ch=len(ch_dict)
                              )
    if FLAGS.mode == 'train0':  # train first time or retrain: build graph fresh
        if not os.path.exists(FLAGS.model_save_path):
            os.makedirs(FLAGS.model_save_path)
        transformer.build_model()
    if FLAGS.mode in ('train0', 'train1'):  # both modes share the training path
        entext2id, chtext2id_input, chtext2id_target = load_train_data()
        entext2id_length = [len(en) for en in entext2id]
        chtext2id_input_length = [len(ch) for ch in chtext2id_input]
        transformer.train(entext2id, entext2id_length, en_dict, en_reverse_dict,
                          chtext2id_input, chtext2id_input_length,
                          ch_dict, ch_reverse_dict,
                          chtext2id_target
                          )
    elif FLAGS.mode == 'predict':
        transformer.predict(en_dict, ch_dict, ch_reverse_dict)
# Script entry point: tf.app.run() parses command-line FLAGS then calls main().
if __name__ == '__main__':
    tf.app.run()
| [
"noreply@github.com"
] | drzqb.noreply@github.com |
d04a3cc08125307c425fc4a3bbdbde890ec4fcda | b5aa43c8db450c3bcacc8f28897eab684a8032a1 | /data/games/missiles/states/level_fail.py | 1968f989a08e959a762da5f55ffd79a9dd9aa27b | [] | no_license | iminurnamez/Python_Arcade_Collab | 29a74cf2a6264969de9bae3c4a6ed23d6282e793 | 67702414ed30addd1bf46339bb458df34ed88f2a | refs/heads/master | 2021-04-15T07:32:18.573004 | 2018-05-13T14:29:19 | 2018-05-13T14:29:19 | 126,644,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | import pygame as pg
from data.components.state_machine import _State
from data.core.tools import scaled_mouse_pos
from data.core import constants as prog_constants
from data.components.labels import Label, ButtonGroup
from data.components.special_buttons import NeonButton
from .. import constants
class LevelFail(_State):
    """Game state shown after the player loses a level: displays a failure
    message and an OK button that clears the save and moves to high scores."""

    def __init__(self, controller):
        super(LevelFail, self).__init__(controller)

    def startup(self, persistent):
        """Build the failure labels and the OK button from persistent state."""
        self.persist = persistent
        text_font = prog_constants.FONTS["Fixedsys500c"]
        screen = constants.SCREEN_RECT
        text_color = constants.LOW_LIGHT_GREEN
        failed_level = self.persist["player"].level_num
        self.labels = []
        self.labels.append(Label(text_font, 48, "Level {} Failed".format(failed_level),
                                 text_color, {"midtop": (screen.centerx, 5)}))
        self.labels.append(Label(text_font, 32, "All your cities are",
                                 text_color, {"midbottom": (screen.centerx, 200)}))
        self.labels.append(Label(text_font, 32, "belong to dust",
                                 text_color, {"midtop": (screen.centerx, 200)}))
        self.buttons = ButtonGroup()
        NeonButton((373, 630), "OK", 32, self.to_high_scores, None, self.buttons)

    def to_high_scores(self, *args):
        """Wipe the player's save and transition to the high-score state."""
        self.persist["player"].clear_save()
        self.done = True
        self.next = "HIGH_SCORES"

    def get_event(self, event, scale):
        """Forward a single pygame event to the button group."""
        self.buttons.get_event(event)

    def update(self, surface, keys, current_time, dt, scale):
        """Update button hover state from the mouse position, then redraw."""
        self.buttons.update(scaled_mouse_pos(scale))
        self.draw(surface)

    def draw(self, surface):
        """Paint the background, all labels, and the buttons."""
        surface.fill(constants.BACKGROUND_BASE)
        for text_label in self.labels:
            text_label.draw(surface)
        self.buttons.draw(surface)
"easinerf@gmail.com"
] | easinerf@gmail.com |
8ded148e045cac0a80cad6edcc563d60c127de9e | 07996c7f93e7b200146cd314520100cf99d003bd | /raw data/40_tos_with_paragraph/code/crawlParagraph/venv/bin/pip | 78b57aad8638f50daca262ddf547c190564af8a6 | [] | no_license | tjuyanghw/data_policy_analyzer | 31ae683128ca5241fa8f0cb67e2f1132820c2d02 | 010a44ff024bd6d97b21f409f6c62f969e1fdc55 | refs/heads/master | 2022-07-02T19:23:14.141170 | 2020-05-13T16:24:11 | 2020-05-13T16:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | #!/Users/xiaoyue/scrapyenv/maliciousLibrarySpider/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script wrapper: resolves the 'pip'
# entry point from the pip==10.0.1 distribution and invokes it.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' or '.exe' suffix so pip sees its
    # canonical program name in sys.argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"xiaoyue10131748@gmail.com"
] | xiaoyue10131748@gmail.com | |
b29a1e598f2e2fc26af73f214978b0998c04b461 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/NISS/common_shamil_v3/hr_violation_punishment/__openerp__.py | 719dad2e987819e3bcda88edba479c93f4c3fcff | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2013-2014 NCTR (<http://www.nctr.sd>).
#
##############################################################################
# OpenERP/Odoo module manifest: metadata dict read by the addon loader.
{
    'name': 'Employee Violation and Punishment',
    'version': '1.1',
    'author': 'NCTR',
    'category': 'Human Resources',
    'website': 'http://www.nctr.sd',
    'summary': 'Employee Violation and Punishment',
    'description': """
Employee Violation and Punishment
==========================
""",
    'images' : ['images/violations.png'],
    # Requires the hr_payroll_custom addon to be installed first.
    'depends': ['hr_payroll_custom'],
    # Files loaded on install: access rules, views, report, workflow, wizard.
    'data': [
        'security/ir.model.access.csv',
        'hr_violation_punishment_view.xml',
        'report/hr_report.xml',
        'hr_violations_punishment_workflow.xml',
        'wizard/emp_violations_punishments.xml',
    ],
    'demo': [],
    'test': [],
    'installable': True,
    'application': True,
    'auto_install': False,
    'css': [ ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.