hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a058cbd2b326ac9d2e33d348ddcee720fe5f9bb
| 537
|
py
|
Python
|
session3/PWM_LED.py
|
rezafari/raspberry
|
e6720780f3c65ee1809040fc538f793fe44f0111
|
[
"MIT"
] | 19
|
2017-09-26T04:37:55.000Z
|
2021-12-15T05:39:57.000Z
|
session3/PWM_LED.py
|
rezafari/raspberry
|
e6720780f3c65ee1809040fc538f793fe44f0111
|
[
"MIT"
] | 44
|
2017-11-22T04:56:26.000Z
|
2018-03-17T14:30:00.000Z
|
session3/PWM_LED.py
|
rezafari/raspberry
|
e6720780f3c65ee1809040fc538f793fe44f0111
|
[
"MIT"
] | 21
|
2017-09-23T05:25:59.000Z
|
2021-05-31T10:24:49.000Z
|
######################################################################
# PWM_LED.py
#
# Drive an LED with PWM and control its brightness interactively by
# changing the duty cycle (0-100 percent).
######################################################################
import RPi.GPIO as GPIO

LED_PIN = 18  # BCM pin numbering

GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PIN, GPIO.OUT)
pwm_led = GPIO.PWM(LED_PIN, 100)  # 100 Hz carrier frequency
pwm_led.start(100)                # start fully on
try:
    while True:
        duty_str = input("Please Enter Brightness(0 to 100): ")
        try:
            duty = int(duty_str)
        except ValueError:
            # Fix: a non-numeric entry used to crash the program.
            print("Not a number, please try again.")
            continue
        if not 0 <= duty <= 100:
            # ChangeDutyCycle raises for out-of-range values; validate first.
            print("Brightness must be between 0 and 100.")
            continue
        pwm_led.ChangeDutyCycle(duty)
finally:
    # Fix: always release the pin (Ctrl-C raises KeyboardInterrupt out of
    # input()); previously the GPIO state was left driven on exit.
    pwm_led.stop()
    GPIO.cleanup()
| 23.347826
| 70
| 0.512104
|
4a058d3665f639bb4eab41afaef32cbf738eca70
| 17,170
|
py
|
Python
|
intent_detection/encoder_clients.py
|
EdwardBurgin/polyai-models
|
20cc6ff59396a2884a748509526a022347a7340c
|
[
"Apache-2.0"
] | 2
|
2020-10-16T11:30:59.000Z
|
2021-03-28T04:51:25.000Z
|
intent_detection/encoder_clients.py
|
EdwardBurgin/polyai-models
|
20cc6ff59396a2884a748509526a022347a7340c
|
[
"Apache-2.0"
] | null | null | null |
intent_detection/encoder_clients.py
|
EdwardBurgin/polyai-models
|
20cc6ff59396a2884a748509526a022347a7340c
|
[
"Apache-2.0"
] | 3
|
2020-10-13T09:13:01.000Z
|
2021-05-31T04:20:18.000Z
|
"""Sentence encoder library for tensorflow_hub based sentence encoders
The included sentence encoders are:
- BERT: https://arxiv.org/abs/1810.04805
- USE multilingual: https://arxiv.org/abs/1907.04307
- ConveRT: https://arxiv.org/abs/1911.03688
Copyright PolyAI Limited.
"""
import abc
import os
import pickle
import glog
import numpy as np
import tensorflow as tf
import tensorflow_hub as tf_hub
import tensorflow_text # NOQA: it is used when importing ConveRT.
import tf_sentencepiece # NOQA: it is used when importing USE.
from bert.tokenization import FullTokenizer
from tqdm import tqdm
from encoder_client import EncoderClient
from sentence_transformers import SentenceTransformer
# TF-Hub / PolyAI model locations used by the encoder clients below.
_CONVERT_PATH = "http://models.poly-ai.com/convert/v1/model.tar.gz"
_USE_PATH = ("https://tfhub.dev/google/universal-sentence-encoder-"
             "multilingual-large/1")
_BERT_PATH = "https://tfhub.dev/google/bert_uncased_L-24_H-1024_A-16/1"
def l2_normalize(encodings):
    """Return *encodings* rescaled so that every row has unit L2 norm."""
    row_norms = np.linalg.norm(encodings, ord=2, axis=-1, keepdims=True)
    normalized = encodings / row_norms
    return normalized
class ClassificationEncoderClient(object, metaclass=abc.ABCMeta):
    """A model that maps from text to dense vectors.

    Fix: the original assigned ``__metaclass__ = abc.ABCMeta``, which is
    Python 2 syntax and has no effect under Python 3, so the abstract
    method was never enforced.  The metaclass is now passed explicitly.
    """

    @abc.abstractmethod
    def encode_sentences(self, sentences):
        """Encodes a list of sentences

        Args:
            sentences: a list of strings

        Returns:
            an (N, d) numpy matrix of sentence encodings.
        """
        # Fix: the original *returned* NotImplementedError instead of raising.
        raise NotImplementedError
def get_encoder_client(encoder_type, cache_dir=None):
    """get an EncoderClient object

    Args:
        encoder_type: (str) one of "use", "convert", "combined", "bert",
            "laser_convert", "laser_convert_use", "sbert",
            "sbert_convert_use", "rf_tfidf" or "sbert_cosine"
            (case-insensitive).
        cache_dir: The directory where an encoding dictionary will be cached

    Returns:
        a ClassificationEncoderClient, or None for the encoder types that
        are constructed outside of this module ("rf_tfidf", "sbert_cosine").

    Raises:
        ValueError: if encoder_type is not one of the types listed above.
    """
    def _maybe_cache(client, uri):
        # Wrap the client in a disk cache when a cache_dir was supplied;
        # factored out of every branch that used to repeat this boilerplate.
        if cache_dir:
            client = CachingEncoderClient(
                client, uri.replace("/", "-"), cache_dir)
        return client

    etype = encoder_type.lower()  # normalise once instead of per-branch
    if etype == "use":
        encoder_client = _maybe_cache(UseEncoderClient(_USE_PATH), _USE_PATH)
    elif etype == "convert":
        encoder_client = _maybe_cache(
            ConvertEncoderClient(_CONVERT_PATH), _CONVERT_PATH)
    elif etype == "combined":
        use_encoder = _maybe_cache(UseEncoderClient(_USE_PATH), _USE_PATH)
        convert_encoder = _maybe_cache(
            ConvertEncoderClient(_CONVERT_PATH), _CONVERT_PATH)
        encoder_client = CombinedEncoderClient([convert_encoder, use_encoder])
    elif etype == "bert":
        encoder_client = _maybe_cache(
            BertEncoderClient(_BERT_PATH), _BERT_PATH)
    elif etype == "laser_convert":
        encoder_client = CombinedEncoderClient(
            [ConvertEncoderClient(_CONVERT_PATH), LaserEncoderClient()])
    elif etype == "laser_convert_use":
        encoder_client = CombinedEncoderClient(
            [ConvertEncoderClient(_CONVERT_PATH), LaserEncoderClient(),
             UseEncoderClient(_USE_PATH)])
    elif etype == "sbert":
        encoder_client = SbertEncoderClient()
    elif etype == "sbert_convert_use":
        encoder_client = CombinedEncoderClient(
            [ConvertEncoderClient(_CONVERT_PATH), SbertEncoderClient(),
             UseEncoderClient(_USE_PATH)])
    elif etype in ("rf_tfidf", "sbert_cosine"):
        # These pipelines are built by the caller; no encoder client needed.
        encoder_client = None
    else:
        raise ValueError(f"{encoder_type} is not a valid encoder type")
    return encoder_client
class CachingEncoderClient(ClassificationEncoderClient):
    """Wrapper around an encoder to cache the encodings on disk"""

    def __init__(self, encoder_client, encoder_id, cache_dir):
        """Create a new CachingEncoderClient object

        Args:
            encoder_client: An EncoderClient
            encoder_id: An unique ID for the encoder
            cache_dir: The directory where the encodings will be cached
        """
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        self._encodings_dict_path = os.path.join(cache_dir, encoder_id)
        self._encoder_client = encoder_client
        self._encodings_dict = self._load_or_create_encodings_dict()

    def _load_or_create_encodings_dict(self):
        # Start with an empty cache unless one was previously persisted.
        if not os.path.exists(self._encodings_dict_path):
            return {}
        with open(self._encodings_dict_path, "rb") as cache_file:
            return pickle.load(cache_file)

    def _save_encodings_dict(self):
        # Persist the in-memory cache so later runs can reuse it.
        with open(self._encodings_dict_path, "wb") as cache_file:
            pickle.dump(self._encodings_dict, cache_file)

    def encode_sentences(self, sentences):
        """Encode a list of sentences, reusing disk-cached encodings.

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of sentence encodings.
        """
        uncached = [s for s in sentences if s not in self._encodings_dict]
        num_cached = len(sentences) - len(uncached)
        if num_cached:
            glog.info(f"{num_cached} cached "
                      f"sentences will not be encoded")
        if uncached:
            new_encodings = self._encoder_client.encode_sentences(uncached)
            self._encodings_dict.update(zip(uncached, new_encodings))
            self._save_encodings_dict()
        return np.array(
            [self._encodings_dict[s] for s in sentences])
class LaserEncoderClient(ClassificationEncoderClient):
    """LASER sentence encoder with L2-normalised output.

    NOTE(review): ``Laser`` is not imported anywhere in this module, so
    instantiating this class raises NameError as written.  It presumably
    comes from the ``laserembeddings`` package -- confirm and add that
    import before using this client.
    """

    def __init__(self, batch_size=100):
        """Create a new LaserEncoderClient object

        Args:
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        self._encoder_client = Laser()

    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of L2-normalised sentence encodings.
        """
        encodings = []
        # Silence the per-batch logging emitted while encoding.
        glog.setLevel("ERROR")
        for start in tqdm(range(0, len(sentences), self._batch_size),
                          "encoding sentence batches"):
            batch = sentences[start:start + self._batch_size]
            encodings.append(
                self._encoder_client.embed_sentences(batch, lang='en'))
        glog.setLevel("INFO")
        # Fix: removed a leftover debug print of the stacked shape.
        return l2_normalize(np.vstack(encodings))
class SbertEncoderClient(ClassificationEncoderClient):
    """Sentence-BERT encoder with L2-normalised output."""

    def __init__(self, sbert_model="bert-base-nli-mean-tokens", batch_size=100):
        """Create a new SbertEncoderClient object

        Args:
            sbert_model: name of the pretrained SentenceTransformer model to
                load.  (Fix: the original read a global ``sbert_model`` that
                was never defined, raising NameError on construction; it is
                now a parameter with a default.)
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        self._encoder_client = SentenceTransformer(sbert_model)

    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of L2-normalised sentence encodings.
        """
        encodings = []
        # Silence the per-batch logging emitted while encoding.
        glog.setLevel("ERROR")
        for start in tqdm(range(0, len(sentences), self._batch_size),
                          "encoding sentence batches"):
            encodings.append(
                self._encoder_client.encode(
                    sentences[start:start + self._batch_size]))
        glog.setLevel("INFO")
        # Fix: removed a leftover debug print of the stacked shape.
        return l2_normalize(np.vstack(encodings))
class ConvertEncoderClient(ClassificationEncoderClient):
    """ConveRT encoder (wrapping EncoderClient) with L2-normalised output."""

    def __init__(self, uri, batch_size=100):
        """Create a new ConvertEncoderClient object

        Args:
            uri: The uri to the tensorflow_hub module
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        self._encoder_client = EncoderClient(uri)

    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of L2-normalised sentence encodings.
        """
        encodings = []
        # Silence the per-batch logging emitted by the underlying client.
        glog.setLevel("ERROR")
        for i in tqdm(range(0, len(sentences), self._batch_size),
                      "encoding sentence batches"):
            encodings.append(
                self._encoder_client.encode_sentences(
                    sentences[i:i + self._batch_size]))
        glog.setLevel("INFO")
        # Fix: removed a leftover debug print of the stacked shape.
        return l2_normalize(np.vstack(encodings))
class UseEncoderClient(ClassificationEncoderClient):
    """A Universal Sentence Encoder model loaded as a tensorflow hub module"""

    def __init__(self, uri, batch_size=100):
        """Create a new UseEncoderClient object

        Args:
            uri: The uri to the tensorflow_hub USE module
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        # TF1 style: build the graph once and keep a live session around.
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tf_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._embeddings = embed_fn(self._fed_texts)
            encoding_info = embed_fn.get_output_info_dict().get('default')
            if encoding_info:
                self._encoding_dim = encoding_info.get_shape()[-1].value
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
            glog.info("Initializing graph.")
            self._session.run(init_ops)

    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of sentence encodings.
        """
        encodings = []
        for i in tqdm(range(0, len(sentences), self._batch_size),
                      "encoding sentence batches"):
            encodings.append(
                self._session.run(
                    self._embeddings,
                    {self._fed_texts: sentences[i:i + self._batch_size]}))
        # Fix: removed a leftover debug print of the stacked shape.
        # NOTE(review): unlike the other clients this output is not
        # l2-normalised here -- presumably intentional; confirm.
        return np.vstack(encodings)
class BertEncoderClient(ClassificationEncoderClient):
    """The BERT encoder that is loaded as a module from tensorflow hub.

    This class tokenizes the input text using the bert tokenization
    library. The final encoding is computed as the sum of the token
    embeddings.

    Args:
        uri: (string) the tensorflow hub URI for the model.
        batch_size: maximum number of sentences to encode at once
    """

    def __init__(self, uri, batch_size=100):
        """Create a new `BERTEncoder` object."""
        if not tf.test.is_gpu_available():
            glog.warning(
                "No GPU detected, BERT will run a lot slower than with a GPU.")
        self._batch_size = batch_size
        # TF1 style: build the graph once and keep a live session around.
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tf_hub.Module(uri, trainable=False)
            self._tokenizer = self._create_tokenizer_from_hub_module(uri)
            # Token ids and mask are fed with dynamic batch/sequence dims.
            self._input_ids = tf.placeholder(
                name="input_ids", shape=[None, None], dtype=tf.int32)
            self._input_mask = tf.placeholder(
                name="input_mask", shape=[None, None], dtype=tf.int32)
            # Single-segment inputs only, so every segment id is zero.
            self._segment_ids = tf.zeros_like(self._input_ids)
            bert_inputs = dict(
                input_ids=self._input_ids,
                input_mask=self._input_mask,
                segment_ids=self._segment_ids
            )
            embeddings = embed_fn(
                inputs=bert_inputs, signature="tokens", as_dict=True)[
                "sequence_output"
            ]
            # Zero out padding positions, then sum over the sequence axis
            # so each sentence becomes one fixed-size vector.
            mask = tf.expand_dims(
                tf.cast(self._input_mask, dtype=tf.float32), -1)
            self._embeddings = tf.reduce_sum(mask * embeddings, axis=1)
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
            glog.info("Initializing graph.")
            self._session.run(init_ops)

    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an array with shape (len(sentences), ENCODING_SIZE)
        """
        encodings = []
        for i in tqdm(range(0, len(sentences), self._batch_size),
                      "encoding sentence batches"):
            encodings.append(
                self._session.run(
                    self._embeddings,
                    self._feed_dict(sentences[i:i + self._batch_size])))
        # L2-normalise so encodings are comparable across sentence lengths.
        return l2_normalize(np.vstack(encodings))

    @staticmethod
    def _create_tokenizer_from_hub_module(uri):
        """Get the vocab file and casing info from the Hub module."""
        with tf.Graph().as_default():
            bert_module = tf_hub.Module(uri, trainable=False)
            tokenization_info = bert_module(
                signature="tokenization_info", as_dict=True)
            # Temporary session just to evaluate the two constant tensors.
            with tf.Session() as sess:
                vocab_file, do_lower_case = sess.run(
                    [
                        tokenization_info["vocab_file"],
                        tokenization_info["do_lower_case"]
                    ])
        return FullTokenizer(
            vocab_file=vocab_file, do_lower_case=do_lower_case)

    def _feed_dict(self, texts, max_seq_len=128):
        """Create a feed dict for feeding the texts as input.

        This uses dynamic padding so that the maximum sequence length is the
        smaller of `max_seq_len` and the longest sequence actually found in the
        batch. (The code in `bert.run_classifier` always pads up to the maximum
        even if the examples in the batch are all shorter.)
        """
        all_ids = []
        for text in texts:
            tokens = ["[CLS]"] + self._tokenizer.tokenize(text)
            # Possibly truncate the tokens (leaving room for "[SEP]").
            tokens = tokens[:(max_seq_len - 1)]
            tokens.append("[SEP]")
            ids = self._tokenizer.convert_tokens_to_ids(tokens)
            all_ids.append(ids)
        # Re-bind to the longest sequence actually present in this batch.
        max_seq_len = max(map(len, all_ids))
        input_ids = []
        input_mask = []
        for ids in all_ids:
            mask = [1] * len(ids)
            # Zero-pad up to the sequence length.
            while len(ids) < max_seq_len:
                ids.append(0)
                mask.append(0)
            input_ids.append(ids)
            input_mask.append(mask)
        return {self._input_ids: input_ids, self._input_mask: input_mask}
class CombinedEncoderClient(ClassificationEncoderClient):
    """concatenates the encodings of several ClassificationEncoderClients

    Args:
        encoders: A list of ClassificationEncoderClients
    """

    def __init__(self, encoders: list):
        """Store the sub-encoders whose outputs will be concatenated."""
        self._encoders = encoders

    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an array with shape (len(sentences), ENCODING_SIZE); the
            horizontal concatenation of each sub-encoder's output, in the
            order the encoders were given.
        """
        # Fix: removed a leftover debug print of the combined shape.
        return np.hstack([encoder.encode_sentences(sentences)
                          for encoder in self._encoders])
| 37.084233
| 93
| 0.626092
|
4a058d4fae9f89f4d109b48c8d20b8f43a03fe01
| 353
|
py
|
Python
|
tests/test_models.py
|
byteweaver/django-posts
|
9ba7f4a27e6437e15c9cc06a6046141264249adf
|
[
"BSD-3-Clause"
] | 2
|
2015-07-23T11:47:38.000Z
|
2016-09-26T06:55:00.000Z
|
tests/test_models.py
|
byteweaver/django-posts
|
9ba7f4a27e6437e15c9cc06a6046141264249adf
|
[
"BSD-3-Clause"
] | 1
|
2016-01-27T12:26:04.000Z
|
2016-01-27T12:26:04.000Z
|
tests/test_models.py
|
byteweaver/django-posts
|
9ba7f4a27e6437e15c9cc06a6046141264249adf
|
[
"BSD-3-Clause"
] | 2
|
2016-09-26T06:55:00.000Z
|
2020-08-31T10:22:37.000Z
|
from django.test import TestCase
from posts.factories import PostFactory
class PostTestCase(TestCase):
    """Model-level smoke test for factory-created posts."""

    def test_create_post(self):
        """A factory-created post is saved and has all core fields set."""
        post = PostFactory.create()
        # pk is only populated after a successful save.
        self.assertTrue(post.pk)
        self.assertTrue(post.author)
        self.assertTrue(post.headline)
        self.assertTrue(post.slug)
        self.assertTrue(post.text)
| 25.214286
| 39
| 0.696884
|
4a058d51d256f1621e5245dd9fedb9b54ad36fab
| 5,320
|
py
|
Python
|
pullbug/github_bug.py
|
ncr4/pullbug
|
393722558b2e997051fe1c86ea04951f635ef5e7
|
[
"MIT"
] | null | null | null |
pullbug/github_bug.py
|
ncr4/pullbug
|
393722558b2e997051fe1c86ea04951f635ef5e7
|
[
"MIT"
] | null | null | null |
pullbug/github_bug.py
|
ncr4/pullbug
|
393722558b2e997051fe1c86ea04951f635ef5e7
|
[
"MIT"
] | null | null | null |
import os
import requests
import logging
from pullbug.logger import PullBugLogger
from pullbug.messages import Messages
# Credentials and default owner come from the environment so they never
# live in source control.
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
GITHUB_OWNER = os.getenv('GITHUB_OWNER')
# Standard headers for every GitHub API call.
GITHUB_HEADERS = {
    'Authorization': f'token {GITHUB_TOKEN}',
    'Content-Type': 'application/json; charset=utf-8'
}
LOGGER = logging.getLogger(__name__)
class GithubBug():
    """Fetch open pull requests from GitHub and report them via chat."""

    @classmethod
    def run(cls, github_owner, github_state, github_context, wip, slack, rocketchat):
        """Run the logic to get PR's from GitHub and
        send that data via message.

        Returns:
            The message that was logged (and optionally sent).
        """
        PullBugLogger._setup_logging(LOGGER)
        repos = cls.get_repos(github_owner, github_context)
        pull_requests = cls.get_pull_requests(repos, github_owner, github_state)
        if pull_requests == []:
            message = 'No pull requests are available from GitHub.'
            LOGGER.info(message)
            return message
        message_preamble = '\n:bug: *The following pull requests on GitHub are still open and need your help!*\n'
        final_message = message_preamble + cls.iterate_pull_requests(pull_requests, wip)
        if slack:
            Messages.slack(final_message)
        if rocketchat:
            Messages.rocketchat(final_message)
        LOGGER.info(final_message)
        # Fix: the success path previously returned None while the empty
        # path returned a message; return consistently.
        return final_message

    @classmethod
    def get_repos(cls, github_owner, github_context=''):
        """Get all repos of the GITHUB_OWNER.

        Args:
            github_owner: the user or organisation name.
            github_context: "users" or "orgs" (GitHub API path segment).

        Raises:
            ValueError: when the API reports "Not Found" for the parameters.
            requests.exceptions.RequestException: on transport errors.
        """
        LOGGER.info('Bugging GitHub for repos...')
        try:
            repos_response = requests.get(
                f'https://api.github.com/{github_context}/{github_owner}/repos?per_page=100',
                headers=GITHUB_HEADERS
            )
            LOGGER.debug(repos_response.text)
            if 'Not Found' in repos_response.text:
                error = f'Could not retrieve GitHub repos due to bad parameter: {github_owner} | {github_context}.'
                LOGGER.error(error)
                raise ValueError(error)
            LOGGER.info('GitHub repos retrieved!')
        except requests.exceptions.RequestException as response_error:
            LOGGER.error(
                f'Could not retrieve GitHub repos: {response_error}'
            )
            raise requests.exceptions.RequestException(response_error)
        return repos_response.json()

    @classmethod
    def get_pull_requests(cls, repos, github_owner, github_state):
        """Grab all pull requests from each repo.

        Args:
            repos: list of repo dicts from get_repos.
            github_owner: the user or organisation name.
            github_state: PR state filter ("open", "closed", "all").
        """
        LOGGER.info('Bugging GitHub for pull requests...')
        pull_requests = []
        for repo in repos:
            try:
                pull_response = requests.get(
                    f'https://api.github.com/repos/{github_owner}/{repo["name"]}/pulls?state={github_state}&per_page=100',  # noqa
                    headers=GITHUB_HEADERS
                )
                LOGGER.debug(pull_response.text)
                # Extend with every PR in the repo (empty lists add nothing).
                pull_requests.extend(pull_response.json())
            except requests.exceptions.RequestException as response_error:
                LOGGER.error(
                    f'Could not retrieve GitHub pull requests for {repo["name"]}: {response_error}'
                )
                raise requests.exceptions.RequestException(response_error)
            except TypeError:
                error = f'Could not retrieve GitHub pull requests due to bad parameter: {github_owner} | {github_state}.'  # noqa
                LOGGER.error(error)
                raise TypeError(error)
        LOGGER.info('Pull requests retrieved!')
        return pull_requests

    @classmethod
    def iterate_pull_requests(cls, pull_requests, wip):
        """Iterate through each pull request of a repo
        and build one combined message.

        Args:
            pull_requests: list of pull-request dicts.
            wip: when falsy, skip PRs whose title contains "WIP".
        """
        final_message = ''
        for pull_request in pull_requests:
            if not wip and 'WIP' in pull_request['title'].upper():
                continue
            final_message += cls.prepare_message(pull_request)
        return final_message

    @classmethod
    def prepare_message(cls, pull_request):
        """Prepare the message with pull request data.

        Args:
            pull_request: one pull-request dict from the GitHub API.

        Returns:
            A chat-formatted message string for this PR.
        """
        # TODO: Check requested_reviewers array also
        assignees = pull_request.get('assignees') or []
        if assignees:
            users = ''.join(
                f"<{assignee['html_url']}|{assignee['login']}> "
                for assignee in assignees)
        else:
            users = 'No assignee'
        # Fix: the API returns None for an empty PR description, which made
        # len() raise TypeError; normalise to '' first.
        body = pull_request['body'] or ''
        # Truncate description after 120 characters
        description = (body[:120] + '...') if len(body) > 120 else body
        message = f"\n:arrow_heading_up: *Pull Request:* <{pull_request['html_url']}|" + \
            f"{pull_request['title']}>\n*Description:* {description}\n*Waiting on:* {users}\n"
        return message
| 41.24031
| 130
| 0.594549
|
4a058e054523026ec602695ebc8977af45a26797
| 2,091
|
py
|
Python
|
semantic-segmentation/demo.py
|
LyMarco/Semantic-Mono-Depth
|
a4002e47e0749cb565720c3fb1c25d6ac2af547e
|
[
"MIT"
] | 1
|
2020-03-09T20:53:07.000Z
|
2020-03-09T20:53:07.000Z
|
semantic-segmentation/demo.py
|
LyMarco/Semantic-Mono-Depth
|
a4002e47e0749cb565720c3fb1c25d6ac2af547e
|
[
"MIT"
] | null | null | null |
semantic-segmentation/demo.py
|
LyMarco/Semantic-Mono-Depth
|
a4002e47e0749cb565720c3fb1c25d6ac2af547e
|
[
"MIT"
] | null | null | null |
# Single-image semantic-segmentation demo: load a pretrained checkpoint,
# run inference on one image, and save a colorized mask plus a label-id mask.
import os
import sys
import argparse
from PIL import Image
import numpy as np
import cv2
import torch
from torch.backends import cudnn
import torchvision.transforms as transforms
import network
from optimizer import restore_snapshot
from datasets import cityscapes
from config import assert_and_infer_cfg

parser = argparse.ArgumentParser(description='demo')
# NOTE(review): default='' is redundant on a required argument.
parser.add_argument('--demo-image', type=str, default='', help='path to demo image', required=True)
parser.add_argument('--snapshot', type=str, default='./pretrained_models/cityscapes_best.pth', help='pre-trained checkpoint', required=True)
parser.add_argument('--arch', type=str, default='network.deepv3.DeepWV3Plus', help='network architecture used for inference')
parser.add_argument('--save-dir', type=str, default='./save', help='path to save your results')
args = parser.parse_args()
assert_and_infer_cfg(args, train_mode=False)
cudnn.benchmark = False
torch.cuda.empty_cache()

# get net
args.dataset_cls = cityscapes
net = network.get_net(args, criterion=None)
net = torch.nn.DataParallel(net).cuda()  # requires a CUDA device
print('Net built.')
net, _ = restore_snapshot(net, optimizer=None, snapshot=args.snapshot, restore_optimizer_bool=False)
net.eval()  # inference mode: no dropout / batch-norm updates
print('Net restored.')

# get data: normalise with ImageNet mean/std, as expected by the backbone
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
img_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(*mean_std)])
img = Image.open(args.demo_image).convert('RGB')
img_tensor = img_transform(img)

# predict
with torch.no_grad():
    img = img_tensor.unsqueeze(0).cuda()  # add a batch dimension
    pred = net(img)
    print('Inference done.')

pred = pred.cpu().numpy().squeeze()
pred = np.argmax(pred, axis=0)  # per-pixel train-id class map
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)
# Save a human-viewable colorized mask...
colorized = args.dataset_cls.colorize_mask(pred)
colorized.save(os.path.join(args.save_dir, 'color_mask.png'))
# ...and a raw mask with train ids mapped back to dataset label ids.
label_out = np.zeros_like(pred)
for label_id, train_id in args.dataset_cls.id_to_trainid.items():
    label_out[np.where(pred == train_id)] = label_id
cv2.imwrite(os.path.join(args.save_dir, 'pred_mask.png'), label_out)
print('Results saved.')
| 33.725806
| 140
| 0.760402
|
4a059192d7ef1b2695bd306fef839405756115be
| 2,172
|
py
|
Python
|
slimgbm/histogram.py
|
zgw21cn/slimgbm
|
576430b7122be1624842b92e89cccda64626eda1
|
[
"MIT"
] | 1
|
2017-11-17T13:54:55.000Z
|
2017-11-17T13:54:55.000Z
|
slimgbm/histogram.py
|
zgw21cn/slimgbm
|
576430b7122be1624842b92e89cccda64626eda1
|
[
"MIT"
] | null | null | null |
slimgbm/histogram.py
|
zgw21cn/slimgbm
|
576430b7122be1624842b92e89cccda64626eda1
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from pandas.tools.tile import _bins_to_cuts
from slimgbm.slimgbm.bin import Bin
import pandas as pd
import pandas.core.algorithms as algos
import numpy as np
class Histogram(object):
    """Feature histogram: bins one column by quantiles and accumulates
    gradient/hessian sums per bin to search for the best split point."""

    def __init__(self):
        # Bin objects in ascending order of their bounds.
        self.bins = []

    def _qcut(self, x, q, labels=None, retbins=False, precision=3):
        """Quantile-cut *x* into at most *q* bins (duplicate edges dropped)."""
        quantiles = np.linspace(0, 1, q + 1)
        bins = algos.quantile(x, quantiles)
        bins = np.unique(bins)
        return _bins_to_cuts(x, bins, labels=labels, retbins=retbins,
                             precision=precision, include_lowest=True)

    def construct_bins(self, data, col, max_bins):
        """
        Populate self.bins from one feature column of *data*.

        :param data: DataFrame with the columns: col, 'label', 'grad', 'hess'
        :param col: name of the feature column to bin
        :param max_bins: upper bound on the number of bins
        """
        if not isinstance(data, pd.core.frame.DataFrame):
            # Fix: the original message said "Series" although the check is
            # (and must be) for a DataFrame.
            raise TypeError("data should be a pandas.core.frame.DataFrame")
        bins_count = min(max_bins, len(data[col].unique()) - 1)
        bins, cut_points = self._qcut(data[col].values, bins_count, retbins=True)
        for i in range(len(cut_points) - 1):
            mask = bins.codes == i
            grad_sum = data.grad[mask].sum()
            hess_sum = data.hess[mask].sum()
            self.bins.append(
                Bin(cut_points[i], cut_points[i + 1], grad_sum, hess_sum))

    def find_best_split(self, reg_lambda, gamma):
        """Return (best_split, best_gain) over all bin boundaries.

        Fix: the original recomputed all four partial sums from scratch for
        every candidate split (O(n^2)); a single sweep with running prefix
        sums gives the same result in O(n).

        :param reg_lambda: L2 regularisation term on the leaf weights
        :param gamma: complexity penalty subtracted from each gain
        """
        best_gain = float("-inf")
        best_split = None
        # Totals over all bins; the right side is total minus left prefix.
        G_total = sum(b.grad_sum for b in self.bins)
        H_total = sum(b.hess_sum for b in self.bins)
        GL = 0.0
        HL = 0.0
        for i in range(1, len(self.bins)):
            GL += self.bins[i - 1].grad_sum
            HL += self.bins[i - 1].hess_sum
            GR = G_total - GL
            HR = H_total - HL
            gain = 0.5 * (GL ** 2 / (HL + reg_lambda)
                          + GR ** 2 / (HR + reg_lambda)
                          - (GL + GR) ** 2 / (HL + HR + reg_lambda)) - gamma
            if gain > best_gain:
                best_split = self.bins[i].upper_bound
                best_gain = gain
        return best_split, best_gain
| 22.163265
| 111
| 0.544199
|
4a059267f261f503f3a42fc2e6f4a9cb3a8bbdd7
| 1,039
|
py
|
Python
|
preprocessing/preprocess2.py
|
ZuowenWang0000/GRUBERT-A-GRU-Based-Method-to-Fuse-BERT-Hidden-Layers
|
992967fe102493eadf37423de5710761f007bcb1
|
[
"MIT"
] | 4
|
2020-11-16T15:02:56.000Z
|
2022-02-07T06:39:32.000Z
|
preprocessing/preprocess2.py
|
ZuowenWang0000/GRUBERT-A-GRU-Based-Method-to-Fuse-BERT-Hidden-Layers
|
992967fe102493eadf37423de5710761f007bcb1
|
[
"MIT"
] | null | null | null |
preprocessing/preprocess2.py
|
ZuowenWang0000/GRUBERT-A-GRU-Based-Method-to-Fuse-BERT-Hidden-Layers
|
992967fe102493eadf37423de5710761f007bcb1
|
[
"MIT"
] | 2
|
2021-07-07T02:35:23.000Z
|
2021-11-10T16:54:38.000Z
|
import re
import sys
import pandas as pd
from joblib import Parallel, delayed
import multiprocessing
# start_index = int(input("start index\n"))
# end_index = int(input("end inex\n"))
file_to_process = str(sys.argv[1])
data = pd.read_csv(file_to_process)
# print(data)
tweets = data['text']
def process2_iter(tweet):
    """Normalise one tweet: strip surrounding whitespace; map NaN to ''.

    pandas represents missing text cells as float NaN, hence the float
    check.  (The original also re-matched whitespace-only strings, but
    after strip() such strings are already empty, so that branch was dead
    and has been removed; `isinstance` replaces the `type(...)==float`
    anti-pattern.)
    """
    if isinstance(tweet, float):
        return ""
    return tweet.strip()
# Clean every tweet in parallel, one worker per CPU core.
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs = num_cores)(delayed(process2_iter)(tweet) for tweet in tweets)
print("length before processing :{}".format(len(data['text'])))
data['text'] = results
# drop rows where tweet length == 0
drop_list = []
for i in range(len(data['text'])):
    if len(data['text'][i]) == 0:
        print(data['text'][i])
        drop_list.extend([i])
# print(drop_list)
data = data.drop(drop_list)
# print(data)
print("length after processing :{}".format(len(data['text'])))
# Write the cleaned file next to the input, with a "v2" suffix appended.
data.to_csv(file_to_process + "v2", index=False)
| 26.641026
| 89
| 0.6718
|
4a05938af587349c3a114d2efe75198b21d28d8b
| 532
|
py
|
Python
|
imagepy/menus/Window/widgets_plgs.py
|
dada1437903138/imagepy
|
65d9ce088894eef587054e04018f9d34ff65084f
|
[
"BSD-4-Clause"
] | 1,178
|
2017-05-25T06:59:01.000Z
|
2022-03-31T11:38:53.000Z
|
imagepy/menus/Window/widgets_plgs.py
|
TomisTony/imagepy
|
3c378ebaf72762b94f0826a410897757ebafe689
|
[
"BSD-4-Clause"
] | 76
|
2017-06-10T17:01:50.000Z
|
2021-12-23T08:13:29.000Z
|
imagepy/menus/Window/widgets_plgs.py
|
TomisTony/imagepy
|
3c378ebaf72762b94f0826a410897757ebafe689
|
[
"BSD-4-Clause"
] | 315
|
2017-05-25T12:59:53.000Z
|
2022-03-07T22:52:21.000Z
|
from sciapp.action import Free
class Widgets(Free):
    """Toggle the app's widgets panel (a sciapp Free command)."""
    title = 'Widgets'
    asyn = False  # run synchronously in the UI thread
    def run(self, para = None):
        """Show/hide the widgets panel."""
        self.app.switch_widget()
class ToolBar(Free):
    """Toggle the app's toolbar (a sciapp Free command)."""
    title = 'Toolbar'
    asyn = False  # run synchronously in the UI thread
    def run(self, para = None):
        """Show/hide the toolbar."""
        self.app.switch_toolbar()
class TableWindow(Free):
    """Toggle the app's tables window (a sciapp Free command)."""
    title = 'Tables Window'
    asyn = False  # run synchronously in the UI thread
    def run(self, para = None):
        """Show/hide the tables window."""
        self.app.switch_table()
# Plugins exported to the Window menu, in display order.
plgs = [Widgets, ToolBar, TableWindow]
| 19.703704
| 51
| 0.699248
|
4a05942756237e720646a132dca0c01362231399
| 617
|
py
|
Python
|
regression/issue_56.py
|
timgates42/pyahocorasick
|
3eab1ed66cea1dd30a48eff03927326a166502e5
|
[
"BSD-3-Clause"
] | 707
|
2015-03-26T02:43:06.000Z
|
2022-03-28T07:54:06.000Z
|
regression/issue_56.py
|
timgates42/pyahocorasick
|
3eab1ed66cea1dd30a48eff03927326a166502e5
|
[
"BSD-3-Clause"
] | 140
|
2015-04-18T04:47:19.000Z
|
2022-03-09T11:24:50.000Z
|
regression/issue_56.py
|
timgates42/pyahocorasick
|
3eab1ed66cea1dd30a48eff03927326a166502e5
|
[
"BSD-3-Clause"
] | 112
|
2015-06-17T03:51:32.000Z
|
2022-03-09T01:58:58.000Z
|
import ahocorasick
def iter_results(s):
    """Collect all results yielded by Automaton.iter over *s*.

    Fix: the original iterated over the global ``teststr`` instead of the
    ``s`` argument, so the parameter was silently ignored.
    """
    return list(A.iter(s))
def find_all_results(s):
    """Collect (end_index, value) pairs reported by Automaton.find_all."""
    collected = []
    A.find_all(s, lambda idx, val: collected.append((idx, val)))
    return collected
# Build an automaton whose values are the matched words themselves.
A = ahocorasick.Automaton()
for word in ("poke", "go", "pokegois", "egoist"):
    A.add_word(word, word)
A.make_automaton()
teststr = 'pokego pokego pokegoist'
# Regression check for issue #56: find_all must report exactly the same
# matches as iter.
expected = iter_results(teststr)
findall = find_all_results(teststr)
if findall != expected:
    print("expected: %s" % expected)
    print("findall : %s" % findall)
assert findall == expected
| 14.690476
| 49
| 0.612642
|
4a0594f20c70536297d32c6e7b4661d36cd52524
| 33,533
|
py
|
Python
|
gdf/_gdfnetcdf.py
|
dunkgray/gdf
|
7b39f0c90cf63d501b36ea9d754269616d79e0d4
|
[
"Apache-2.0"
] | 7
|
2015-08-27T09:20:55.000Z
|
2019-06-27T14:00:11.000Z
|
gdf/_gdfnetcdf.py
|
alex-ip/gdf
|
7b39f0c90cf63d501b36ea9d754269616d79e0d4
|
[
"Apache-2.0"
] | null | null | null |
gdf/_gdfnetcdf.py
|
alex-ip/gdf
|
7b39f0c90cf63d501b36ea9d754269616d79e0d4
|
[
"Apache-2.0"
] | 5
|
2015-05-13T05:58:13.000Z
|
2019-12-09T00:36:11.000Z
|
#!/usr/bin/env python
# Some code derived from hnetcdf_builder.py by Matt Paget & Edward King of CSIRO
# https://stash.csiro.au/projects/CMAR_RS/repos/netcdf-tools/browse/create/netcdf_builder.py
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Created on Jun 9, 2015
@author: Alex Ip (based on code by Matt Paget & Edward King)
'''
import netCDF4
import numpy as np
import os
import re
from collections import OrderedDict
import logging
from osgeo import gdal, gdalconst, osr
from datetime import datetime
from _gdfutils import log_multiline
# Only needed for testing
from pprint import pprint
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module
try:
import netcdf_builder
except ImportError:
logger.error('Requires netcdf_builder.py (https://stash.csiro.au/projects/CMAR_RS/repos/netcdf-tools/browse/create/netcdf_builder.py)')
raise
class GDFNetCDF(object):
    '''
    Class GDFNetCDF - Class to manage GDF netCDF storage units
    '''
    def __init__(self, storage_config, netcdf_filename=None, netcdf_mode=None, netcdf_format=None, decimal_places=None):
        '''
        Constructor for class GDFNetCDF

        Parameters:
            storage_config: nested dict containing configuration for storage type (defined in class GDF)
            netcdf_filename: Filename of netCDF file to be opened immediately, or None to defer opening
            netcdf_mode: Mode for netCDF file open (defaults to 'r' for reading)
            netcdf_format: Format for netCDF file open (defaults to 'NETCDF4_CLASSIC')
            decimal_places: rounding precision applied to computed dimension index values (defaults to 6)
        '''
        self._isopen = False  # Tracks whether a netCDF dataset is currently open
        self.storage_config = storage_config
        self.netcdf_filename = netcdf_filename
        self.netcdf_mode = netcdf_mode or 'r' # Default to 'r' for reading
        self.netcdf_format = netcdf_format or 'NETCDF4_CLASSIC'
        self.decimal_places = decimal_places if decimal_places is not None else 6 # Default to 6 decimal places if no precision specified
        # Open immediately only when a filename was supplied; otherwise defer to open()
        if netcdf_filename is None:
            self.netcdf_object = None
        else:
            self.open(netcdf_filename)
    def __del__(self):
        '''
        Destructor for class GDFNetCDF - ensures the underlying netCDF file is closed
        '''
        self.close()

    def close(self):
        '''
        Close the underlying netCDF dataset (if any) and mark this instance as closed.
        Safe to call repeatedly, or before any file has been opened.
        '''
        self._isopen = False
        try:
            self.netcdf_object.close()
        except:
            # netcdf_object may be None or already closed - close is best-effort only
            pass
def open(self, netcdf_filename=None, netcdf_mode=None, netcdf_format=None):
'''
Constructor for class GDFNetCDF
Parameters:
storage_config: nested dict containing configuration for storage type (defined in class GDF)
netcdf_filename: Filename of netCDF file to be opened
netcdf_mode: Mode for netCDF file open
netcdf_format: Format for netCDF file open
'''
self._isopen = False
# Default to existing instance values
self.netcdf_filename = netcdf_filename or self.netcdf_filename
assert self.netcdf_filename, 'NetCDF filename not provided'
self.netcdf_mode = netcdf_mode or self.netcdf_mode
self.netcdf_format = netcdf_format or self.netcdf_format
if netcdf_mode == 'w':
self.netcdf_object = netCDF4.Dataset(self.netcdf_filename, mode=self.netcdf_mode, format=self.netcdf_format)
else:
# Format will be deduced by the netCDF modules
self.netcdf_object = netCDF4.Dataset(self.netcdf_filename, mode=self.netcdf_mode)
self.netcdf_format = self.netcdf_object.file_format
self._isopen = True
def create(self, netcdf_filename, index_tuple, dimension_index_dict={}, netcdf_format=None):
'''
Create new NetCDF File in 'w' mode with required dimensions
Parameters:
index_tuple = tuple of storage unit indices
dimension_index_dict: dict of iterables or 1D numpy arrays keyed by dimension_tag. Required for irregular dimensions (e.g. time)
'''
def set_dimension(dimension, dimension_config, index, dimension_index_vector=None):
'''
Parameters:
dimension: Dimension tag (e.g. X, Y, T, etc.)
dimension_config: Nested dict containing storage configuration from GDF.storage_config['<storage_type>']
index: index for storage unit
dimension_index_vector: Numpy array of index values for irregular dimension (e.g. time) or None for unlimited irregular dimension
'''
logger.debug('dimension = %s', dimension)
logger.debug('dimension_config = %s', dimension_config)
logger.debug('index = %s', index)
logger.debug('dimension_index_vector = %s', dimension_index_vector)
if dimension_config['indexing_type'] == 'regular' and not dimension_index_vector:
element_size = dimension_config['dimension_element_size']
dimension_min = index * dimension_config['dimension_extent'] + dimension_config['dimension_origin'] + element_size / 2.0 # Half pixel to account for netCDF centre of pixel reference
dimension_max = dimension_min + dimension_config['dimension_extent']
dimension_index_vector = np.around(np.arange(dimension_min, dimension_max, element_size), self.decimal_places)
# Cater for reversed index (e.g. positive Y index tends Southwards when image origin is in UL/NW corner)
if dimension_config['reverse_index']:
dimension_index_vector = dimension_index_vector[::-1]
#TODO: Implement fixed indexing type
log_multiline(logger.debug, dimension_index_vector, 'dimension_index_vector for %s' % dimension, '\t')
if dimension_index_vector is not None:
dimension_index_shape = dimension_index_vector.shape
assert len(dimension_index_shape) == 1, 'Invalid dimension_index_vector shape. Must be 1D'
assert dimension_index_shape[0] <= dimension_config['dimension_elements'], 'dimension_index_vector must have %d elements or fewer' % dimension_config['dimension_elements']
dimension_size = len(dimension_index_vector)
#TODO: Do range checks to ensure indices are within storage unit boundaries
else:
dimension_size = 0 # Unlimited dimension
dimension_name = dimension_config['dimension_name']
# Dimensions can be renamed with the 'renameDimension' method of the file
self.netcdf_object.createDimension(dimension_name, dimension_size)
variable = self.netcdf_object.createVariable(dimension_name,'f8',(dimension_name,))
for property_name, property_value in dimension_config['properties'].items():
logger.debug('property_name = %s, property_value = %s', property_name, property_value)
variable.__setattr__(property_name, property_value)
variable[:] = dimension_index_vector
def set_variable(variable_name, variable_config):
dimensions = self.storage_config['dimensions'].keys()
dimension_names = tuple([self.storage_config['dimensions'][dimension]['dimension_name']
for dimension in dimensions])
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
chunksizes = tuple([min(self.storage_config['dimensions'][dimension]['dimension_cache'], nc_shape_dict[dimension])
for dimension in dimensions])
logger.debug('Creating variable %s with dimensions %s and chunk sizes %s', variable_name, dimensions, chunksizes)
variable = self.netcdf_object.createVariable(variable_name, variable_config['netcdf_datatype_name'], dimensions=dimension_names,
chunksizes=chunksizes, fill_value=variable_config['nodata_value'], zlib=True)
logger.debug('variable = %s' % variable)
# Set variable metadata
metadata_dict = {variable_name + ':' + 'coordinates': ' '.join(dimension_names),
variable_name + ':' + 'grid_mapping': 'crs',
variable_name + ':' + 'standard_name': variable_name,
variable_name + ':' + 'long_name': variable_config['measurement_type_name']
}
self.set_attributes(metadata_dict)
self.netcdf_object.sync()
# Start of create function
# Default to existing instance value
self.netcdf_mode = 'w'
self.netcdf_format = netcdf_format or self.netcdf_format
self.open(netcdf_filename=netcdf_filename)
for dimension, dimension_config in self.storage_config['dimensions'].items():
set_dimension(dimension, dimension_config, index_tuple[self.storage_config['dimensions'].keys().index(dimension)], dimension_index_dict.get(dimension))
for variable, variable_config in self.storage_config['measurement_types'].items():
set_variable(variable, variable_config)
logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
creation_date = datetime.utcnow().strftime("%Y%m%d")
self.netcdf_object.history = 'NetCDF-CF file created %s.' %(creation_date)
self.netcdf_object.license = 'Generalised Data Framework NetCDF-CF Test File'
self.netcdf_object.spatial_coverage = '%f %s grid' % (self.storage_config['dimensions']['X']['dimension_extent'],
self.storage_config['dimensions']['X']['reference_system_unit'])
self.netcdf_object.featureType = 'grid'
self.sync()
    def write_slice(self, variable_name, slice_array, indices_dict):
        '''
        Function to set a specified slice in the specified netCDF variable.

        Parameters:
            variable_name: Name of variable to which slice array will be written
            slice_array: Numpy array to be written to netCDF file; its shape must match
                the storage unit's extent over all dimensions NOT listed in indices_dict
            indices_dict: Dict keyed by dimension tag indicating the dimension(s) & index/indices
                to which the slice should be written
        '''
        if not self._isopen:
            self.open()
        dimension_config = self.storage_config['dimensions']
        # NOTE(review): indexing dict.keys() below only works under Python 2 - confirm target interpreter
        dimensions = dimension_config.keys()
        index_dimensions = indices_dict.keys()
        dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
        # Dict of dimensions and sizes read from netCDF
        nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
        logger.debug('variable_name = %s', variable_name)
        logger.debug('slice_array.shape = %s', slice_array.shape)
        logger.debug('indices_dict = %s', indices_dict)
        logger.debug('nc_shape_dict = %s', nc_shape_dict)
        assert set(index_dimensions) <= set(dimensions), 'Invalid slice index dimension(s)'
        assert len(slice_array.shape) + len(indices_dict) == len(dimensions), 'Indices must be provided for all dimensions not covered by the data array'
        # Expected shape covers every dimension without an explicit index
        slice_shape = tuple(nc_shape_dict[dimension] for dimension in dimensions if dimension not in indices_dict)
        assert slice_array.shape == slice_shape, 'Shape of data array %s does not match storage unit slice shape %s' % (slice_array.shape, slice_shape)
        # Create slices for accessing netcdf array: single-element slice where indexed, full range otherwise
        slicing = [slice(indices_dict[dimension], indices_dict[dimension] + 1) if dimension in index_dimensions
                   else slice(0, nc_shape_dict[dimension]) for dimension in dimensions]
        logger.debug('slicing = %s', slicing)
        logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
        variable = self.netcdf_object.variables[variable_name]
        # logger.debug('variable = %s' % variable)
        logger.debug('slice_array = %s', slice_array)
        variable[slicing] = slice_array
    def read_slice(self, variable_name, indices_dict):
        '''
        Function to read a specified slice in the specified netCDF variable.

        Parameters:
            variable_name: Name of variable from which the slice array will be read
            indices_dict: Dict keyed by dimension tag indicating the dimension(s) & index/indices
                from which the slice should be read

        Returns:
            slice_array: Numpy array read from netCDF file (retains single-element axes
                for the indexed dimensions)
        '''
        if not self._isopen:
            self.open()
        dimension_config = self.storage_config['dimensions']
        # NOTE(review): indexing dict.keys() below only works under Python 2 - confirm target interpreter
        dimensions = dimension_config.keys()
        index_dimensions = indices_dict.keys()
        dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
        # Dict of dimensions and sizes read from netCDF
        nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
        logger.debug('variable_name = %s', variable_name)
        logger.debug('indices_dict = %s', indices_dict)
        logger.debug('nc_shape_dict = %s', nc_shape_dict)
        assert set(index_dimensions) <= set(dimensions), 'Invalid slice index dimension(s)'
        # Create slices for accessing netcdf array: single-element slice where indexed, full range otherwise
        slicing = [slice(indices_dict[dimension], indices_dict[dimension] + 1) if dimension in index_dimensions
                   else slice(0, nc_shape_dict[dimension]) for dimension in dimensions]
        logger.debug('slicing = %s', slicing)
        logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
        variable = self.netcdf_object.variables[variable_name]
        # logger.debug('variable = %s' % variable)
        slice_array = variable[slicing]
        logger.debug('slice_array = %s', slice_array)
        return slice_array
def get_subset_indices(self, range_dict):
'''
Function to read an array subset of the specified netCDF variable
Parameters:
variable_name: Name of variable from which the subset array will be read
range_dict: Dict keyed by dimension tag containing the dimension(s) & range tuples from which the subset should be read
Returns:
dimension_indices_dict: Dict containing array indices for each dimension
'''
if not self._isopen:
self.open()
dimension_config = self.storage_config['dimensions']
dimensions = dimension_config.keys()
range_dimensions = range_dict.keys()
dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
# Dict of dimensions and sizes read from netCDF
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
logger.debug('range_dict = %s', range_dict)
logger.debug('nc_shape_dict = %s', nc_shape_dict)
assert set(range_dimensions) <= set(dimensions), 'Invalid range dimension(s)'
# Create slices for accessing netcdf array
dimension_indices_dict = {} # Dict containing all indices for each dimension
for dimension_index in range(len(dimensions)):
dimension = dimensions[dimension_index]
dimension_array = self.netcdf_object.variables[dimension_names[dimension_index]][:]
if dimension in range_dimensions:
logger.debug('dimension_array = %s', dimension_array)
logger.debug('range = %s', range_dict[dimension])
mask_array = ((dimension_array > range_dict[dimension][0]) * (dimension_array <= range_dict[dimension][1]))
index_array = np.where(mask_array)
logger.debug('index_array = %s', index_array)
dimension_indices_dict[dimension] = dimension_array[mask_array]
if not index_array:
logger.warning('Invalid range %s for dimension %s', range_dict[dimension], dimension)
return None
else: # Range not defined for this dimension - take the whole lot
dimension_indices_dict[dimension] = dimension_array
return dimension_indices_dict
    def read_subset(self, variable_name, range_dict, max_bytes=None):
        '''
        Function to read an array subset of the specified netCDF variable.

        Parameters:
            variable_name: Name of variable from which the subset array will be read
            range_dict: Dict keyed by dimension tag containing the dimension(s) & range tuples
                from which the subset should be read
            max_bytes: integer specifying maximum number of bytes per read. None = unlimited

        Returns:
            subset_array: Numpy array read from netCDF file
            dimension_indices_dict: Dict containing coordinate values selected for each dimension
            (or None if any requested range selects no values)
        '''
        if not self._isopen:
            self.open()
        dimension_config = self.storage_config['dimensions']
        # NOTE(review): indexing dict.keys() below only works under Python 2 - confirm target interpreter
        dimensions = dimension_config.keys()
        range_dimensions = range_dict.keys()
        dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
        # Dict of dimensions and sizes read from netCDF
        nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
        logger.debug('variable_name = %s', variable_name)
        logger.debug('range_dict = %s', range_dict)
        logger.debug('nc_shape_dict = %s', nc_shape_dict)
        assert set(range_dimensions) <= set(dimensions), 'Invalid range dimension(s)'
        # Create slices for accessing netcdf array
        dimension_indices_dict = {} # Dict containing all indices for each dimension
        slicing = []
        for dimension_index in range(len(dimensions)):
            dimension = dimensions[dimension_index]
            dimension_array = self.netcdf_object.variables[dimension_names[dimension_index]][:]
            if dimension in range_dimensions:
                logger.debug('dimension_array = %s', dimension_array)
                logger.debug('range = %s', range_dict[dimension])
                # Select coordinates in the half-open interval (min, max]
                mask_array = ((dimension_array > range_dict[dimension][0]) * (dimension_array <= range_dict[dimension][1]))
                index_array = np.where(mask_array)
                logger.debug('index_array = %s', index_array)
                dimension_indices_dict[dimension] = dimension_array[mask_array]
                try:
                    # Contiguous slice spanning first to last selected index
                    dimension_slice = slice(index_array[0][0], index_array[0][-1] + 1)
                except IndexError:
                    # Empty selection: index_array[0] has no elements
                    logger.warning('Invalid range %s for dimension %s', range_dict[dimension], dimension)
                    return None
            else: # Range not defined for this dimension
                dimension_indices_dict[dimension] = dimension_array
                dimension_slice = slice(0, nc_shape_dict[dimension])
            slicing.append(dimension_slice)
        logger.debug('slicing = %s', slicing)
        variable = self.netcdf_object.variables[variable_name]
        # logger.debug('variable = %s' % variable)
        if max_bytes == None: # Unlimited read size
            subset_array = variable[slicing]
        else: # Break read operation into separate reads each under maximum size
            #TODO: Allow for case where slice size is greater than max_bytes - i.e. partitioning in more than one dimension
            subset_shape = tuple([s.stop - s.start for s in slicing])
            logger.debug('subset_shape = %s', subset_shape)
            # Bytes per slice along the first dimension = item size * product of remaining extents
            # NOTE(review): 'reduce' is a builtin only under Python 2 - confirm target interpreter
            slice_bytes = variable[[slice(0, 1) for dimension in dimension_names]].itemsize * reduce(lambda x, y: x*y, [s.stop - s.start for s in slicing[1:]])
            # Largest slice count <= max_bytes, rounded down to a whole number of cache blocks
            max_slices = (max_bytes //
                          slice_bytes //
                          self.storage_config['dimensions'][dimensions[0]]['dimension_cache'] *
                          self.storage_config['dimensions'][dimensions[0]]['dimension_cache'])
            logger.debug('max_slices = %s', max_slices)
            subset_array = np.zeros(shape=subset_shape, dtype=variable.dtype)
            # Copy the subset in chunks of at most max_slices along the first dimension
            for source_start_index in range(slicing[0].start, slicing[0].stop, max_slices):
                source_stop_index = min([source_start_index + max_slices, slicing[0].stop])
                source_slicing = [slice(source_start_index, source_stop_index)] + slicing[1:]
                destination_slicing = [slice(source_slicing[slice_index].start - slicing[slice_index].start, source_slicing[slice_index].stop - slicing[slice_index].start)
                                       for slice_index in range(len(source_slicing))]
                logger.debug('source_slicing = %s', source_slicing)
                logger.debug('destination_slicing = %s', destination_slicing)
                subset_array[destination_slicing] = variable[source_slicing]
        logger.debug('subset_array = %s', subset_array)
        return subset_array, dimension_indices_dict
def get_datatype(self, variable_name, convention='numpy'):
'''
Returns NetCDF datatype of specified variable
'''
return self.storage_config['measurement_types'][variable_name].get(convention + '_datatype_name')
    def get_attributes(self, verbose=None, normalise=True):
        """
        Copy the global and variable attributes from the open netCDF object to an
        OrderedDict (a little like 'ncdump -h' without the formatting).

        Global attributes are keyed by attribute name; variable attributes are keyed
        by 'variable:attribute'. Normalise means that some NumPy types returned from
        the netCDF module are converted to equivalent regular Python types.

        Delegates to netcdf_builder.get_attributes.
        """
        return netcdf_builder.get_attributes(self.netcdf_object, verbose, normalise)

    def set_attributes(self, ncdict, delval='DELETE'):
        """
        Copy attribute names and values from a dict (or OrderedDict) to the open
        netCDF object.

        Global attributes are keyed by attribute name; variable attributes are keyed
        by 'variable:attribute'. Any value equal to delval (default 'DELETE') removes
        the corresponding attribute instead of setting it, e.g.
            set_attributes({'temperature:missing_value': 'DELETE'})
        A ValueError is raised if a key refers to a variable name that is not defined
        in the netCDF object.

        Delegates to netcdf_builder.set_attributes.
        """
        netcdf_builder.set_attributes(self.netcdf_object, ncdict, delval)

    def show_dimensions(self):
        """
        Print the dimension names, lengths and whether they are unlimited.

        Delegates to netcdf_builder.show_dimensions.
        """
        netcdf_builder.show_dimensions(self.netcdf_object)

    def set_variable(self, varname, dtype='f4', dims=None, chunksize=None, fill=None, zlib=False, **kwargs):
        """
        Define (create) a variable in the open netCDF object. No data is written yet.

        dims is a tuple of previously-created dimension names (an empty tuple creates
        a scalar variable). Recommended dimension ordering is time, Z, Y, X, with any
        other dimensions to the left. chunksize is a sequence of the same length as
        dims giving the chunk extent per dimension; zlib=True enables compression.
        Default behaviour (per netcdf_builder) is an 'f4' variable with no chunking
        and no compression.

        Delegates to netcdf_builder.set_variable.
        """
        netcdf_builder.set_variable(self.netcdf_object, varname, dtype=dtype, dims=dims, chunksize=chunksize, fill=fill, zlib=zlib, **kwargs)
    def add_bounds(self, dimension_tag, bounds):
        """Add a bounds array of data to the netCDF object.

        A bounds array gives the values of the vertices corresponding to a dimension
        variable (see the CF documentation). The dimension variable requires a
        'bounds' attribute referencing the variable that holds the bounds array,
        which has the dimension's shape plus an extra axis for the vertices.

        Parameters:
            dimension_tag: dimension tag (e.g. 'X', 'T'); case-insensitive, looked up
                in storage_config to resolve the actual dimension name
            bounds: bounds array (list, tuple or NumPy array)

        The bounds variable name is always '<dimension_name>_bounds'. The heavy
        lifting (creating the bounds attribute/variable/vertex dimension, shape
        validation, date2num conversion for time) is delegated to
        netcdf_builder.add_bounds - see that function's documentation for details.
        """
        # Tags are stored upper-case in storage_config
        dimension_tag = dimension_tag.upper()
        dimension_name = self.storage_config['dimensions'][dimension_tag]['dimension_name']
        bounds_name = dimension_name + '_bounds'
        netcdf_builder.add_bounds(self.netcdf_object, dimension_name, bounds, bounds_name)
    def georeference_from_file(self, gdal_dataset_path):
        '''
        Function to set georeferencing from a template GDAL dataset: creates a 'crs'
        grid-mapping variable and writes geospatial extent/resolution attributes onto
        the open netCDF object.
        '''
        def getMinMaxExtents(samples, lines, geoTransform):
            """
            Calculates the min/max extents based on the input latitude and longitude vectors.

            :param samples:
                An integer representing the number of samples (columns) in an array.
            :param lines:
                An integer representing the number of lines (rows) in an array.
            :param geoTransform:
                A tuple containing the geotransform information returned by GDAL.
            :return:
                A tuple containing (min_lat, max_lat, min_lon, max_lon)
            :notes:
                Hasn't been tested for northern or western hemispheres.
            """
            extents = []
            x_list = [0, samples]
            y_list = [0, lines]
            # Project all four corners through the affine geotransform
            for px in x_list:
                for py in y_list:
                    x = geoTransform[0]+(px*geoTransform[1])+(py*geoTransform[2])
                    y = geoTransform[3]+(px*geoTransform[4])+(py*geoTransform[5])
                    extents.append([x, y])
            extents = np.array(extents)
            min_lat = np.min(extents[:, 1])
            max_lat = np.max(extents[:, 1])
            min_lon = np.min(extents[:, 0])
            max_lon = np.max(extents[:, 0])
            return (min_lat, max_lat, min_lon, max_lon)

        # Start of georeference_from_file(self, gdal_dataset_path) definition
        gdal_dataset = gdal.Open(gdal_dataset_path)
        assert gdal_dataset, 'Unable to open file %s' % gdal_dataset_path
        geotransform = gdal_dataset.GetGeoTransform()
        logger.debug('geotransform = %s', geotransform)
        projection = gdal_dataset.GetProjection()
        logger.debug('projection = %s', projection)
        # Set coordinate reference system metadata variable
        spatial_reference = osr.SpatialReference()
        spatial_reference.ImportFromWkt(projection)
        crs_metadata = {'crs:name': spatial_reference.GetAttrValue('geogcs'),
                        'crs:longitude_of_prime_meridian': 0.0, #TODO: This needs to be fixed!!! An OSR object should have this, but maybe only for specific OSR references??
                        'crs:inverse_flattening': spatial_reference.GetInvFlattening(),
                        'crs:semi_major_axis': spatial_reference.GetSemiMajor(),
                        'crs:semi_minor_axis': spatial_reference.GetSemiMinor(),
                        }
        # Scalar 'crs' variable acts as the CF grid_mapping container
        self.set_variable('crs', dims=(), dtype='i4')
        self.set_attributes(crs_metadata)
        logger.debug('crs_metadata = %s', crs_metadata)
        extents = getMinMaxExtents(gdal_dataset.RasterXSize, gdal_dataset.RasterYSize, geotransform)
        #pdb.set_trace()
        # ACDD-style geospatial bounds/resolution attributes
        # NOTE(review): lat/lon units assume a geographic CRS - confirm for projected datasets
        self.netcdf_object.geospatial_lat_min = extents[0]
        self.netcdf_object.geospatial_lat_max = extents[1]
        self.netcdf_object.geospatial_lat_units = 'degrees_north'
        self.netcdf_object.geospatial_lat_resolution = geotransform[5]
        self.netcdf_object.geospatial_lon_min = extents[2]
        self.netcdf_object.geospatial_lon_max = extents[3]
        self.netcdf_object.geospatial_lon_units = 'degrees_east'
        self.netcdf_object.geospatial_lon_resolution = geotransform[1]
def sync(self):
'''
Function to sync file to disk
'''
if self._isopen:
self.netcdf_object.sync()
@property
def isopen(self):
return self._isopen
| 50.807576
| 197
| 0.649331
|
4a05958a2b59f7451164ed0e7d02f546940fd835
| 4,002
|
py
|
Python
|
test/test_acceptance.py
|
jmikedupont2/pythoscope
|
58a1149f204897e8f789d93ee7e49b6db0bd346f
|
[
"MIT"
] | 2
|
2020-04-06T11:02:46.000Z
|
2020-05-14T18:37:04.000Z
|
test/test_acceptance.py
|
jmikedupont2/pythoscope
|
58a1149f204897e8f789d93ee7e49b6db0bd346f
|
[
"MIT"
] | null | null | null |
test/test_acceptance.py
|
jmikedupont2/pythoscope
|
58a1149f204897e8f789d93ee7e49b6db0bd346f
|
[
"MIT"
] | null | null | null |
from pythoscope.inspector import inspect_project
from pythoscope.generator import add_tests_to_project
from pythoscope.util import read_file_contents, write_content_to_file
from nose import SkipTest
from .assertions import *
from .helper import get_test_module_contents, CapturedLogger, \
ProjectInDirectory, putfile, TempDirectory, read_data
class TestStaticAnalysis(CapturedLogger, TempDirectory):
    def test_generates_test_stubs(self):
        """End-to-end: static inspection of a module produces the expected unittest stubs."""
        expected_result = read_data("static_analysis_output.py")
        project = ProjectInDirectory(self.tmpdir)
        module_path = putfile(project.path, "module.py", read_data("static_analysis_module.py"))
        inspect_project(project)
        add_tests_to_project(project, [module_path], 'unittest')
        result = get_test_module_contents(project)
        assert_equal_strings(expected_result, result)
class TestAppendingTestClasses(CapturedLogger, TempDirectory):
    def test_appends_test_classes_to_existing_test_modules(self):
        """New test classes are appended when the app module gains new definitions."""
        self._test_appending("appending_test_cases_module_modified.py",
                             "appending_test_cases_output_expected.py")

    def test_appends_test_methods_to_existing_test_classes(self):
        """New test methods are appended into an already-existing test class."""
        self._test_appending("appending_test_cases_module_added_method.py",
                             "appending_test_cases_added_method_output_expected.py")

    def _test_appending(self, modified_input, expected_output):
        """Shared scenario: inspect, modify the app module, re-inspect, regenerate,
        then compare the regenerated test module against the expected fixture."""
        project = ProjectInDirectory(self.tmpdir)
        module_path = putfile(project.path, "module.py", read_data("appending_test_cases_module_initial.py"))
        test_module_path = putfile(project.path, "test_module.py", read_data("appending_test_cases_output_initial.py"))
        # Analyze the project with an existing test module.
        inspect_project(project)
        # Filesystem stat has resolution of 1 second, and we don't want to
        # sleep in a test, so we just fake the original files creation time.
        project["module"].created = 0
        project["test_module"].created = 0
        # Modify the application module and analyze it again.
        putfile(project.path, "module.py", read_data(modified_input))
        inspect_project(project)
        # Regenerate the tests.
        add_tests_to_project(project, [module_path], 'unittest')
        project.save()
        assert_length(project.get_modules(), 2)
        result = read_file_contents(test_module_path)
        expected_result = read_data(expected_output)
        assert_equal_strings(expected_result, result)
class TestAcceptanceWithPointOfEntry(CapturedLogger, TempDirectory):
    def execute_with_point_of_entry_and_assert(self, id):
        """Run dynamic inspection via a point-of-entry script for fixture set *id*
        ('<id>_module.py' in, '<id>_output.py' expected) and compare generated tests."""
        expected_result = read_data("%s_output.py" % id)
        project = ProjectInDirectory(self.tmpdir).with_points_of_entry(["poe.py"])
        module_path = putfile(project.path, "module.py", read_data("%s_module.py" % id))
        write_content_to_file(read_data("generic_acceptance_poe.py"), project.path_for_point_of_entry("poe.py"))
        inspect_project(project)
        add_tests_to_project(project, [module_path], 'unittest')
        result = get_test_module_contents(project)
        assert_equal_strings(expected_result, result)
# Each subclass below exercises one dynamic-analysis scenario through the shared
# execute_with_point_of_entry_and_assert helper; the argument names the fixture set.
class TestObjectsIdentityPreservation(TestAcceptanceWithPointOfEntry):
    def test_preserves_identity_of_objects(self):
        self.execute_with_point_of_entry_and_assert("objects_identity")

class TestSideEffectsCaptureAndGeneration(TestAcceptanceWithPointOfEntry):
    def test_captures_and_generates_tests_for_code_with_side_effects_on_lists(self):
        self.execute_with_point_of_entry_and_assert("side_effects_on_lists")

class TestGlobalVariables(TestAcceptanceWithPointOfEntry):
    def test_handles_global_variables(self):
        self.execute_with_point_of_entry_and_assert("global_variables")

class TestAttributesRebind(TestAcceptanceWithPointOfEntry):
    def test_handles_attribute_rebind(self):
        self.execute_with_point_of_entry_and_assert("attributes_rebind")
| 45.477273
| 119
| 0.76037
|
4a0595ecf104c35dd89de5d5f675d93575f6480f
| 7,411
|
py
|
Python
|
step.py
|
andrewpo456/pi-thrum
|
dcbb3576bdff7784ce6ad04e554209593db73e3e
|
[
"MIT"
] | 11
|
2017-11-01T01:21:37.000Z
|
2021-12-05T15:20:19.000Z
|
step.py
|
andrewpo456/pi-thrum
|
dcbb3576bdff7784ce6ad04e554209593db73e3e
|
[
"MIT"
] | null | null | null |
step.py
|
andrewpo456/pi-thrum
|
dcbb3576bdff7784ce6ad04e554209593db73e3e
|
[
"MIT"
] | 1
|
2020-11-05T18:03:08.000Z
|
2020-11-05T18:03:08.000Z
|
"""
This module defines the functionality behind running the 12-step sequencer.
For information regarding the circuit setup please refer to 'pi-thrum-schem'.
@author Andrew Pope
"""
import RPi.GPIO as GPIO
import pygame
import time
class Step:
    # Map the GPIO pins (BCM numbering) to each button.
    # Note:
    #  * Buttons 0 - 5, play sound AND are used as part of the step seq
    #  * Buttons 6 - 11, are for the step seq only
    __soundBNTs = [ 4, 18, 17, 27, 22, 23 ]  # Sound-trigger buttons (double as steps 0-5)
    __stepBNTs = [ 24, 25, 5, 6, 12, 13 ]    # Step-only buttons (steps 6-11)
    # All 12 step channels in order (sound buttons first, then step-only buttons)
    __stepChannels = [ 4, 18, 17, 27, 22, 23, 24, 25, 5, 6, 12, 13 ]
    __playBNT = 19  # Play/stop toggle button
    __recBNT = 16   # Record toggle button
    __LED = 26      # Status LED output pin
    def __init__(self, verbose=False, bpm=120000.0):
        """
        Initialise class variables, GPIO, and sound.

        @param verbose - True for verbose print statements
        @param bpm - Beats per minute. NOTE(review): the default of 120000.0 and the
                     formula stepTime = 15000.0 / bpm (seconds per step) suggest this
                     value is pre-scaled rather than literal BPM - confirm units.
        """
        # Initialise class variables
        self.__verbose = verbose
        self.__playSteps = False   # True while the sequencer is playing
        self.__recording = False   # True while record mode is armed
        self.__bpm = bpm
        self.__stepTime = 15000.0 / bpm  # Seconds between consecutive steps
        self.__stepPatterns = []   # One pattern (list of sounds) per step
        self.__samples = []        # Loaded pygame Sound objects
        self.__currSamp = None     # Sample currently selected for recording
        # Initialise pattern for each step (i.e. what sounds will play)
        for i in range(12):
            self.__stepPatterns.append([None])
        # Initialise GPIO and sound samples
        self.__GPIOInit()
        self.__soundInit()
    def run(self):
        """
        Runs the main program (step-sequencer) when invoked. Button handling happens
        on GPIO callback threads; this loop only advances the 12-step playback cursor.
        """
        # Initialise callbacks - which will start multi-threading
        self.__initCBs()
        step = -1                 # Start before step 0 so the first advance lands on it
        next_time = time.time()   # Absolute deadline for the next step
        # Begin main loop - will halt when user supplies CTRL+C
        # NOTE(review): this is a busy-wait loop (no sleep), so it pins a CPU core
        while True:
            if self.__playSteps:
                if time.time() >= next_time:
                    step = (step + 1) % 12
                    self.__playPattern(self.__stepPatterns[step])
                    # Advance the deadline rather than re-reading the clock, to avoid drift
                    next_time += self.__stepTime
    def cleanup(self):
        """
        Cleanup method which should be invoked before program exit:
        shuts down pygame, turns the status LED off, and releases GPIO pins.
        """
        # Destroy pygame objects and de-init GPIO pins
        pygame.quit()
        GPIO.output(self.__LED, GPIO.LOW)
        GPIO.cleanup()
def __playPattern(self, pattern):
"""
Plays a collection of sounds called a 'pattern'
@param pattern - The collection of sounds
"""
for sound in pattern:
if sound != None: sound.play()
def __GPIOInit(self):
"""
Initialises the GPIO pins for the pi
(tested on the Pi3 Model B+)
"""
# Set mode PIN numbering to BCM, and define GPIO pin functions
GPIO.setmode(GPIO.BCM)
# Setup Function for input Pins
inputBNTs = (self.__soundBNTs + self.__stepBNTs)
inputBNTs.append(self.__playBNT)
inputBNTs.append(self.__recBNT)
for b in inputBNTs:
GPIO.setup(b, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Func for ouput Pins
GPIO.setup(self.__LED, GPIO.OUT)
def __soundInit(self):
"""
Initialises the pygame module and loads the sound samples
"""
# Initialise pygame module
pygame.mixer.pre_init(44100, -16, 12, 512) # TODO: Tweak values?
pygame.init()
# Load sounds from samples folder
self.__samples.append(pygame.mixer.Sound('samples/Blop-01.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Glitch-02.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Vocal-03.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Noise-04.wav'))
self.__samples.append(pygame.mixer.Sound('samples/SFX-05.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Strike-06.wav'))
for sample in self.__samples:
sample.set_volume(.95)
def __initCBs(self):
"""
Initialises the Callback functions for each Input IO pin
"""
# Sound Button Callbacks:
for i in range(6):
bnt = self.__soundBNTs[i]
smp = self.__samples[i]
GPIO.add_event_detect(bnt, GPIO.RISING, callback=lambda x,y=smp:
self.__soundCB(x, y), bouncetime=200)
# Step Button Callbacks:
for bnt in self.__stepBNTs:
GPIO.add_event_detect(bnt, GPIO.RISING, callback=lambda x:
self.__stepCB(x), bouncetime=200)
# Play Button Callback:
GPIO.add_event_detect(self.__playBNT, GPIO.RISING, callback=lambda x:
self.__playCB(x), bouncetime=200)
# Record Button Callback:
GPIO.add_event_detect(self.__recBNT, GPIO.RISING, callback=lambda x:
self.__recCB(x), bouncetime=200)
def __soundCB(self, channel, sound):
"""
Callback for sound button (a sound button also doubles as a step
button)
@param channel - The GPIO PIN that the signal was sent on
@param sound - The sound to play
"""
step = self.__stepChannels.index(channel)
self.__prtVerb("Sound bnt IO-{0}, Step={1}".format(channel, step))
if self.__recording:
self.__toggleStepSample(step, self.__currSamp)
else:
sound.play()
self.__currSamp = sound
def __stepCB(self, channel):
"""
Callback for step button
@param channel - The GPIO PIN that the signal was sent on
"""
step = self.__stepChannels.index(channel)
self.__prtVerb("Step bnt IO-{0}, Step={1}".format(channel, step))
if self.__recording:
self.__toggleStepSample(step, self.__currSamp)
def __playCB(self, channel):
"""
Callback for play button
@param channel - The GPIO PIN that the signal was sent on
"""
self.__prtVerb("Play bnt IO-{0}".format(channel))
self.__playSteps = not self.__playSteps # Toggle playing
def __recCB(self, channel):
"""
Callback for record button
@param channel - The GPIO PIN that the signal was sent on
"""
self.__prtVerb("Record bnt IO-{0}".format(channel))
GPIO.output(self.__LED, not GPIO.input(self.__LED)) # Toggle LED
self.__recording = not self.__recording # Toggle recording
def __toggleStepSample(self, step, sample):
"""
Will either add or remove a sound sample to/from a step 'pattern'
@param step - The step to check
@param sample - The sample to add or remove
"""
# Determine if the currently selected sample is 'on' the step
# if so - remove it, if not - add it
if sample in self.__stepPatterns[step]:
self.__stepPatterns[step].remove(sample)
else:
self.__stepPatterns[step].append(sample)
def __prtVerb(self, mesg):
"""
Verbose message print method
@param mesg - The message to print
"""
if self.__verbose:
print(mesg)
| 34.152074
| 77
| 0.564431
|
4a05961e2c1ac44a4693fce5918d35e34e34d7ca
| 2,707
|
py
|
Python
|
tools/tensorflow_pb_tester.py
|
hequn/keras-transfer-learning
|
194e98db7d9eb2bd58ab51f67e98470635bbb215
|
[
"MIT"
] | 6
|
2018-11-28T06:32:58.000Z
|
2021-12-28T12:43:36.000Z
|
tools/tensorflow_pb_tester.py
|
hequn/keras-transfer-learning
|
194e98db7d9eb2bd58ab51f67e98470635bbb215
|
[
"MIT"
] | 3
|
2019-01-08T07:35:41.000Z
|
2019-11-28T03:13:03.000Z
|
tools/tensorflow_pb_tester.py
|
hequn/keras-transfer-learning
|
194e98db7d9eb2bd58ab51f67e98470635bbb215
|
[
"MIT"
] | 2
|
2019-02-18T04:28:20.000Z
|
2020-05-08T10:22:26.000Z
|
from tensorflow.python.platform import gfile
import tensorflow as tf
import numpy as np
import os
from PIL import Image
import config
import util
from sklearn.externals import joblib
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # hide all GPUs: force CPU-only inference
# load the model which will be executed
config.model = 'antnet'
util.set_img_format()
model_module = util.get_model_class_instance()
# Class-name <-> index mapping persisted at training time.
config.classes = joblib.load(config.get_classes_path())
tfconfig = tf.ConfigProto(allow_soft_placement=True)
# tfconfig.gpu_options.allow_growth=True
pb_model_path = "pbsave/frozen_test_ant.pb"
sess = tf.Session(config=tfconfig)
# load back the pb file (frozen GraphDef) into this session's graph
with gfile.FastGFile(pb_model_path, "rb") as f:
    output_graph_def = tf.GraphDef()
    output_graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(output_graph_def, name="")
# variable_name = [v.name for v in tf.all_variables()]
# f = open("pbsave/tensor_restore.txt", "w+")
# print(output_graph_def, file=f)
# init session
sess.run(tf.global_variables_initializer())
# get the key tensors (names must match the frozen graph exactly)
input1 = sess.graph.get_tensor_by_name('input_1:0')
input2 = sess.graph.get_tensor_by_name("input_2:0")
classify_dense = sess.graph.get_tensor_by_name("classify_dense/Softmax:0")
# noticed: center loss version will have the embedding_1/embedding_lookup:0 as the center vector according to the label,
# and embedding/Elu:0 as the image feature
# triplet loss version will have it as the image feature vector;
embedding_1 = sess.graph.get_tensor_by_name("embedding_1/embedding_lookup:0")
def load_image_into_numpy_array(image, im_width, im_height, input_type):
    """Convert a PIL-style image into an (im_height, im_width, 3) array.

    Parameters
    ----------
    image : object with a ``getdata()`` method returning the flat pixel data
    im_width : int
    im_height : int
    input_type : numpy dtype for the returned array

    Returns
    -------
    np.ndarray of shape (im_height, im_width, 3) and dtype ``input_type``
    """
    flat_pixels = np.array(image.getdata())
    grid = flat_pixels.reshape((im_height, im_width, 3))
    return grid.astype(input_type)
# load the image and resize to fixed size, together preprocess it
img = Image.open('pbsave/test_class_101217_1.jpg')
# Resize to the network's fixed 224x224 input resolution.
img = img.resize((224,224))
input_data = load_image_into_numpy_array(img, 224, 224, np.float32)
def preprocess_input(x):
    """Rescale pixel values from [0, 255] into [-1, 1] (Inception-style).

    Augmented assignment is used deliberately: when *x* is a numpy array
    it is normalized in place and the very same object is returned.
    """
    # Same operation order as ((x / 255) - 0.5) * 2, applied step by step.
    x /= 255.0
    x -= 0.5
    x *= 2.0
    return x
# expand the dim
# Add the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
input_data = np.expand_dims(preprocess_input(input_data), axis=0)
classes_in_keras_format = util.get_classes_in_keras_format()
# prepare the dict and run the session
# NOTE(review): input2 appears to be a label index feeding the
# embedding_1 lookup (center-loss center vector) — confirm semantics.
feed_dict = {input1: input_data, input2: [[1]]}
classify_result, embedding = sess.run([classify_dense, embedding_1], feed_dict=feed_dict)
# print the results
print(classes_in_keras_format.keys(), classes_in_keras_format.values())
print(classify_result, embedding)
#print(list(classes_in_keras_format.keys())[list(classes_in_keras_format.values()).index(np.argmax(classify_result[0], axis=1))])
# Reverse-lookup: map the argmax class index back to its class name.
print(list(classes_in_keras_format.keys())[list(classes_in_keras_format.values()).index(np.argmax(classify_result[0]))])
| 37.597222
| 129
| 0.773181
|
4a0596cc07d5bf38001eea3f11bfb094789a554b
| 2,243
|
py
|
Python
|
Programas_1/sagj_bcd.py
|
ShosuaXD/ProgramasFSEmb
|
58fabc3b080cd2423e1b3d7d77c23b98a207316b
|
[
"MIT"
] | null | null | null |
Programas_1/sagj_bcd.py
|
ShosuaXD/ProgramasFSEmb
|
58fabc3b080cd2423e1b3d7d77c23b98a207316b
|
[
"MIT"
] | null | null | null |
Programas_1/sagj_bcd.py
|
ShosuaXD/ProgramasFSEmb
|
58fabc3b080cd2423e1b3d7d77c23b98a207316b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ################################
# Author: Santillan Garcia Josue
# Codigo modificado de bcd.py
# ################################
# Future imports (Python 2.7 compatibility)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import Raspberry Pi's GPIO control library
import RPi.GPIO as GPIO
# Imports sleep functon
from time import sleep
# Initializes virtual board (comment for hardware deploy)
import virtualboard
# Disable warnings
# GPIO.setwarnings(False)
# Set up Rpi.GPIO library to use physical pin numbers
GPIO.setmode(GPIO.BOARD)
# Set up pins 36, 38, 40 and 37 as output and default them to low
# (these feed the BCD inputs of the 74LS47 7-segment driver, see bcd7()).
GPIO.setup(36, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(38, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(40, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(37, GPIO.OUT, initial=GPIO.LOW)
# Configure the pins driving the four indicator LEDs (see numToBin()).
GPIO.setup(10, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(16, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(18, GPIO.OUT, initial=GPIO.LOW)
def bcd7(num):
    """Drive the 74LS47 BCD inputs (pins 36/38/40/37) with the low nibble of num."""
    # (pin, bit mask) pairs, least significant bit first — same output
    # order as writing each pin individually.
    for pin, mask in ((36, 0x00000001), (38, 0x00000002),
                      (40, 0x00000004), (37, 0x00000008)):
        GPIO.output(pin, GPIO.HIGH if num & mask else GPIO.LOW)
# Helper that mirrors the entered number in binary on the last four LEDs.
def numToBin(num):
    """Mirror the low nibble of num on the four indicator LEDs (pins 18/16/12/10)."""
    led_bits = ((18, 0x00000001), (16, 0x00000002),
                (12, 0x00000004), (10, 0x00000008))
    for pin, mask in led_bits:
        GPIO.output(pin, GPIO.HIGH if num & mask else GPIO.LOW)
flag = True
while flag:
    try:
        numero = int(input("Dame un numero del 0 al 15: ")) # prompt for a number between 0 and 15
        bcd7(numero) # drive the 74LS47 BCD display driver pins
        numToBin(numero) # mirror the number in binary on the four LEDs
    # NOTE(review): the bare except is the exit path — any non-numeric
    # input (or Ctrl+C) ends the loop. It also hides real errors; a
    # narrower (ValueError, KeyboardInterrupt) may be intended — confirm.
    except:
        flag = False
GPIO.cleanup()
| 38.016949
| 121
| 0.705305
|
4a05970ef5a40fd9d697ce0de9a42cdde1e268e5
| 568
|
gyp
|
Python
|
binding.gyp
|
orta/apple-unified-logging-node
|
c9a590b5c96651390ff32434b7e5b25d1af33fb3
|
[
"MIT"
] | 4
|
2020-04-16T12:57:47.000Z
|
2020-04-18T09:12:26.000Z
|
binding.gyp
|
orta/apple-unified-logging-node
|
c9a590b5c96651390ff32434b7e5b25d1af33fb3
|
[
"MIT"
] | null | null | null |
binding.gyp
|
orta/apple-unified-logging-node
|
c9a590b5c96651390ff32434b7e5b25d1af33fb3
|
[
"MIT"
] | null | null | null |
{
"make_global_settings": [
["CC", "/usr/bin/clang"],
["CXX", "/usr/bin/clang++"]
],
"targets": [
{
"target_name": "OSLogger",
"sources": ["OSLogger.mm", "functions.cc"],
"include_dirs": ["<!(node -e \"require('nan')\")"],
"xcode_settings": {
"CC": "clang",
"MACOSX_DEPLOYMENT_TARGET": "10.14",
"CLANG_CXX_LIBRARY": "libc++",
"OTHER_CPLUSPLUSFLAGS" : ["-std=gnu++14", "-stdlib=libc++"],
"OTHER_CFLAGS": [
"-ObjC",
"-xobjective-c"
],
}
}
]
}
| 22.72
| 68
| 0.463028
|
4a05974558a39a975df0076bdbff99ff9b099883
| 22,548
|
py
|
Python
|
emiproc/grids.py
|
jmhaussaire/cosmo-emission-processing
|
bb05ca5e6d26836a89929369ba38f2bc8151e4a2
|
[
"CC-BY-4.0"
] | null | null | null |
emiproc/grids.py
|
jmhaussaire/cosmo-emission-processing
|
bb05ca5e6d26836a89929369ba38f2bc8151e4a2
|
[
"CC-BY-4.0"
] | null | null | null |
emiproc/grids.py
|
jmhaussaire/cosmo-emission-processing
|
bb05ca5e6d26836a89929369ba38f2bc8151e4a2
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Classes handling different grids, namely the COSMO simulation grid and
grids used in different emissions inventories.
"""
import numpy as np
import cartopy.crs as ccrs
from netCDF4 import Dataset
from shapely.geometry import Polygon
class Grid:
    """Abstract base class for a grid.

    Concrete grids derive from this class and implement the abstract
    methods (``cell_corners``, ``lon_range``, ``lat_range``); see
    TNOGrid for an example implementation.
    """

    def __init__(self, name, projection):
        """
        Parameters
        ----------
        name : str
            name of the inventory
        projection : cartopy.crs.Projection
            Projection used for the inventory grid. Used to transform
            points to other coordinate systems.
        """
        self.name = name
        self.projection = projection

    def get_projection(self):
        """Return the projection used by this grid."""
        return self.projection

    def cell_corners(self, i, j):
        """Return the corners of the cell with indices (i,j).

        The points are ordered clockwise, starting in the top left:

            4. 1.
            ^  v
            3. < 2.

        The coordinates are expressed in the grid's own projection, so
        callers may have to transform them, e.g. to plain (lon, lat):

        >>> corners = ccrs.PlateCarree().transform_points(
        ...     grid.get_projection(),
        ...     *grid.cell_corners(i,j)
        ... )

        (The two-array return type is what
        cartopy.crs.CRS.transform_points expects downstream.)

        Parameters
        ----------
        i : int
        j : int

        Returns
        -------
        tuple(np.array(shape=(4,), dtype=float),
              np.array(shape=(4,), dtype=float))
            x coordinates and y coordinates of the four corners
        """
        raise NotImplementedError("Method not implemented")

    def lon_range(self):
        """Return an array of all longitudinal points on the grid.

        Returns
        -------
        np.array(shape=(nx,), dtype=float)
        """
        raise NotImplementedError("Method not implemented")

    def lat_range(self):
        """Return an array of all latitudinal points on the grid.

        Returns
        -------
        np.array(shape=(ny,), dtype=float)
        """
        raise NotImplementedError("Method not implemented")
class TNOGrid(Grid):
    """Grid of the TNO emission inventory.

    A regular lat/lon grid whose stored coordinates are cell centers.
    """

    def __init__(self, dataset_path, name="TNO"):
        """Read the grid description from the netcdf dataset.

        Parameters
        ----------
        dataset_path : str
        name : str, optional
        """
        self.dataset_path = dataset_path
        with Dataset(dataset_path) as dataset:
            self.lon_var = np.array(dataset["longitude"][:])
            self.lat_var = np.array(dataset["latitude"][:])
        self.nx = len(self.lon_var)
        self.ny = len(self.lat_var)
        # The lat/lon values are the cell-centers, so the spacing is the
        # total extent divided by the number of gaps.
        self.dx = (self.lon_var[-1] - self.lon_var[0]) / (self.nx - 1)
        self.dy = (self.lat_var[-1] - self.lat_var[0]) / (self.ny - 1)
        # Precompute corner coordinates: half a cell away from each
        # center, ordered clockwise from the top right (see Grid).
        lon = self.lon_var
        lat = self.lat_var
        half_dx = self.dx / 2
        half_dy = self.dy / 2
        self.cell_x = np.array(
            [lon + half_dx, lon + half_dx, lon - half_dx, lon - half_dx]
        )
        self.cell_y = np.array(
            [lat + half_dy, lat - half_dy, lat - half_dy, lat + half_dy]
        )
        super().__init__(name, ccrs.PlateCarree())

    def cell_corners(self, i, j):
        """Return the corners of the cell with indices (i,j).

        See also the docstring of Grid.cell_corners.

        Parameters
        ----------
        i : int
        j : int

        Returns
        -------
        tuple(np.array(shape=(4,), dtype=float),
              np.array(shape=(4,), dtype=float))
            Arrays containing the x and y coordinates of the corners
        """
        return self.cell_x[:, i], self.cell_y[:, j]

    def lon_range(self):
        """Return an array of all longitudinal points on the grid.

        Returns
        -------
        np.array(shape=(nx,), dtype=float)
        """
        return self.lon_var

    def lat_range(self):
        """Return an array of all latitudinal points on the grid.

        Returns
        -------
        np.array(shape=(ny,), dtype=float)
        """
        return self.lat_var
class EDGARGrid(Grid):
    """Grid of the EDGAR emission inventory.

    Like the TNO grid this is a regular lat/lon grid, but here the
    stored gridpoints are the lower left corners of the cells rather
    than their centers.
    """

    xmin: float
    xmax: float
    ymin: float
    ymax: float
    dx: float
    dy: float

    def __init__(self, xmin, xmax, ymin, ymax, dx, dy, name="EDGAR"):
        """Store the grid information.

        Parameters
        ----------
        xmin : float
            Longitude of bottom left gridpoint in degrees
        xmax : float
            Longitude of top right gridpoint in degrees
        ymin : float
            Latitude of bottom left gridpoint in degrees
        ymax : float
            Latitude of top right gridpoint in degrees
        dx : float
            Longitudinal size of a gridcell in degrees
        dy : float
            Latitudinal size of a gridcell in degrees
        name : str, optional
        """
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax
        self.dx = dx
        self.dy = dy
        self.lon_vals = np.arange(self.xmin, self.xmax, self.dx)
        self.lat_vals = np.arange(self.ymin, self.ymax, self.dy)
        # Gridpoints are lower-left corners, so a cell extends one full
        # step up and to the right of its gridpoint. Corner order is
        # clockwise from the top right (see Grid.cell_corners).
        lons = self.lon_vals
        lats = self.lat_vals
        self.cell_x = np.array([lons + self.dx, lons + self.dx, lons, lons])
        self.cell_y = np.array([lats + self.dy, lats, lats, lats + self.dy])
        super().__init__(name, ccrs.PlateCarree())

    def cell_corners(self, i, j):
        """Return the corners of the cell with indices (i,j).

        See also the docstring of Grid.cell_corners.

        Parameters
        ----------
        i : int
        j : int

        Returns
        -------
        tuple(np.array(shape=(4,), dtype=float),
              np.array(shape=(4,), dtype=float))
            Arrays containing the x and y coordinates of the corners
        """
        return self.cell_x[:, i], self.cell_y[:, j]

    def lon_range(self):
        """Return an array of all longitudinal points on the grid.

        Returns
        -------
        np.array(shape=(nx,), dtype=float)
        """
        return self.lon_vals

    def lat_range(self):
        """Return an array of all latitudinal points on the grid.

        Returns
        -------
        np.array(shape=(ny,), dtype=float)
        """
        return self.lat_vals
class VPRMGrid(Grid):
    """Grid of the VPRM emission inventory.

    The grid projection is LambertConformal with a nonstandard globe,
    so generating the gridcell corners takes a bit of work, and the
    gridcell sizes cannot simply be read from a dataset.

    Be careful: the lon_range/lat_range methods return gridpoint
    coordinates in the grid projection (they likely have to be
    transformed before use).
    """

    def __init__(self, dataset_path, dx, dy, name):
        """Store the grid information.

        Parameters
        ----------
        dataset_path : str
            Is used to read the gridcell coordinates
        dx : float
            Longitudinal size of a gridcell in meters
        dy : float
            Latitudinal size of a gridcell in meters
        name : str, optional
        """
        self.dx = dx
        self.dy = dy
        projection = ccrs.LambertConformal(
            central_longitude=12.5,
            central_latitude=51.604,
            standard_parallels=[51.604],
            globe=ccrs.Globe(
                ellipse=None, semimajor_axis=6370000, semiminor_axis=6370000
            ),
        )
        # The dataset stores distorted lat/lon values; project them into
        # LambertConformal, where the grid is regular and rectangular.
        # One row / one column is enough to recover the axis values.
        with Dataset(dataset_path) as dataset:
            raw_lon = np.array(dataset["lon"][:])
            raw_lat = np.array(dataset["lat"][:])
        self.lon_vals = projection.transform_points(
            ccrs.PlateCarree(), raw_lon[0, :], raw_lat[0, :]
        )[:, 0]
        self.lat_vals = projection.transform_points(
            ccrs.PlateCarree(), raw_lon[:, 0], raw_lat[:, 0]
        )[:, 1]
        # Precompute cell corners, clockwise from the top right.
        half_dx = self.dx / 2
        half_dy = self.dy / 2
        cx = self.lon_vals
        cy = self.lat_vals
        self.cell_x = np.array(
            [cx + half_dx, cx + half_dx, cx - half_dx, cx - half_dx]
        )
        self.cell_y = np.array(
            [cy + half_dy, cy - half_dy, cy - half_dy, cy + half_dy]
        )
        super().__init__(name, projection)

    def cell_corners(self, i, j):
        """Return the corners of the cell with indices (i,j).

        See also the docstring of Grid.cell_corners.

        Parameters
        ----------
        i : int
        j : int

        Returns
        -------
        tuple(np.array(shape=(4,), dtype=float),
              np.array(shape=(4,), dtype=float))
            Arrays containing the x and y coordinates of the corners
        """
        return self.cell_x[:, i], self.cell_y[:, j]

    def lon_range(self):
        """Return an array of all longitudinal points on the grid.

        Returns
        -------
        np.array(shape=(nx,), dtype=float)
        """
        return self.lon_vals

    def lat_range(self):
        """Return an array of all latitudinal points on the grid.

        Returns
        -------
        np.array(shape=(ny,), dtype=float)
        """
        return self.lat_vals
class SwissGrid(Grid):
    """Represent a grid used by swiss inventories, such as meteotest, maiolica
    or carbocount."""
    nx: int
    ny: int
    dx: float
    dy: float
    xmin: float
    ymin: float
    def __init__(
        self,
        name,
        nx,
        ny,
        dx,
        dy,
        xmin,
        ymin,
    ):
        """Store the grid information.
        Swiss grids use LV03 coordinates, which switch the axes:
        x <-> Northing
        y <-> Easting
        For consistency with the other Grids, we use:
        x <-> Longitude ~ "swiss y"
        y <-> Latitude ~ "swiss x"
        Thus, a header of a .asc file translates as follows:
        ncols -> nx
        nrows -> ny
        xllcorner -> ymin
        yllcorner -> xmin
        cellsize -> dx, dy
        Parameters
        ----------
        dx : float
            EASTERLY size of a gridcell in meters
        dy : float
            NORTHLY size of a gridcell in meters
        nx : int
            Number of cells in EASTERLY direction
        ny : int
            Number of cells in NORTHLY direction
        xmin : float
            EASTERLY distance of bottom left gridpoint in meters
        ymin : float
            NORTHLY distance of bottom left gridpoint in meters
        """
        self.nx = nx
        self.ny = ny
        self.dx = dx
        self.dy = dy
        self.xmin = xmin
        self.ymin = ymin
        # The swiss grid is not technically using a PlateCarree projection
        # (in fact it is not using any projection implemented by cartopy),
        # however the points returned by the cell_corners() method are in
        # WGS84, which PlateCarree defaults to.
        super().__init__(name, ccrs.PlateCarree())
    def cell_corners(self, i, j):
        """Return the corners of the cell with indices (i,j).
        Corners are converted from LV03 meters to WGS84 (lon, lat).
        See also the docstring of Grid.cell_corners.
        Parameters
        ----------
        i : int
        j : int
        Returns
        -------
        tuple(np.array(shape=(4,), dtype=float),
              np.array(shape=(4,), dtype=float))
            Arrays containing the x and y coordinates of the corners
        """
        # (x1, y1): lower-left corner; (x2, y2): upper-right corner.
        x1, y1 = self._LV03_to_WGS84(
            self.xmin + i * self.dx, self.ymin + j * self.dy
        )
        x2, y2 = self._LV03_to_WGS84(
            self.xmin + (i + 1) * self.dx, self.ymin + (j + 1) * self.dy
        )
        cell_x = np.array([x2, x2, x1, x1])
        cell_y = np.array([y2, y1, y1, y2])
        return cell_x, cell_y
    def lon_range(self):
        """Return an array containing all the longitudinal points on the grid.
        NOTE(review): unlike cell_corners, these values stay in LV03
        meters (no WGS84 conversion) — confirm callers expect that.
        Returns
        -------
        np.array(shape=(nx,), dtype=float)
        """
        return np.array([self.xmin + i * self.dx for i in range(self.nx)])
    def lat_range(self):
        """Return an array containing all the latitudinal points on the grid.
        NOTE(review): values are in LV03 meters, see lon_range.
        Returns
        -------
        np.array(shape=(ny,), dtype=float)
        """
        return np.array([self.ymin + j * self.dy for j in range(self.ny)])
    def _LV03_to_WGS84(self, y, x):
        """Convert LV03 to WSG84.
        Based on swisstopo approximated solution (0.1" accuracy)
        For better comparability with other implementations, here:
        x <-> Northing
        y <-> Easting,
        contrary to the rest of this class.
        Parameters
        ----------
        y : float
            y coordinate in meters
        x : float
            x coordinate in meters
        Returns
        -------
        tuple(float, float)
            The coordinates of the point in WGS84 (lon, lat)
        """
        # Shift to the Bern-centered auxiliary coordinates (in 1000 km)
        # that the swisstopo polynomial expects.
        x = (x - 200_000) / 1_000_000
        y = (y - 600_000) / 1_000_000
        # Polynomial approximation; the /0.36 converts from 10000" to degrees.
        lon = (
            2.6779094
            + 4.728982 * y
            + 0.791484 * y * x
            + 0.1306 * y * x ** 2
            - 0.0436 * y ** 3
        ) / 0.36
        lat = (
            16.9023892
            + 3.238272 * x
            - 0.270978 * y ** 2
            - 0.002528 * x ** 2
            - 0.0447 * y ** 2 * x
            - 0.0140 * x ** 3
        ) / 0.36
        return lon, lat
class COSMOGrid(Grid):
    """Class to manage a COSMO-domain (regular grid in rotated-pole coordinates)"""
    nx: int
    ny: int
    dx: float
    dy: float
    xmin: float
    ymin: float
    pollon: float
    pollat: float
    def __init__(self, nx, ny, dx, dy, xmin, ymin, pollon, pollat):
        """Store the grid information.
        Parameters
        ----------
        nx : int
            Number of cells in longitudinal direction
        ny : int
            Number of cells in latitudinal direction
        dx : float
            Longitudinal size of a gridcell in degrees
        dy : float
            Latitudinal size of a gridcell in degrees
        xmin : float
            Longitude of bottom left gridpoint in degrees
        ymin : float
            Latitude of bottom left gridpoint in degrees
        pollon : float
            Longitude of the rotated pole
        pollat : float
            Latitude of the rotated pole
        """
        self.nx = nx
        self.ny = ny
        self.dx = dx
        self.dy = dy
        self.xmin = xmin
        self.ymin = ymin
        self.pollon = pollon
        self.pollat = pollat
        # cell corners: gridpoints are cell centers, corners lie half a
        # cell away, ordered clockwise from the top right (see Grid).
        x = self.xmin + np.arange(self.nx) * self.dx
        y = self.ymin + np.arange(self.ny) * self.dy
        dx2 = self.dx / 2
        dy2 = self.dy / 2
        self.cell_x = np.array([x + dx2, x + dx2, x - dx2, x - dx2])
        self.cell_y = np.array([y + dy2, y - dy2, y - dy2, y + dy2])
        super().__init__(
            "COSMO",
            ccrs.RotatedPole(pole_longitude=pollon, pole_latitude=pollat),
        )
    def gridcell_areas(self):
        """Calculate 2D array of the areas (m^2) of a regular rectangular grid
        on earth.
        Returns
        -------
        np.array
            2D array containing the areas of the gridcells in m^2
            shape: (nx, ny)
        """
        radius = 6375000.0  # the earth radius in meters
        dlon = np.deg2rad(self.dx)
        dlat = np.deg2rad(self.dy)
        # Cell area at equator
        dd = 2.0 * pow(radius, 2) * dlon * np.sin(0.5 * dlat)
        # Cell areas in y-direction (scaled by cos(latitude) of each row)
        areas = dd * np.cos(np.deg2rad(self.ymin) + np.arange(self.ny) * dlat)
        # Every row of the (nx, ny) result shares the same latitude profile.
        return np.broadcast_to(areas, (self.nx, self.ny))
    def lon_range(self):
        """Return an array containing all the longitudinal points on the grid.
        Returns
        -------
        np.array(shape=(nx,), dtype=float)
        """
        # Because of floating point math the original arange is not guaranteed
        # to contain the expected number of points.
        # This way we are sure that we generate at least the required number of
        # points and discard the possibly generated superfluous one.
        # Compared to linspace this method generates more exact steps at
        # the cost of a less accurate endpoint.
        # The try/except lazily caches the result on first access.
        try:
            lon_vals = self.lon_vals
        except AttributeError:
            self.lon_vals = np.arange(
                self.xmin, self.xmin + (self.nx + 0.5) * self.dx, self.dx
            )[: self.nx]
            lon_vals = self.lon_vals
        return lon_vals
    def lat_range(self):
        """Return an array containing all the latitudinal points on the grid.
        Returns
        -------
        np.array(shape=(ny,), dtype=float)
        """
        # See the comment in lon_range
        try:
            lat_vals = self.lat_vals
        except AttributeError:
            self.lat_vals = np.arange(
                self.ymin, self.ymin + (self.ny + 0.5) * self.dy, self.dy
            )[: self.ny]
            lat_vals = self.lat_vals
        return lat_vals
    def cell_corners(self, i, j):
        """Return the corners of the cell with indices (i,j).
        See also the docstring of Grid.cell_corners.
        Parameters
        ----------
        i : int
        j : int
        Returns
        -------
        tuple(np.array(shape=(4,), dtype=float),
              np.array(shape=(4,), dtype=float))
            Arrays containing the x and y coordinates of the corners
        """
        return self.cell_x[:,i], self.cell_y[:,j]
    # NOTE(review): the default proj object is created once at definition
    # time and shared between calls — harmless if PlateCarree is stateless,
    # but worth confirming.
    def indices_of_point(self, lon, lat, proj=ccrs.PlateCarree()):
        """Return the indices of the grid cell that contains the point (lon, lat)
        Parameters
        ----------
        lat : float
            The latitude of the point source
        lon : float
            The longitude of the point source
        proj : cartopy.crs.Projection
            The cartopy projection of the lat/lon of the point source
            Default: cartopy.crs.PlateCarree
        Returns
        -------
        tuple(int, int)
            (cosmo_indx,cosmo_indy),
            the indices of the cosmo grid cell containing the source.
        Raises
        ------
        IndexError
            If the point lies outside the grid.
        """
        point = self.projection.transform_point(lon, lat, proj)
        # NOTE(review): __init__ treats (xmin, ymin) as a cell *center*
        # (corners at +/- dx/2), but this floor treats it as a cell edge —
        # points in the upper half of a cell get the neighbouring index.
        # Confirm whether the half-cell offset is intended.
        indx = np.floor((point[0] - self.xmin) / self.dx)
        indy = np.floor((point[1] - self.ymin) / self.dy)
        if indx < 0 or indy < 0 or indx > self.nx - 1 or indy > self.ny - 1:
            raise IndexError("Point lies outside the COSMO Grid")
        return int(indx), int(indy)
    def intersected_cells(self, corners):
        """Given a inventory cell, return a list of cosmo-cell-indices and
        intersection fractions.
        The inventory cell is specified by it's corners. The output is a list
        of tuples, specifying the indices and overlap as a fraction of the
        inventory cell area.
        Parameters
        ----------
        corners : np.array(shape=(4,2), dtype=float)
            The corners of the inventory cell in the COSMO coordinate system
        Returns
        -------
        list(tuple(int, int, float))
            A list containing triplets (x,y,r)
            - x : longitudinal index of cosmo grid cell
            - y : latitudinal index of cosmo grid cell
            - r : ratio of the area of the intersection compared to the total
                  area of the inventory cell.
                  r is in (0,1] (only nonzero intersections are reported)
        """
        # Find around which cosmo grid index the inventory cell lies.
        # Since the inventory cell is in general not rectangular because
        # of different projections, we add a margin of to the extremal indices.
        # This way we're sure we don't miss any intersection.
        cell_xmin = min(k[0] for k in corners)
        lon_idx_min = int((cell_xmin - self.xmin) / self.dx) - 2
        if lon_idx_min > self.nx:
            # The inventory cell lies outside the cosmo grid
            return []
        cell_xmax = max(k[0] for k in corners)
        lon_idx_max = int((cell_xmax - self.xmin) / self.dx) + 3
        if lon_idx_max < 0:
            # The inventory cell lies outside the cosmo grid
            return []
        cell_ymin = min(k[1] for k in corners)
        lat_idx_min = int((cell_ymin - self.ymin) / self.dy) - 2
        if lat_idx_min > self.ny:
            # The inventory cell lies outside the cosmo grid
            return []
        cell_ymax = max(k[1] for k in corners)
        lat_idx_max = int((cell_ymax - self.ymin) / self.dy) + 3
        if lat_idx_max < 0:
            # The inventory cell lies outside the cosmo grid
            return []
        # Here we assume a flat earth. The error is less than 1% for typical
        # grid sizes over europe. Since we're interested in the ratio of areas,
        # we can calculate in degrees^2.
        inv_cell = Polygon(corners)
        intersections = []
        # make sure we iterate only over valid gridpoint indices
        for i in range(max(0, lon_idx_min), min(self.nx, lon_idx_max)):
            for j in range(max(0, lat_idx_min), min(self.ny, lat_idx_max)):
                # NOTE(review): this rebinds the parameter name `corners`
                # inside the loop — correct as written (inv_cell was built
                # first), but fragile; a different local name would be safer.
                corners = list(zip(*self.cell_corners(i, j)))
                cosmo_cell = Polygon(corners)
                if cosmo_cell.intersects(inv_cell):
                    overlap = cosmo_cell.intersection(inv_cell)
                    intersections.append((i, j, overlap.area / inv_cell.area))
        return intersections
| 29.785997
| 81
| 0.555127
|
4a0597c4e19aa37f8aca366917f8852f13d2f206
| 1,330
|
py
|
Python
|
main.py
|
MayThuHtun/Z2U
|
4fd4756c8e43a0547e4940793c357a0cb6c4c5aa
|
[
"MIT"
] | 1
|
2021-04-14T06:16:36.000Z
|
2021-04-14T06:16:36.000Z
|
main.py
|
MayThuHtun/ufc
|
4fd4756c8e43a0547e4940793c357a0cb6c4c5aa
|
[
"MIT"
] | null | null | null |
main.py
|
MayThuHtun/ufc
|
4fd4756c8e43a0547e4940793c357a0cb6c4c5aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Usage: main.py inputfilename.ext outputfilename.ext
# Example: main.py zawgyi.txt unicode.txt
import codecs
import zg2uni
import uni2zg
import win2uni
import uni2win
import sys
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
input_file = codecs.open(input_file_name, encoding='utf-8')
output_file = codecs.open(output_file_name, encoding='utf-8', mode='w')
# Zawgyi -> Unicode conversion, line by line.
for input_line in input_file:
    input_line = zg2uni.convert(input_line)
    output_file.write(input_line)
    output_file.flush()
# NOTE(review): the loop above exhausts the file iterator, so ALL of the
# loops below iterate zero times — only the zg2uni pass ever runs. If the
# other conversions are meant to run, each loop needs input_file.seek(0)
# first (and the passes would then append to the same output). Confirm
# which single conversion is actually intended.
for input_line in input_file:
    input_line = uni2zg.convert(input_line)
    output_file.write(input_line)
    output_file.flush()
for input_line in input_file:
    input_line = win2uni.convert(input_line)
    output_file.write(input_line)
    output_file.flush()
for input_line in input_file:
    input_line = uni2win.convert(input_line)
    output_file.write(input_line)
    output_file.flush()
for input_line in input_file:
    input_line = zg2uni.convert(input_line)
    input_line = uni2win.convert(input_line)
    output_file.write(input_line)
    output_file.flush()
for input_line in input_file:
    input_line = win2uni.convert(input_line)
    input_line = uni2zg.convert(input_line)
    output_file.write(input_line)
    output_file.flush()
input_file.close()
output_file.close()
| 25.09434
| 71
| 0.756391
|
4a0597e2650dca9561fdfaafdf05f5f442c71141
| 446
|
py
|
Python
|
examples/timescale_listener_ex.py
|
masonCaminer/locust-plugins
|
d5f461d098f67a3693a6db6a786c65e112216920
|
[
"Apache-2.0"
] | 2
|
2020-10-06T20:52:56.000Z
|
2020-10-07T11:34:46.000Z
|
examples/timescale_listener_ex.py
|
masonCaminer/locust-plugins
|
d5f461d098f67a3693a6db6a786c65e112216920
|
[
"Apache-2.0"
] | null | null | null |
examples/timescale_listener_ex.py
|
masonCaminer/locust-plugins
|
d5f461d098f67a3693a6db6a786c65e112216920
|
[
"Apache-2.0"
] | null | null | null |
from locust_plugins.listeners import TimescaleListener
from locust import HttpUser, task, events
class MyHttpUser(HttpUser):
    """Example locust user whose requests get recorded by TimescaleListener."""
    @task
    def index(self):
        """POST an authentication request (the only task of this user)."""
        self.client.post("/authentication/1.0/getResults", {"username": "something"})
    # Base URL that self.client prepends to the request paths above.
    host = "http://example.com"
@events.init.add_listener
def on_locust_init(environment, **_kwargs):
    """Attach a TimescaleListener once the locust environment is initialised."""
    TimescaleListener(env=environment, testplan="timescale_listener_ex", target_env="myTestEnv")
| 27.875
| 96
| 0.748879
|
4a059a3313e0d0663b4b57648e22c7ad25c3ec93
| 2,144
|
py
|
Python
|
configs/_base_/datasets/imagenet_bs64_swin_224.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 1,190
|
2020-07-10T01:16:01.000Z
|
2022-03-31T09:48:38.000Z
|
configs/_base_/datasets/imagenet_bs64_swin_224.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 702
|
2020-07-13T13:31:33.000Z
|
2022-03-31T06:48:04.000Z
|
configs/_base_/datasets/imagenet_bs64_swin_224.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 502
|
2020-07-10T02:40:55.000Z
|
2022-03-31T02:07:09.000Z
|
# Inherit the RandAugment policy definitions from the base pipeline file.
_base_ = ['./pipelines/rand_aug.py']
# dataset settings
dataset_type = 'ImageNet'
# ImageNet channel statistics; to_rgb presumably flips BGR-loaded images
# to RGB (mmcv convention) -- confirm against the loader backend.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        size=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        # {{...}} is the mmcv config-inheritance placeholder: substituted
        # with rand_increasing_policies from the _base_ file above.
        policies={{_base_.rand_increasing_policies}},
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            # Pad with the per-channel mean (reversed channel order) so
            # padded pixels normalise to approximately zero.
            pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
            interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=img_norm_cfg['mean'][::-1],
        fill_std=img_norm_cfg['std'][::-1]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        # -1 keeps the aspect ratio: the short side is resized to 256.
        size=(256, -1),
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
evaluation = dict(interval=10, metric='accuracy')
| 29.777778
| 77
| 0.604011
|
4a059b35f7105672bc3580f008cafc5ece8b8658
| 3,789
|
py
|
Python
|
socceraction/data/opta/parsers/f24_json.py
|
pientist/socceraction
|
7f8e666ee5da7c1890c72a2c72042d4c73b90fda
|
[
"MIT"
] | 371
|
2019-07-25T07:35:00.000Z
|
2022-03-25T11:13:56.000Z
|
socceraction/data/opta/parsers/f24_json.py
|
pientist/socceraction
|
7f8e666ee5da7c1890c72a2c72042d4c73b90fda
|
[
"MIT"
] | 145
|
2019-08-29T12:49:55.000Z
|
2022-03-31T09:35:05.000Z
|
socceraction/data/opta/parsers/f24_json.py
|
pientist/socceraction
|
7f8e666ee5da7c1890c72a2c72042d4c73b90fda
|
[
"MIT"
] | 101
|
2019-08-20T21:07:34.000Z
|
2022-03-26T10:00:00.000Z
|
"""JSON parser for Opta F24 feeds."""
from datetime import datetime
from typing import Any, Dict
from ...base import MissingDataError
from .base import OptaJSONParser, _get_end_x, _get_end_y, assertget
class F24JSONParser(OptaJSONParser):
    """Extract data from a Opta F24 data stream.

    Parameters
    ----------
    path : str
        Path of the data file.
    """

    def _get_doc(self) -> Dict[str, Any]:
        """Return the node of the feed that carries the 'Games' data.

        Raises
        ------
        MissingDataError
            If no node in the document contains a 'Games' entry.
        """
        for node in self.root:
            if 'Games' in node['data'].keys():
                return node
        raise MissingDataError

    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        """Return a dictionary with all available games.

        Returns
        -------
        dict
            A mapping between game IDs and the information available about
            each game in the data stream.
        """
        f24 = self._get_doc()
        data = assertget(f24, 'data')
        games = assertget(data, 'Games')
        game = assertget(games, 'Game')
        attr = assertget(game, '@attributes')
        game_id = int(assertget(attr, 'id'))
        game_dict = {
            game_id: dict(
                competition_id=int(assertget(attr, 'competition_id')),
                game_id=game_id,
                season_id=int(assertget(attr, 'season_id')),
                game_day=int(assertget(attr, 'matchday')),
                home_team_id=int(assertget(attr, 'home_team_id')),
                away_team_id=int(assertget(attr, 'away_team_id')),
            )
        }
        return game_dict

    def extract_events(self) -> Dict[int, Dict[str, Any]]:
        """Return a dictionary with all available events.

        Returns
        -------
        dict
            A mapping between event IDs and the information available about
            each event in the data stream.
        """
        f24 = self._get_doc()
        data = assertget(f24, 'data')
        games = assertget(data, 'Games')
        game = assertget(games, 'Game')
        game_attr = assertget(game, '@attributes')
        game_id = int(assertget(game_attr, 'id'))
        events = {}
        for element in assertget(game, 'Event'):
            attr = element['@attributes']
            # Bug fix: strptime(None, ...) raised TypeError whenever an
            # event carried no TimeStamp; keep the timestamp as None then.
            raw_ts = attr['TimeStamp'].get('locale') if attr.get('TimeStamp') else None
            timestamp = (
                datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%S.%fZ')
                if raw_ts is not None else None)
            qualifiers = {
                int(q['@attributes']['qualifier_id']): q['@attributes']['value']
                for q in element.get('Q', [])
            }
            start_x = float(assertget(attr, 'x'))
            start_y = float(assertget(attr, 'y'))
            # Fall back to the start location when the qualifiers carry no
            # explicit end coordinates.
            end_x = _get_end_x(qualifiers)
            end_y = _get_end_y(qualifiers)
            if end_x is None:
                end_x = start_x
            if end_y is None:
                end_y = start_y
            event_id = int(assertget(attr, 'event_id'))
            event = dict(
                game_id=game_id,
                event_id=event_id,
                type_id=int(assertget(attr, 'type_id')),
                period_id=int(assertget(attr, 'period_id')),
                minute=int(assertget(attr, 'min')),
                second=int(assertget(attr, 'sec')),
                timestamp=timestamp,
                player_id=int(assertget(attr, 'player_id')),
                team_id=int(assertget(attr, 'team_id')),
                # 'outcome' defaults to 1 when absent from the feed.
                outcome=bool(int(attr.get('outcome', 1))),
                start_x=start_x,
                start_y=start_y,
                end_x=end_x,
                end_y=end_y,
                assist=bool(int(attr.get('assist', 0))),
                keypass=bool(int(attr.get('keypass', 0))),
                qualifiers=qualifiers,
            )
            events[event_id] = event
        return events
| 34.445455
| 90
| 0.534178
|
4a059f1e4c8040490ee319fb4a7fcafd27e93539
| 827
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/__init__.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/__init__.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/__init__.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 39.380952
| 80
| 0.702539
|
4a059fcb84d3f5c38d8c17ca1dc293ed2241852c
| 35,496
|
py
|
Python
|
src/fparser/common/tests/test_readfortran.py
|
p-vitt/fparser
|
94644d5df6af242b4a93593f0157479ba2989dd8
|
[
"BSD-3-Clause"
] | null | null | null |
src/fparser/common/tests/test_readfortran.py
|
p-vitt/fparser
|
94644d5df6af242b4a93593f0157479ba2989dd8
|
[
"BSD-3-Clause"
] | null | null | null |
src/fparser/common/tests/test_readfortran.py
|
p-vitt/fparser
|
94644d5df6af242b4a93593f0157479ba2989dd8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2017-2020 Science and Technology Facilities Council
#
# All rights reserved.
#
# Modifications made as part of the fparser project are distributed
# under the following license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##############################################################################
# Modified M. Hambley and P. Elson, Met Office
# Modified R. W. Ford and A. R. Porter, STFC Daresbury Lab
##############################################################################
'''
Test battery associated with fparser.common.readfortran package.
'''
from __future__ import print_function
import io
import os.path
import tempfile
import re
import six
import pytest
from fparser.common.readfortran import FortranFileReader, \
FortranStringReader, FortranReaderBase, Line, extract_label, \
extract_construct_name
from fparser.common.sourceinfo import FortranFormat
def test_empty_line_err():
    '''Constructing a Line from whitespace-only text must raise
    FortranReaderError with a message naming the offending line.'''
    from fparser.common.readfortran import FortranReaderError
    with pytest.raises(FortranReaderError) as excinfo:
        Line(" ", 1, "", "a_name", None)
    message = str(excinfo.value)
    assert "Got empty line: \' \'" in message
def test_111fortranreaderbase(log, monkeypatch):
    '''
    Tests the FortranReaderBase class.
    Currently only tests logging functionality.
    '''
    class FailFile(object):
        '''
        A "file-like" object which returns a line of Fortran source followed
        by raising a StopIteration exception.
        '''
        # NOTE(review): the second pop() actually raises IndexError, not
        # StopIteration -- the reader evidently treats the failure as
        # end-of-input (see the critical-log asserts below); confirm
        # against FortranReaderBase.next().
        _stuff = ['x=1']
        def next(self):
            '''
            Used by Python 2.7.
            '''
            return self.__next__()
        def __next__(self):
            '''
            Used by Python 3.
            '''
            return self._stuff.pop()
    # Pin the reader's id so the logged text is predictable.
    monkeypatch.setattr('fparser.common.readfortran.FortranReaderBase.id',
                        lambda x: 'foo', raising=False)
    mode = FortranFormat(True, False)
    unit_under_test = FortranReaderBase(FailFile(), mode, True)
    assert str(unit_under_test.next()) == "line #1'x=1'"
    # Source exhausted: next() must raise and log at critical severity.
    with pytest.raises(StopIteration):
        unit_under_test.next()
    assert log.messages['info'] == []
    assert log.messages['warning'] == []
    assert log.messages['error'] == []
    result = log.messages['critical'][0].split('\n')[1]
    assert result == ' 1:x=1 <== while processing line'
    assert log.messages['critical'][1] == 'STOPPED READING'
    # The traceback of the underlying failure is logged at debug level.
    expected = 'Traceback\n'
    assert log.messages['debug'][0][:len(expected)] == expected
def test_include_not_found():
    '''When the file named by an include line cannot be located, next()
    must hand back the include line itself, unmodified.'''
    code = "include 'nonexistant.f90'"
    reader = FortranStringReader(code)
    item = reader.next()
    assert str(item.line) == code
def test_base_next_good_include(log):
    '''
    Tests that FortranReaderBase.next() causes a message to be logged when a
    file is included.
    '''
    # modfile.f95 lives next to this test module, so the include resolves.
    code = "include 'modfile.f95'\nx=2"
    include_directories = [os.path.dirname(__file__)]
    unit_under_test = FortranStringReader(
        code, include_dirs=include_directories, ignore_comments=False)
    line = unit_under_test.next()
    assert str(line)[:19] == "Comment('! Modified"  # First line of inclusion
    assert log.messages['debug'] == []
    assert log.messages['error'] == []
    assert log.messages['warning'] == []
    assert log.messages['critical'] == []
    expected = " 1:include 'modfile.f95' " \
        + "<== including file '{path}/modfile.f95'"
    result = log.messages['info'][0].split('\n')[1]
    # Strip any u'' prefixes so the comparison behaves the same on
    # Python 2 and Python 3.
    assert re.sub("u", "", result) == \
        re.sub("u", "", expected.format(path=include_directories[0]))
def test_fortranreaderbase_info(log):
    '''Check that FortranReaderBase.info() logs exactly one entry at
    "info" severity and nothing at any other severity.'''
    reader = FortranStringReader('x=3')
    item = reader.get_source_item()
    reader.info('Mighty Whirlitzer', item)
    for severity in ('debug', 'error', 'warning', 'critical'):
        assert log.messages[severity] == []
    result = log.messages['info'][0].split('\n')[1]
    assert result == ' 1:x=3 <== Mighty Whirlitzer'
def test_fortranreaderbase_error(log):
    '''Check that FortranReaderBase.error() logs at "error" severity and
    terminates via SystemExit.'''
    reader = FortranStringReader('x=2')
    item = reader.get_source_item()
    with pytest.raises(SystemExit):
        reader.error('Thundering Chalmer', item)
    for severity in ('debug', 'info', 'warning', 'critical'):
        assert log.messages[severity] == []
    result = log.messages['error'][0].split('\n')[1]
    assert result == ' 1:x=2 <== Thundering Chalmer'
def test_fortranreaderbase_warning(log):
    '''Check that FortranReaderBase.warning() logs exactly one entry at
    "warning" severity and nothing at any other severity.'''
    reader = FortranStringReader('x=1')
    item = reader.get_source_item()
    reader.warning('Flatulent Hermit', item)
    for severity in ('debug', 'info', 'error', 'critical'):
        assert log.messages[severity] == []
    result = log.messages['warning'][0].split('\n')[1]
    assert result == ' 1:x=1 <== Flatulent Hermit'
def test_base_handle_multilines(log):
    '''
    Tests that FortranReaderBase.get_source_item() logs the correct messages
    when there are quote discrepancies.
    '''
    # Case 1: an odd number of quote characters before the """ block.
    code = 'character(8) :: test = \'foo"""bar'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(True, True)
    unit_under_test.set_format(mode)  # Force strict free format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['error'] == []
    assert log.messages['critical'] == []
    expected = 'multiline prefix contains odd number of "\'" characters'
    result = log.messages['warning'][0].split('<==')[1].lstrip()
    assert result == expected
    # Case 2: a single quote left open after the """ block has closed.
    code = 'goo """boo\n doo""" soo \'foo'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(True, True)
    unit_under_test.set_format(mode)  # Force strict free format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['error'] == []
    assert log.messages['critical'] == []
    expected = 'following character continuation: "\'", expected None.'
    result = log.messages['warning'][0].split('<==')[1].lstrip()
    assert result == expected
def test_base_fixed_nonlabel(log):
    '''
    Tests that FortranReaderBase.get_source_item() logs the correct messages
    when there is an unexpected character in the initial 6 columns.
    '''
    # Checks that a bad character in the first column causes an event to be
    # logged; in strict mode the line is then treated as a comment.
    code = 'w integer :: i'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(False, True)
    unit_under_test.set_format(mode)  # Force fixed format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['error'] == []
    assert log.messages['critical'] == []
    result = log.messages['warning'][0].split('<==')[1].lstrip()
    expected = "non-space/digit char 'w' found in column 1 of fixed " \
        + "Fortran code, interpreting line as comment line"
    assert result == expected
    # Checks a bad character in columns 2-5 (the rest of the label field;
    # column 6, the continuation column, is not exercised by this loop).
    for i in range(1, 5):
        code = ' '*i + 'w' + ' '*(5-i) + 'integer :: i'
        log.reset()
        unit_under_test = FortranStringReader(code)
        mode = FortranFormat(False, True)
        unit_under_test.set_format(mode)  # Force strict fixed format
        unit_under_test.get_source_item()
        assert log.messages['debug'] == []
        assert log.messages['info'] == []
        assert log.messages['error'] == []
        assert log.messages['critical'] == []
        result = log.messages['warning'][0].split('<==')[1].lstrip()
        expected = "non-space/digit char 'w' found in column {col} " \
            + "of fixed Fortran code"
        assert result == expected.format(col=i+1)
    # Checks for a bad character, not in the first column, with "sloppy" mode
    # engaged: the reader is expected to switch to free format instead.
    code = ' w integer :: i'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(False, False)
    unit_under_test.set_format(mode)  # Force sloppy fixed format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['error'] == []
    assert log.messages['critical'] == []
    expected = "non-space/digit char 'w' found in column 2 " \
        + "of fixed Fortran code, switching to free format mode"
    result = log.messages['warning'][0].split('<==')[1].lstrip()
    assert result == expected
def test_base_fixed_continuation(log):
    '''
    Tests that FortranReaderBase.get_source_item() logs the correct messages
    when there are quote mismatches across a continuation in fixed format.
    '''
    # Case 1: a double quote left open across a continuation boundary.
    code = ' character(4) :: cheese = "a & !\n & b'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(False, False)
    unit_under_test.set_format(mode)  # Force sloppy fixed format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['error'] == []
    assert log.messages['critical'] == []
    expected = 'following character continuation: \'"\', expected None.'
    result = log.messages['warning'][0].split('<==')[1].lstrip()
    assert result == expected
    # Case 2: free-format '&' continuations appearing in fixed-format
    # source are reported as a warning.
    code = ' x=1 &\n +1 &\n -2'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(False, False)
    unit_under_test.set_format(mode)  # Force sloppy fixed format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['error'] == []
    assert log.messages['critical'] == []
    expected = 'free format line continuation character `&\' detected ' \
        + 'in fix format code\n 2: +1 &\n 3: -2'
    result = log.messages['warning'][0].split('<==')[1].lstrip()
    assert result == expected
def test_base_free_continuation(log):
    '''
    Tests that FortranReaderBase.get_source_item() logs the correct messages
    when there are quote mismatches across a continuation in free format.
    '''
    # A double quote left unclosed across a '&' continuation; note this is
    # reported at *error* severity, unlike the fixed-format case.
    code = 'character(4) :: "boo & que'
    log.reset()
    unit_under_test = FortranStringReader(code)
    mode = FortranFormat(True, False)
    unit_under_test.set_format(mode)  # Force sloppy free format
    unit_under_test.get_source_item()
    assert log.messages['debug'] == []
    assert log.messages['info'] == []
    assert log.messages['warning'] == []
    assert log.messages['critical'] == []
    expected = 'following character continuation: \'"\', expected None.'
    result = log.messages['error'][0].split('<==')[1].lstrip()
    assert result == expected
def check_include_works(fortran_filename, fortran_code, include_info,
                        expected, tmpdir, ignore_comments=True):
    '''Utility function used by a number of tests to check that include
    files work as expected.

    :param str fortran_filename: the name of the fortran file that is \
        going to be created in the 'tmpdir' directory.
    :param str fortran_code: the fortran code to put in the fortran \
        file specified by 'fortran_filename'.
    :param include_info: a mapping from include filename to the code \
        that the include file should contain.
    :type include_info: dict of str to str
    :param str expected: the expected output after parsing the code.
    :param tmpdir: the temporary directory in which to create and \
        process the Fortran files.
    :param bool ignore_comments: whether to ignore (skip) comments in \
        the Fortran code or not. Defaults to ignore them.

    '''
    # Change directory before the try block: if chdir() itself fails there
    # is nothing to restore (previously a failure here caused a NameError
    # on 'oldpwd' in the finally clause).
    oldpwd = tmpdir.chdir()
    try:
        cwd = str(tmpdir)
        # Create the program file and every include file it refers to.
        with open(os.path.join(cwd, fortran_filename), "w") as cfile:
            cfile.write(fortran_code)
        for include_filename, include_code in include_info.items():
            with open(os.path.join(cwd, include_filename), "w") as cfile:
                cfile.write(include_code)
        reader = FortranFileReader(fortran_filename,
                                   ignore_comments=ignore_comments)
        for orig_line in expected.split("\n"):
            new_line = reader.next().line
            assert new_line == orig_line
        # Every expected line consumed: the reader must now be exhausted.
        with pytest.raises(StopIteration):
            reader.next()
    finally:
        # Always restore the original working directory.
        oldpwd.chdir()
# Small fixture program used by the get_item/put_item tests below.
FORTRAN_CODE = ("program test\n"
                " ! prog comment 1\n"
                " print *, 'Hello'\n"
                " ! prog comment 2\n"
                "end program")
# What FORTRAN_CODE reduces to once comments are stripped.
EXPECTED_CODE = ("program test\n"
                 "print *, 'Hello'\n"
                 "end program")
def test_include1(tmpdir):
    '''A program consisting solely of an include line must expand to the
    contents of the included file.'''
    include_name = "prog.inc"
    program = "include '{0}'".format(include_name)
    check_include_works("prog.f90", program,
                        {include_name: EXPECTED_CODE},
                        EXPECTED_CODE, tmpdir)
def test_include2(tmpdir):
    '''Include handling must work when both the including file and the
    included file span several lines.'''
    include_filename = "my-include.h"
    fortran_code = ("module include_test\n"
                    " include '{0}'\n"
                    "end module include_test".format(include_filename))
    include_code = ("interface mpi_sizeof\n"
                    "subroutine simple()\n"
                    "end subroutine simple\n"
                    "end interface mpi_sizeof")
    outer = fortran_code.split("\n")
    expected = "\n".join([outer[0], include_code, outer[2]])
    check_include_works("prog.f90", fortran_code,
                        {include_filename: include_code},
                        expected, tmpdir)
def test_include3(tmpdir):
    '''The included text may be required to make the enclosing program
    syntactically complete.'''
    include_filename = "prog.inc"
    fortran_code = ("program test\n"
                    "include '{0}'".format(include_filename))
    include_code = ("print *, 'Hello'\n"
                    "end program")
    check_include_works("prog.f90", fortran_code,
                        {include_filename: include_code},
                        EXPECTED_CODE, tmpdir)
def test_include4(tmpdir):
    '''Several include lines in one file must all be expanded.'''
    name1, name2 = "prog.inc1", "prog.inc2"
    fortran_code = ("program test\n"
                    "include '{0}'\n"
                    "include '{1}'".format(name1, name2))
    include_code1 = ("print *, 'Hello'\n")
    include_code2 = ("end program")
    expected = fortran_code.split("\n")[0] + "\n" + include_code1 + \
        include_code2
    check_include_works("prog.f90", fortran_code,
                        {name1: include_code1, name2: include_code2},
                        expected, tmpdir)
def test_include5(tmpdir):
    '''An included file may itself contain an include line.'''
    name1, name2 = "prog.inc1", "prog.inc2"
    fortran_code = ("program test\n"
                    "include '{0}'".format(name1))
    include_code1 = ("print *, 'Hello'\n"
                     "include '{0}'".format(name2))
    check_include_works("prog.f90", fortran_code,
                        {name1: include_code1, name2: "end program"},
                        EXPECTED_CODE, tmpdir)
def test_include6(tmpdir, ignore_comments):
    '''Check that FortranReaderBase can parse an include file correctly
    when it contains comments. Test with and without comments being
    ignored.
    '''
    # 'ignore_comments' is a fixture parameter -- presumably parametrised
    # True/False by conftest; confirm against the test configuration.
    fortran_filename = "prog.f90"
    include_filename = "prog.inc"
    fortran_code = ("program test\n"
                    " ! prog comment 1\n"
                    " include '{0}'\n"
                    " ! prog comment 2\n"
                    "end program".format(include_filename))
    include_code = ("! include comment 1\n"
                    "print *, 'Hello'\n"
                    "! include comment 2")
    include_info = {include_filename: include_code}
    if ignore_comments:
        expected = EXPECTED_CODE
    else:
        # With comments kept, the include file's comments appear in place
        # of the include line, between the program's own comments.
        expected = ("program test\n"
                    "! prog comment 1\n"
                    "! include comment 1\n"
                    "print *, 'Hello'\n"
                    "! include comment 2\n"
                    "! prog comment 2\n"
                    "end program")
    check_include_works(fortran_filename, fortran_code, include_info,
                        expected, tmpdir, ignore_comments=ignore_comments)
def test_multi_stmt_line1():
    '''Statements separated by ";" on one input line must come back as
    separate Line items that share the same source span.'''
    reader = FortranStringReader("do i=1,10;b=20 ; c=30")
    reader.set_format(FortranFormat(True, False))
    first = reader.next()
    assert isinstance(first, Line)
    assert first.line == "do i=1,10"
    assert first.span == (1, 1)
    assert first.label is None
    assert first.name is None
    assert first.reader is reader
    for text in ("b=20", "c=30"):
        item = reader.next()
        assert isinstance(item, Line)
        assert item.line == text
        # Statements split from one physical line share the span object.
        assert item.span is first.span
        assert item.label is None
        assert item.name is None
        assert item.reader is reader
def test_multi_stmt_line2():
    '''Labelled format statements separated by ";" must each keep their
    own label while sharing the same span.'''
    reader = FortranStringReader("10 format(a); 20 format(b)")
    reader.set_format(FortranFormat(True, False))
    first = reader.next()
    assert isinstance(first, Line)
    assert first.line == "format(a)"
    assert first.span == (1, 1)
    assert first.label == 10
    assert first.name is None
    assert first.reader is reader
    second = reader.next()
    assert second.line == "format(b)"
    assert second.span is first.span
    assert second.label == 20
    assert second.name is None
    assert second.reader is reader
def test_multi_stmt_line3():
    '''Construct-named do statements separated by ";" must each keep
    their construct name while sharing the same span.'''
    reader = FortranStringReader("name:do i=1,10;name2 : do j=1,10")
    reader.set_format(FortranFormat(True, False))
    first = reader.next()
    assert isinstance(first, Line)
    assert first.line == "do i=1,10"
    assert first.span == (1, 1)
    assert first.label is None
    assert first.name == "name"
    assert first.reader is reader
    second = reader.next()
    assert second.line == "do j=1,10"
    assert second.span is first.span
    assert second.label is None
    assert second.name == "name2"
    assert second.reader is reader
def test_get_item(ignore_comments):
    '''get_item() must return the source lines in order, honouring the
    ignore_comments setting, and return a falsey value once exhausted.'''
    with_comments = ("program test\n"
                     "! prog comment 1\n"
                     "print *, 'Hello'\n"
                     "! prog comment 2\n"
                     "end program")
    expected = EXPECTED_CODE if ignore_comments else with_comments
    reader = FortranStringReader(FORTRAN_CODE, ignore_comments=ignore_comments)
    for wanted in expected.split("\n"):
        got = reader.get_item()
        assert wanted in got.line
    assert not reader.get_item()
def test_put_item(ignore_comments):
    '''A line handed back via put_item() must be the very next one that
    get_item() returns. Exercised with and without ignoring comments.'''
    reader = FortranStringReader(FORTRAN_CODE, ignore_comments=ignore_comments)
    while True:
        item = reader.get_item()
        if not item:
            break
        reader.put_item(item)
        assert reader.get_item() == item
def test_put_item_include(ignore_comments):
    '''Check that when a line that has been included via an include
    statement is consumed it can be pushed back so it can be consumed
    again. Test with and without ignoring comments.
    '''
    # NOTE(review): this body is currently identical to test_put_item and
    # the FORTRAN_CODE fixture contains no include statement, so no
    # included line is actually exercised here -- confirm intent and, if
    # needed, add a fixture that really contains an include.
    reader = FortranStringReader(FORTRAN_CODE, ignore_comments=ignore_comments)
    while True:
        orig_line = reader.get_item()
        if not orig_line:
            break
        reader.put_item(orig_line)
        fifo_line = reader.get_item()
        assert fifo_line == orig_line
def test_multi_put_item(ignore_comments):
    '''Check that multiple lines can be pushed back and will be returned
    correctly in the specified order (actually the reverse of the
    original). Test with and without ignoring comments.
    '''
    reader = FortranStringReader(FORTRAN_CODE, ignore_comments=ignore_comments)
    orig_lines = []
    while True:
        orig_line = reader.get_item()
        if not orig_line:
            break
        # Make sure our original lines are kept in reverse order.
        orig_lines.insert(0, orig_line)
    # Put back original lines in reverse order as that is what we
    # would expect when processing and rolling back.
    for line in orig_lines:
        reader.put_item(line)
    # Lines should now be returned in the correct order (so compare in
    # reverse order with the original line list)
    while True:
        filo_line = reader.get_item()
        if not filo_line:
            break
        assert filo_line == orig_lines.pop(-1)
    # Every pushed-back line must have been returned exactly once.
    assert not orig_lines
# Issue 177: get_item(ignore_comments) - how does ignore_comments affect
# processing?
# Issue 178: Why is there a next() as well as a get_item()? How do they
# (and put_item()) interact?
##############################################################################
# Free-format fixture used by the reader-construction tests below; the
# non-ASCII comment exercises Unicode handling in the readers.
FULL_FREE_SOURCE = u'''
!> Unicode comment: ❤ ✓ ☂ ♞ ☯
program test
implicit none
character, parameter :: nature = 'free format'
end program test
'''
# The lines the readers are expected to produce from FULL_FREE_SOURCE.
FULL_FREE_EXPECTED = [u'!> Unicode comment: ❤ ✓ ☂ ♞ ☯',
                      'program test',
                      ' implicit none',
                      " character, parameter :: nature = 'free format'",
                      'end program test']
##############################################################################
def test_filename_reader():
    '''
    Tests that a Fortran source file can be read given its filename.
    '''
    handle, filename = tempfile.mkstemp(suffix='.f90', text=True)
    os.close(handle)
    try:
        with io.open(filename, mode='w', encoding='UTF-8') as source_file:
            source_file.write(FULL_FREE_SOURCE)
        unit_under_test = FortranFileReader(filename)
        expected = FortranFormat(True, False)
        assert unit_under_test.format == expected
        for expected in FULL_FREE_EXPECTED:
            found = unit_under_test.get_single_line(ignore_empty=True)
            assert found == expected
    finally:
        # Bug fix: the file was previously deleted only on failure, so a
        # temporary file leaked on every successful run.
        os.unlink(filename)
##############################################################################
def test_file_reader():
    '''
    Tests that a Fortran source file can be read given a file object of it.
    '''
    handle, filename = tempfile.mkstemp(suffix='.f90', text=True)
    os.close(handle)
    try:
        with io.open(filename, mode='w', encoding='UTF-8') as source_file:
            source_file.write(FULL_FREE_SOURCE)
        with io.open(filename, mode='r', encoding='UTF-8') as source_file:
            unit_under_test = FortranFileReader(source_file)
            expected = FortranFormat(True, False)
            assert unit_under_test.format == expected
            for expected in FULL_FREE_EXPECTED:
                assert unit_under_test.get_single_line(ignore_empty=True) \
                    == expected
    finally:
        # Bug fix: the file was previously deleted only on failure, so a
        # temporary file leaked on every successful run.
        os.unlink(filename)
##############################################################################
def test_bad_file_reader():
    '''Passing something that is neither a filename nor a file-like
    object to FortranFileReader must raise ValueError.'''
    with pytest.raises(ValueError) as excinfo:
        FortranFileReader(42)
    message = str(excinfo.value)
    assert ('FortranFileReader is used with a filename'
            ' or file-like object.') in message
##############################################################################
def test_string_reader():
    '''
    Checks that Fortran source held in a string can be read back line by
    line in free format.
    '''
    reader = FortranStringReader(FULL_FREE_SOURCE)
    assert reader.format == FortranFormat(True, False)
    for line in FULL_FREE_EXPECTED:
        assert reader.get_single_line(ignore_empty=True) == line
##############################################################################
def test_inherited_f77():
    '''
    A grab bag of functional tests inherited from readfortran.py.

    Exercises fixed-form (f77) reading: full-line comments, inline comments
    and line continuation, from both a string buffer and a real file.
    '''
    # Fixed-form source fixture; column positions are significant in f77.
    string_f77 = """c -*- f77 -*-
c12346 comment
subroutine foo
call foo
'bar
a 'g
abc=2
cf2py call me ! hey
call you ! hi
end
'"""
    # str() renderings of the items the reader is expected to yield.
    expected = ["Comment('c -*- f77 -*-',(1, 1))",
                "Comment('c12346 comment',(2, 2))",
                "line #3'subroutine foo'",
                "line #4'call foobar'",
                'Comment("a \'g",(6, 6))',
                "line #7'abc=2'",
                "line #9'call you ! hi'",
                "line #10'end'"]
    # Reading from buffer
    reader = FortranStringReader(
        string_f77, ignore_comments=False)
    assert reader.format.mode == 'f77', repr(reader.format.mode)
    stack = expected[:]
    for item in reader:
        assert str(item) == stack.pop(0)
    # Reading from file: the same source must produce the same items.
    handle, filename = tempfile.mkstemp(suffix='.f', text=True)
    os.close(handle)
    with open(filename, 'w') as fortran_file:
        print(string_f77, file=fortran_file)
    reader = FortranFileReader(
        filename, ignore_comments=False)
    stack = expected[:]
    for item in reader:
        assert str(item) == stack.pop(0)
def test_pyf():
    '''
    Tests inherited from implementation.

    Exercises signature-file (pyf) mode: triple-quoted multi-line blocks,
    inline comments, fake comments inside strings, line continuation and
    labelled constructs.
    '''
    string_pyf = """! -*- pyf -*-
python module foo
interface
beginml '''1st line
2nd line
end line'''endml='tere!fake comment'!should be a comment
a = 2
'charc\"onstant' ''' single line mline '''a='hi!fake comment'!should be a comment
a=\\\\\\\\\\'''not a multiline'''
!blah='''never ending multiline
b=3! hey, fake line continuation:&
c=4& !line cont
&45
thisis_label_2 : c = 3
xxif_isotropic_2 : if ( string_upper_compare ( o%opt_aniso, 'ISOTROPIC' ) ) then
g=3
endif
end interface
if ( pc_get_lun() .ne. 6) &
write ( pc_get_lun(), '( &
& /, a, /, " p=", i4, " stopping c_flag=", a, &
& /, " print unit=", i8)') &
trim(title), pcpsx_i_pel(), trim(c_flag), pc_get_lun()
end python module foo
! end of file
"""
    # str() renderings of the items the reader is expected to yield, in
    # order; note the MultiLine items produced by the ''' blocks.
    expected = ["Comment('! -*- pyf -*-',(1, 1))",
                "line #2'python module foo'",
                "line #3'interface'",
                "MultiLine('  beginml ',"
                + "['1st line', '  2nd line', '  end line'],"
                + "\"endml='tere!fake comment'\",(4, 6))",
                "Comment('!should be a comment',(6, 6))",
                "line #7'a = 2'",
                "MultiLine(' \\'charc\"onstant\\' ',"
                + "[' single line mline '],"
                + "\"a='hi!fake comment'\",(8, 8))",
                "Comment('!should be a comment',(8, 8))",
                'line #9"a=\\\\\\\\\\\\\\\\\\\\\'\'\'not a multiline\'\'\'"',
                'Comment("!blah=\'\'\'never ending multiline",(10, 10))',
                "line #11'b=3'",
                "Comment('! hey, fake line continuation:&',(11, 11))",
                "line #12'c=445'",
                "Comment('!line cont',(12, 12))",
                "line #14thisis_label_2: 'c = 3'",
                'line #15xxif_isotropic_2: '
                + '"if ( string_upper_compare ( o%opt_aniso,'
                + ' \'ISOTROPIC\' ) ) then"',
                "line #16'g=3'",
                "line #17'endif'",
                "line #18'end interface'",
                'line #19\'if ( pc_get_lun() .ne. 6)'
                + ' write ( pc_get_lun(), \\\'( /, a, /, " p=", i4,'
                + ' " stopping c_flag=", a, /, " print unit=", i8)\\\')'
                + ' trim(title), pcpsx_i_pel(), trim(c_flag),'
                + ' pc_get_lun()\'',
                "line #25'end python module foo'",
                "Comment('! end of file',(26, 26))"]
    reader = FortranStringReader(
        string_pyf, ignore_comments=False)
    assert reader.format.mode == 'pyf', repr(reader.format.mode)
    for item in reader:
        # Remove 'u's to allow for py2/3 unicode differences
        assert re.sub("u", "", str(item)) == re.sub("u", "", expected.pop(0))
def test_fix90():
    '''
    Tests inherited from implementation.

    Exercises fixed-form Fortran 90 mode: labels, inline comments,
    continuation lines (both '&' style and column-6 style) and comment
    lines inside a continued statement.
    '''
    string_fix90 = """c -*- fix -*-
subroutine foo
cComment
1234 a = 3 !inline comment
b = 3
!
!4!line cont. with comment symbol
&5
a = 3!f2py.14 ! pi!
! KDMO
write (obj%print_lun, *) ' KDMO : '
write (obj%print_lun, *) ' COORD = ',coord, ' BIN_WID = ', &
obj%bin_wid,' VEL_DMO = ', obj%vel_dmo
end subroutine foo
subroutine
& foo
end
"""
    # str() renderings of the items the reader is expected to yield;
    # e.g. the continued 'b = 3' + '45' lines collapse to 'b = 345'.
    expected = ["Comment('c -*- fix -*-',(1, 1))",
                "line #2'subroutine foo'",
                "Comment('cComment',(3, 3))",
                "line #4 1234 'a = 3'",
                "Comment('!inline comment',(4, 4))",
                "line #5'b = 345'",
                "Comment('!',(6, 6))",
                "Comment('!line cont. with comment symbol',(7, 7))",
                "line #9'a = 3.14'",
                "Comment('! pi!',(9, 9))",
                "Comment('! KDMO',(10, 10))",
                'line #11"write (obj%print_lun, *) \' KDMO : \'"',
                'line #12"write (obj%print_lun, *) \' COORD = \',coord,'
                + ' \' BIN_WID = \', &"',
                'line #13"obj%bin_wid,\' VEL_DMO = \', obj%vel_dmo"',
                "line #14'end subroutine foo'",
                "line #15'subroutine foo'",
                "Comment('',(16, 16))",
                "line #18'end'"]
    reader = FortranStringReader(
        string_fix90, ignore_comments=False)
    assert reader.format.mode == 'fix', repr(reader.format.mode)
    for item in reader:
        # Remove 'u's to allow for py2/3 unicode differences
        assert re.sub("u", "", six.text_type(item)) == \
            re.sub("u", "", expected.pop(0))
def test_utf_char_in_code(log):
    ''' Check that we cope with Fortran code that contains UTF characters.
    This is not valid Fortran but most compilers cope with it. Nothing
    should be logged at critical level while reading the file. '''
    log.reset()
    source_path = os.path.join(os.path.dirname(__file__), "utf_in_code.f90")
    reader = FortranFileReader(source_path, ignore_comments=True)
    # Drain the reader; we only care that no critical messages appear.
    while reader.get_item():
        pass
    assert log.messages['critical'] == []
def test_extract_label():
    ''' Test the extract label function in readfortran.py.'''
    # No label at all: the original string object comes straight back.
    source = "no label"
    label, remainder = extract_label(source)
    assert label is None
    assert remainder is source
    # Digits not followed by whitespace are not a label.
    source = "  80stuff"
    label, remainder = extract_label(source)
    assert label is None
    assert remainder is source
    # A real numeric label is converted to int and stripped off.
    label, remainder = extract_label("  80 stuff")
    assert label == 80
    assert remainder == "stuff"
def test_extract_construct_name():
    '''Test the extract construct name function in readfortran.py.'''
    # No construct name: the original string object comes straight back.
    source = "no construct name"
    name, remainder = extract_construct_name(source)
    assert name is None
    assert remainder is source
    # Name separated by a colon, with and without surrounding whitespace.
    for source in ("name:stuff", " name : stuff"):
        name, remainder = extract_construct_name(source)
        assert name == "name"
        assert remainder == "stuff"
| 35.818365
| 87
| 0.608463
|
4a05a014f1c3e159764a83a28485bae27e005661
| 754
|
py
|
Python
|
tests/test_predefined_model.py
|
IncyLiu/autokeras
|
e9dbf66b005e2ffaabe29bc366bb4e72fa79add8
|
[
"MIT"
] | 1
|
2019-01-25T02:20:37.000Z
|
2019-01-25T02:20:37.000Z
|
tests/test_predefined_model.py
|
IncyLiu/autokeras
|
e9dbf66b005e2ffaabe29bc366bb4e72fa79add8
|
[
"MIT"
] | 1
|
2019-03-06T02:20:08.000Z
|
2019-03-06T02:20:08.000Z
|
tests/test_predefined_model.py
|
IncyLiu/autokeras
|
e9dbf66b005e2ffaabe29bc366bb4e72fa79add8
|
[
"MIT"
] | 2
|
2019-05-13T20:20:17.000Z
|
2020-01-01T04:49:59.000Z
|
from unittest.mock import patch
from autokeras.predefined_model import *
from tests.common import clean_dir, mock_train, TEST_TEMP_DIR, MockProcess
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_predict_save(_, _1):
    """Fit/predict/evaluate each predefined model on random image data."""
    inputs = np.random.rand(100, 25, 25, 1)
    targets = np.random.randint(0, 5, 100)
    for model_class in (PredefinedResnet, PredefinedDensenet):
        model = model_class(verbose=True)
        model.fit(inputs, targets)
        predictions = model.predict(inputs)
        # Every prediction must be one of the labels seen in training.
        assert all(prediction in targets for prediction in predictions)
        assert model.evaluate(inputs, targets) <= 1.0
    clean_dir(TEST_TEMP_DIR)
| 35.904762
| 75
| 0.725464
|
4a05a0c2708b6ec3b4fcba2ab6ac9fd553cebcfd
| 554
|
py
|
Python
|
src/USER_DATA/utils/assignment_prototype/ex_1/test_files/test.py
|
graipher/TeaRoom
|
0beb8c2c889b8685d5b7a463206de41947c2d669
|
[
"MIT"
] | null | null | null |
src/USER_DATA/utils/assignment_prototype/ex_1/test_files/test.py
|
graipher/TeaRoom
|
0beb8c2c889b8685d5b7a463206de41947c2d669
|
[
"MIT"
] | 15
|
2015-05-20T12:55:13.000Z
|
2022-03-11T23:26:40.000Z
|
src/USER_DATA/utils/assignment_prototype/ex_1/test_files/test.py
|
graipher/TeaRoom
|
0beb8c2c889b8685d5b7a463206de41947c2d669
|
[
"MIT"
] | 2
|
2016-11-17T11:07:41.000Z
|
2017-07-07T11:18:36.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: salvo
# @Date: 2015-05-18 13:44:07
# @Last Modified by: salvo
# @Last Modified time: 2015-05-24 20:23:32
# add external folder to import path
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from user_files.calculate_g import g_values
try:
assert round(g_values[0], 2) == 6.67
assert round(g_values[1], 1) == 0.2
print 'Nice Job.'
except:
print 'Wrong values, try again.'
| 27.7
| 71
| 0.67509
|
4a05a2404710a62ff7d903449f7f9aba7ca55585
| 12,709
|
py
|
Python
|
src/deeprace/models/tf_details/cifar10_main.py
|
psteinb/deeprace
|
a3e6cbc83e466f86a76d91c2077349e4d69495c2
|
[
"BSD-3-Clause"
] | 11
|
2018-03-02T15:21:26.000Z
|
2020-02-13T20:38:15.000Z
|
src/deeprace/models/tf_details/cifar10_main.py
|
psteinb/deeprace
|
a3e6cbc83e466f86a76d91c2077349e4d69495c2
|
[
"BSD-3-Clause"
] | 13
|
2018-03-23T16:45:27.000Z
|
2019-10-11T12:54:29.000Z
|
src/deeprace/models/tf_details/cifar10_main.py
|
psteinb/deeprace
|
a3e6cbc83e466f86a76d91c2077349e4d69495c2
|
[
"BSD-3-Clause"
] | 1
|
2018-11-26T23:20:10.000Z
|
2018-11-26T23:20:10.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the CIFAR-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import tensorflow as tf
from deeprace.models.tf_details import resnet_model
from deeprace.models.tf_details import resnet_run_loop
# CIFAR-10 image geometry: 32x32 RGB.
_HEIGHT = 32
_WIDTH = 32
_NUM_CHANNELS = 3
_DEFAULT_IMAGE_BYTES = _HEIGHT * _WIDTH * _NUM_CHANNELS
# The record is the image plus a one-byte label
_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1
_NUM_CLASSES = 10
# The training set ships split across this many binary batch files.
_NUM_DATA_FILES = 5
# Official CIFAR-10 split sizes.
_NUM_IMAGES = {
    'train': 50000,
    'validation': 10000,
}
###############################################################################
# Data processing
###############################################################################
def get_filenames(is_training, data_dir):
    """Return the CIFAR-10 binary batch file paths under *data_dir*.

    Training uses the five data_batch files; evaluation uses test_batch.
    """
    data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    assert os.path.exists(data_dir), (
        'Run cifar10_download_and_extract.py first to download and extract the '
        'CIFAR-10 data.')
    if not is_training:
        return [os.path.join(data_dir, 'test_batch.bin')]
    return [os.path.join(data_dir, 'data_batch_%d.bin' % i)
            for i in range(1, _NUM_DATA_FILES + 1)]
def preprocess_image(image, is_training):
    """Preprocess a single image of layout [height, width, depth].

    Training applies data augmentation (pad+random crop, random flip);
    both paths end with per-image standardisation.
    """
    if is_training:
        # Resize the image to add four extra pixels on each side.
        image = tf.image.resize_image_with_crop_or_pad(
            image, _HEIGHT + 8, _WIDTH + 8)
        # Randomly crop a [_HEIGHT, _WIDTH] section of the image.
        image = tf.random_crop(image, [_HEIGHT, _WIDTH, _NUM_CHANNELS])
        # Randomly flip the image horizontally.
        image = tf.image.random_flip_left_right(image)
    # Subtract off the mean and divide by the variance of the pixels.
    image = tf.image.per_image_standardization(image)
    return image
def parse_record(raw_record, is_training):
    """Parse CIFAR-10 image and label from a raw record.

    Returns a dict with keys 'img' (preprocessed float32 HWC image) and
    'lbl' (one-hot label) — these keys are relied upon by the model_fn.
    """
    # Convert bytes to a vector of uint8 that is record_bytes long.
    record_vector = tf.decode_raw(raw_record, tf.uint8)
    # The first byte represents the label, which we convert from uint8 to int32
    # and then to one-hot.
    label = tf.cast(record_vector[0], tf.int32)
    label = tf.one_hot(label, _NUM_CLASSES)
    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
                             [_NUM_CHANNELS, _HEIGHT, _WIDTH])
    # Convert from [depth, height, width] to [height, width, depth], and cast as
    # float32.
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    image = preprocess_image(image, is_training)
    return {'img': image, 'lbl': label}
################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,
                           parse_record_fn, num_epochs=1, num_parallel_calls=1,
                           examples_per_epoch=0, multi_gpu=False):
    """Given a Dataset with raw records, parse each record into images and labels,
    and return an iterator over the records.
    Args:
    dataset: A Dataset representing raw records
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
    value results in better randomness, but smaller values reduce startup
    time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
    corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    num_parallel_calls: The number of records that are processed in parallel.
    This can be optimized per data set but for generally homogeneous data
    sets, should be approximately the number of available CPU cores.
    examples_per_epoch: The number of examples in the current set that
    are processed each epoch. Note that this is only used for multi-GPU mode,
    and only to handle what will eventually be handled inside of Estimator.
    multi_gpu: Whether this is run multi-GPU. Note that this is only required
    currently to handle the batch leftovers (see below), and can be removed
    when that is handled directly by Estimator.
    Returns:
    Dataset of (image, label) pairs ready for iteration.
    """
    # NOTE: the order of the pipeline stages below (prefetch -> shuffle ->
    # repeat -> take -> map -> batch -> prefetch) is deliberate; do not
    # reorder without understanding the epoch-boundary implications.
    # We prefetch a batch at a time, This can help smooth out the time taken to
    # load input files as we go through shuffling and processing.
    dataset = dataset.prefetch(buffer_size=batch_size)
    if is_training:
        # Shuffle the records. Note that we shuffle before repeating to ensure
        # that the shuffling respects epoch boundaries.
        dataset = dataset.shuffle(buffer_size=shuffle_buffer)
    # If we are training over multiple epochs before evaluating, repeat the
    # dataset for the appropriate number of epochs.
    dataset = dataset.repeat(num_epochs)
    # Currently, if we are using multiple GPUs, we can't pass in uneven batches.
    # (For example, if we have 4 GPUs, the number of examples in each batch
    # must be divisible by 4.) We already ensured this for the batch_size, but
    # we have to additionally ensure that any "leftover" examples-- the remainder
    # examples (total examples % batch_size) that get called a batch for the very
    # last batch of an epoch-- do not raise an error when we try to split them
    # over the GPUs. This will likely be handled by Estimator during replication
    # in the future, but for now, we just drop the leftovers here.
    if multi_gpu:
        total_examples = num_epochs * examples_per_epoch
        dataset = dataset.take(batch_size * (total_examples // batch_size))
    # Parse the raw records into images and labels
    dataset = dataset.map(lambda value: parse_record_fn(value, is_training),
                          num_parallel_calls=num_parallel_calls)
    dataset = dataset.batch(batch_size)
    # Operations between the final prefetch and the get_next call to the iterator
    # will happen synchronously during run time. We prefetch here again to
    # background all of the above processing work and keep it out of the
    # critical training path.
    dataset = dataset.prefetch(1)
    return dataset
def input_fn(is_training, data_dir, batch_size, num_epochs=1,
             num_parallel_calls=1, multi_gpu=False):
    """Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
    Args:
    is_training: A boolean denoting whether the input is for training.
    data_dir: The directory containing the input data.
    batch_size: The number of samples per batch.
    num_epochs: The number of epochs to repeat the dataset.
    num_parallel_calls: The number of records that are processed in parallel.
    This can be optimized per data set but for generally homogeneous data
    sets, should be approximately the number of available CPU cores.
    multi_gpu: Whether this is run multi-GPU. Note that this is only required
    currently to handle the batch leftovers, and can be removed
    when that is handled directly by Estimator.
    Returns:
    A dataset that can be used for iteration.
    """
    logging.info('searching %s for content', data_dir)
    filenames = get_filenames(is_training, data_dir)
    dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
    # FIX: replaced the fragile ``cond and a or b`` idiom with a proper
    # conditional expression (the old form silently breaks if the first
    # operand is ever falsy).
    num_images = (_NUM_IMAGES['train'] if is_training
                  else _NUM_IMAGES['validation'])
    # shuffle_buffer is sized for the training set; shuffling is skipped
    # entirely for evaluation inside process_record_dataset.
    ds = process_record_dataset(dataset, is_training, batch_size,
                                _NUM_IMAGES['train'],
                                parse_record, num_epochs, num_parallel_calls,
                                examples_per_epoch=num_images, multi_gpu=multi_gpu)
    logging.info("DATASET obtained")
    for k, v in ds.output_shapes.items():
        logging.info(">> {0} {1} {2}".format(k, ds.output_types[k], v))
    return ds
###############################################################################
# Running the model
###############################################################################
class Cifar10Model(resnet_model.Model):
    """ResNet variant configured for 32x32 CIFAR-10 images."""

    def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
                 version=resnet_model.DEFAULT_VERSION):
        """These are the parameters that work for CIFAR-10 data.
        Args:
        resnet_size: The number of convolutional layers needed in the model.
        data_format: Either 'channels_first' or 'channels_last', specifying which
        data format to use when setting up the model.
        num_classes: The number of output classes needed from the model. This
        enables users to extend the same model to their own datasets.
        version: Integer representing which version of the ResNet network to use.
        See README for details. Valid values: [1, 2]

        Raises:
        ValueError: if resnet_size is not of the form 6n + 2.
        """
        # CIFAR ResNets have 3 stages of n residual blocks each, plus the
        # initial conv and final dense layer, hence size must be 6n + 2.
        if resnet_size % 6 != 2:
            raise ValueError('resnet_size must be 6n + 2:', resnet_size)
        num_blocks = (resnet_size - 2) // 6
        super(Cifar10Model, self).__init__(
            resnet_size=resnet_size,
            bottleneck=False,
            num_classes=num_classes,
            num_filters=16,
            kernel_size=3,
            conv_stride=1,
            # No initial pooling for the small 32x32 inputs.
            first_pool_size=None,
            first_pool_stride=None,
            second_pool_size=8,
            second_pool_stride=1,
            block_sizes=[num_blocks] * 3,
            block_strides=[1, 2, 2],
            final_size=64,
            version=version,
            data_format=data_format)
def cifar10_model_fn(features, labels, mode, params):
    """Model function for CIFAR-10.

    Accepts features either as a raw image tensor or as the {'img', 'lbl'}
    dict produced by parse_record; in the dict case the labels are
    recovered from the features themselves.
    """
    if not labels and isinstance(features, dict):
        labels = features['lbl']
    features = tf.reshape(features['img'] if isinstance(features, dict) else features,
                          [-1, _HEIGHT, _WIDTH, _NUM_CHANNELS])
    learning_rate_fn = resnet_run_loop.learning_rate_with_decay(
        batch_size=params['batch_size'], batch_denom=128,
        num_images=_NUM_IMAGES['train'], boundary_epochs=[100, 150, 200],
        decay_rates=[1, 0.1, 0.01, 0.001])
    # We use a weight decay of 0.0002, which performs better
    # than the 0.0001 that was originally suggested.
    weight_decay = 2e-4
    # Empirical testing showed that including batch_normalization variables
    # in the calculation of regularized loss helped validation accuracy
    # for the CIFAR-10 dataset, perhaps because the regularization prevents
    # overfitting on the small data set. We therefore include all vars when
    # regularizing and computing loss during training.
    def loss_filter_fn(name):
        # Regularise every variable (see note above).
        return True
    return resnet_run_loop.resnet_model_fn(features,
                                           labels,
                                           mode, Cifar10Model,
                                           resnet_size=params['resnet_size'],
                                           weight_decay=weight_decay,
                                           learning_rate_fn=learning_rate_fn,
                                           momentum=0.9,
                                           data_format=params['data_format'],
                                           version=params['version'],
                                           loss_filter_fn=loss_filter_fn,
                                           multi_gpu=params['multi_gpu'])
if __name__ == '__main__':
    # Enable INFO-level logging and let tf.app.run handle flag parsing.
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(argv=sys.argv)
| 43.081356
| 86
| 0.641199
|
4a05a2823a9f05cd2b34ff17c5c4fdcae2d979a0
| 326
|
py
|
Python
|
ex44c.py
|
Zinmarlwin711/python-exercises
|
361cb426a8bc03760906e25b6cb6a4a458260bfc
|
[
"MIT"
] | null | null | null |
ex44c.py
|
Zinmarlwin711/python-exercises
|
361cb426a8bc03760906e25b6cb6a4a458260bfc
|
[
"MIT"
] | null | null | null |
ex44c.py
|
Zinmarlwin711/python-exercises
|
361cb426a8bc03760906e25b6cb6a4a458260bfc
|
[
"MIT"
] | null | null | null |
class Parent(object):
    """Base class; altered() simply announces the parent implementation."""

    def altered(self):
        # Marker line used to show which implementation ran.
        print("PARENT altered()")
class Child(Parent):
    """Overrides altered() and brackets the parent call with its own output."""

    def altered(self):
        # FIX: corrected typo in the message string ("altered9)" -> "altered()")
        # so the before/after lines match.
        print("CHILD, BEFORE PARENT altered()")
        # Explicit two-argument super() keeps Python 2 compatibility.
        super(Child, self).altered()
        print("CHILD, AFTER PARENT altered()")
# Demonstrate the override: the parent prints a single line, while the
# child wraps the parent's output with before/after messages.
dad = Parent()
son = Child()
dad.altered()
son.altered()
| 17.157895
| 47
| 0.59816
|
4a05a2f672dfbf76a18ca707d19894f39aac1387
| 14,156
|
py
|
Python
|
baselines/common/models.py
|
thangduong/baselines
|
fbab8aedadbecea59d6c33ba4f2a0955c782780c
|
[
"MIT"
] | null | null | null |
baselines/common/models.py
|
thangduong/baselines
|
fbab8aedadbecea59d6c33ba4f2a0955c782780c
|
[
"MIT"
] | null | null | null |
baselines/common/models.py
|
thangduong/baselines
|
fbab8aedadbecea59d6c33ba4f2a0955c782780c
|
[
"MIT"
] | 1
|
2019-02-22T09:24:15.000Z
|
2019-02-22T09:24:15.000Z
|
import numpy as np
import tensorflow as tf
from baselines.a2c import utils
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch
from baselines.common.mpi_running_mean_std import RunningMeanStd
import tensorflow.contrib.layers as layers
# Registry of network-builder factories, keyed by the name passed to
# @register.
mapping = {}


def register(name):
    """Decorator factory: store the decorated function in ``mapping``
    under *name* and return it unchanged."""
    def _thunk(func):
        mapping[name] = func
        return func
    return _thunk
def nature_cnn(unscaled_images, **conv_kwargs):
    """
    CNN from Nature paper.

    Takes a uint8 image batch, scales it to [0, 1] and applies three conv
    layers followed by a 512-unit fully-connected ReLU layer.
    """
    # Scale raw pixel values from [0, 255] down to [0, 1].
    scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
    activ = tf.nn.relu
    h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
                   **conv_kwargs))
    h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
    h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
    # Flatten the conv output before the dense feature layer.
    h3 = conv_to_fc(h3)
    return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
@register("mlp")
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
"""
Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder
"""
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
h = activation(h)
return h
return network_fn
@register("simple_rms")
def simple_rms(**simple_rms_kwargs):
def network_fn(X):
# receive_rate * .9 - send_rate
# change = X[-1][3] - X[-1][5]
received_rate = X[:,-1,3]
send_rate = X[:,-1,5]
delta = received_rate * tf.Variable([1.0]) - send_rate
return delta
return network_fn
@register("cnn")
def cnn(**conv_kwargs):
def network_fn(X):
return nature_cnn(X, **conv_kwargs)
return network_fn
@register("cnn_1d")
def cnn_qd(**conv_kwargs):
def network_fn(X):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 5, 3, scope='cnn1d_c1')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 1, 3, scope='cnn1d_c3')
net = tf.reshape(net, [-1,buffer_size])
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_small_ac_hybrid.actor")
def cnn_qd1(**conv_kwargs):
def network_fn(X):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 15, 3, scope='cnn1d_c1')
net = layers.conv1d(net, 10, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c3')
net = layers.conv1d(net, 1, 3, scope='cnn1d_cf')
net = tf.reshape(net, [-1,buffer_size])
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = fc(net, 'cnn1d_fc2', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_small_ac_hybrid.critic")
def cnn_qd1(**conv_kwargs):
def network_fn(X, action):
num_layers = 2
activation = tf.tanh
num_hidden = 64
layer_norm = False
net = tf.layers.flatten(X)
for i in range(num_layers):
net = fc(net, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
net = tf.contrib.layers.layer_norm(net, center=True, scale=True)
net = activation(net)
net = tf.concat([net,action], 1)
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_large_ac_hybrid.actor")
def cnn_qd1(**conv_kwargs):
def network_fn(X):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 100, 3, scope='cnn1d_c1')
net = layers.conv1d(net, 50, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 25, 3, scope='cnn1d_c3')
net = layers.conv1d(net, 1, 3, scope='cnn1d_cf')
net = tf.reshape(net, [-1,buffer_size])
net = fc(net, 'cnn1d_fc1', nh=32, init_scale=np.sqrt(2))
net = fc(net, 'cnn1d_fc2', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_large_ac_hybrid.critic")
def cnn_qd1(**conv_kwargs):
def network_fn(X, action):
num_layers = 2
activation = tf.tanh
num_hidden = 64
layer_norm = False
net = tf.layers.flatten(X)
for i in range(num_layers):
net = fc(net, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
net = tf.contrib.layers.layer_norm(net, center=True, scale=True)
net = activation(net)
net = tf.concat([net,action], 1)
net = fc(net, 'cnn1d_fc1', nh=32, init_scale=np.sqrt(2))
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_small_ac.actor")
def cnn_qd1(**conv_kwargs):
def network_fn(X):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 5, 3, scope='cnn1d_c1')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 1, 3, scope='cnn1d_c3')
net = tf.reshape(net, [-1,buffer_size])
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_small_ac.critic")
def cnn_qd1(**conv_kwargs):
def network_fn(X, action):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 5, 3, scope='cnn1d_c1')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 1, 3, scope='cnn1d_c3')
net = tf.reshape(net, [-1,buffer_size])
net = tf.concat([net,action], 1)
net = fc(net, 'cnn1d_fc1', nh=32, init_scale=np.sqrt(2))
net = fc(net, 'cnn1d_fc3', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_ac.actor")
def cnn_qd1(**conv_kwargs):
def network_fn(X):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 20, 5, scope='cnn1d_c1')
net = layers.conv1d(net, 15, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 10, 3, scope='cnn1d_c3')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c4')
net = layers.conv1d(net, 1, 3, scope='cnn1d_c5')
net = tf.reshape(net, [-1,buffer_size])
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_ac.critic")
def cnn_qd1(**conv_kwargs):
def network_fn(X, action):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 20, 5, scope='cnn1d_c1')
net = layers.conv1d(net, 15, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 10, 3, scope='cnn1d_c3')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c4')
net = layers.conv1d(net, 1, 3, scope='cnn1d_c5')
net = tf.reshape(net, [-1,buffer_size])
net = tf.concat([net,action], 1)
net = fc(net, 'cnn1d_fc1', nh=32, init_scale=np.sqrt(2))
net = fc(net, 'cnn1d_fc2', nh=24, init_scale=np.sqrt(2))
net = fc(net, 'cnn1d_fc3', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_1d_v1")
def cnn_qd1(**conv_kwargs):
def network_fn(X):
buffer_size = X.shape[1]
net = X
net = layers.conv1d(net, 20, 5, scope='cnn1d_c1')
net = layers.conv1d(net, 15, 3, scope='cnn1d_c2')
net = layers.conv1d(net, 10, 3, scope='cnn1d_c3')
net = layers.conv1d(net, 5, 3, scope='cnn1d_c4')
net = layers.conv1d(net, 1, 3, scope='cnn1d_c5')
net = tf.reshape(net, [-1,buffer_size])
net = fc(net, 'cnn1d_fc1', nh=16, init_scale=np.sqrt(2))
net = tf.tanh(net)
# tf.nn.conv1d(X, w, stride, 'SAME')
# print(X)
return net
return network_fn
@register("cnn_small")
def cnn_small(**conv_kwargs):
def network_fn(X):
h = tf.cast(X, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(h, 'c1', nf=8, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
h = activ(conv(h, 'c2', nf=16, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h = conv_to_fc(h)
h = activ(fc(h, 'fc1', nh=128, init_scale=np.sqrt(2)))
return h
return network_fn
@register("lstm")
def lstm(nlstm=128, layer_norm=False):
"""
Builds LSTM (Long-Short Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder
"""
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = tf.layers.flatten(X)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn
@register("cnn_lstm")
def cnn_lstm(nlstm=128, layer_norm=False, **conv_kwargs):
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = nature_cnn(X, **conv_kwargs)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn
@register("cnn_lnlstm")
def cnn_lnlstm(nlstm=128, **conv_kwargs):
return cnn_lstm(nlstm, layer_norm=True, **conv_kwargs)
@register("conv_only")
def conv_only(convs=None, **conv_kwargs):
    '''
    convolutions-only net

    Parameters:
    ----------

    convs: list of triples (filter_number, filter_size, stride) specifying
        parameters for each layer; defaults to
        [(32, 8, 4), (64, 4, 2), (64, 3, 1)] (the Nature-CNN layout).

    Returns:

    function that takes tensorflow tensor as input and returns the output
    of the last convolutional layer
    '''
    # Avoid a mutable default argument (shared across calls); substituting
    # the canonical layout here is behaviorally identical for callers.
    if convs is None:
        convs = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]

    def network_fn(X):
        # Inputs are assumed to be uint8 images; scale to [0, 1].
        out = tf.cast(X, tf.float32) / 255.
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu,
                                           **conv_kwargs)
        return out
    return network_fn
def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]):
    """Standardize x with a running mean/std and clip to clip_range.

    Returns the clipped, normalized tensor together with the
    RunningMeanStd instance so the caller can keep updating statistics.
    """
    rms = RunningMeanStd(shape=x.shape[1:])
    lower, upper = min(clip_range), max(clip_range)
    standardized = (x - rms.mean) / rms.std
    return tf.clip_by_value(standardized, lower, upper), rms
def get_network_builder(name):
    """
    If you want to register your own network outside models.py, you just need:

    Usage Example:
    -------------
    from baselines.common.models import register
    @register("your_network_name")
    def your_network_define(**net_kwargs):
        ...
        return network_fn

    """
    # A callable is accepted verbatim; otherwise look the name up in the
    # registry populated by @register.
    if callable(name):
        return name
    try:
        return mapping[name]
    except KeyError:
        raise ValueError('Unknown network type: {}'.format(name)) from None
| 33.704762
| 140
| 0.591693
|
4a05a32288ff9d1dbfaea803a15ba3c73328b9a9
| 1,295
|
py
|
Python
|
mat/mat/modules/android/static/unencrypted_communications.py
|
minkione/mat
|
8aaec2c9abbc1b837749e5fb084df4665e7ec35c
|
[
"BSD-3-Clause"
] | null | null | null |
mat/mat/modules/android/static/unencrypted_communications.py
|
minkione/mat
|
8aaec2c9abbc1b837749e5fb084df4665e7ec35c
|
[
"BSD-3-Clause"
] | null | null | null |
mat/mat/modules/android/static/unencrypted_communications.py
|
minkione/mat
|
8aaec2c9abbc1b837749e5fb084df4665e7ec35c
|
[
"BSD-3-Clause"
] | 1
|
2019-09-21T16:12:04.000Z
|
2019-09-21T16:12:04.000Z
|
from mat.utils.utils import Utils, Issue
# Substrings of URLs that are benign schema/namespace references (XML
# namespaces, the SLF4J docs site) and should not be reported as findings.
IGNORE = ['www.w3', 'xmlpull.org', 'www.slf4j']
class Issue(Issue):
    """Static check that reports app content fetched over plain http://."""

    TITLE = 'Unencrypted Communications Check'
    DESCRIPTION = 'Checks if the application accesses content view unencrypted communications'
    ID = 'unencrypted-download'
    ISSUE_TITLE = 'Application Accesses Content Via Unencrypted Channel'
    # NOTE(review): 'unenecrypted' typo left as-is — this is a runtime report
    # string, and changing it would alter generated reports.
    FINDINGS = 'The Team found the application accessed the following content over unenecrypted communications:\n'
    REGEX = r'http://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?'

    def dependencies(self):
        # Requires the static-analysis stage (decompiled smali) to have run.
        return self.ANALYSIS.UTILS.check_dependencies(['static'])

    def run(self):
        # Grep the decompiled smali tree for http:// URLs.
        urls = Utils.grep(self.REGEX, self.ANALYSIS.LOCAL_SMALI + "*")
        if urls:
            # BUG FIX: the original called urls[f].remove(finding) while
            # iterating urls[f], which skips the element following every
            # removal, so some ignorable findings survived filtering.
            # Rebuild the filtered list instead, and drop files left empty.
            for f in list(urls):
                urls[f] = [
                    finding for finding in urls[f]
                    if not any(ignore in finding['code'] for ignore in IGNORE)
                    and finding['code'] not in ('http://', 'https://')
                ]
                if not urls[f]:
                    urls.pop(f)
        if urls:
            self.REPORT = True
            self.DETAILS = Utils.grep_details(urls, self.ANALYSIS.LOCAL_SMALI)
| 34.078947
| 137
| 0.579151
|
4a05a34f8b20db71ba9640a2f3717f8cd7f187d5
| 520
|
py
|
Python
|
themata/fandango/__init__.py
|
Thecarisma/thememania
|
12e5b62506418b0a5a1b32a189796fd0ca031584
|
[
"CC0-1.0"
] | 1
|
2020-03-01T06:29:56.000Z
|
2020-03-01T06:29:56.000Z
|
themata/fandango/__init__.py
|
Thecarisma/milkish
|
12e5b62506418b0a5a1b32a189796fd0ca031584
|
[
"CC0-1.0"
] | null | null | null |
themata/fandango/__init__.py
|
Thecarisma/milkish
|
12e5b62506418b0a5a1b32a189796fd0ca031584
|
[
"CC0-1.0"
] | null | null | null |
import os
import themata
def get_path():
    """
    Return the path to the fandango theme
    """
    # Parent of this package's directory, as an absolute path.
    package_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.dirname(package_dir))
def setup(app):
    """Sphinx extension entry point: register the fandango HTML theme.

    Called by Sphinx with the application object when this package is
    listed as an extension; registers the theme directory and hooks the
    shared themata context/file-copy callbacks.
    """
    if hasattr(app, "add_html_theme"):  # add_html_theme exists on Sphinx >= 1.6
        theme_path = os.path.abspath(os.path.dirname(__file__))
        app.add_html_theme("fandango", theme_path)
    app.connect("html-page-context", themata.update_context)
    app.connect('build-finished', themata.copy_custom_files)
    # NOTE(review): this reads themata.__version (single underscores around
    # "version" missing); the conventional attribute is __version__ — confirm
    # the themata package really defines __version, else this raises
    # AttributeError at load time.
    return {"version": themata.__version}
| 32.5
| 71
| 0.680769
|
4a05a5f2badb8f683035a091a42770d5372382d6
| 7,783
|
py
|
Python
|
docs/conf.py
|
sunnnymskang/ChillPillV1
|
8c1f129141719f755290df7aa3413b6be86ea887
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
sunnnymskang/ChillPillV1
|
8c1f129141719f755290df7aa3413b6be86ea887
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
sunnnymskang/ChillPillV1
|
8c1f129141719f755290df7aa3413b6be86ea887
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# chillpillv1 documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE: os/sys are imported for the (commented-out) sys.path snippet below;
# keep them even though they are currently unused.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'chillpillv1'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'chillpillv1doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index',
     'chillpillv1.tex',
     u'chillpillv1 Documentation',
     u"Sunny ms Kang", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'chillpillv1', u'chillpillv1 Documentation',
     [u"Sunny ms Kang"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'chillpillv1', u'chillpillv1 Documentation',
     u"Sunny ms Kang", 'chillpillv1',
     'chill pill version 1', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.767347
| 80
| 0.707054
|
4a05a62d63c23143818cd4b8fe7924bcdedeb863
| 22,913
|
py
|
Python
|
perses/analysis/analysis.py
|
schallerdavid/perses
|
58bd6e626e027879e136f56e175683893e016f8c
|
[
"MIT"
] | 99
|
2016-01-19T18:10:37.000Z
|
2022-03-26T02:43:08.000Z
|
perses/analysis/analysis.py
|
schallerdavid/perses
|
58bd6e626e027879e136f56e175683893e016f8c
|
[
"MIT"
] | 878
|
2015-09-18T19:25:30.000Z
|
2022-03-31T02:33:04.000Z
|
perses/analysis/analysis.py
|
schallerdavid/perses
|
58bd6e626e027879e136f56e175683893e016f8c
|
[
"MIT"
] | 30
|
2015-09-21T15:26:35.000Z
|
2022-01-10T20:07:24.000Z
|
"""
Analysis tools for perses automated molecular design.
TODO
----
* Analyze all but last iteration to ensure we can analyze a running simulation?
"""
__author__ = 'John D. Chodera'
################################################################################
# IMPORTS
################################################################################
import numpy as np
import itertools
import pymbar
from perses import storage
import seaborn as sns
import matplotlib as mpl
# Select the non-interactive Agg backend *before* pyplot is imported so the
# module works in headless environments; the import order here matters.
mpl.use('Agg')
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
################################################################################
# LOGGER
################################################################################
import logging
logger = logging.getLogger(__name__)  # module-level logger named after the module
################################################################################
# ANALYSIS
################################################################################
class Analysis(object):
    """Analysis tools for perses automated design.
    """
    def __init__(self, storage_filename):
        """Open a storage file for analysis.

        Parameters
        ----------
        storage_filename : str
            Path to the NetCDF storage file written during simulation.
        """
        # TODO: Replace this with calls to storage API
        self._storage = storage.NetCDFStorage(storage_filename, mode='r')
        self._ncfile = self._storage._ncfile
        self.storage_filename = storage_filename
        self._environments = self.get_environments()
        # Number of expanded-ensemble iterations recorded per environment.
        self._n_exen_iterations = {}
        for environment in self._environments:
            self._n_exen_iterations[environment] = len(self._ncfile.groups[environment]['ExpandedEnsembleSampler']['logP_accept'])
        self._state_transitions, self._visited_states = self._get_state_transitions()
        self._logP_accepts = {}

    def get_environments(self):
        """Return a list of environments in storage file.

        Returns
        -------
        environments : list of str
            List of environment names in storage (e.g. ['explicit-complex', 'explicit-ligand'])
        """
        environments = list()
        for group in self._ncfile.groups:
            environments.append(str(group))
        return environments

    def _state_transition_to_iupac(self, state_transition):
        """
        Convenience function to convert SMILES to IUPAC names

        Parameters
        ----------
        state_transition : (str, str)
            Pair of smiles strings for the state transition

        Returns
        -------
        state_transition_iupac : [str, str]
            The pair of molecules in IUPAC names
        """
        from openeye import oeiupac, oechem
        state_transition_iupac = []
        for state in state_transition:
            mol = oechem.OEMol()
            oechem.OESmilesToMol(mol, state)
            iupac = oeiupac.OECreateIUPACName(mol)
            state_transition_iupac.append(iupac)
        return state_transition_iupac

    def plot_work_trajectories(self, environment, filename):
        """
        Plot the NCMC work trajectories for the given environment and each attempted transition

        Parameters
        ----------
        environment : str
            Name of environment
        filename : str
            Name of output PDF file (one page per attempted transition)
        """
        w_t = {state_transition : [] for state_transition in self._state_transitions[environment]}
        for iteration in range(self._n_exen_iterations[environment]):
            logP_ncmc_trajectory = self._ncfile.groups[environment]['NCMCEngine']['protocolwork'][iteration, :]
            state_key = self._storage.get_object(environment, "ExpandedEnsembleSampler", "state_key", iteration)
            proposed_state_key = self._storage.get_object(environment, "ExpandedEnsembleSampler", "proposed_state_key", iteration)
            # Self-proposals involve no NCMC switching; skip them.
            if state_key == proposed_state_key:
                continue
            # Work is the negative of the protocol log-probability.
            w_t[(state_key, proposed_state_key)].append(-logP_ncmc_trajectory)
        w_t_stacked = {state_transition: np.stack(work_trajectories) for state_transition, work_trajectories in w_t.items()}
        with PdfPages(filename) as pdf:
            sns.set(font_scale=2)
            for state_transition, work_array in w_t_stacked.items():
                fig = plt.figure(figsize=(28, 12))
                ax1 = sns.tsplot(work_array, color="Blue")
                iupac_transition = self._state_transition_to_iupac(state_transition)
                plt.title("{} => {} transition {} work trajectory".format(iupac_transition[0], iupac_transition[1], "NCMC"))
                plt.xlabel("step (1fs)")
                plt.ylabel("Work / kT")
                plt.tight_layout()
                pdf.savefig(fig)
                plt.close()

    def plot_sams_weights(self, environment):
        """
        Plot the trajectory of SAMS weights

        NOTE(review): not yet implemented — the body is empty.

        :param environment:
        :return:
        """

    def plot_chemical_trajectory(self, environment, filename):
        """
        Plot the trajectory through chemical space.

        Parameters
        ----------
        environment : str
            the name of the environment for which the chemical space trajectory is desired
        filename : str
            Name of output PDF file
        """
        chemical_state_trajectory = self.extract_state_trajectory(environment)
        # Map each chemical state to an integer index for plotting.
        visited_states = list(set(chemical_state_trajectory))
        state_trajectory = np.zeros(len(chemical_state_trajectory))
        for idx, chemical_state in enumerate(chemical_state_trajectory):
            state_trajectory[idx] = visited_states.index(chemical_state)
        with PdfPages(filename) as pdf:
            sns.set(font_scale=2)
            fig = plt.figure(figsize=(28, 12))
            plt.subplot2grid((1,2), (0,0))
            ax = sns.scatterplot(np.arange(len(state_trajectory)), state_trajectory)
            plt.yticks(np.arange(len(visited_states)), visited_states)
            plt.title("Trajectory through chemical space in {}".format(environment))
            plt.xlabel("iteration")
            plt.ylabel("chemical state")
            plt.tight_layout()
            plt.subplot2grid((1,2), (0,1))
            ax = sns.countplot(y=state_trajectory)
            pdf.savefig(fig)
            plt.close()

    def get_free_energies(self, environment):
        """
        Estimate the free energies between all pairs with bidirectional transitions of chemical states in the
        given environment

        Parameters
        ----------
        environment : str
            The name of the environment for which free energies are desired

        Returns
        -------
        free_energies : dict of (str, str): [float, float]
            Dictionary of pairwise free energies and their uncertainty, computed with bootstrapping
        """
        logP_without_sams = self.extract_logP_values(environment, "logP_accept", subtract_sams=True)
        free_energies = {}
        # NOTE(review): 10,000,000 bootstrap resamples, each invoking
        # pymbar.BAR, is extraordinarily expensive; this looks like it may
        # have been intended to be a much smaller number (e.g. 1000) —
        # confirm before relying on this routine.
        n_bootstrap_iterations = 10000000
        for state_pair, logP_accepts in logP_without_sams.items():
            # Forward work is -logP of the forward move; reverse likewise.
            w_F = logP_accepts[0]
            w_R = -logP_accepts[1]
            bootstrapped_bar = np.zeros(n_bootstrap_iterations)
            for i in range(n_bootstrap_iterations):
                resampled_w_F = np.random.choice(w_F, len(w_F), replace=True)
                resampled_w_R = np.random.choice(w_R, len(w_R), replace=True)
                [df, ddf] = pymbar.BAR(resampled_w_F, resampled_w_R)
                bootstrapped_bar[i] = df
            free_energies[state_pair] = [np.mean(bootstrapped_bar), np.std(bootstrapped_bar)]
        return free_energies

    def _get_state_transitions(self):
        """
        Find the set of unique state transitions in each environment. This will be useful to retrieve various
        logP quantities.

        Returns
        -------
        state_transitions_dict : dict of str: set of (str, str) tuple
            The set of state transitions that were attempted in each environment. This counts (s1, s2) and (s2, s1) as separate.
        visited_states_dict : dict of str: set of str
            The set of states that were actually visited in each environment.
        """
        state_transitions_dict = {}
        visited_states_dict = {}
        for environment in self._environments:
            # first, find the set of unique state transitions:
            state_transition_list = []
            visited_states = []
            n_iterations = self._n_exen_iterations[environment]
            for iteration in range(n_iterations):
                state_key = self._storage.get_object(environment, "ExpandedEnsembleSampler", "state_key", iteration)
                proposed_state_key = self._storage.get_object(environment, "ExpandedEnsembleSampler",
                                                              "proposed_state_key", iteration)
                visited_states.append(state_key)
                # if they are the same (a self-proposal) just continue
                if state_key == proposed_state_key:
                    continue
                state_transition_list.append((state_key, proposed_state_key))
            # get the unique transitions:
            state_transition_set = set(state_transition_list)
            state_transitions_dict[environment] = state_transition_set
            visited_states_dict[environment] = set(visited_states)
        return state_transitions_dict, visited_states_dict

    def write_trajectory(self, environmnent, pdb_filename):
        """Write the trajectory of sampled configurations and chemical states.

        NOTE(review): not yet implemented; parameter name 'environmnent' is a
        typo but is kept to avoid breaking keyword callers.

        Parameters
        ----------
        environmnent : str
            Environment name to write trajectory for
        pdb_filename : str
            Name of PDB file to generate.
        """
        # TODO

    def extract_logP_values(self, environment, logP_accept_component, subtract_sams=False):
        """
        Extract the requested logP_accept component from the ExpandedEnsembleSampler
        in the requested environment

        Parameters
        ----------
        environment : str
            The name of the environment
        logP_accept_component : str
            The name of the component of the acceptance probability that we want
        subtract_sams : bool, optional, default False
            Whether to subtract the SAMS weights corresponding to the same iteration. Useful for logP_accept.

        Returns
        -------
        logP_values : dict of (str, str) : list of float
            A dictionary for each state transition, with a list of the requested logP_accept component
        """
        n_iterations = self._n_exen_iterations[environment]
        logP_values = {state_transition: [] for state_transition in self._state_transitions[environment]}
        # loop through the iterations and collect the requested component
        for iteration in range(n_iterations):
            state_key = self._storage.get_object(environment, "ExpandedEnsembleSampler", "state_key", iteration)
            proposed_state_key = self._storage.get_object(environment, "ExpandedEnsembleSampler", "proposed_state_key", iteration)
            # if they are the same (a self-proposal) just continue
            if state_key == proposed_state_key:
                continue
            # retrieve the value and add it to the list for that transition
            logP = self._ncfile.groups[environment]['ExpandedEnsembleSampler'][logP_accept_component][iteration]
            if subtract_sams:
                sams_weight = self._ncfile.groups[environment]['ExpandedEnsembleSampler']['logP_sams_weight'][iteration]
                logP = logP - sams_weight
            logP_values[(state_key, proposed_state_key)].append(logP)
        return logP_values

    def _prepare_logP_accept(self, environment):
        """
        Organize and retrieve the log acceptance probabilities for each of the transitions in the environment.

        Parameters
        ----------
        environment : str
            The name of the environment

        Returns
        -------
        logP_accept_dict : dict of (str, str) : list of 2 np.array
            A dictionary with a list of 2 np.arrays, one for s1->s2 logP_accept, another for s2->s1
            logP_accepts have had their SAMS weights subtracted if relevant
        """
        logP_accept_values = self.extract_logP_values(environment, "logP_accept", subtract_sams=True)
        logP_accept_dict = {}
        # BUG FIX: the original iterated combinations of self._visited_states,
        # which is a dict keyed by environment name — the pairs were pairs of
        # environment names, never present in logP_accept_values, so the
        # result was always empty. Use the states visited in *this* environment
        # (matching plot_ncmc_work_distributions below).
        for state_pair in itertools.combinations(self._visited_states[environment], 2):
            try:
                forward_logP = np.array(logP_accept_values[(state_pair[0], state_pair[1])])
                reverse_logP = np.array(logP_accept_values[(state_pair[1], state_pair[0])])
            except KeyError:
                # Only keep pairs attempted in both directions.
                continue
            logP_accept_dict[state_pair] = [forward_logP, reverse_logP]
        return logP_accept_dict

    def extract_state_trajectory(self, environment):
        """
        Extract the trajectory in chemical state space

        Parameters
        ----------
        environment : str
            The environment for which the chemical state is desired

        Returns
        -------
        chemical_state_traj : list of str
            List of chemical states that were visited, one per iteration
        """
        n_iterations = self._n_exen_iterations[environment]
        chemical_state_traj = []
        for iteration in range(n_iterations):
            chemical_state = self._storage.get_object(environment, "ExpandedEnsembleSampler", "state_key", iteration)
            chemical_state_traj.append(chemical_state)
        return chemical_state_traj

    def plot_ncmc_work_distributions(self, environment, output_filename):
        """
        Plot the forward and reverse work distributions for NCMC switching in the given environment

        Parameters
        ----------
        environment : str
            The name of the environment for which NCMC work should be plotted
        output_filename : str
            The name of the PDF file to output
        """
        visited_states_set = self._visited_states[environment]
        logP_values = self.extract_logP_values(environment, "logP_ncmc_work")
        # now loop through all the state pairs to plot each
        with PdfPages(output_filename) as pdf:
            sns.set(font_scale=2)
            for state_pair in itertools.combinations(visited_states_set, 2):
                iupac_pair = self._state_transition_to_iupac(state_pair)
                try:
                    # use the negative for the forward work because the logP contribution of the work is -work
                    forward_work = -np.array(logP_values[(state_pair[0], state_pair[1])])
                    reverse_work = np.array(logP_values[(state_pair[1], state_pair[0])])
                except KeyError:
                    # Pair not attempted in both directions; nothing to plot.
                    continue
                fig = plt.figure(figsize=(28, 12))
                ax1 = sns.distplot(forward_work, kde=True, color="Blue")
                ax2 = sns.distplot(-reverse_work, color='Red', kde=True)
                plt.title("{} => {} transition {} work".format(iupac_pair[0], iupac_pair[1], "NCMC"))
                plt.xlabel("Work / kT")
                plt.tight_layout()
                pdf.savefig(fig)
                plt.close()

    def plot_exen_logp_components(self, environment, filename_prefix=None, logP_range=20, nbins=20):
        """
        Generate histograms of each component of Expanded Ensemble log acceptance probability

        Parameters
        ----------
        environment : str
            The environment to use
        filename_prefix : str, OPTIONAL, default = None
            if specified, the plot is saved as '{filename_prefix}-logP-components.pdf';
            otherwise the storage filename stem is used
        logP_range : float, optional, default=20
            If specified, will set logP range to [-logP_range, +logP_range]
        nbins : int, optional, default=20
            Number of bins to use for histogram.

        TODO: include input filename
        storage ncfile has different hierarchy depending on which samplers are defined;
        this probably only works without SAMS sampling (otherwise top level groups are
        environments)
        """
        ee_sam = self._ncfile.groups[environment]['ExpandedEnsembleSampler']
        # Build a list of all logP components to plot:
        components = list()
        # Always show logP_accept
        components.append('logP_accept')
        # Summarize other logP groups
        for name in ee_sam.variables.keys():
            if name.startswith('logP_groups'):
                components.append(name)
        if filename_prefix is None:
            filename_prefix = self.storage_filename.split('.')[0]
        filename = '{0}-logP-components.pdf'.format(filename_prefix)
        with PdfPages(filename) as pdf:
            logps = dict()
            for component in components:
                try:
                    niterations = ee_sam.variables[component].shape[0]
                except Exception:
                    # Component has no stored variable; skip it. (Narrowed from
                    # a bare except, which would also swallow KeyboardInterrupt.)
                    continue
                logps[component] = np.zeros(niterations, np.float64)
                for n in range(niterations):
                    logps[component][n] = ee_sam.variables[component][n]
                # Drop NaNs
                logps[component] = logps[component][~np.isnan(logps[component][:])]
            plt.figure(figsize=(8,12))
            nrows = len(logps.keys())
            ncols = 2
            for row, component in enumerate(components):
                # Full range
                try:
                    col = 0
                    plt.subplot2grid((nrows,ncols),(row,col))
                    plt.hist(logps[component], bins=nbins)
                    plt.title(component)
                except Exception as e:
                    print(e)
                # Limited range
                try:
                    col = 1
                    plt.subplot2grid((nrows,ncols),(row,col))
                    plt.hist(logps[component], range=[-logP_range, +logP_range], bins=nbins)
                    plt.title(component)
                except Exception as e:
                    print(e)
            plt.tight_layout()
            pdf.savefig()
            plt.close()

    def plot_ncmc_work_old(self, filename):
        """Generate plots of NCMC work.

        Parameters
        ----------
        filename : str
            File to write PDF of NCMC work plots to.
        """
        with PdfPages(filename) as pdf:
            for envname in ['NCMCEngine', 'NCMCHybridEngine']: #self.get_environments():
                modname = envname
                work = dict()
                for direction in ['delete', 'insert']:
                    varname = '/' + modname + '/' + 'total_work_' + direction
                    try:
                        # TODO: For now, we analyze all but the last sample, so that this can be run on active simulations.
                        # Later, we should find some way to omit the last sample only if it is nonsensical.
                        work[direction] = self._ncfile[varname][:-1,:]
                        print('Found %s' % varname)
                    except Exception as e:
                        # Variable absent for this engine/direction; best effort.
                        pass

                def plot_work_trajectories(pdf, work, title=""):
                    """Generate figures for the specified switching legs.
                    """
                    plt.figure(figsize=(12, 8))
                    nrows = len(work.keys())
                    ncols = 6
                    workcols = 2
                    for (row, direction) in enumerate(work.keys()):
                        #
                        # Plot work vs step
                        #
                        col = 0
                        plt.subplot2grid((nrows,ncols), (row, col), colspan=(ncols-workcols))
                        # Plot average work distribution in thick solid line
                        plt.plot(work[direction].mean(0), 'k-', linewidth=1.0, alpha=1.0)
                        # Plot bundle of work trajectories in transparent lines
                        plt.plot(work[direction].T, 'k-', linewidth=0.5, alpha=0.3)
                        # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)
                        workvals = np.ravel(np.abs(work[direction]))
                        worklim = np.percentile(workvals, 98)
                        nsteps = work[direction].shape[1]
                        plt.axis([0, nsteps, -worklim, +worklim])
                        # Label plot
                        if row == 1: plt.xlabel('steps')
                        plt.ylabel('work / kT')
                        plt.title("%s NCMC in environment '%s' : %s" % (title, envname, direction))
                        plt.legend(['average work', 'NCMC attempts'])
                        #
                        # Plot work histogram
                        #
                        col = ncols - workcols
                        plt.subplot2grid((nrows,ncols), (row, col), colspan=workcols)
                        # Histogram the final (total) work values
                        #nbins = 40
                        workvals = work[direction][:-1,-1]
                        #plt.hist(workvals, nbins)
                        if workvals.std() != 0.0:
                            sns.distplot(workvals, rug=True)
                        else:
                            # distplot would fail on zero-variance data
                            print('workvals has stddev of zero')
                            print(workvals)
                        # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)
                        #worklim = np.percentile(workvals, 98)
                        #oldaxis = plt.axis()
                        #plt.axis([-worklim, +worklim, 0, oldaxis[3]])
                        # Label plot
                        if row == 1: plt.xlabel('work / kT')
                        plt.title("total %s work" % direction)
                    plt.tight_layout()
                    pdf.savefig() # saves the current figure into a pdf page
                    plt.close()

                if len(work) > 0:
                    # Plot work for all chemical transformations.
                    plot_work_trajectories(pdf, work, title='(all transformations)')
                # Plot work separated out for each chemical transformation
                #[niterations, nsteps] = work.shape
                #transformations = dict()
                #for iteration in range(niterations):
                #    plot_work_trajectories(pdf, work, title='(all transformations)')
| 40.268893
| 130
| 0.572819
|
4a05a630fe1c0d276ca1a5ff24fe562fec99d39a
| 4,874
|
py
|
Python
|
pyhcup/tests/test_reading.py
|
jburke5/pyhcup
|
463bab2dcf7e191609a0430e4ea8b7b4f88806e5
|
[
"MIT"
] | 7
|
2018-01-08T18:28:29.000Z
|
2021-10-12T20:21:24.000Z
|
pyhcup/tests/test_reading.py
|
jburke5/pyhcup
|
463bab2dcf7e191609a0430e4ea8b7b4f88806e5
|
[
"MIT"
] | null | null | null |
pyhcup/tests/test_reading.py
|
jburke5/pyhcup
|
463bab2dcf7e191609a0430e4ea8b7b4f88806e5
|
[
"MIT"
] | 5
|
2018-02-05T04:26:50.000Z
|
2022-03-19T22:07:27.000Z
|
import unittest
import pandas as pd
import pyhcup
from pyhcup import hachoir, sas, tx
from pyhcup.config import DEFAULT_DATA_SOURCES, BUNDLED_SID_SAMPLES_DIR, BUNDLED_LOADFILE_DIR, KNOWN_MISSING_LOADFILES
# Module-level fixtures shared by the test cases below:
# all bundled loading programs, the subset of data sources that carry sample
# content, and the bundled SID sample files discovered from those sources.
DISCOVERED_LOADFILES = hachoir.discover(root_path=BUNDLED_LOADFILE_DIR)
CONTENT_SOURCES = [i for i in DEFAULT_DATA_SOURCES
                   if 'content' in i['content']]
BUNDLED_SID_SAMPLES = hachoir.discover(root_path=BUNDLED_SID_SAMPLES_DIR,
                                       sources=CONTENT_SOURCES)
# TODO: split these cases into separate files?
class TestDiscover(unittest.TestCase):
    """Sanity checks on the loadfile records found by hachoir.discover."""

    def test_is_list_of_dicts(self):
        # Every discovered record must be a plain dict.
        for record in DISCOVERED_LOADFILES:
            self.assertEqual(type(record), dict)

    def test_proper_keys(self):
        # Every record must carry exactly this key set.
        expected = sorted(
            ['category', 'file_extension', 'size_on_disk', 'source',
             'state_abbr', 'file', 'year', 'directory', 'filename',
             'full_path', 'content']
        )
        for record in DISCOVERED_LOADFILES:
            keys = sorted(record.keys())
            self.assertEqual(keys,
                             expected,
                             'Key mismatch for {0}, expected {1}'\
                                 .format(keys, expected)
                             )
class TestMeta(unittest.TestCase):
    """Checks that bundled SAS / TX PUDF loading programs parse into DataFrames."""

    def setUp(self):
        self.sas_meta_paths = [i for i in DISCOVERED_LOADFILES
                               if i['file_extension'].lower() == 'sas']
        self.pudf_meta_paths = [i for i in DISCOVERED_LOADFILES
                                if i['file_extension'].lower() == 'txt'
                                and i['state_abbr'].lower() == 'tx']

    def test_sas_meta_are_dataframes(self):
        # BUG FIX: the original yielded nose-style check tuples (which
        # unittest never executes, so the test silently passed) and its
        # error string referenced an undefined name ``m`` (NameError).
        # Assert directly instead, mirroring the TX test below.
        for i in self.sas_meta_paths:
            fp = i['full_path']
            product = sas.meta_from_sas(fp)
            err = '''
            Not DataFrame: sas.meta_from_sas('{fp}') ({t})
            '''.format(fp=fp, t=type(product))
            self.assertTrue(is_dataframe(product), err)

    def test_tx_meta_are_dataframes(self):
        for i in self.pudf_meta_paths:
            fp = i['full_path']
            product = tx.meta_from_txt(fp)
            err = '''
            Not DataFrame: tx.meta_from_txt('{fp}') ({t})
            '''.format(fp=fp, t=type(product))
            # Pass the message so failures identify the offending file
            # (the original built err but never used it).
            self.assertTrue(is_dataframe(product), err)

    def test_get_meta_vs_meta_from_sas(self):
        """
        pyhcup.get_meta() is a shortcut that helps to call
        pyhcup.sas.meta_from_sas(). Given certain inputs, these should
        generate identical DataFrames for the meta data (loading file) in
        question.
        """
        for i in self.sas_meta_paths:
            fp = i['full_path']
            mfs = sas.meta_from_sas(fp)
            gm = pyhcup.get_meta(state=i['state_abbr'], year=i['year'],
                                 category=i['category'], datafile=i['file'])
            self.assertTrue((mfs.values == gm.values).all(),
                            '''
                            sas.meta_from_sas('{fp}') != pyhcup.get_meta(state={st}, year={yr}, category={c}, datafile={d})
                            '''.format(fp=fp, st=i['state_abbr'],
                                       yr=i['year'], c=i['category'],
                                       d=i['file'])
                            )
def test_uncompressed_hcup_samples_generator():
    """Yield one check per bundled, uncompressed HCUP sample file."""
    for sample in BUNDLED_SID_SAMPLES:
        if (sample['source'] == 'HCUP'
                and sample['content'] == 'contentnh_uncompressed'):
            yield check_hcup_sample, sample
def check_hcup_sample(target_dict):
    """
    Verifies that the provided HCUP sample data can be read by the included
    loading program files.
    target_dict: dictionary produced by pyhcup.hachoir.discover
    """
    # skip test if known to be missing a loadfile
    # An entry "matches" when every field it specifies equals the
    # corresponding field in target_dict (subset match, not equality).
    for missing in KNOWN_MISSING_LOADFILES:
        if all(missing[k] == target_dict[k] for k in missing.keys()):
            err = '''
            Known missing loading program: {d}
            '''.format(d=target_dict)
            raise unittest.SkipTest(err)
    fp = target_dict['full_path']
    # Look up the loading program (meta) matching this sample's identity.
    m = pyhcup.get_meta(state=target_dict['state_abbr'],
                        year=target_dict['year'],
                        category=target_dict['category'],
                        datafile=target_dict['file'])
    # The actual check: the sample must load into a pandas DataFrame.
    product = pyhcup.read(fp, m)
    err = '''
    Not DataFrame: pyhcup.read('{fp}', {m}) ({t})
    '''.format(fp=fp, m=m, t=type(product))
    assert is_dataframe(product), err
def is_dataframe(obj):
    """Return True if *obj* is a pandas DataFrame (or a subclass).

    Uses isinstance() instead of an exact ``type(obj) == pd.DataFrame``
    comparison, which is the idiomatic type check and also accepts
    DataFrame subclasses.
    """
    return isinstance(obj, pd.DataFrame)
def check_is_dataframe(obj, err='Object type is not pandas.DataFrame'):
    # Assertion helper for the nose-style "yield check" generators above.
    # NOTE: `assert` is stripped under `python -O`; acceptable here since
    # this module is test-only code.
    assert is_dataframe(obj), err
if __name__ == '__main__':
    # Allow running this test module directly: `python test_reading.py`.
    unittest.main()
| 37.782946
| 123
| 0.551908
|
4a05a86fbd506541c23b34b064335f59229841a0
| 1,560
|
py
|
Python
|
app/main/api.py
|
A1014280203/elearning
|
6419b86411d0fd67b299d9ec666422326062ff99
|
[
"MIT"
] | 24
|
2017-09-12T04:44:29.000Z
|
2021-04-14T09:09:15.000Z
|
app/main/api.py
|
A1014280203/elearning
|
6419b86411d0fd67b299d9ec666422326062ff99
|
[
"MIT"
] | 1
|
2022-03-20T04:25:16.000Z
|
2022-03-20T04:25:16.000Z
|
app/main/api.py
|
A1014280203/elearning
|
6419b86411d0fd67b299d9ec666422326062ff99
|
[
"MIT"
] | 21
|
2017-09-12T03:57:31.000Z
|
2019-08-22T12:16:05.000Z
|
from flask import render_template, request, jsonify, url_for
from ..models import User, School, Course, Paper
from . import main
@main.route('/')
def index():
    """Render the site landing page."""
    return render_template('main/index.html')
@main.route('/mall')
def mall():
    """Render one 10-item page of courses matching ``Course.c_off == 0``.

    The 1-based page number comes from the ``p`` query parameter
    (defaults to page 1 when absent or empty).
    """
    page = int(request.args.get('p') or 1) - 1
    offset = page * 10
    items = Course.query_range(Course.c_off == 0, start=offset, stop=offset + 10)
    return render_template('main/mall.html', items=items)
@main.route('/schools')
def show_schools():
    """Render one 10-item page of schools, paired two per template row.

    The ``p`` query parameter selects the 1-based page (default 1).
    """
    page = int(request.args.get('p') or 1) - 1
    items = School.query_range(start=page * 10, stop=page * 10 + 10)
    if not items:
        # Nothing on this page: hand the empty result straight through.
        return render_template('main/school.html', items=items)
    # Pad an odd-length page with a blank placeholder so the entries can
    # be zipped into odd/even pairs for a two-column layout.
    if len(items) % 2:
        items.append(School(s_name=''))
    combined = [dict(odd=items[i], even=items[i + 1])
                for i in range(0, len(items), 2)]
    return render_template('main/school.html', items=combined)
@main.route('/teachers')
def show_teachers():
    # Render one 10-item page of users with the teacher role (u_role == 2),
    # paired two per row for the template; `p` is the 1-based page number.
    p = int(request.args.get('p') or 1)
    p -= 1
    items = User.query_range(User.u_role == 2, start=p*10, stop=p*10+10)
    if items:
        combined = list()
        if len(items) % 2:
            # NOTE(review): this pads a list of User rows with a blank
            # *School* object — looks like a copy-paste from show_schools().
            # The template presumably expects a User-shaped placeholder;
            # confirm before changing.
            items.append(School(s_name=''))
        for i in range(0, len(items), 2):
            combined.append(dict(odd=items[i], even=items[i+1]))
    else:
        combined = items
    return render_template('main/teacher.html', items=combined)
@main.after_app_request
def allow_cors(response):
    """Attach a wildcard CORS header to every outgoing response."""
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
| 27.857143
| 76
| 0.594872
|
4a05a8fd4517f908d8c85be229b36a8761e728c4
| 6,547
|
py
|
Python
|
lib/galaxy/managers/configuration.py
|
ClayBirkett/galaxy
|
b5afa3c1a90d269f1d438ffde481ff2e4178a72b
|
[
"CC-BY-3.0"
] | 1
|
2019-11-15T01:50:38.000Z
|
2019-11-15T01:50:38.000Z
|
lib/galaxy/managers/configuration.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/managers/configuration.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | null | null | null |
"""
Serializers for Galaxy config file data: ConfigSerializer for all users
and a more expanded set of data for admin in AdminConfigSerializer.
Used by both the API and bootstrapped data.
"""
# TODO: this is a bit of an odd duck. It uses the serializer structure from managers
# but doesn't have a model like them. It might be better in config.py or a
# totally new area, but I'm leaving it in managers for now for class consistency.
import logging
from galaxy.managers import base
from galaxy.web.framework.base import server_starttime
log = logging.getLogger(__name__)
class ConfigSerializer(base.ModelSerializer):
    """Configuration (galaxy.ini) settings viewable by all users"""
    def __init__(self, app):
        # Register a single 'all' view exposing every serializer key below.
        super(ConfigSerializer, self).__init__(app)
        self.default_view = 'all'
        self.add_view('all', list(self.serializers.keys()))
    def default_serializer(self, config, key):
        # Fallback for keys without an explicit serializer entry:
        # read the attribute, or None when absent.
        return getattr(config, key, None)
    def add_serializers(self):
        # Serializer factory: read the attribute, fall back to `default`.
        def _defaults_to(default):
            return lambda item, key, **context: getattr(item, key, default)
        # Serializer that insists the attribute exists on the config object.
        # NOTE: `assert` is stripped under `python -O`; the getattr would
        # then raise AttributeError instead — confirm that is acceptable.
        def _required_attribute(item, key, **context):
            assert hasattr(item, key)
            return getattr(item, key)
        self.serializers = {
            # TODO: this is available from user data, remove
            'is_admin_user'             : lambda *a, **c: False,
            'brand'                     : _required_attribute,
            # TODO: this doesn't seem right
            # NOTE(review): uses item.get(...) while every other entry uses
            # getattr — assumes the config object supports dict-style
            # access; verify against the config class.
            'logo_url'                  : lambda item, key, **context: self.url_for(item.get(key, '/')),
            'logo_src'                  : lambda item, key, **context: self.url_for('/static/favicon.png'),
            'terms_url'                 : _required_attribute,
            'myexperiment_target_url'   : _required_attribute,
            'wiki_url'                  : _required_attribute,
            'search_url'                : _required_attribute,
            'mailing_lists'             : _defaults_to(self.app.config.mailing_lists_url),
            'screencasts_url'           : _required_attribute,
            'genomespace_ui_url'        : _required_attribute,
            'citation_url'              : _required_attribute,
            'support_url'               : _required_attribute,
            'helpsite_url'              : _required_attribute,
            'lims_doc_url'              : _defaults_to("https://usegalaxy.org/u/rkchak/p/sts"),
            'default_locale'            : _required_attribute,
            'enable_openid'             : _required_attribute,
            'enable_communication_server' : _required_attribute,
            'communication_server_port' : _required_attribute,
            'communication_server_host' : _required_attribute,
            'persistent_communication_rooms' : _required_attribute,
            'allow_user_impersonation'  : _required_attribute,
            'allow_user_creation'       : _defaults_to(False),  # schema default is True
            'use_remote_user'           : _defaults_to(None),  # schema default is False; or config.single_user
            'enable_oidc'               : _required_attribute,
            'oidc'                      : _required_attribute,
            'enable_quotas'             : _required_attribute,
            'remote_user_logout_href'   : _required_attribute,
            'datatypes_disable_auto'    : _required_attribute,
            'allow_user_dataset_purge'  : _defaults_to(False),  # schema default is True
            'ga_code'                   : _required_attribute,
            'enable_unique_workflow_defaults' : _required_attribute,
            'has_user_tool_filters'     : _defaults_to(False),
            # TODO: is there no 'correct' way to get an api url? controller='api', action='tools' is a hack
            # at any rate: the following works with path_prefix but is still brittle
            # TODO: change this to (more generic) upload_path and incorporate config.nginx_upload_path into building it
            'nginx_upload_path'         : lambda item, key, **context: getattr(item, key, False),
            'chunk_upload_size'         : _required_attribute,
            'ftp_upload_site'           : _required_attribute,
            'version_major'             : _defaults_to(None),
            'require_login'             : _required_attribute,
            'inactivity_box_content'    : _required_attribute,
            'visualizations_visible'    : _required_attribute,
            'interactivetools_enable'   : _required_attribute,
            'message_box_content'       : _required_attribute,
            'message_box_visible'       : _required_attribute,
            'message_box_class'         : _required_attribute,
            # NOTE(review): 'server_startttime' has a triple-t typo, but this
            # key names a field in the client-facing payload — confirm no
            # consumer depends on it before renaming.
            'server_startttime'         : lambda item, key, **context: server_starttime,
            'mailing_join_addr'         : _defaults_to('galaxy-announce-join@bx.psu.edu'),  # should this be the schema default?
            'server_mail_configured'    : lambda item, key, **context: bool(getattr(item, 'smtp_server')),
            'registration_warning_message' : _required_attribute,
            'welcome_url'               : _required_attribute,
            'show_welcome_with_login'   : _defaults_to(True),  # schema default is False
            'cookie_domain'             : _required_attribute,
        }
class AdminConfigSerializer(ConfigSerializer):
    """Configuration attributes viewable only by admin users"""

    def add_serializers(self):
        """Extend the base serializers with admin-only settings."""
        super(AdminConfigSerializer, self).add_serializers()

        def _attr_or(fallback):
            # Serializer reading the attribute, falling back to `fallback`.
            return lambda item, key, **context: getattr(item, key, fallback)

        admin_only = {
            # TODO: this is available from user serialization: remove
            'is_admin_user': lambda *a: True,
            'library_import_dir': _attr_or(None),
            'user_library_import_dir': _attr_or(None),
            'allow_library_path_paste': _attr_or(False),
            'allow_user_deletion': _attr_or(False),
        }
        self.serializers.update(admin_only)
| 55.957265
| 136
| 0.57049
|
4a05aa0bb3d18920aeba286988e9d527050d0bd7
| 1,050
|
py
|
Python
|
experiments/data_augmentation/main.py
|
ClementPla/Retinal-Lesions-Segmentation
|
20fa4ac8eae24814470095bb6e7f08d6751c4e11
|
[
"MIT"
] | 1
|
2021-10-10T02:44:13.000Z
|
2021-10-10T02:44:13.000Z
|
experiments/data_augmentation/main.py
|
ClementPla/Retinal-Lesions-Segmentation
|
20fa4ac8eae24814470095bb6e7f08d6751c4e11
|
[
"MIT"
] | null | null | null |
experiments/data_augmentation/main.py
|
ClementPla/Retinal-Lesions-Segmentation
|
20fa4ac8eae24814470095bb6e7f08d6751c4e11
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('../../')
from experiment import RetinExp, DA, Dataset
from nntools.utils import Config
import argparse
if __name__ == '__main__':
    # CLI: a config file path plus one or more model architecture names.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--config", help="Path to the configuration file")
    arg_parser.add_argument("--models", help="List of models to train", nargs='+')
    cli = arg_parser.parse_args()

    config = Config(cli.config)
    model_names = cli.models
    if not isinstance(model_names, list):
        model_names = [model_names]

    # Data-augmentation levels to sweep; only the no-augmentation baseline.
    augmentations = [DA.NONE]

    for architecture in model_names:
        for augmentation in augmentations:
            # Name the run after the model and its augmentation level.
            config['Manager']['run'] = '%s-DA: %s' % (architecture, augmentation.name)
            config['Network']['architecture'] = architecture
            experiment = RetinExp(
                config,
                train_sets=Dataset.IDRID | Dataset.MESSIDOR,
                da_level=augmentation,
                test_sets=Dataset.RETINAL_LESIONS | Dataset.IDRID | Dataset.DDR | Dataset.FGADR,
                cache=True)
            experiment.start()
| 32.8125
| 114
| 0.589524
|
4a05aa4c5a0753c6b87efc399bd0e4417a414753
| 54,720
|
py
|
Python
|
Bio/SeqRecord.py
|
Manoo-hao/biopython
|
5a55811d57880cbc9e6f198374e81ef3a4fbfa05
|
[
"BSD-3-Clause"
] | 1
|
2021-07-09T23:35:05.000Z
|
2021-07-09T23:35:05.000Z
|
Bio/SeqRecord.py
|
Manoo-hao/biopython
|
5a55811d57880cbc9e6f198374e81ef3a4fbfa05
|
[
"BSD-3-Clause"
] | null | null | null |
Bio/SeqRecord.py
|
Manoo-hao/biopython
|
5a55811d57880cbc9e6f198374e81ef3a4fbfa05
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2000-2002 Andrew Dalke. All rights reserved.
# Copyright 2002-2004 Brad Chapman. All rights reserved.
# Copyright 2006-2020 by Peter Cock. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Represent a Sequence Record, a sequence with annotation."""
# NEEDS TO BE SYNCH WITH THE REST OF BIOPYTHON AND BIOPERL
# In particular, the SeqRecord and BioSQL.BioSeq.DBSeqRecord classes
# need to be in sync (this is the BioSQL "Database SeqRecord").
from io import StringIO
import numbers
from Bio import StreamModeError
from Bio.Seq import UndefinedSequenceError
_NO_SEQRECORD_COMPARISON = "SeqRecord comparison is deliberately not implemented. Explicitly compare the attributes of interest."
class _RestrictedDict(dict):
"""Dict which only allows sequences of given length as values (PRIVATE).
This simple subclass of the Python dictionary is used in the SeqRecord
object for holding per-letter-annotations. This class is intended to
prevent simple errors by only allowing python sequences (e.g. lists,
strings and tuples) to be stored, and only if their length matches that
expected (the length of the SeqRecord's seq object). It cannot however
prevent the entries being edited in situ (for example appending entries
to a list).
>>> x = _RestrictedDict(5)
>>> x["test"] = "hello"
>>> x
{'test': 'hello'}
Adding entries which don't have the expected length are blocked:
>>> x["test"] = "hello world"
Traceback (most recent call last):
...
TypeError: We only allow python sequences (lists, tuples or strings) of length 5.
The expected length is stored as a private attribute,
>>> x._length
5
In order that the SeqRecord (and other objects using this class) can be
pickled, for example for use in the multiprocessing library, we need to
be able to pickle the restricted dictionary objects.
Using the default protocol, which is 3 on Python 3,
>>> import pickle
>>> y = pickle.loads(pickle.dumps(x))
>>> y
{'test': 'hello'}
>>> y._length
5
Using the highest protocol, which is 4 on Python 3,
>>> import pickle
>>> z = pickle.loads(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
>>> z
{'test': 'hello'}
>>> z._length
5
"""
def __init__(self, length):
"""Create an EMPTY restricted dictionary."""
dict.__init__(self)
self._length = int(length)
def __setitem__(self, key, value):
# The check hasattr(self, "_length") is to cope with pickle protocol 2
# I couldn't seem to avoid this with __getstate__ and __setstate__
if (
not hasattr(value, "__len__")
or not hasattr(value, "__getitem__")
or (hasattr(self, "_length") and len(value) != self._length)
):
raise TypeError(
"We only allow python sequences (lists, tuples or strings) "
f"of length {self._length}."
)
dict.__setitem__(self, key, value)
def update(self, new_dict):
# Force this to go via our strict __setitem__ method
for (key, value) in new_dict.items():
self[key] = value
class SeqRecord:
"""A SeqRecord object holds a sequence and information about it.
Main attributes:
- id - Identifier such as a locus tag (string)
- seq - The sequence itself (Seq object or similar)
Additional attributes:
- name - Sequence name, e.g. gene name (string)
- description - Additional text (string)
- dbxrefs - List of database cross references (list of strings)
- features - Any (sub)features defined (list of SeqFeature objects)
- annotations - Further information about the whole sequence (dictionary).
Most entries are strings, or lists of strings.
- letter_annotations - Per letter/symbol annotation (restricted
dictionary). This holds Python sequences (lists, strings
or tuples) whose length matches that of the sequence.
A typical use would be to hold a list of integers
representing sequencing quality scores, or a string
representing the secondary structure.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly (see the __init__ method for further details):
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF"),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein")
>>> print(record)
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF')
If you want to save SeqRecord objects to a sequence file, use Bio.SeqIO
for this. For the special case where you want the SeqRecord turned into
a string in a particular file format there is a format method which uses
Bio.SeqIO internally:
>>> print(record.format("fasta"))
>YP_025292.1 toxic membrane protein
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
<BLANKLINE>
You can also do things like slicing a SeqRecord, checking its length, etc
>>> len(record)
44
>>> edited = record[:10] + record[11:]
>>> print(edited.seq)
MKQHKAMIVAIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
>>> print(record.seq)
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
"""
def __init__(
self,
seq,
id="<unknown id>",
name="<unknown name>",
description="<unknown description>",
dbxrefs=None,
features=None,
annotations=None,
letter_annotations=None,
):
"""Create a SeqRecord.
Arguments:
- seq - Sequence, required (Seq or MutableSeq)
- id - Sequence identifier, recommended (string)
- name - Sequence name, optional (string)
- description - Sequence description, optional (string)
- dbxrefs - Database cross references, optional (list of strings)
- features - Any (sub)features, optional (list of SeqFeature objects)
- annotations - Dictionary of annotations for the whole sequence
- letter_annotations - Dictionary of per-letter-annotations, values
should be strings, list or tuples of the same length as the full
sequence.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly.
Note that while an id is optional, we strongly recommend you supply a
unique id string for each record. This is especially important
if you wish to write your sequences to a file.
You can create a 'blank' SeqRecord object, and then populate the
attributes later.
"""
if id is not None and not isinstance(id, str):
# Lots of existing code uses id=None... this may be a bad idea.
raise TypeError("id argument should be a string")
if not isinstance(name, str):
raise TypeError("name argument should be a string")
if not isinstance(description, str):
raise TypeError("description argument should be a string")
self._seq = seq
self.id = id
self.name = name
self.description = description
# database cross references (for the whole sequence)
if dbxrefs is None:
dbxrefs = []
elif not isinstance(dbxrefs, list):
raise TypeError("dbxrefs argument should be a list (of strings)")
self.dbxrefs = dbxrefs
# annotations about the whole sequence
if annotations is None:
annotations = {}
elif not isinstance(annotations, dict):
raise TypeError("annotations argument must be a dict or None")
self.annotations = annotations
if letter_annotations is None:
# annotations about each letter in the sequence
if seq is None:
# Should we allow this and use a normal unrestricted dict?
self._per_letter_annotations = _RestrictedDict(length=0)
else:
try:
self._per_letter_annotations = _RestrictedDict(length=len(seq))
except TypeError:
raise TypeError(
"seq argument should be a Seq object or similar"
) from None
else:
# This will be handled via the property set function, which will
# turn this into a _RestrictedDict and thus ensure all the values
# in the dict are the right length
self.letter_annotations = letter_annotations
# annotations about parts of the sequence
if features is None:
features = []
elif not isinstance(features, list):
raise TypeError(
"features argument should be a list (of SeqFeature objects)"
)
self.features = features
# TODO - Just make this a read only property?
def _set_per_letter_annotations(self, value):
if not isinstance(value, dict):
raise TypeError(
"The per-letter-annotations should be a (restricted) dictionary."
)
# Turn this into a restricted-dictionary (and check the entries)
try:
self._per_letter_annotations = _RestrictedDict(length=len(self.seq))
except AttributeError:
# e.g. seq is None
self._per_letter_annotations = _RestrictedDict(length=0)
self._per_letter_annotations.update(value)
letter_annotations = property(
fget=lambda self: self._per_letter_annotations,
fset=_set_per_letter_annotations,
doc="""Dictionary of per-letter-annotation for the sequence.
For example, this can hold quality scores used in FASTQ or QUAL files.
Consider this example using Bio.SeqIO to read in an example Solexa
variant FASTQ file as a SeqRecord:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> print(record.letter_annotations["solexa_quality"])
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
The letter_annotations get sliced automatically if you slice the
parent SeqRecord, for example taking the last ten bases:
>>> sub_record = record[-10:]
>>> print("%s %s" % (sub_record.id, sub_record.seq))
slxa_0001_1_0001_01 ACGTNNNNNN
>>> print(sub_record.letter_annotations["solexa_quality"])
[4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
Any python sequence (i.e. list, tuple or string) can be recorded in
the SeqRecord's letter_annotations dictionary as long as the length
matches that of the SeqRecord's sequence. e.g.
>>> len(sub_record.letter_annotations)
1
>>> sub_record.letter_annotations["dummy"] = "abcdefghij"
>>> len(sub_record.letter_annotations)
2
You can delete entries from the letter_annotations dictionary as usual:
>>> del sub_record.letter_annotations["solexa_quality"]
>>> sub_record.letter_annotations
{'dummy': 'abcdefghij'}
You can completely clear the dictionary easily as follows:
>>> sub_record.letter_annotations = {}
>>> sub_record.letter_annotations
{}
Note that if replacing the record's sequence with a sequence of a
different length you must first clear the letter_annotations dict.
""",
)
def _set_seq(self, value):
# TODO - Add a deprecation warning that the seq should be write only?
if self._per_letter_annotations:
if len(self) != len(value):
# TODO - Make this a warning? Silently empty the dictionary?
raise ValueError("You must empty the letter annotations first!")
else:
# Leave the existing per letter annotations unchanged:
self._seq = value
else:
self._seq = value
# Reset the (empty) letter annotations dict with new length:
try:
self._per_letter_annotations = _RestrictedDict(length=len(self.seq))
except AttributeError:
# e.g. seq is None
self._per_letter_annotations = _RestrictedDict(length=0)
seq = property(
fget=lambda self: self._seq,
fset=_set_seq,
doc="The sequence itself, as a Seq or MutableSeq object.",
)
def __getitem__(self, index):
"""Return a sub-sequence or an individual letter.
Slicing, e.g. my_record[5:10], returns a new SeqRecord for
that sub-sequence with some annotation preserved as follows:
* The name, id and description are kept as-is.
* Any per-letter-annotations are sliced to match the requested
sub-sequence.
* Unless a stride is used, all those features which fall fully
within the subsequence are included (with their locations
adjusted accordingly). If you want to preserve any truncated
features (e.g. GenBank/EMBL source features), you must
explicitly add them to the new SeqRecord yourself.
* With the exception of any molecule type, the annotations
dictionary and the dbxrefs list are not used for the new
SeqRecord, as in general they may not apply to the
subsequence. If you want to preserve them, you must explicitly
copy them to the new SeqRecord yourself.
Using an integer index, e.g. my_record[5] is shorthand for
extracting that letter from the sequence, my_record.seq[5].
For example, consider this short protein and its secondary
structure as encoded by the PDB (e.g. H for alpha helices),
plus a simple feature for its histidine self phosphorylation
site:
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> rec = SeqRecord(Seq("MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLAT"
... "EMMSEQDGYLAESINKDIEECNAIIEQFIDYLR"),
... id="1JOY", name="EnvZ",
... description="Homodimeric domain of EnvZ from E. coli")
>>> rec.letter_annotations["secondary_structure"] = " S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT "
>>> rec.features.append(SeqFeature(FeatureLocation(20, 21),
... type = "Site"))
Now let's have a quick look at the full record,
>>> print(rec)
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR')
>>> rec.letter_annotations["secondary_structure"]
' S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT '
>>> print(rec.features[0].location)
[20:21]
Now let's take a sub sequence, here chosen as the first (fractured)
alpha helix which includes the histidine phosphorylation site:
>>> sub = rec[11:41]
>>> print(sub)
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('RTLLMAGVSHDLRTPLTRIRLATEMMSEQD')
>>> sub.letter_annotations["secondary_structure"]
'HHHHHTTTHHHHHHHHHHHHHHHHHHHHHH'
>>> print(sub.features[0].location)
[9:10]
You can also of course omit the start or end values, for
example to get the first ten letters only:
>>> print(rec[:10])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 0
Per letter annotation for: secondary_structure
Seq('MAAGVKQLAD')
Or for the last ten letters:
>>> print(rec[-10:])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 0
Per letter annotation for: secondary_structure
Seq('IIEQFIDYLR')
If you omit both, then you get a copy of the original record (although
lacking the annotations and dbxrefs):
>>> print(rec[:])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR')
Finally, indexing with a simple integer is shorthand for pulling out
that letter from the sequence directly:
>>> rec[5]
'K'
>>> rec.seq[5]
'K'
"""
if isinstance(index, numbers.Integral):
# NOTE - The sequence level annotation like the id, name, etc
# do not really apply to a single character. However, should
# we try and expose any per-letter-annotation here? If so how?
return self.seq[index]
elif isinstance(index, slice):
if self.seq is None:
raise ValueError("If the sequence is None, we cannot slice it.")
parent_length = len(self)
try:
from BioSQL.BioSeq import DBSeqRecord
biosql_available = True
except ImportError:
biosql_available = False
if biosql_available and isinstance(self, DBSeqRecord):
answer = SeqRecord(
self.seq[index],
id=self.id,
name=self.name,
description=self.description,
)
else:
answer = self.__class__(
self.seq[index],
id=self.id,
name=self.name,
description=self.description,
)
# TODO - The description may no longer apply.
# It would be safer to change it to something
# generic like "edited" or the default value.
# Don't copy the annotation dict and dbxefs list,
# they may not apply to a subsequence.
# answer.annotations = dict(self.annotations.items())
# answer.dbxrefs = self.dbxrefs[:]
# TODO - Review this in light of adding SeqRecord objects?
if "molecule_type" in self.annotations:
# This will still apply, and we need it for GenBank/EMBL etc output
answer.annotations["molecule_type"] = self.annotations["molecule_type"]
# TODO - Cope with strides by generating ambiguous locations?
start, stop, step = index.indices(parent_length)
if step == 1:
# Select relevant features, add them with shifted locations
# assert str(self.seq)[index] == str(self.seq)[start:stop]
for f in self.features:
if f.ref or f.ref_db:
# TODO - Implement this (with lots of tests)?
import warnings
warnings.warn(
"When slicing SeqRecord objects, any "
"SeqFeature referencing other sequences (e.g. "
"from segmented GenBank records) are ignored."
)
continue
if (
start <= f.location.nofuzzy_start
and f.location.nofuzzy_end <= stop
):
answer.features.append(f._shift(-start))
# Slice all the values to match the sliced sequence
# (this should also work with strides, even negative strides):
for key, value in self.letter_annotations.items():
answer._per_letter_annotations[key] = value[index]
return answer
raise ValueError("Invalid index")
def __iter__(self):
"""Iterate over the letters in the sequence.
For example, using Bio.SeqIO to read in a protein FASTA file:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/loveliesbleeding.pro", "fasta")
>>> for amino in record:
... print(amino)
... if amino == "L": break
X
A
G
L
>>> print(record.seq[3])
L
This is just a shortcut for iterating over the sequence directly:
>>> for amino in record.seq:
... print(amino)
... if amino == "L": break
X
A
G
L
>>> print(record.seq[3])
L
Note that this does not facilitate iteration together with any
per-letter-annotation. However, you can achieve that using the
python zip function on the record (or its sequence) and the relevant
per-letter-annotation:
>>> from Bio import SeqIO
>>> rec = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (rec.id, rec.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(rec.letter_annotations))
['solexa_quality']
>>> for nuc, qual in zip(rec, rec.letter_annotations["solexa_quality"]):
... if qual > 35:
... print("%s %i" % (nuc, qual))
A 40
C 39
G 38
T 37
A 36
You may agree that using zip(rec.seq, ...) is more explicit than using
zip(rec, ...) as shown above.
"""
return iter(self.seq)
def __contains__(self, char):
"""Implement the 'in' keyword, searches the sequence.
e.g.
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
>>> "GAATTC" in record
False
>>> "AAA" in record
True
This essentially acts as a proxy for using "in" on the sequence:
>>> "GAATTC" in record.seq
False
>>> "AAA" in record.seq
True
Note that you can also use Seq objects as the query,
>>> from Bio.Seq import Seq
>>> Seq("AAA") in record
True
See also the Seq object's __contains__ method.
"""
return char in self.seq
def __str__(self):
"""Return a human readable summary of the record and its annotation (string).
The python built in function str works by calling the object's __str__
method. e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF"),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein, small")
>>> print(str(record))
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein, small
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF')
In this example you don't actually need to call str explicity, as the
print command does this automatically:
>>> print(record)
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein, small
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF')
Note that long sequences are shown truncated.
"""
lines = []
if self.id:
lines.append(f"ID: {self.id}")
if self.name:
lines.append(f"Name: {self.name}")
if self.description:
lines.append(f"Description: {self.description}")
if self.dbxrefs:
lines.append("Database cross-references: " + ", ".join(self.dbxrefs))
lines.append(f"Number of features: {len(self.features)}")
for a in self.annotations:
lines.append(f"/{a}={str(self.annotations[a])}")
if self.letter_annotations:
lines.append(
"Per letter annotation for: " + ", ".join(self.letter_annotations)
)
try:
bytes(self.seq)
except UndefinedSequenceError:
lines.append(f"Undefined sequence of length {len(self.seq)}")
else:
# Don't want to include the entire sequence
seq = repr(self.seq)
lines.append(seq)
return "\n".join(lines)
def __repr__(self):
"""Return a concise summary of the record for debugging (string).
The python built in function repr works by calling the object's __repr__
method. e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> rec = SeqRecord(Seq("MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKAT"
... "GEMKEQTEWHRVVLFGKLAEVASEYLRKGSQVYIEGQLRTRKWTDQ"
... "SGQDRYTTEVVVNVGGTMQMLGGRQGGGAPAGGNIGGGQPQGGWGQ"
... "PQQPQGGNQFSGGAQSRPQQSAPAAPSNEPPMDFDDDIPF"),
... id="NP_418483.1", name="b4059",
... description="ssDNA-binding protein",
... dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"])
>>> print(repr(rec))
SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF'), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
At the python prompt you can also use this shorthand:
>>> rec
SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF'), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
Note that long sequences are shown truncated. Also note that any
annotations, letter_annotations and features are not shown (as they
would lead to a very long string).
"""
return (
f"{self.__class__.__name__}(seq={self.seq!r}, id={self.id!r},"
f" name={self.name!r}, description={self.description!r},"
f" dbxrefs={self.dbxrefs!r})"
)
    def format(self, format):
        r"""Return the record as a string in the specified file format.

        The format should be a lower case string supported as an output
        format by Bio.SeqIO, which is used to turn the SeqRecord into a
        string. e.g.

        >>> from Bio.Seq import Seq
        >>> from Bio.SeqRecord import SeqRecord
        >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF"),
        ...                    id="YP_025292.1", name="HokC",
        ...                    description="toxic membrane protein")
        >>> record.format("fasta")
        '>YP_025292.1 toxic membrane protein\nMKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF\n'

        Note the returned string carries a trailing newline (important
        when writing to a file or concatenating records), so printing it
        shows an extra blank line.

        This method will NOT work for every format supported by
        Bio.SeqIO: multi-record-only formats and binary formats are not
        supported (the latter raise ValueError in __format__).
        """
        # See also the __format__ method
        # See also the Bio.Align.Generic.Alignment class and its format()
        return self.__format__(format)
def __format__(self, format_spec):
r"""Return the record as a string in the specified file format.
This method supports the Python format() function and f-strings.
The format_spec should be a lower case string supported by
Bio.SeqIO as a text output file format. Requesting a binary file
format raises a ValueError. e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF"),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein")
...
>>> format(record, "fasta")
'>YP_025292.1 toxic membrane protein\nMKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF\n'
>>> print(f"Here is {record.id} in FASTA format:\n{record:fasta}")
Here is YP_025292.1 in FASTA format:
>YP_025292.1 toxic membrane protein
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
<BLANKLINE>
See also the SeqRecord's format() method.
"""
if not format_spec:
# Follow python convention and default to using __str__
return str(self)
from Bio import SeqIO
# Easy case, can call string-building function directly
if format_spec in SeqIO._FormatToString:
return SeqIO._FormatToString[format_spec](self)
# Harder case, make a temp handle instead
handle = StringIO()
try:
SeqIO.write(self, handle, format_spec)
except StreamModeError:
raise ValueError(
"Binary format %s cannot be used with SeqRecord format method"
% format_spec
) from None
return handle.getvalue()
    def __len__(self):
        """Return the length of the sequence (delegates to self.seq).

        For example, using Bio.SeqIO to read in a FASTA nucleotide file:

        >>> from Bio import SeqIO
        >>> record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
        >>> len(record)
        309
        >>> len(record.seq)
        309
        """
        return len(self.seq)
    def __lt__(self, other):
        """Define the less-than operand (not implemented).

        SeqRecord ordering is deliberately unsupported; compare an
        explicit attribute such as record.id or record.seq instead.
        """
        raise NotImplementedError(_NO_SEQRECORD_COMPARISON)
    def __le__(self, other):
        """Define the less-than-or-equal-to operand (not implemented).

        SeqRecord ordering is deliberately unsupported; compare an
        explicit attribute such as record.id or record.seq instead.
        """
        raise NotImplementedError(_NO_SEQRECORD_COMPARISON)
    def __eq__(self, other):
        """Define the equal-to operand (not implemented).

        Whole-record equality is deliberately unsupported; compare an
        explicit attribute such as record.id or record.seq instead.
        """
        raise NotImplementedError(_NO_SEQRECORD_COMPARISON)
    def __ne__(self, other):
        """Define the not-equal-to operand (not implemented).

        Whole-record comparison is deliberately unsupported; compare an
        explicit attribute such as record.id or record.seq instead.
        """
        raise NotImplementedError(_NO_SEQRECORD_COMPARISON)
    def __gt__(self, other):
        """Define the greater-than operand (not implemented).

        SeqRecord ordering is deliberately unsupported; compare an
        explicit attribute such as record.id or record.seq instead.
        """
        raise NotImplementedError(_NO_SEQRECORD_COMPARISON)
    def __ge__(self, other):
        """Define the greater-than-or-equal-to operand (not implemented).

        SeqRecord ordering is deliberately unsupported; compare an
        explicit attribute such as record.id or record.seq instead.
        """
        raise NotImplementedError(_NO_SEQRECORD_COMPARISON)
    def __bool__(self):
        """Boolean value of an instance of this class (always True).

        This behaviour is for backwards compatibility, since until the
        __len__ method was added, a SeqRecord always evaluated as True.

        Note that in comparison, a Seq object will evaluate to False if
        it has a zero length sequence.

        WARNING: The SeqRecord may in future evaluate to False when its
        sequence is of zero length (in order to better match the Seq
        object behaviour)!
        """
        return True
def __add__(self, other):
"""Add another sequence or string to this sequence.
The other sequence can be a SeqRecord object, a Seq object (or
similar, e.g. a MutableSeq) or a plain Python string. If you add
a plain string or a Seq (like) object, the new SeqRecord will simply
have this appended to the existing data. However, any per letter
annotation will be lost:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> new = record + "ACT"
>>> print("%s %s" % (new.id, new.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNNACT
>>> print(list(new.letter_annotations))
[]
The new record will attempt to combine the annotation, but for any
ambiguities (e.g. different names) it defaults to omitting that
annotation.
>>> from Bio import SeqIO
>>> with open("GenBank/pBAD30.gb") as handle:
... plasmid = SeqIO.read(handle, "gb")
>>> print("%s %i" % (plasmid.id, len(plasmid)))
pBAD30 4923
Now let's cut the plasmid into two pieces, and join them back up the
other way round (i.e. shift the starting point on this plasmid, have
a look at the annotated features in the original file to see why this
particular split point might make sense):
>>> left = plasmid[:3765]
>>> right = plasmid[3765:]
>>> new = right + left
>>> print("%s %i" % (new.id, len(new)))
pBAD30 4923
>>> str(new.seq) == str(right.seq + left.seq)
True
>>> len(new.features) == len(left.features) + len(right.features)
True
When we add the left and right SeqRecord objects, their annotation
is all consistent, so it is all conserved in the new SeqRecord:
>>> new.id == left.id == right.id == plasmid.id
True
>>> new.name == left.name == right.name == plasmid.name
True
>>> new.description == plasmid.description
True
>>> new.annotations == left.annotations == right.annotations
True
>>> new.letter_annotations == plasmid.letter_annotations
True
>>> new.dbxrefs == left.dbxrefs == right.dbxrefs
True
However, we should point out that when we sliced the SeqRecord,
any annotations dictionary or dbxrefs list entries were lost.
You can explicitly copy them like this:
>>> new.annotations = plasmid.annotations.copy()
>>> new.dbxrefs = plasmid.dbxrefs[:]
"""
if not isinstance(other, SeqRecord):
# Assume it is a string or a Seq.
# Note can't transfer any per-letter-annotations
return SeqRecord(
self.seq + other,
id=self.id,
name=self.name,
description=self.description,
features=self.features[:],
annotations=self.annotations.copy(),
dbxrefs=self.dbxrefs[:],
)
# Adding two SeqRecord objects... must merge annotation.
answer = SeqRecord(
self.seq + other.seq, features=self.features[:], dbxrefs=self.dbxrefs[:]
)
# Will take all the features and all the db cross refs,
length = len(self)
for f in other.features:
answer.features.append(f._shift(length))
del length
for ref in other.dbxrefs:
if ref not in answer.dbxrefs:
answer.dbxrefs.append(ref)
# Take common id/name/description/annotation
if self.id == other.id:
answer.id = self.id
if self.name == other.name:
answer.name = self.name
if self.description == other.description:
answer.description = self.description
for k, v in self.annotations.items():
if k in other.annotations and other.annotations[k] == v:
answer.annotations[k] = v
# Can append matching per-letter-annotation
for k, v in self.letter_annotations.items():
if k in other.letter_annotations:
answer.letter_annotations[k] = v + other.letter_annotations[k]
return answer
def __radd__(self, other):
"""Add another sequence or string to this sequence (from the left).
This method handles adding a Seq object (or similar, e.g. MutableSeq)
or a plain Python string (on the left) to a SeqRecord (on the right).
See the __add__ method for more details, but for example:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> new = "ACT" + record
>>> print("%s %s" % (new.id, new.seq))
slxa_0001_1_0001_01 ACTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(new.letter_annotations))
[]
"""
if isinstance(other, SeqRecord):
raise RuntimeError(
"This should have happened via the __add__ of "
"the other SeqRecord being added!"
)
# Assume it is a string or a Seq.
# Note can't transfer any per-letter-annotations
offset = len(other)
return SeqRecord(
other + self.seq,
id=self.id,
name=self.name,
description=self.description,
features=[f._shift(offset) for f in self.features],
annotations=self.annotations.copy(),
dbxrefs=self.dbxrefs[:],
)
def upper(self):
"""Return a copy of the record with an upper case sequence.
All the annotation is preserved unchanged. e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> record = SeqRecord(Seq("acgtACGT"), id="Test",
... description = "Made up for this example")
>>> record.letter_annotations["phred_quality"] = [1, 2, 3, 4, 5, 6, 7, 8]
>>> print(record.upper().format("fastq"))
@Test Made up for this example
ACGTACGT
+
"#$%&'()
<BLANKLINE>
Naturally, there is a matching lower method:
>>> print(record.lower().format("fastq"))
@Test Made up for this example
acgtacgt
+
"#$%&'()
<BLANKLINE>
"""
return SeqRecord(
self.seq.upper(),
id=self.id,
name=self.name,
description=self.description,
dbxrefs=self.dbxrefs[:],
features=self.features[:],
annotations=self.annotations.copy(),
letter_annotations=self.letter_annotations.copy(),
)
def lower(self):
"""Return a copy of the record with a lower case sequence.
All the annotation is preserved unchanged. e.g.
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/aster.pro", "fasta")
>>> print(record.format("fasta"))
>gi|3298468|dbj|BAA31520.1| SAMIPF
GGHVNPAVTFGAFVGGNITLLRGIVYIIAQLLGSTVACLLLKFVTNDMAVGVFSLSAGVG
VTNALVFEIVMTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI
<BLANKLINE>
>>> print(record.lower().format("fasta"))
>gi|3298468|dbj|BAA31520.1| SAMIPF
gghvnpavtfgafvggnitllrgivyiiaqllgstvaclllkfvtndmavgvfslsagvg
vtnalvfeivmtfglvytvyataidpkkgslgtiapiaigfivgani
<BLANKLINE>
To take a more annotation rich example,
>>> from Bio import SeqIO
>>> old = SeqIO.read("EMBL/TRBG361.embl", "embl")
>>> len(old.features)
3
>>> new = old.lower()
>>> len(old.features) == len(new.features)
True
>>> old.annotations["organism"] == new.annotations["organism"]
True
>>> old.dbxrefs == new.dbxrefs
True
"""
return SeqRecord(
self.seq.lower(),
id=self.id,
name=self.name,
description=self.description,
dbxrefs=self.dbxrefs[:],
features=self.features[:],
annotations=self.annotations.copy(),
letter_annotations=self.letter_annotations.copy(),
)
    def reverse_complement(
        self,
        id=False,
        name=False,
        description=False,
        features=True,
        annotations=False,
        letter_annotations=True,
        dbxrefs=False,
    ):
        """Return new SeqRecord with reverse complement sequence.

        By default the new record does NOT preserve the sequence
        identifier, name, description, general annotation or database
        cross-references - these are unlikely to apply to the reversed
        sequence.

        Every keyword argument follows the same convention:

        - a value of the appropriate type (string for id/name/description,
          list for features/dbxrefs, dict for annotations and
          letter_annotations) is used directly on the new record;
        - True copies the attribute from the parent - features get their
          locations flipped, per-letter annotations are reversed, the
          rest are copied unchanged;
        - False (or any other falsey value) omits the attribute.

        The molecule_type annotation controls the complement rules: a
        value containing "protein" raises ValueError, one containing
        "RNA" uses RNA complement rules, anything else is treated as DNA
        (so RNA without U bases must be annotated as RNA or A will map
        to T).

        Note that if the SeqFeature annotation includes any strand
        specific information (e.g. base changes for a SNP), this
        information is not amended and would need correction after the
        reverse complement.
        """
        from Bio.Seq import Seq, MutableSeq  # Lazy to avoid circular imports
        if "protein" in self.annotations.get("molecule_type", ""):
            raise ValueError("Proteins do not have complements!")
        if "RNA" in self.annotations.get("molecule_type", ""):
            if isinstance(self.seq, MutableSeq):
                # Does not currently have reverse_complement_rna method:
                answer = SeqRecord(Seq(self.seq).reverse_complement_rna())
            else:
                answer = SeqRecord(self.seq.reverse_complement_rna())
        else:
            # Default to DNA
            if isinstance(self.seq, MutableSeq):
                # Currently the MutableSeq reverse complement is in situ
                answer = SeqRecord(Seq(self.seq).reverse_complement())
            else:
                answer = SeqRecord(self.seq.reverse_complement())
        # For each attribute below: an explicit value of the right type
        # wins, then True means "copy from the parent", anything else
        # leaves the default.
        if isinstance(id, str):
            answer.id = id
        elif id:
            answer.id = self.id
        if isinstance(name, str):
            answer.name = name
        elif name:
            answer.name = self.name
        if isinstance(description, str):
            answer.description = description
        elif description:
            answer.description = self.description
        if isinstance(dbxrefs, list):
            answer.dbxrefs = dbxrefs
        elif dbxrefs:
            # Copy the old dbxrefs
            answer.dbxrefs = self.dbxrefs[:]
        if isinstance(features, list):
            answer.features = features
        elif features:
            # Copy the old features, adjusting location and string
            length = len(answer)
            answer.features = [f._flip(length) for f in self.features]
            # The old list should have been sorted by start location,
            # reversing it will leave it sorted by what is now the end position,
            # so we need to resort in case of overlapping features.
            # NOTE - In the common case of gene before CDS (and similar) with
            # the exact same locations, this will still maintain gene before CDS
            answer.features.sort(key=lambda x: x.location.start.position)
        if isinstance(annotations, dict):
            answer.annotations = annotations
        elif annotations:
            # Copy the old annotations,
            answer.annotations = self.annotations.copy()
        if isinstance(letter_annotations, dict):
            answer.letter_annotations = letter_annotations
        elif letter_annotations:
            # Copy the old per letter annotations, reversing them
            for key, value in self.letter_annotations.items():
                answer._per_letter_annotations[key] = value[::-1]
        return answer
def translate(
self,
# Seq translation arguments:
table="Standard",
stop_symbol="*",
to_stop=False,
cds=False,
gap=None,
# SeqRecord annotation arguments:
id=False,
name=False,
description=False,
features=False,
annotations=False,
letter_annotations=False,
dbxrefs=False,
):
"""Return new SeqRecord with translated sequence.
This calls the record's .seq.translate() method (which describes
the translation related arguments, like table for the genetic code),
By default the new record does NOT preserve the sequence identifier,
name, description, general annotation or database cross-references -
these are unlikely to apply to the translated sequence.
You can specify the returned record's id, name and description as
strings, or True to keep that of the parent, or False for a default.
You can specify the returned record's features with a list of
SeqFeature objects, or False (default) to omit them.
You can also specify both the returned record's annotations and
letter_annotations as dictionaries, True to keep that of the parent
(annotations only), or False (default) to omit them.
e.g. Loading a FASTA gene and translating it,
>>> from Bio import SeqIO
>>> gene_record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
>>> print(gene_record.format("fasta"))
>gi|3176602|gb|U78617.1|LOU78617 Lathyrus odoratus phytochrome A (PHYA) gene, partial cds
CAGGCTGCGCGGTTTCTATTTATGAAGAACAAGGTCCGTATGATAGTTGATTGTCATGCA
AAACATGTGAAGGTTCTTCAAGACGAAAAACTCCCATTTGATTTGACTCTGTGCGGTTCG
ACCTTAAGAGCTCCACATAGTTGCCATTTGCAGTACATGGCTAACATGGATTCAATTGCT
TCATTGGTTATGGCAGTGGTCGTCAATGACAGCGATGAAGATGGAGATAGCCGTGACGCA
GTTCTACCACAAAAGAAAAAGAGACTTTGGGGTTTGGTAGTTTGTCATAACACTACTCCG
AGGTTTGTT
<BLANKLINE>
And now translating the record, specifying the new ID and description:
>>> protein_record = gene_record.translate(table=11,
... id="phya",
... description="translation")
>>> print(protein_record.format("fasta"))
>phya translation
QAARFLFMKNKVRMIVDCHAKHVKVLQDEKLPFDLTLCGSTLRAPHSCHLQYMANMDSIA
SLVMAVVVNDSDEDGDSRDAVLPQKKKRLWGLVVCHNTTPRFV
<BLANKLINE>
"""
if "protein" == self.annotations.get("molecule_type", ""):
raise ValueError("Proteins cannot be translated!")
answer = SeqRecord(
self.seq.translate(
table=table, stop_symbol=stop_symbol, to_stop=to_stop, cds=cds, gap=gap
)
)
if isinstance(id, str):
answer.id = id
elif id:
answer.id = self.id
if isinstance(name, str):
answer.name = name
elif name:
answer.name = self.name
if isinstance(description, str):
answer.description = description
elif description:
answer.description = self.description
if isinstance(dbxrefs, list):
answer.dbxrefs = dbxrefs
elif dbxrefs:
# Copy the old dbxrefs
answer.dbxrefs = self.dbxrefs[:]
if isinstance(features, list):
answer.features = features
elif features:
# Does not make sense to copy old features as locations wrong
raise TypeError("Unexpected features argument %r" % features)
if isinstance(annotations, dict):
answer.annotations = annotations
elif annotations:
# Copy the old annotations
answer.annotations = self.annotations.copy()
# Set/update to protein:
answer.annotations["molecule_type"] = "protein"
if isinstance(letter_annotations, dict):
answer.letter_annotations = letter_annotations
elif letter_annotations:
# Does not make sense to copy these as length now wrong
raise TypeError(
"Unexpected letter_annotations argument %r" % letter_annotations
)
return answer
if __name__ == "__main__":
    # Running this module directly executes its embedded doctests
    # (imported lazily so normal use of the module pays no cost).
    from Bio._utils import run_doctest
    run_doctest()
| 39.825328
| 215
| 0.608333
|
4a05ab01e040309dba78e2f4e923dda8b4636103
| 1,834
|
py
|
Python
|
bots/zulip_git_config.py
|
dehnert/zulip
|
f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5
|
[
"Apache-2.0"
] | 1
|
2017-07-27T19:49:12.000Z
|
2017-07-27T19:49:12.000Z
|
bots/zulip_git_config.py
|
dehnert/zulip
|
f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5
|
[
"Apache-2.0"
] | 8
|
2021-03-31T18:45:09.000Z
|
2022-03-11T23:25:59.000Z
|
bots/zulip_git_config.py
|
tobby2002/zulip
|
66e7c455759f9368bae16b9a604cf63f8e3524cd
|
[
"Apache-2.0"
] | null | null | null |
# Zulip, Inc's internal git plugin configuration.
# The plugin and example config are under api/integrations/
# Leaving all the instructions out of this file to avoid having to
# sync them as we update the comments.
# Static-analysis only: the "if False" guard keeps the typing import off
# the runtime import path (pre-PEP 484 comment-annotation style).
if False: from typing import Dict, Optional, Text

# Bot credentials used to post the commit notices.
ZULIP_USER = "commit-bot@zulip.com"
ZULIP_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"  # placeholder - set the real key in deployment
# commit_notice_destination() lets you customize where commit notices
# are sent to.
#
# It takes the following arguments:
# * repo = the name of the git repository
# * branch = the name of the branch that was pushed to
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit pushed to "master" to
# * stream "commits"
# * topic "master"
# And similarly for branch "test-post-receive" (for use when testing).
def commit_notice_destination(repo, branch, commit):
    # type: (Text, Text, Text) -> Optional[Dict[str, Text]]
    """Return the stream/subject dict for a push, or None to stay silent.

    Pushes to master, prod and test-post-receive get a notice; branches
    whose name contains "test-" go to the "test" stream, all others to
    "commits".  The topic is the branch name.
    """
    if branch not in ("master", "prod", "test-post-receive"):
        # Return None for cases where you don't want a notice sent
        return None
    stream = 'test' if 'test-' in branch else 'commits'
    return {'stream': stream, 'subject': u"%s" % (branch,)}
# Modify this function to change how commits are displayed; the most
# common customization is to include a link to the commit in your
# graphical repository viewer, e.g.
#
# return '!avatar(%s) [%s](https://example.com/commits/%s)\n' % (author, subject, commit_id)
def format_commit_message(author, subject, commit_id):
    # type: (str, str, str) -> str
    """Render one commit as a Markdown line: avatar, subject, commit link."""
    link = 'https://git.zulip.net/eng/zulip/commit/%s' % (commit_id,)
    return '!avatar(%s) [%s](%s)\n' % (author, subject, link)
# Local install location of the Zulip API and the server to post to.
ZULIP_API_PATH = "/home/zulip/zulip/api"
ZULIP_SITE = "https://zulip.com"
| 39.021277
| 105
| 0.707743
|
4a05ab0415c16125519b2d153c74cf75808d366b
| 1,838
|
py
|
Python
|
python/runtime/pai/submit_pai_task.py
|
awsl-dbq/sqlflow
|
6684ac4b4f26774bd10e437bc52080fdbae5ce49
|
[
"Apache-2.0"
] | null | null | null |
python/runtime/pai/submit_pai_task.py
|
awsl-dbq/sqlflow
|
6684ac4b4f26774bd10e437bc52080fdbae5ce49
|
[
"Apache-2.0"
] | null | null | null |
python/runtime/pai/submit_pai_task.py
|
awsl-dbq/sqlflow
|
6684ac4b4f26774bd10e437bc52080fdbae5ce49
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import six
from runtime.dbapi.maxcompute import MaxComputeConnection
from runtime.diagnostics import SQLFlowDiagnostic
def run_command_and_log(cmd):
    """Run *cmd*, echo its combined stdout/stderr line by line, and
    return the process exit code.

    stderr is folded into stdout so the log preserves output ordering.

    Args:
        cmd: the command to run, as an argv-style list (shell=False).

    Returns:
        The subprocess's integer return code.
    """
    p = subprocess.Popen(cmd,
                         bufsize=0,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         shell=False)
    for line in p.stdout:
        # The pipe yields bytes on Python 3 (and byte-str on Python 2);
        # normalise to text before logging.  (The previous six.PY3 guard
        # and "line is not None" check were redundant: isinstance alone
        # is correct on both versions, and iteration never yields None.)
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        # Strip the trailing newline so print() does not double-space
        # the log output.
        print(line.rstrip('\n'))
    p.communicate()
    return p.returncode
def submit_pai_task(pai_cmd, datasource):
    """Submit given cmd to PAI which manipulate datasource

    Args:
        pai_cmd: The command to submit
        datasource: The datasource this cmd will manipulate

    Raises:
        SQLFlowDiagnostic: when the odpscmd CLI exits non-zero.
    """
    # Pull the MaxCompute credentials and endpoint out of the datasource URI.
    user, passwd, address, project = MaxComputeConnection.get_uri_parts(
        datasource)
    odps_cmd = ["odpscmd", "--instance-priority", "9"]
    odps_cmd += ["-u", user, "-p", passwd]
    odps_cmd += ["--project", project, "--endpoint", address]
    odps_cmd += ["-e", pai_cmd]
    if run_command_and_log(odps_cmd) != 0:
        raise SQLFlowDiagnostic("Execute odps cmd fail: cmd is %s" %
                                " ".join(odps_cmd))
| 32.821429
| 74
| 0.650163
|
4a05ab8530e27e17b018e1ecabb59b37439cf336
| 2,247
|
py
|
Python
|
alipay/aop/api/domain/AlipaySecurityRiskAuthenticationInitializeModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipaySecurityRiskAuthenticationInitializeModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipaySecurityRiskAuthenticationInitializeModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AuthenticationInfo import AuthenticationInfo
class AlipaySecurityRiskAuthenticationInitializeModel(object):
    """Request model for the Alipay security risk authentication
    initialize API, carrying authentication_info, biz_info and env_info.
    """

    # The serialisable fields, in the order they are emitted/parsed.
    _FIELDS = ('authentication_info', 'biz_info', 'env_info')

    def __init__(self):
        self._authentication_info = None
        self._biz_info = None
        self._env_info = None

    @property
    def authentication_info(self):
        return self._authentication_info

    @authentication_info.setter
    def authentication_info(self, value):
        # Accept either a ready-made AuthenticationInfo or a plain dict,
        # coercing the latter.
        if isinstance(value, AuthenticationInfo):
            self._authentication_info = value
        else:
            self._authentication_info = AuthenticationInfo.from_alipay_dict(value)

    @property
    def biz_info(self):
        return self._biz_info

    @biz_info.setter
    def biz_info(self, value):
        self._biz_info = value

    @property
    def env_info(self):
        return self._env_info

    @env_info.setter
    def env_info(self, value):
        self._env_info = value

    def to_alipay_dict(self):
        """Serialise the populated (truthy) fields into a plain dict."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for an empty input."""
        if not d:
            return None
        o = AlipaySecurityRiskAuthenticationInitializeModel()
        # setattr routes through the property setters, so
        # authentication_info is coerced exactly as on direct assignment.
        for key in AlipaySecurityRiskAuthenticationInitializeModel._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| 29.96
| 89
| 0.628393
|
4a05aba3534c3a49f898c10db3e7681f8fb28389
| 14,676
|
py
|
Python
|
models/train_chalearn_main.py
|
Lalit-garg/Gesture-Detection
|
fa405c57fa8497fbeb7b462968dd2534c32412cf
|
[
"MIT"
] | null | null | null |
models/train_chalearn_main.py
|
Lalit-garg/Gesture-Detection
|
fa405c57fa8497fbeb7b462968dd2534c32412cf
|
[
"MIT"
] | null | null | null |
models/train_chalearn_main.py
|
Lalit-garg/Gesture-Detection
|
fa405c57fa8497fbeb7b462968dd2534c32412cf
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import sys
import os
import numpy as np
import c3D_main as conv3d1
import c3D2 as conv3d2
import c3D3 as conv3d3
import time
import pickle
import random
import argparse
from tensorflow.python.client import device_lib
from AdamWOptimizer import create_optimizer
def append_frames(cut_frame_array, required_num_frames):
    """Pad a clip to required_num_frames by cycling its existing frames.

    Frames are appended by repeatedly wrapping around the original clip
    (frame i of the padding is source frame i modulo the clip length).
    If the clip already has at least required_num_frames frames it is
    returned unchanged (as a new array).
    """
    frames = list(cut_frame_array)
    num_available = cut_frame_array.shape[0]
    for idx in range(required_num_frames - num_available):
        frames.append(cut_frame_array[idx % num_available])
    return np.array(frames)
def train_neural_network(x_inpuT,
                         y_inpuT,
                         data_path,
                         val_data_path,
                         save_loss_path,
                         save_model_path,
                         batch_size,
                         val_batch_size,
                         learning_rate,
                         weight_decay,
                         epochs,
                         which_model,
                         num_val_videos,
                         random_clips,
                         win_size,
                         ignore_factor):
    """Build one of the C3D graphs and train it on pickled video clips.

    x_inpuT / y_inpuT: placeholders for a batch of clips and one-hot labels.
    data_path / val_data_path: directories of pickle files, each holding a
        (frame_array, label) pair for one video.
    save_loss_path: optional text file that accumulates per-epoch log lines.
    save_model_path: optional checkpoint prefix, saved after every epoch.
    which_model: selects the conv3d architecture (1, 2 or 3).
    random_clips: number of random windows sampled per training video.
    win_size: number of frames per sampled clip.
    ignore_factor: multiplier on win_size; videos shorter than
        win_size * ignore_factor are padded by cycling frames
        (see append_frames).
    """
    with tf.name_scope("cross_entropy"):
        prediction = 0
        if which_model == 1:
            prediction = conv3d1.inference(x_inpuT)
        elif which_model == 2:
            prediction = conv3d2.inference(x_inpuT)
        elif which_model == 3:
            prediction = conv3d3.inference(x_inpuT)
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y_inpuT))

    # Run batch-norm style update ops together with the train op.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # optimizer = 0
        if weight_decay is not None:
            print("weight decay applied.")
            # AdamW-style decoupled weight decay (see AdamWOptimizer).
            optimizer = create_optimizer(cost, learning_rate, weight_decay)
        else:
            optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    with tf.name_scope("accuracy"):
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_inpuT, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(allow_growth=True)  # grab GPU memory lazily

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print("session starts!")
        sess.run(tf.global_variables_initializer())
        start_time = time.time()
        epoch_loss_list = []
        val_loss_list = []
        pkl_files_list = os.listdir(data_path)
        val_pkl_files_list = os.listdir(val_data_path)

        for epoch in range(epochs):
            print("Epoch {} started!".format(epoch + 1))
            epoch_start_time = time.time()
            epoch_loss = 0
            train_acc = 0
            num_batch_completed = 0
            window_size = win_size
            num_clips_per_video = random_clips
            random.seed(7)  # same clip windows every epoch
            print("Random seed fixed for training.")

            # pkl_files_list = pkl_files_list[ : 64]
            # Drop the remainder so every block is a full batch.
            num_videos = len(pkl_files_list) - (len(pkl_files_list) % batch_size)
            num_blocks = int(num_videos / batch_size)

            # One "block" holds num_clips_per_video mini-batches cut from the
            # same batch_size videos, so each video is loaded only once.
            block_x = np.zeros((num_clips_per_video, batch_size, depth, height, width, 3))
            block_y = np.zeros((num_clips_per_video, batch_size, num_classes))

            for block_index in range(num_blocks):
                # Fill the block: load each video and cut random clips.
                for index_in_batch, pkl_file in enumerate(
                        pkl_files_list[block_index * batch_size: (block_index + 1) * batch_size]):
                    with open(os.path.join(data_path, pkl_file), 'rb') as f:
                        frames_and_label = pickle.load(f)
                    cut_frame_array = frames_and_label[0]
                    label = frames_and_label[1]
                    num_frames = cut_frame_array.shape[0]
                    required_num_frames = int(window_size * ignore_factor)
                    if num_frames <= required_num_frames:
                        # Too short: cycle frames until long enough.
                        print(num_frames, required_num_frames)
                        cut_frame_array = append_frames(cut_frame_array, required_num_frames)
                        num_frames = cut_frame_array.shape[0]
                        print(num_frames)

                    for batch_index in range(num_clips_per_video):
                        start_frame = random.randint(0, num_frames - window_size)
                        end_frame = start_frame + window_size
                        block_x[batch_index, index_in_batch, :, :, :, :] = np.array(
                            cut_frame_array[start_frame: end_frame, :, :, :])
                        basic_line = [0] * num_classes
                        basic_line[int(label) - 1] = 1  # labels are 1-based
                        basic_label = basic_line
                        block_y[batch_index, index_in_batch, :] = np.array(basic_label)

                # Train on every mini-batch cut from this block.
                for batch_index in range(num_clips_per_video):
                    batch_start_time = time.time()
                    mini_batch_x = block_x[batch_index, :, :, :, :, :]
                    mini_batch_x = mini_batch_x / 255.0  # scale pixels to [0, 1]
                    mini_batch_y = block_y[batch_index, :, :]

                    # Shuffle the videos within the mini-batch.
                    perm = np.random.permutation(batch_size)
                    mini_batch_x = mini_batch_x[perm]
                    mini_batch_y = mini_batch_y[perm]

                    _optimizer, _cost, _prediction, _accuracy = sess.run([optimizer, cost, prediction, accuracy],
                                                                         feed_dict={x_inpuT: mini_batch_x,
                                                                                    y_inpuT: mini_batch_y})
                    epoch_loss += _cost
                    train_acc += _accuracy
                    num_batch_completed += 1
                    batch_end_time = time.time()

                    log1 = "\rEpoch: {}, " \
                           "batches completed: {}, " \
                           "time taken: {:.5f}, " \
                           "loss: {:.6f}, " \
                           "accuracy: {:.4f} \n". \
                        format(
                        epoch + 1,
                        num_batch_completed,
                        batch_end_time - batch_start_time,
                        epoch_loss / (batch_size * num_batch_completed),
                        _accuracy)
                    print(log1)
                    sys.stdout.flush()

            del block_x, block_y

            # ---- validation loss (one deterministic clip per video) ----
            val_loss = 0
            val_acc = 0
            val_num_batch_completed = 0
            num_clips_per_video = 1
            val_num_videos = num_val_videos
            val_num_blocks = int(val_num_videos / val_batch_size)
            val_block_x = np.zeros((num_clips_per_video, val_batch_size, depth, height, width, 3))
            val_block_y = np.zeros((num_clips_per_video, val_batch_size, num_classes))
            random.seed(23)  # same validation clips every epoch
            print("Random seed fixed for validation.")

            for block_index in range(val_num_blocks):
                for index_in_batch, val_pkl_file in enumerate(
                        val_pkl_files_list[block_index * val_batch_size: (block_index + 1) * val_batch_size]):
                    with open(os.path.join(val_data_path, val_pkl_file), 'rb') as f:
                        frames_and_label = pickle.load(f)
                    cut_frame_array = frames_and_label[0]
                    label = frames_and_label[1]
                    num_frames = cut_frame_array.shape[0]
                    required_num_frames = int(window_size * ignore_factor)
                    # NOTE(review): training pads when num_frames <= required_num_frames,
                    # validation only when num_frames <= window_size — confirm intended.
                    if num_frames <= window_size:
                        cut_frame_array = append_frames(cut_frame_array, required_num_frames)
                        num_frames = cut_frame_array.shape[0]

                    for batch_index in range(num_clips_per_video):
                        start_frame = random.randint(0, num_frames - window_size)
                        end_frame = start_frame + window_size
                        val_block_x[batch_index, index_in_batch, :, :, :, :] = np.array(
                            cut_frame_array[start_frame: end_frame, :, :, :])
                        basic_line = [0] * num_classes
                        basic_line[int(label) - 1] = 1
                        basic_label = basic_line
                        val_block_y[batch_index, index_in_batch, :] = np.array(basic_label)

                for batch_index in range(num_clips_per_video):
                    val_batch_x = val_block_x[batch_index, :, :, :, :, :]
                    val_batch_x = val_batch_x / 255.0
                    val_batch_y = val_block_y[batch_index, :, :]
                    val_cost, val_batch_accuracy = sess.run([cost, accuracy],
                                                            feed_dict={x_inpuT: val_batch_x, y_inpuT: val_batch_y})
                    val_acc += val_batch_accuracy
                    val_loss += val_cost
                    val_num_batch_completed += 1

            del val_block_x, val_block_y

            # Per-epoch averages.
            epoch_loss = epoch_loss / (batch_size * num_batch_completed)
            train_acc = train_acc / num_batch_completed
            val_loss /= (val_batch_size * val_num_batch_completed)
            val_acc = val_acc / val_num_batch_completed

            epoch_end_time = time.time()
            log3 = "Epoch {} done; " \
                   "Time Taken: {:.4f}s; " \
                   "Train_loss: {:.6f}; " \
                   "Val_loss: {:.6f}; " \
                   "Train_acc: {:.4f}; " \
                   "Val_acc: {:.4f}; " \
                   "Train batches: {}; " \
                   "Val batches: {};\n". \
                format(epoch + 1, epoch_end_time - epoch_start_time, epoch_loss, val_loss, train_acc, val_acc,
                       num_batch_completed, val_num_batch_completed)
            print(log3)

            if save_loss_path is not None:
                # Append the epoch summary to the run log.
                file1 = open(save_loss_path, "a")
                file1.write(log3)
                file1.close()

            epoch_loss_list.append(epoch_loss)
            val_loss_list.append(val_loss)

            if save_model_path is not None:
                # Checkpoint after every epoch (overwrites the same prefix).
                saver.save(sess, save_model_path)

        end_time = time.time()
        print('Time elapse: ', str(end_time - start_time))
        print(epoch_loss_list)

        if save_loss_path is not None:
            file1 = open(save_loss_path, "a")
            file1.write("Train Loss List: {} \n".format(str(epoch_loss_list)))
            file1.write("Val Loss List: {} \n".format(str(val_loss_list)))
            file1.close()
if __name__ == '__main__':
    np.random.seed(0)  # reproducible mini-batch shuffling

    # ---- command-line arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument('-bs', action='store', dest='batch_size', type=int)
    parser.add_argument('-vbs', action='store', dest='val_batch_size', type=int)
    parser.add_argument('-lr', action='store', dest='learning_rate', type=float)
    parser.add_argument('-wd', action='store', dest='weight_decay', type=float, const=None)
    parser.add_argument('-e', action='store', dest='epochs', type=int)
    parser.add_argument('-nvv', action='store', dest='num_val_videos', type=int)
    parser.add_argument('-rc', action='store', dest='random_clips', type=int)
    parser.add_argument('-ws', action='store', dest='win_size', type=int)
    parser.add_argument('-slp', action='store', dest='save_loss_path', const=None)
    parser.add_argument('-smp', action='store', dest='save_model_path', const=None)
    parser.add_argument('-mn', action='store', dest='model_num', type=int)
    parser.add_argument('-vd', action='store', dest='visible_devices')
    parser.add_argument('-nd', action='store', dest='num_device', type=int)
    parser.add_argument('-if', action='store', dest='ign_fact', type=float, const=None)
    results = parser.parse_args()

    arg_batch_size = results.batch_size
    arg_val_batch_size = results.val_batch_size
    arg_lr = results.learning_rate
    arg_wd = results.weight_decay
    arg_epochs = results.epochs
    arg_num_val_videos = results.num_val_videos
    arg_random_clips = results.random_clips
    arg_win_size = results.win_size
    arg_save_loss_path = results.save_loss_path
    arg_save_model_path = results.save_model_path
    arg_model_num = results.model_num
    arg_visible_devices = results.visible_devices
    arg_num_device = results.num_device
    arg_ign_fact = results.ign_fact

    # Fixed locations of the preprocessed 64x64 ChaLearn pickles.
    data_path = "/home/axp798/axp798gallinahome/data/chalearn/train_64/"
    val_data_path = "/home/axp798/axp798gallinahome/data/chalearn/valid_64/"

    # Optional per-run loss log under models/loss_chalearn/.
    ar_save_loss_path = None
    if arg_save_loss_path is not None:
        ar_save_loss_path = "/home/axp798/axp798gallinahome/Gesture-Detection/models/loss_chalearn/{}".format(
            arg_save_loss_path)

    # Optional checkpoint directory, created on demand.
    ar_save_model_path = None
    if arg_save_model_path is not None:
        path = '/home/axp798/axp798gallinahome/Gesture-Detection/models/{}/'.format(arg_save_model_path)
        if not os.path.exists(path):
            os.mkdir(path)
        ar_save_model_path = path + "model"

    if ar_save_loss_path is not None:
        # Start the log with the full parameter set for reproducibility.
        file1 = open(ar_save_loss_path, "w")
        file1.write("Params: {} \n".format(results))
        file1.write("Losses: \n")
        file1.close()

    # Input geometry: win_size frames at 64x64 RGB; 249 gesture classes.
    depth = arg_win_size
    height = 64
    width = 64
    num_classes = 249

    os.environ['CUDA_VISIBLE_DEVICES'] = "{}".format(arg_visible_devices)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"  # silence TF info/warning logs
    print(device_lib.list_local_devices())

    choose_device = "/device:GPU:{}".format(arg_num_device)
    with tf.device(choose_device):
        x_inpuT = tf.placeholder(tf.float32, shape=[arg_batch_size, depth, height, width, 3])
        y_inpuT = tf.placeholder(tf.float32, shape=[arg_batch_size, num_classes])
        train_neural_network(x_inpuT, y_inpuT, data_path, val_data_path,
                             save_loss_path=ar_save_loss_path,
                             save_model_path=ar_save_model_path,
                             batch_size=arg_batch_size,
                             learning_rate=arg_lr,
                             weight_decay=arg_wd,
                             epochs=arg_epochs,
                             val_batch_size=arg_val_batch_size,
                             which_model=arg_model_num,
                             num_val_videos=arg_num_val_videos,
                             random_clips=arg_random_clips,
                             win_size=arg_win_size,
                             ignore_factor=arg_ign_fact)
| 39.989101
| 115
| 0.569365
|
4a05abbad07ed4e7f72391db2df0abe080b62a05
| 1,375
|
py
|
Python
|
thehood/urls.py
|
Alexanderoke/Neighbourhood
|
75bcffbc72288e2d2b58cfca2c6b415e47f0ec1a
|
[
"MIT"
] | null | null | null |
thehood/urls.py
|
Alexanderoke/Neighbourhood
|
75bcffbc72288e2d2b58cfca2c6b415e47f0ec1a
|
[
"MIT"
] | null | null | null |
thehood/urls.py
|
Alexanderoke/Neighbourhood
|
75bcffbc72288e2d2b58cfca2c6b415e47f0ec1a
|
[
"MIT"
] | null | null | null |
"""thehood URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path,include
from django.conf import settings
from user import views as user_views
from django.conf.urls.static import static
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Account management: custom register/profile views plus Django's
    # built-in login/logout class-based views using app templates.
    path('register/', user_views.register, name='register'),
    path('login/', auth_views.LoginView.as_view(template_name='user/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='user/logout.html'), name='logout'),
    path('profile/', user_views.profile, name='profile'),
    # Everything else is delegated to the hood app's URLconf.
    path('',include('hood.urls') ),
]
if settings.DEBUG:
    # Serve user-uploaded media straight from Django in development only.
    urlpatterns+=static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 41.666667
| 100
| 0.728
|
4a05ac647a686d4078e96c1aec42c19824501721
| 5,377
|
py
|
Python
|
tests/test_utils.py
|
LemontechSA/glide
|
a84ec1b9c8a982430308e8b0a04f84f26200765c
|
[
"MIT"
] | 19
|
2019-09-09T18:51:26.000Z
|
2021-11-23T21:15:37.000Z
|
tests/test_utils.py
|
LemontechSA/glide
|
a84ec1b9c8a982430308e8b0a04f84f26200765c
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
LemontechSA/glide
|
a84ec1b9c8a982430308e8b0a04f84f26200765c
|
[
"MIT"
] | 4
|
2019-12-01T22:40:04.000Z
|
2022-03-23T18:06:52.000Z
|
import configparser
import logging
import os
from shutil import copyfile
import sqlite3
# Optional third-party clients: tests that need them are skipped/guarded
# when the packages are not installed.
try:
    import pymysql
except ImportError:
    pymysql = None

try:
    from redis import Redis
    from redis.exceptions import ConnectionError
except ImportError:
    # Fixed: this was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should disable Redis support,
    # matching the pymysql guard above.
    Redis = None
import sqlalchemy as sa
from tlbx import st, rmfile
from glide import *
from glide.utils import dbg, info, warn, error
# Raise glide's own log level so test output stays readable.
logging.getLogger("glide").setLevel(logging.INFO)

# Base name shared by all test fixtures (csv files, sqlite DBs, table names).
TEST_DATA_NAME = "dma_zip"

assert "GLIDE_CONFIG_FILE" in os.environ, (
    "Please specify the location of a glide config file using the environment "
    "variable GLIDE_CONFIG_FILE"
)

# All test settings come from the [TEST] section of the glide config file.
config = configparser.ConfigParser()
config.read(os.environ["GLIDE_CONFIG_FILE"])
test_config = config["TEST"]
def redis_running():
    """Return True if a local Redis server answers a ping.

    Fixed: when the redis package is not installed, module-level ``Redis``
    is None and the original call ``Redis(...)`` raised TypeError; now the
    function simply reports that no server is reachable.
    """
    if Redis is None:
        # redis client library missing -> no server check is possible.
        return False
    print("Checking for running Redis server")
    conn = Redis(socket_connect_timeout=1)
    try:
        conn.ping()
        return True
    except ConnectionError:
        return False
def lower(s):
    """Lower-case *s* when it is exactly a str; pass anything else through."""
    if type(s) == str:
        return s.lower()
    return s
def df_lower(df):
    """Return a copy of *df* with every str cell lower-cased."""
    return df.applymap(lambda cell: cell.lower() if type(cell) == str else cell)
def row_lower(row):
    """Lower-case every str value of the mapping *row* in place; return it."""
    for key in row:
        value = row[key]
        row[key] = value.lower() if type(value) == str else value
    return row
def lower_rows(data):
    """Lower-case all str values of every row dict in *data*, in place."""
    for row in data:
        for key, value in row.items():
            if type(value) == str:
                row[key] = value.lower()
    return data
def get_filenames(rootdir, extension):
    """Return (input file, fresh output file) paths for the test dataset."""
    base_name = "{}.{}".format(TEST_DATA_NAME, extension)
    infile = "{}/{}".format(rootdir, base_name)
    outfile = "{}/{}".format(test_config["OutputDirectory"], base_name)
    # Make sure each test starts without a stale output file.
    rmfile(outfile, ignore_missing=True)
    return infile, outfile
def get_db_filenames(rootdir):
    """Return (table name, input sqlite path, output sqlite path)."""
    table = TEST_DATA_NAME
    db_name = table + ".db"
    in_db_file = "{}/{}".format(rootdir, db_name)
    out_db_file = "{}/{}".format(test_config["OutputDirectory"], db_name)
    return table, in_db_file, out_db_file
def copy_sqlite_test_db():
    """Copy the pristine input sqlite DB over the output location."""
    _, src_db, dst_db = get_db_filenames(get_current_dir())
    copyfile(src_db, dst_db)
def clear_sqlite_table_if_exists(conn, table):
    """Delete every row of *table*, silently doing nothing if it is absent."""
    try:
        conn.execute("delete from %s" % table)
        conn.commit()
    except sqlite3.OperationalError:
        # Table does not exist yet -- treat it as already clear.
        pass
def get_sqlalchemy_mysql_engine():
    """Build a SQLAlchemy engine for the MySQL test schema from config."""
    host = test_config["MySQLHost"]
    port = int(test_config["MySQLPort"])
    user = test_config["MySQLUser"]
    password = test_config.get("MySQLPassword", None)
    schema = test_config["MySQLTestSchema"]
    if host in ["localhost", "127.0.0.1"] and not password:
        # Local passwordless connection: omit password and port.
        conn_str = "mysql+pymysql://{user}@{host}/{schema}".format(
            user=user, host=host, schema=schema)
    else:
        conn_str = "mysql+pymysql://{user}:{password}@{host}:{port}/{schema}".format(
            user=user, password=password, host=host, port=port, schema=schema)
    return sa.create_engine(conn_str)
def get_sqlalchemy_conn():
    """Open a connection on the shared MySQL test engine."""
    return get_sqlalchemy_mysql_engine().connect()
def get_pymysql_conn():
    """Open a raw PyMySQL connection (DictCursor) to the test schema."""
    assert pymysql, "PyMySQL package is not installed"
    cfg = test_config
    return pymysql.connect(
        host=cfg["MySQLHost"],
        port=int(cfg["MySQLPort"]),
        db=cfg["MySQLTestSchema"],
        user=cfg["MySQLUser"],
        passwd=cfg.get("MySQLPassword", None),
        cursorclass=pymysql.cursors.DictCursor,
    )
def get_sqlite_in_conn():
    """Connect to the pristine input sqlite DB with dict-like row access."""
    _, in_db_file, _ = get_db_filenames(get_current_dir())
    conn = sqlite3.connect(in_db_file)
    conn.row_factory = sqlite3.Row  # rows usable like mappings
    return conn
def get_sqlite_out_conn():
    """Connect to the output sqlite DB with dict-like row access."""
    _, _, out_db_file = get_db_filenames(get_current_dir())
    conn = sqlite3.connect(out_db_file)
    conn.row_factory = sqlite3.Row  # rows usable like mappings
    return conn
def sqlite_glider(rootdir, nodes, reset_output=False):
    """Note: this should not be called once you have already connected to
    the sqlite output DB"""
    table, in_db_file, out_db_file = get_db_filenames(rootdir)
    if reset_output:
        # Drop any stale output DB, then restore a pristine copy of the input.
        rmfile(out_db_file, ignore_missing=True)
        copyfile(in_db_file, out_db_file)
    return Glider(nodes), table
def sqlalchemy_setup(rootdir, conn, truncate=False, sa_objects=False):
    """Return in/out table names (or reflected SA Table objects).

    With truncate=True the output table is emptied first; truncation
    failures are printed but not fatal.
    """
    in_table, out_table = db_tables()
    if truncate:
        try:
            conn.execute("truncate %s" % out_table)
        except Exception as e:
            print("Error during truncation: %s" % str(e))
    if sa_objects:
        # Reflect the schema and hand back real Table objects keyed by the
        # unqualified table name.
        meta = sa.MetaData()
        meta.bind = conn.engine
        meta.reflect()
        in_table = meta.tables[in_table.split(".")[-1]]
        out_table = meta.tables[out_table.split(".")[-1]]
    return in_table, out_table
def db_tables():
    """Return (source table, temp output table) fully-qualified names."""
    in_table = "{}.{}".format(test_config["MySQLTestSchema"], TEST_DATA_NAME)
    return in_table, in_table + "_tmp"
def dbapi_setup(rootdir, conn, truncate=False):
    """Return (in_table, out_table, cursor) for DB-API based tests."""
    source_table, tmp_table = db_tables()
    cursor = conn.cursor(pymysql.cursors.DictCursor)
    cursor.execute("use %s" % test_config["MySQLTestSchema"])
    if truncate:
        # Rebuild the output table empty, cloning the source schema.
        cursor.execute("drop table if exists %s" % tmp_table)
        cursor.execute("create table %s like %s" % (tmp_table, source_table))
    return source_table, tmp_table, cursor
def file_glider(rootdir, extension, nodes):
    """Build a Glider plus (infile, outfile) paths for a file-based test."""
    infile, outfile = get_filenames(rootdir, extension)
    return Glider(nodes), infile, outfile
def get_current_dir():
    """Absolute path of the directory containing this module."""
    return os.path.abspath(os.path.dirname(__file__))
| 26.357843
| 86
| 0.675098
|
4a05ac857f23cb032431dca22e9f9dc234c173f4
| 370
|
py
|
Python
|
pype9/utils/mpi.py
|
tclose/Pype9
|
23f96c0885fd9df12d9d11ff800f816520e4b17a
|
[
"MIT"
] | null | null | null |
pype9/utils/mpi.py
|
tclose/Pype9
|
23f96c0885fd9df12d9d11ff800f816520e4b17a
|
[
"MIT"
] | null | null | null |
pype9/utils/mpi.py
|
tclose/Pype9
|
23f96c0885fd9df12d9d11ff800f816520e4b17a
|
[
"MIT"
] | 1
|
2021-04-08T12:46:21.000Z
|
2021-04-08T12:46:21.000Z
|
class DummyMPICom(object):
    """Stand-in communicator used when mpi4py is unavailable."""

    # Single-process run: always the root rank of a size-1 "world".
    rank = 0
    size = 1

    def barrier(self):
        """No other processes exist, so there is nothing to synchronize."""
        pass
try:
    from mpi4py import MPI  # @UnusedImport @IgnorePep8 This is imported before NEURON to avoid a bug in NEURON
except ImportError:
    # mpi4py not installed: fall back to the single-process dummy communicator.
    mpi_comm = DummyMPICom()
else:
    mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0  # rank of the master process
def is_mpi_master():
    """True when running on the MPI root rank (or in single-process mode)."""
    return mpi_comm.rank == MPI_ROOT
| 17.619048
| 111
| 0.678378
|
4a05acd8f5e80560ae9e9fc73108bca43bc7a6af
| 3,386
|
py
|
Python
|
webapp/starter/starter/settings.py
|
djshen-ponddy/docker-django-mysql
|
53e053ac9252512e3c4a878fd6f06ee316bfa37d
|
[
"MIT"
] | null | null | null |
webapp/starter/starter/settings.py
|
djshen-ponddy/docker-django-mysql
|
53e053ac9252512e3c4a878fd6f06ee316bfa37d
|
[
"MIT"
] | null | null | null |
webapp/starter/starter/settings.py
|
djshen-ponddy/docker-django-mysql
|
53e053ac9252512e3c4a878fd6f06ee316bfa37d
|
[
"MIT"
] | null | null | null |
"""
Django settings for starter project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'starter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'starter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('MYSQL_DATABASE'),
'USER': os.environ.get('MYSQL_USER'),
'PASSWORD': os.environ.get('MYSQL_PASSWORD'),
'HOST': os.environ.get('MYSQL_HOST'),
'PORT': os.environ.get('MYSQL_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/srv/static-files'
| 26.661417
| 91
| 0.696102
|
4a05ad5ece5f980999f58dff4d1b584c168f83f2
| 1,159
|
py
|
Python
|
tests/terraform/graph/checks_infra/attribute_solvers/not_exists_solver/test_solver.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1
|
2022-02-20T21:20:39.000Z
|
2022-02-20T21:20:39.000Z
|
tests/terraform/graph/checks_infra/attribute_solvers/not_exists_solver/test_solver.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 3
|
2022-03-07T20:37:31.000Z
|
2022-03-21T20:20:14.000Z
|
tests/terraform/graph/checks_infra/attribute_solvers/not_exists_solver/test_solver.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
import os

from tests.terraform.graph.checks_infra.test_base import TestBaseSolver

# Directory holding this test module (and its check definitions).
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
class TestNotExistsSolver(TestBaseSolver):
    """Exercises the "not exists" attribute solver against s3_bucket fixtures."""

    def setUp(self):
        self.checks_dir = TEST_DIRNAME
        super(TestNotExistsSolver, self).setUp()

    def _run_existence_check(self, check_id, should_pass, should_fail):
        # Every scenario runs against the shared s3_bucket terraform fixtures.
        root_folder = '../../../resources/s3_bucket'
        expected_results = {check_id: {"should_pass": should_pass, "should_fail": should_fail}}
        self.run_test(root_folder=root_folder, expected_results=expected_results, check_id=check_id)

    def test_nested_attribute_doesnt_exists_versioning(self):
        self._run_existence_check("VersioningEnabledExists",
                                  should_pass=[],
                                  should_fail=['aws_s3_bucket.destination'])

    def test_nested_attribute_doesnt_exists_tag(self):
        self._run_existence_check("TagEnvironmentExists",
                                  should_pass=['aws_s3_bucket.destination'],
                                  should_fail=[])
| 38.633333
| 100
| 0.721311
|
4a05af024e51c7a312a711a2d7ccac42ed4363fd
| 19,574
|
py
|
Python
|
tensor2tensor/models/research/autoencoders.py
|
xueeinstein/tensor2tensor
|
b42e7bae72044916d465b7e298569b2823fe9bc0
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/models/research/autoencoders.py
|
xueeinstein/tensor2tensor
|
b42e7bae72044916d465b7e298569b2823fe9bc0
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/models/research/autoencoders.py
|
xueeinstein/tensor2tensor
|
b42e7bae72044916d465b7e298569b2823fe9bc0
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoencoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import discretization
from tensor2tensor.models import basic
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_model
class AutoencoderAutoregressive(basic.BasicAutoencoder):
  """Autoencoder with an autoregressive part."""

  def body(self, features):
    """Autoencode, then refine the result with an autoregressive head.

    Returns a (result, losses) pair shaped like the basic autoencoder's.
    """
    hparams = self.hparams
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    # Run the basic autoencoder part first.
    basic_result, losses = super(AutoencoderAutoregressive, self).body(features)
    shape = common_layers.shape_list(basic_result)
    # Flatten spatial dimensions: [batch, positions, channels].
    basic1d = tf.reshape(basic_result, [shape[0], -1, shape[3]])
    # During autoregressive inference, don't resample: reuse the first
    # sampled tensor across decode steps.
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
      if hasattr(hparams, "sampled_basic1d_tensor"):
        basic1d = hparams.sampled_basic1d_tensor
      else:
        hparams.sampled_basic1d_tensor = basic1d
    # Prepare inputs for autoregressive modes.
    if common_layers.shape_list(features["targets"])[1] == 1:
      # This happens on the first step of predictions.
      assert hparams.mode == tf.estimator.ModeKeys.PREDICT
      features["targets"] = tf.zeros_like(basic_result)
    # Randomly drop targets (annealed over warmup) so the model cannot rely
    # purely on teacher forcing.
    targets_dropout = common_layers.mix(
        features["targets"], tf.zeros_like(basic_result),
        hparams.bottleneck_warmup_steps, is_training,
        max_prob=1.0 - hparams.autoregressive_dropout, broadcast_last=True)
    targets1d = tf.reshape(targets_dropout, [shape[0], -1, shape[3]])
    targets_shifted = common_layers.shift_right_3d(targets1d)
    concat1d = tf.concat([basic1d, targets_shifted], axis=-1)
    # The forget_base hparam sets purely-autoregressive mode, no autoencoder.
    if hparams.autoregressive_forget_base:
      concat1d = tf.reshape(features["targets"], [shape[0], -1, shape[3]])
      concat1d = common_layers.shift_right_3d(concat1d)
    # The autoregressive part depends on the mode.
    if hparams.autoregressive_mode == "none":
      assert not hparams.autoregressive_forget_base
      return basic_result, losses
    if hparams.autoregressive_mode == "conv3":
      # Causal (left-padded) width-3 convolution.
      res = common_layers.conv1d(concat1d, shape[3], 3, padding="LEFT",
                                 activation=common_layers.belu,
                                 name="autoregressive_conv3")
      return tf.reshape(res, shape), losses
    if hparams.autoregressive_mode == "conv5":
      res = common_layers.conv1d(concat1d, shape[3], 5, padding="LEFT",
                                 activation=common_layers.belu,
                                 name="autoregressive_conv5")
      return tf.reshape(res, shape), losses
    if hparams.autoregressive_mode == "sru":
      # Causal conv followed by a Simple Recurrent Unit.
      res = common_layers.conv1d(concat1d, shape[3], 3, padding="LEFT",
                                 activation=common_layers.belu,
                                 name="autoregressive_sru_conv3")
      res = common_layers.sru(res)
      return tf.reshape(res, shape), losses

    raise ValueError("Unsupported autoregressive mode: %s"
                     % hparams.autoregressive_mode)

  def infer(self, features=None, *args, **kwargs):
    """Produce predictions from the model by sampling."""
    # Inputs and features preparation needed to handle edge cases.
    if not features:
      features = {}
    inputs_old = None
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)

    # Sample first.
    try:
      num_channels = self.hparams.problem.num_channels
    except AttributeError:
      num_channels = 1
    features["targets"] = tf.zeros(
        [self.hparams.batch_size, 1, 1, num_channels],
        dtype=tf.int32)
    logits, _ = self(features)  # pylint: disable=not-callable
    # Temperature 0.0 -> greedy first pass.
    samples = common_layers.sample_with_temperature(
        logits, 0.0)
    shape = common_layers.shape_list(samples)

    # Sample again if requested for the autoregressive part.
    # NOTE(review): extra_samples is hard-coded to 0, so the refinement loop
    # below is currently disabled.
    extra_samples = 0
    self.hparams.autoregressive_dropout = 0.2
    for i in range(extra_samples):
      if i == extra_samples - 2:
        self.hparams.autoregressive_dropout -= 0.1
        self.hparams.sampling_temp /= 2
      if i == extra_samples - 1:
        self.hparams.autoregressive_dropout -= 0.1
        self.hparams.sampling_temp = 0.0
      features["targets"] = samples
      old_samples1d = tf.reshape(samples, [shape[0], -1, shape[3]])
      with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        logits, _ = self(features)  # pylint: disable=not-callable
        samples = common_layers.sample_with_temperature(
            logits, self.hparams.sampling_temp)
        samples1d = tf.reshape(samples, [shape[0], -1, shape[3]])
        # Keep the first i positions from the previous pass, resample the rest.
        samples1d = tf.concat([old_samples1d[:, :i, :], samples1d[:, i:, :]],
                              axis=1)
        samples = tf.reshape(samples1d, shape)

    # Restore inputs to not confuse Estimator in edge cases.
    if inputs_old is not None:
      features["inputs"] = inputs_old

    # Return samples.
    return samples
@registry.register_model
class AutoencoderResidual(AutoencoderAutoregressive):
  """Residual autoencoder."""

  def encoder(self, x):
    """Strided down-convolutions, each followed by a residual stack."""
    with tf.variable_scope("encoder"):
      hparams = self.hparams
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      # 1-D inputs only convolve along the first spatial axis.
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Down-convolutions.
      for i in range(hparams.num_hidden_layers):
        with tf.variable_scope("layer_%d" % i):
          x = self.make_even_size(x)
          x = tf.nn.dropout(x, 1.0 - hparams.dropout)
          # Double filters at each level, capped at max_hidden_size.
          filters = hparams.hidden_size * 2**(i + 1)
          filters = min(filters, hparams.max_hidden_size)
          x = tf.layers.conv2d(
              x, filters, kernel, strides=strides,
              padding="SAME", activation=common_layers.belu, name="strided")
          y = x
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              # Inner residual layers may use a scaled filter count.
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y, residual_filters, residual_kernel,
                padding="SAME", activation=common_layers.belu,
                name="residual_%d" % r)
          x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          x = common_layers.layer_norm(x)
      return x

  def decoder(self, x):
    """Mirror of the encoder: strided up-convolutions with residual stacks."""
    with tf.variable_scope("decoder"):
      hparams = self.hparams
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Up-convolutions.
      for i in range(hparams.num_hidden_layers):
        x = tf.nn.dropout(x, 1.0 - hparams.dropout)
        j = hparams.num_hidden_layers - i - 1
        filters = hparams.hidden_size * 2**j
        filters = min(filters, hparams.max_hidden_size)
        with tf.variable_scope("layer_%d" % i):
          # NOTE(review): j and filters are recomputed here WITHOUT the
          # max_hidden_size clamp applied just above -- possibly unintended;
          # verify against the encoder, which does clamp.
          j = hparams.num_hidden_layers - i - 1
          filters = hparams.hidden_size * 2**j
          x = tf.layers.conv2d_transpose(
              x, filters, kernel, strides=strides,
              padding="SAME", activation=common_layers.belu, name="strided")
          y = x
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y, residual_filters, residual_kernel,
                padding="SAME", activation=common_layers.belu,
                name="residual_%d" % r)
          x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          x = common_layers.layer_norm(x)
      return x
@registry.register_model
class AutoencoderBasicDiscrete(AutoencoderAutoregressive):
  """Discrete autoencoder."""

  def bottleneck(self, x):
    """Binarize the bottleneck to {-1, 1} with a straight-through gradient."""
    hparams = self.hparams
    x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_size, name="bottleneck"))
    # Forward pass uses the sign of x; the stop_gradient trick makes the
    # gradient flow through x unchanged (straight-through estimator).
    d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # Randomly flip a bottleneck_noise fraction of the bits during training.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0
      d *= noise
    # Anneal from the continuous tanh value to the discrete one over warmup.
    x = common_layers.mix(d, x, hparams.discretize_warmup_steps,
                          hparams.mode == tf.estimator.ModeKeys.TRAIN)
    return x

  def sample(self):
    """Draw a uniform random {-1, 1} bottleneck at the latent resolution."""
    hp = self.hparams
    # Spatial dims shrink by 2 per hidden layer (width only if 2-D input).
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
            hp.bottleneck_size]
    rand = tf.random_uniform(size)
    return 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
@registry.register_model
class AutoencoderResidualDiscrete(AutoencoderResidual):
  """Discrete residual autoencoder."""

  def bottleneck(self, x, bottleneck_size=None):
    """Discretize x with the hparams-selected parametrized bottleneck.

    Args:
      x: activations to discretize.
      bottleneck_size: optional temporary override of
        hparams.bottleneck_size; the original value is restored on return.

    Returns:
      the discretized bottleneck Tensor.
    """
    # Swap the hparam in place so parametrized_bottleneck sees the override,
    # then restore it afterwards.
    if bottleneck_size is not None:
      old_bottleneck_size = self.hparams.bottleneck_size
      self.hparams.bottleneck_size = bottleneck_size
    res = discretization.parametrized_bottleneck(x, self.hparams)
    if bottleneck_size is not None:
      self.hparams.bottleneck_size = old_bottleneck_size
    return res

  def unbottleneck(self, x, res_size):
    """Project the bottleneck back to res_size channels."""
    return discretization.parametrized_unbottleneck(x, res_size, self.hparams)

  def bottleneck_loss(self, b):
    """Penalize the absolute mean of b over a random subset of positions."""
    # Pick a random fraction of positions (the threshold itself is random),
    # then take |sum(selected b)| / (count + 1) as the loss.
    part = tf.random_uniform(common_layers.shape_list(b))
    selection = tf.to_float(tf.less(part, tf.random_uniform([])))
    selection_size = tf.reduce_sum(selection)
    part_avg = tf.abs(tf.reduce_sum(b * selection)) / (selection_size + 1)
    return part_avg

  def sample(self):
    """Draw uniform random {-1, 1} bottleneck bits at the encoded resolution."""
    hp = self.hparams
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
            hp.bottleneck_size]
    rand = tf.random_uniform(size)
    res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
    # If you want to set some first bits to a fixed value, do this:
    # fixed = tf.zeros_like(rand) - 1.0
    # nbits = 3
    # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
    return res
@registry.register_model
class AutoencoderOrderedDiscrete(AutoencoderResidualDiscrete):
  """Ordered discrete autoencoder.

  Like the residual discrete autoencoder, but during training the bit-flip
  probability increases monotonically along the last (bottleneck) axis, so
  early bottleneck positions are nearly noise-free while later ones are
  noisier.
  """

  def bottleneck(self, x):
    """Discretize x, then apply position-ordered flip noise in training."""
    hparams = self.hparams
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_size = 1 - noise.
      # So log(p) * bottleneck_size = log(noise)
      log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_size)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_size.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x
@registry.register_model
class AutoencoderStacked(AutoencoderResidualDiscrete):
  """A stacked autoencoder: recursively autoencodes its own bottlenecks."""

  def stack(self, b, size, bottleneck_size, name):
    """Encode bottleneck b one level further down the stack."""
    with tf.variable_scope(name + "_stack"):
      unb = self.unbottleneck(b, size)
      enc = self.encoder(unb)
      return self.bottleneck(enc, bottleneck_size=bottleneck_size)

  def unstack(self, b, size, bottleneck_size, name):
    """Decode bottleneck b one level up, predicting the previous-level bits.

    Returns:
      (gt, pred1): gt is the {-1, 1} bit prediction with a tanh-based
      straight-through gradient; pred1 holds per-bit 2-way logits used by
      stack_loss.
    """
    with tf.variable_scope(name + "_unstack"):
      unb = self.unbottleneck(b, size)
      dec = self.decoder(unb)
      pred = tf.layers.dense(dec, bottleneck_size, name="pred")
      pred_shape = common_layers.shape_list(pred)
      # Reshape the last axis into 2-way logits per bit.
      pred1 = tf.reshape(pred, pred_shape[:-1] + [-1, 2])
      x, y = tf.split(pred1, 2, axis=-1)
      x = tf.squeeze(x, axis=[-1])
      y = tf.squeeze(y, axis=[-1])
      # Hard {-1, 1} decision in the forward pass...
      gt = 2.0 * tf.to_float(tf.less(x, y)) - 1.0
      # ...with gradients flowing through tanh(y - x).
      gtc = tf.tanh(y - x)
      gt += gtc - tf.stop_gradient(gtc)
      return gt, pred1

  def stack_loss(self, b, b_pred, name):
    """Cross-entropy between true bits b (in {-1, 1}) and logits b_pred."""
    with tf.variable_scope(name):
      # Map {-1, 1} bits to {0, 1} class labels.
      labels_discrete = tf.to_int32((b + 1.0) * 0.5)
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels_discrete, logits=b_pred)
      return tf.reduce_mean(loss)

  def full_stack(self, b, x_size, bottleneck_size, losses, is_training, i):
    """Recursively stack down to level 1 and unstack back, collecting losses.

    Per-level bottleneck and reconstruction losses are written into `losses`.
    """
    stack1_b = self.stack(b, x_size, bottleneck_size, "step%d" % i)
    if i > 1:
      stack1_b = self.full_stack(stack1_b, 2 * x_size, 2 * bottleneck_size,
                                 losses, is_training, i - 1)
    b1, b_pred = self.unstack(stack1_b, x_size, bottleneck_size, "step%d" % i)
    losses["bottleneck%d_loss" % i] = self.bottleneck_loss(stack1_b)
    losses["stack%d_loss" % i] = self.stack_loss(b, b_pred, "step%d" % i)
    b_shape = common_layers.shape_list(b)
    if is_training:
      # Half the time pass the true bits through instead of the
      # reconstruction, to stabilize training.
      b1 = tf.cond(tf.less(tf.random_uniform([]), 0.5),
                   lambda: b, lambda: b1)
    return tf.reshape(b1, b_shape)

  def body(self, features):
    """Model body: encode, run the full stack, decode, and mix warm-up."""
    hparams = self.hparams
    # Each stack level reuses encoder/decoder with a single hidden layer;
    # the original value is restored at the end of the non-PREDICT path.
    num_stacks = hparams.num_hidden_layers
    hparams.num_hidden_layers = 1
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      x = features["targets"]
      shape = common_layers.shape_list(x)
      is1d = shape[2] == 1
      self.is1d = is1d
      # Pad so every stack level can halve the spatial dims evenly.
      x, _ = common_layers.pad_to_same_length(
          x, x, final_length_divisible_by=2**num_stacks, axis=1)
      if not is1d:
        x, _ = common_layers.pad_to_same_length(
            x, x, final_length_divisible_by=2**num_stacks, axis=2)
      # Run encoder.
      x = self.encoder(x)
      x_size = common_layers.shape_list(x)[-1]
      # Bottleneck (mix during early training, not too important but stable).
      b = self.bottleneck(x)
      b_loss = self.bottleneck_loss(b)
      losses = {"bottleneck0_loss": b_loss}
      b = self.full_stack(b, 2 * x_size, 2 * hparams.bottleneck_size,
                          losses, is_training, num_stacks - 1)
      b = self.unbottleneck(b, x_size)
      b = common_layers.mix(b, x, hparams.bottleneck_warmup_steps, is_training)
      # With probability bottleneck_max_prob use the bottleneck, otherwise x.
      if hparams.bottleneck_max_prob < 1.0:
        x = tf.where(tf.less(tf.random_uniform([]),
                             hparams.bottleneck_max_prob), b, x)
      else:
        x = b
    else:
      b = self.sample()
      res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
      res_size = min(res_size, hparams.max_hidden_size)
      x = self.unbottleneck(b, res_size)
    # Run decoder.
    x = self.decoder(x)
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
      # NOTE(review): on this early return num_hidden_layers is not restored
      # to num_stacks — verify callers do not rely on it afterwards.
      return x
    # Cut to the right size and mix before returning.
    res = x[:, :shape[1], :shape[2], :]
    res = common_layers.mix(res, features["targets"],
                            hparams.bottleneck_warmup_steps // 2, is_training)
    hparams.num_hidden_layers = num_stacks
    return res, losses
@registry.register_hparams
def autoencoder_autoregressive():
  """Hyperparameters for the autoregressive autoencoder."""
  hparams = basic.basic_autoencoder()
  # Extra knobs used only by the autoregressive variant.
  for name, value in [
      ("autoregressive_forget_base", False),
      ("autoregressive_mode", "conv3"),
      ("autoregressive_dropout", 0.4),
  ]:
    hparams.add_hparam(name, value)
  return hparams
@registry.register_hparams
def autoencoder_residual():
  """Hyperparameters for the residual autoencoder."""
  hparams = autoencoder_autoregressive()
  # Optimization settings.
  hparams.optimizer = "Adam"
  hparams.learning_rate_constant = 0.0001
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup"
  # Architecture settings.
  hparams.dropout = 0.05
  hparams.num_hidden_layers = 5
  hparams.hidden_size = 64
  hparams.max_hidden_size = 1024
  # Knobs specific to the residual layers.
  for name, value in [
      ("num_residual_layers", 2),
      ("residual_kernel_height", 3),
      ("residual_kernel_width", 3),
      ("residual_filter_multiplier", 2.0),
      ("residual_dropout", 0.2),
      ("residual_use_separable_conv", int(True)),
  ]:
    hparams.add_hparam(name, value)
  return hparams
@registry.register_hparams
def autoencoder_basic_discrete():
  """Hyperparameters for the basic discrete autoencoder."""
  hparams = autoencoder_autoregressive()
  # Override base settings for the discrete bottleneck.
  for name, value in [
      ("num_hidden_layers", 5),
      ("hidden_size", 64),
      ("bottleneck_size", 4096),
      ("bottleneck_noise", 0.1),
      ("bottleneck_warmup_steps", 3000),
  ]:
    setattr(hparams, name, value)
  hparams.add_hparam("discretize_warmup_steps", 5000)
  return hparams
@registry.register_hparams
def autoencoder_residual_discrete():
  """Hyperparameters for the residual discrete autoencoder."""
  hparams = autoencoder_residual()
  hparams.bottleneck_size = 4096
  hparams.bottleneck_noise = 0.1
  hparams.bottleneck_warmup_steps = 3000
  # Discretization and semantic-hashing knobs.
  for name, value in [
      ("discretize_warmup_steps", 5000),
      ("bottleneck_kind", "tanh_discrete"),
      ("isemhash_noise_dev", 0.5),
      ("isemhash_mix_prob", 0.5),
      ("isemhash_filter_size_multiplier", 2.0),
  ]:
    hparams.add_hparam(name, value)
  return hparams
@registry.register_hparams
def autoencoder_residual_discrete_big():
  """Big version of the residual discrete autoencoder hyperparameters."""
  big_hparams = autoencoder_residual_discrete()
  # Scale the model up and adjust regularization accordingly.
  for name, value in [
      ("hidden_size", 128),
      ("max_hidden_size", 4096),
      ("bottleneck_noise", 0.1),
      ("dropout", 0.1),
      ("residual_dropout", 0.4),
  ]:
    setattr(big_hparams, name, value)
  return big_hparams
@registry.register_hparams
def autoencoder_ordered_discrete():
  """Hyperparameters for the ordered discrete autoencoder.

  Identical to the residual discrete settings.
  """
  return autoencoder_residual_discrete()
@registry.register_hparams
def autoencoder_stacked():
  """Hyperparameters for the stacked autoencoder."""
  stacked_hparams = autoencoder_residual_discrete()
  # Per-level bottleneck is much narrower than the residual-discrete default.
  stacked_hparams.bottleneck_size = 128
  return stacked_hparams
| 40.358763
| 80
| 0.678247
|
4a05af656d59b46a2b528386759e8b3d5b9cc9d1
| 834
|
py
|
Python
|
muadib/celery.py
|
lordoftheflies/muadib-notebook
|
0bba86d775e25800a92e490e71f4e2aba7a09cd3
|
[
"Apache-2.0"
] | 1
|
2018-05-16T06:31:13.000Z
|
2018-05-16T06:31:13.000Z
|
muadib/celery.py
|
lordoftheflies/muadib-notebook
|
0bba86d775e25800a92e490e71f4e2aba7a09cd3
|
[
"Apache-2.0"
] | 4
|
2018-03-27T00:08:11.000Z
|
2018-04-21T20:30:53.000Z
|
muadib/celery.py
|
lordoftheflies/muadib-notebook
|
0bba86d775e25800a92e490e71f4e2aba7a09cd3
|
[
"Apache-2.0"
] | null | null | null |
# Celery application bootstrap for the "muadib" Django project.
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'muadib.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', 'DevelopmentConfiguration')
# django-configurations must be imported and initialized only after the
# environment variables above are set, so the import order here matters.
import configurations
configurations.setup()
app = Celery('muadib')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
    """Print the Celery request context; handy for verifying worker wiring.

    `bind=True` makes the task instance available as `self`, exposing
    `self.request`.
    """
    print('Request: {0!r}'.format(self.request))
| 32.076923
| 73
| 0.78777
|
4a05af711e2883d526b852e812be5565d7a98163
| 3,114
|
py
|
Python
|
integration_tests/test_suites/celery-k8s-integration-test-suite/test_queued_run_coordinator.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
integration_tests/test_suites/celery-k8s-integration-test-suite/test_queued_run_coordinator.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
integration_tests/test_suites/celery-k8s-integration-test-suite/test_queued_run_coordinator.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
import os
from dagster.core.test_utils import create_run_for_test
from dagster.utils import merge_dicts
from dagster.utils.yaml_utils import merge_yamls
from dagster_k8s.test import wait_for_job_and_get_raw_logs
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import (
ReOriginatedExternalPipelineForTest,
get_test_project_environments_path,
get_test_project_workspace_and_external_pipeline,
)
from marks import mark_daemon
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
def get_celery_engine_config(dagster_docker_image, job_namespace):
    """Build run config that selects the celery-k8s executor.

    Outside Buildkite, the AWS test configmap is added so jobs can reach S3.
    """
    env_config_maps = ["dagster-pipeline-env"]
    if not IS_BUILDKITE:
        env_config_maps.append(TEST_AWS_CONFIGMAP_NAME)
    executor_config = {
        "job_image": dagster_docker_image,
        "job_namespace": job_namespace,
        "image_pull_policy": image_pull_policy(),
        "env_config_maps": env_config_maps,
    }
    return {"execution": {"celery-k8s": {"config": executor_config}}}
def assert_events_in_order(logs, expected_events):
    """Assert that expected_events appear, in that exact order, in logs.

    Events not listed in expected_events are ignored.
    """
    observed = []
    for record in logs:
        if not record.is_dagster_event:
            continue
        event_name = record.dagster_event.event_type_value
        if event_name in expected_events:
            observed.append(event_name)
    assert observed == expected_events
@mark_daemon
# NOTE(review): "queeud" looks like a typo for "queued"; the name is part of
# the test-selection interface, so it is left unchanged here.
def test_execute_queeud_run_on_celery_k8s(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance_for_daemon, helm_namespace_for_daemon
):
    """Submit a run via the queued run coordinator and verify it completes.

    The run is executed by the celery-k8s executor in the daemon's helm
    namespace; the test waits for the k8s job and then checks the event log
    ordering.
    """
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env.yaml"),
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
            ]
        ),
        get_celery_engine_config(
            dagster_docker_image=dagster_docker_image,
            job_namespace=helm_namespace_for_daemon,
        ),
    )

    pipeline_name = "demo_pipeline_celery"
    with get_test_project_workspace_and_external_pipeline(
        dagster_instance_for_daemon, pipeline_name
    ) as (workspace, external_pipeline):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance_for_daemon,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )

        # submit_run enqueues the run; the daemon dequeues and launches it.
        dagster_instance_for_daemon.submit_run(
            run.run_id,
            workspace,
        )

        # Block until the k8s job for this run finishes.
        wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace_for_daemon
        )

        logs = dagster_instance_for_daemon.all_logs(run.run_id)
        assert_events_in_order(
            logs,
            ["PIPELINE_ENQUEUED", "PIPELINE_DEQUEUED", "PIPELINE_STARTING", "PIPELINE_SUCCESS"],
        )
| 35.793103
| 96
| 0.692678
|
4a05af7a54cf79742ced720b2ddf8a0277209b66
| 403
|
py
|
Python
|
searchlist/searchlist/custom_storages.py
|
kurtrm/homeless_to_health
|
c60f8f54c1bc4716cd34dbd718b4930c71c781a3
|
[
"MIT"
] | 5
|
2017-07-27T03:11:30.000Z
|
2017-08-10T20:02:02.000Z
|
searchlist/searchlist/custom_storages.py
|
kurtrm/homeless_to_health
|
c60f8f54c1bc4716cd34dbd718b4930c71c781a3
|
[
"MIT"
] | 26
|
2017-07-28T17:44:53.000Z
|
2017-09-20T22:52:26.000Z
|
searchlist/searchlist/custom_storages.py
|
kurtrm/homeless_to_hearth
|
c60f8f54c1bc4716cd34dbd718b4930c71c781a3
|
[
"MIT"
] | 1
|
2018-03-23T23:18:25.000Z
|
2018-03-23T23:18:25.000Z
|
"""Custom storage classes for static and media files."""
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
class StaticStorage(S3BotoStorage):
    """Custom storage class for static files."""

    # S3 key prefix for static assets, taken from Django settings.
    location = settings.STATICFILES_LOCATION
class MediaStorage(S3BotoStorage):
    """Custom storage class for media files."""

    # S3 key prefix for uploaded media, taken from Django settings.
    location = settings.MEDIAFILES_LOCATION
| 26.866667
| 56
| 0.769231
|
4a05b042ad53c5f80517ff22d82ae86a6c3a953d
| 1,155
|
py
|
Python
|
trinity/_utils/connect.py
|
dendisuhubdy/trinity
|
001664781259c7dd0779a0ef6f822451b608ded4
|
[
"MIT"
] | 14
|
2020-08-24T18:23:31.000Z
|
2021-11-04T14:11:04.000Z
|
trinity/_utils/connect.py
|
dendisuhubdy/trinity
|
001664781259c7dd0779a0ef6f822451b608ded4
|
[
"MIT"
] | 19
|
2020-08-25T15:57:05.000Z
|
2021-07-07T00:49:45.000Z
|
trinity/_utils/connect.py
|
dendisuhubdy/trinity
|
001664781259c7dd0779a0ef6f822451b608ded4
|
[
"MIT"
] | 7
|
2020-08-24T22:53:02.000Z
|
2022-03-28T18:51:48.000Z
|
from typing import Iterator
import contextlib
from eth.abc import ChainAPI
from lahja import EndpointAPI
from trinity.boot_info import BootInfo
from trinity.db.eth1.header import AsyncHeaderDB
from trinity.chains.light_eventbus import (
EventBusLightPeerChain,
)
from trinity.config import (
Eth1AppConfig,
)
from trinity.constants import (
SYNC_LIGHT,
)
from trinity.db.manager import DBClient
@contextlib.contextmanager
def get_eth1_chain_with_remote_db(boot_info: BootInfo,
                                  event_bus: EndpointAPI) -> Iterator[ChainAPI]:
    """Yield a ChainAPI backed by the remote database process over IPC.

    In light sync mode a light chain is built on a header DB with peer data
    fetched via the event bus; otherwise the full chain class is used. The
    IPC connection is closed when the context exits.
    """
    app_config = boot_info.trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()

    chain: ChainAPI

    # Connect to the database-manager process over its IPC socket.
    base_db = DBClient.connect(boot_info.trinity_config.database_ipc_path)

    with base_db:
        if boot_info.args.sync_mode == SYNC_LIGHT:
            # Light clients only hold headers locally and request bodies
            # on demand through the event bus.
            header_db = AsyncHeaderDB(base_db)
            chain = chain_config.light_chain_class(
                header_db,
                peer_chain=EventBusLightPeerChain(event_bus)
            )
        else:
            chain = chain_config.full_chain_class(base_db)

        yield chain
| 28.875
| 80
| 0.714286
|
4a05b0450ccaab66e53328b3ece0906adf27dabc
| 2,673
|
py
|
Python
|
terraform_validator/custom_rules/IamManagedPolicyNotActionRule.py
|
rubelw/terraform-validator
|
a9d0335a532acdb4070e5537155b03b34915b73e
|
[
"MIT"
] | 7
|
2018-11-18T00:29:55.000Z
|
2020-05-18T13:23:37.000Z
|
terraform_validator/custom_rules/IamManagedPolicyNotActionRule.py
|
rubelw/terraform-validator
|
a9d0335a532acdb4070e5537155b03b34915b73e
|
[
"MIT"
] | 1
|
2021-05-26T06:58:46.000Z
|
2021-05-26T06:58:46.000Z
|
terraform_validator/custom_rules/IamManagedPolicyNotActionRule.py
|
rubelw/terraform-validator
|
a9d0335a532acdb4070e5537155b03b34915b73e
|
[
"MIT"
] | 2
|
2019-10-23T15:22:52.000Z
|
2020-06-22T07:00:45.000Z
|
from __future__ import absolute_import, division, print_function
import sys
import inspect
from builtins import (str)
from terraform_validator.custom_rules.BaseRule import BaseRule
def lineno():
    """Return a debug tag with this rule's name, the caller, and its line."""
    caller_name = inspect.stack()[1][3]
    caller_line = inspect.currentframe().f_back.f_lineno
    return str(' - IamManagedPolicyNotActionRule - caller: '
               + str(caller_name) + ' - line number: ' + str(caller_line))
class IamManagedPolicyNotActionRule(BaseRule):
    """Warn when an IAM managed policy combines Allow with NotAction."""

    def __init__(self, cfn_model=None, debug=None):
        """
        Initialize IamManagedPolicyNotActionRule
        :param cfn_model: parsed CloudFormation model to audit
        :param debug: when truthy, print verbose trace output
        """
        BaseRule.__init__(self, cfn_model, debug=debug)

    def rule_text(self):
        """
        Returns the human-readable description of this rule.
        :return: str
        """
        return 'IAM managed policy should not allow Allow+NotAction'

    def rule_type(self):
        """
        Returns the rule severity type (a warning, not an error).
        :return: str
        """
        self.type= 'VIOLATION::WARNING'
        return 'VIOLATION::WARNING'

    def rule_id(self):
        """
        Returns the stable rule identifier.
        :return: str
        """
        if self.debug:
            print('rule_id'+lineno())

        self.id = 'W17'
        return 'W17'

    def audit_impl(self):
        """
        Collect AWS::IAM::ManagedPolicy resources whose policy document
        contains an Allow statement with NotAction.
        :return: list of violating logical resource ids
        """
        if self.debug:
            print('IamManagedPolicyNotActionRule - audit_impl'+lineno())

        violating_policies = []

        resources = self.cfn_model.resources_by_type('AWS::IAM::ManagedPolicy')

        if len(resources)>0:
            for resource in resources:
                if self.debug:
                    print(str(dir(resource))+lineno())
                    print('resource: '+str(resource)+lineno())

                # Resources without a policy document cannot violate the rule.
                if hasattr(resource,'policy_document'):
                    if resource.policy_document:

                        if self.debug:
                            print('has policy document '+lineno())

                        if resource.policy_document.allows_not_action():
                            if self.debug:
                                print('has allows not not')
                                print('resource id:'+str(resource.logical_resource_id)+lineno())

                            violating_policies.append(str(resource.logical_resource_id))
                    else:
                        if self.debug:
                            print('does not have policy document'+lineno())

        else:
            if self.debug:
                print('no resources'+lineno())

        if self.debug:
            print('returning violating policies to'+lineno())

        return violating_policies
| 29.373626
| 151
| 0.541339
|
4a05b08a3d31ae45be79e15d36ee20f6ba866a2b
| 3,518
|
py
|
Python
|
critiquebrainz/db/test/spam_report_test.py
|
akshaaatt/critiquebrainz
|
39184152af5f23adaa991c4b43ecbbb6f086f809
|
[
"Apache-2.0"
] | 70
|
2015-03-10T00:08:21.000Z
|
2022-02-20T05:36:53.000Z
|
critiquebrainz/db/test/spam_report_test.py
|
akshaaatt/critiquebrainz
|
39184152af5f23adaa991c4b43ecbbb6f086f809
|
[
"Apache-2.0"
] | 279
|
2015-12-08T14:10:45.000Z
|
2022-03-29T13:54:23.000Z
|
critiquebrainz/db/test/spam_report_test.py
|
akshaaatt/critiquebrainz
|
39184152af5f23adaa991c4b43ecbbb6f086f809
|
[
"Apache-2.0"
] | 95
|
2015-03-12T21:39:42.000Z
|
2022-03-10T00:51:04.000Z
|
import critiquebrainz.db.license as db_license
import critiquebrainz.db.review as db_review
import critiquebrainz.db.spam_report as db_spam_report
import critiquebrainz.db.users as db_users
from critiquebrainz.data.testing import DataTestCase
from critiquebrainz.db.user import User
class SpamReportTestCase(DataTestCase):
    """Tests for the spam_report database module."""

    def setUp(self):
        """Create an author, two reporters, one review, and an initial report."""
        super(SpamReportTestCase, self).setUp()
        author = User(db_users.get_or_create(0, '0', new_user_data={
            "display_name": "Author",
        }))
        self.user1 = User(db_users.get_or_create(1, '1', new_user_data={
            "display_name": "Tester #1",
        }))
        self.user2 = User(db_users.get_or_create(2, '2', new_user_data={
            "display_name": "Tester #2",
        }))
        license = db_license.create(
            id='Test',
            full_name='Test License',
        )
        self.review = db_review.create(
            entity_id="e7aad618-fa86-3983-9e77-405e21796eca",
            entity_type="release_group",
            text="Testing!",
            user_id=author.id,
            is_draft=False,
            license_id=license["id"],
        )
        # Reports are keyed by (user, revision), so keep the revision id.
        self.revision_id = self.review["last_revision"]["id"]
        self.report = db_spam_report.create(self.revision_id, self.user1.id, "To test is this report")
        self.report_time = self.report["reported_at"]

    def test_get(self):
        """get() returns the full report row for (user, revision)."""
        report = db_spam_report.get(self.user1.id, self.revision_id)
        report["user_id"] = str(report["user_id"])
        self.assertDictEqual(report, {
            "user_id": self.user1.id,
            "revision_id": self.revision_id,
            "reported_at": self.report_time,
            "reason": "To test is this report",
            "is_archived": False,
        })

    def test_archive(self):
        """archive() flips the is_archived flag on the report."""
        db_spam_report.archive(self.user1.id, self.revision_id)
        report = db_spam_report.get(self.user1.id, self.revision_id)
        self.assertEqual(report['is_archived'], True)

    def test_create(self):
        """create() stores the supplied reason."""
        report = db_spam_report.create(self.revision_id, self.user2.id, "This is a report")
        self.assertEqual(report["reason"], "This is a report")

    def test_list_reports(self):
        """list_reports() filters by review, user, and archived state."""
        db_spam_report.create(self.revision_id, self.user2.id, "This is a report")
        # Updating the review creates a new revision to report against.
        db_review.update(
            review_id=self.review["id"],
            drafted=self.review["is_draft"],
            text="Updated Review",
        )
        self.review = db_review.get_by_id(self.review["id"])
        db_spam_report.create(self.review["last_revision"]["id"], self.user1.id,
                              "This is again a report on the updated review")
        # two reports on the old revision and one on the new revision.
        reports, count = db_spam_report.list_reports(review_id=self.review["id"])  # pylint: disable=unused-variable
        self.assertEqual(count, 3)
        # get all reports by a user.
        reports, count = db_spam_report.list_reports(user_id=self.user2.id)
        self.assertEqual(count, 1)
        # archive and include all archived reports.
        # there must be two reports including the archived one.
        db_spam_report.archive(self.user1.id, self.review["last_revision"]["id"])
        reports, count = db_spam_report.list_reports(inc_archived=True)
        self.assertEqual(count, 3)
        # there must be one reports excluding the archived one.
        reports, count = db_spam_report.list_reports(inc_archived=False)
        self.assertEqual(count, 2)
| 42.902439
| 116
| 0.639852
|
4a05b0a051a8034562d125e5a2727fd7fcc407f8
| 241
|
py
|
Python
|
manager/downloads/downloads/application/use_cases/__init__.py
|
G4brym/download-manager
|
8795d09d8f63511c980d3f10e6b2b762d41bff0c
|
[
"MIT"
] | 3
|
2021-04-28T14:29:06.000Z
|
2022-03-27T21:02:32.000Z
|
manager/downloads/downloads/application/use_cases/__init__.py
|
G4brym/docker-download-manager
|
8795d09d8f63511c980d3f10e6b2b762d41bff0c
|
[
"MIT"
] | 5
|
2021-08-04T21:37:00.000Z
|
2021-08-04T21:37:02.000Z
|
manager/downloads/downloads/application/use_cases/__init__.py
|
G4brym/docker-download-manager
|
8795d09d8f63511c980d3f10e6b2b762d41bff0c
|
[
"MIT"
] | 1
|
2021-09-06T15:45:37.000Z
|
2021-09-06T15:45:37.000Z
|
__all__ = ["FileDownload", "FileRetry", "FileRetryAll", "TaskDownloadFile"]
from .file_download import FileDownload
from .file_retry import FileRetry
from .file_retry_all import FileRetryAll
from .task_download_file import TaskDownloadFile
| 34.428571
| 75
| 0.829876
|
4a05b0d0f62645c83e9016c17e4880add62d7376
| 5,203
|
py
|
Python
|
Solutions/mailroom/mailroom_fp/mailroom_mfr.py
|
UWPCE-PythonCert/InstructorResources
|
13b1cfa4cad6a8c3491f8a602c8afda5400c9ac7
|
[
"Unlicense"
] | null | null | null |
Solutions/mailroom/mailroom_fp/mailroom_mfr.py
|
UWPCE-PythonCert/InstructorResources
|
13b1cfa4cad6a8c3491f8a602c8afda5400c9ac7
|
[
"Unlicense"
] | 1
|
2020-12-20T17:07:17.000Z
|
2020-12-20T17:07:17.000Z
|
Solutions/mailroom/mailroom_fp/mailroom_mfr.py
|
UWPCE-PythonCert/InstructorResources
|
13b1cfa4cad6a8c3491f8a602c8afda5400c9ac7
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# http://uwpce-pythoncert.github.io/IntroToPython/exercises/mailroom-fp.html
import glob
import pickle
def print_usage():
    """Print the interactive command menu for the mailroom prompt."""
    print(
        '''
mailroom>>
a: Add a donor and donation
c: run a Challenge fundraiser
h: Help (this message)
l: List donors
r: print Report
s: Show database
t: Thank donors
q: quit
'''
    )
def load_donordb():
    """Load the donor database from mailroom.pickle.

    Falls back to a built-in sample database when the pickle file is
    missing or unreadable.
    """
    default_db = {
        "Aristotle": [384.0, 322.0],
        "Kant": [1724.0, 1804.0, 1785.0],
        "Locke": [1632.0],
        "Russell": [1872.0, 1970.0, 1950.0],
        "Dennett": [1942.0],
    }
    try:
        with open('mailroom.pickle', 'rb') as db_handle:
            return pickle.load(db_handle)
    except IOError:
        return default_db
def save_donordb(db):
    """Persist the donor database to mailroom.pickle.

    Raises:
        IOError: if the file cannot be written. (The original code raised a
        string literal, which is a TypeError in Python 3.)
    """
    try:
        with open('mailroom.pickle', 'wb') as db_handle:
            pickle.dump(db, db_handle)
    except IOError as err:
        raise IOError("Error: Unable to save donor database.") from err
def add_donation(db, donor, contribution):
    """Record a donation for a donor, creating the donor if needed.

    Args:
        db (dict): donor name -> list of donation amounts
        donor (str): the donor's name
        contribution: donation amount (string or number)

    Returns:
        dict: the updated db, or None when the contribution is invalid.
    """
    # Validate user input as numeric (convert once instead of twice).
    try:
        amount = float(contribution)
    except (ValueError, TypeError) as my_except:
        print("mailroom>> Input validation error: {}".format(my_except))
        return None
    # Catch embezzlement: reject negative (and NaN) amounts. This replaces
    # the original `assert`, which is stripped when running with -O.
    if not amount >= 0.0:
        print("mailroom>> Donations must be greater than $0.00.")
        return None
    db.setdefault(donor, []).append(amount)
    return db
def multiplier_factory(factor):
    '''
    A closure to create the multiplier function

    Args:
        factor (int): the multiplier to close into the returned function

    Returns:
        a function which will multiply its arguments by factor, or None
        when factor is not a positive integer.
    '''
    # Catch embezzlement. The original used `assert int(factor) > 0`, which
    # (a) is stripped under -O and (b) let int("abc") escape as an uncaught
    # ValueError; both cases now print a message and return None.
    try:
        multiplier = int(factor)
    except (ValueError, TypeError) as my_except:
        print("mailroom>> Challenge multipliers must be > 0: {}".format(my_except))
        return None
    if multiplier <= 0:
        print("mailroom>> Challenge multipliers must be > 0.")
        return None

    def func(value):
        return multiplier * value
    return func
def challenge(db, factor):
    """ Run a fund raising challenge

    Args:
        db (dict): the donor database
        factor (int): challenge multiplier

    Returns:
        dict: a new donor database with every donation multiplied by factor,
        or the original db unchanged when the factor is invalid.
    """
    challenge_multiplier = multiplier_factory(factor)
    # multiplier_factory returns None (after printing a message) for a bad
    # factor; previously that None fell through and crashed with a TypeError
    # when the map objects were consumed.
    if challenge_multiplier is None:
        return db
    return {
        name: [challenge_multiplier(donation) for donation in donations]
        for name, donations in db.items()
    }
def print_db(db):
    """Dump every donor and their donation list to stdout."""
    for donor_name, donation_list in db.items():
        print(donor_name, donation_list)
def tally_report(values):
    """Summarize a donation list.

    Args:
        values (list): donation amounts for one donor

    Returns:
        tuple: (total, number of gifts, average gift). The average is 0.0
        for an empty list instead of raising ZeroDivisionError.
    """
    donation_total = sum(values)
    num_gifts = len(values)
    average_gift = donation_total / num_gifts if num_gifts else 0.0
    return donation_total, num_gifts, average_gift
def print_report(db):
    """Print a formatted table of donor totals, gift counts, and averages."""
    # Print a header
    print("Donor Name | Total Given | Num Gifts | Average Gift")
    # Print each row
    for donor_name, donations in db.items():
        total, count, average = tally_report(donations)
        row = "{} | {:11,.2f} | {} | ${:11,.2f}".format(
            donor_name.ljust(25),
            total,
            str(count).rjust(9),
            average,
        )
        print(row)
def thank_donor(donor, amount):
    """Write a personalized thank-you letter to a per-donor text file.

    The file is named mailroom-thankyou-<donor>.txt in the working directory.
    """
    # Fixes the "Sincerly" typo in the user-facing letter text.
    letter = ("Thank you, {}, for your generous donations totaling ${}.\n"
              "Sincerely, The Mailroom Team\n").format(donor, amount)
    with open('mailroom-thankyou-{}.txt'.format(donor), 'w') as f:
        f.write(letter)
def thank_donors(db):
    """Write a thank-you letter for each donor's cumulative total."""
    for donor_name, donations in db.items():
        thank_donor(donor_name, sum(donations))
def list_donor_files():
    """Print the thank-you letter files in the working directory."""
    letter_files = glob.glob("mailroom-thankyou-*.txt")
    print(letter_files)
def main():
    '''
    Handle user interaction in the main event loop.

    Reads commands from stdin until the user quits, at which point the
    donor database is saved.
    '''
    donor_db = load_donordb()
    while True:
        command = input("mailroom>> ")
        if command in ('quit', 'q'):
            save_donordb(donor_db)
            break
        elif command in ('add', 'a'):
            donor = input("mailroom: What is the name of the donor? ")
            contribution_amount = input("mailroom>> How much is {} contributing? ".format(donor))
            add_donation(donor_db, donor, contribution_amount)
        elif command in ('challenge', 'c'):
            factor = input("mailroom: Challenge! By what factor will donations be multiplied? ")
            donor_db = challenge(donor_db, factor)
        elif command in ('help', 'h'):
            print_usage()
        elif command in ('list', 'l'):
            print(sorted(set(donor_db.keys())))
        elif command in ('show', 's'):
            print_db(donor_db)
        elif command in ('report', 'r'):
            print_report(donor_db)
        elif command in ('thanks', 't'):
            thank_donors(donor_db)
            list_donor_files()
if __name__ == '__main__':
    # Script entry point: show the menu once, then start the interactive loop.
    print_usage()
    main()
| 25.380488
| 128
| 0.600231
|
4a05b19a64641a5c115279e7a2ba7f65247f5b39
| 792
|
py
|
Python
|
var/spack/repos/builtin/packages/py-entrypoints/package.py
|
dwstreetNNL/spack
|
8f929707147c49606d00386a10161529dad4ec56
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-entrypoints/package.py
|
dwstreetNNL/spack
|
8f929707147c49606d00386a10161529dad4ec56
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-entrypoints/package.py
|
dwstreetNNL/spack
|
8f929707147c49606d00386a10161529dad4ec56
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyEntrypoints(PythonPackage):
    """Discover and load entry points from installed packages."""

    homepage = "https://pypi.python.org/pypi/entrypoints"
    url = "https://pypi.io/packages/source/e/entrypoints/entrypoints-0.2.3.tar.gz"

    version('0.3', sha256='c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451')
    version('0.2.3', sha256='d2d587dde06f99545fb13a383d2cd336a8ff1f359c5839ce3a64c917d10c029f')

    depends_on('python@2.7:', type=('build', 'run'))
    # configparser backport is only needed on Python 2.
    depends_on('py-configparser', when='^python@:2.8', type=('build', 'run'))
| 39.6
| 95
| 0.737374
|
4a05b32b5716e7e2c5b93746c1394979281e5c7e
| 36,007
|
py
|
Python
|
invest_natcap/carbon/carbon_biophysical.py
|
phargogh/invest-natcap.invest-3
|
ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
|
[
"BSD-3-Clause"
] | null | null | null |
invest_natcap/carbon/carbon_biophysical.py
|
phargogh/invest-natcap.invest-3
|
ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
|
[
"BSD-3-Clause"
] | null | null | null |
invest_natcap/carbon/carbon_biophysical.py
|
phargogh/invest-natcap.invest-3
|
ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
|
[
"BSD-3-Clause"
] | null | null | null |
"""InVEST Carbon biophysical module at the "uri" level"""
import sys
import os
import math
import logging
import shutil
from osgeo import gdal
from osgeo import ogr
from scipy.stats import norm
import numpy
import pygeoprocessing.geoprocessing
from invest_natcap.carbon import carbon_utils
logging.basicConfig(format='%(asctime)s %(name)-18s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
LOGGER = logging.getLogger('invest_natcap.carbon.biophysical')
NUM_MONTE_CARLO_RUNS = 10000
class MapCarbonPoolError(Exception):
    """Raised when an lulc code in the raster has no matching entry in the
    carbon pools data file."""
def execute(args):
    """InVEST-style entry point: unpack the args dict and delegate to
    execute_30."""
    return execute_30(**args)
def execute_30(**args):
"""This function invokes the carbon model given URI inputs of files.
It will do filehandling and open/create appropriate objects to
pass to the core carbon biophysical processing function. It may write
log, warning, or error messages to stdout.
args - a python dictionary with at the following possible entries:
args['workspace_dir'] - a uri to the directory that will write output
and other temporary files during calculation. (required)
args['suffix'] - a string to append to any output file name (optional)
args['lulc_cur_uri'] - is a uri to a GDAL raster dataset (required)
args['carbon_pools_uri'] - is a uri to a CSV or DBF dataset mapping carbon
storage density to the lulc classifications specified in the
lulc rasters. (required if 'do_uncertainty' is false)
args['carbon_pools_uncertain_uri'] - as above, but has probability distribution
data for each lulc type rather than point estimates.
(required if 'do_uncertainty' is true)
args['do_uncertainty'] - a boolean that indicates whether we should do
uncertainty analysis. Defaults to False if not present.
args['confidence_threshold'] - a number between 0 and 100 that indicates
the minimum threshold for which we should highlight regions in the output
raster. (required if 'do_uncertainty' is True)
args['lulc_fut_uri'] - is a uri to a GDAL raster dataset (optional
if calculating sequestration)
args['lulc_cur_year'] - An integer representing the year of lulc_cur
used in HWP calculation (required if args contains a
'hwp_cur_shape_uri', or 'hwp_fut_shape_uri' key)
args['lulc_fut_year'] - An integer representing the year of lulc_fut
used in HWP calculation (required if args contains a
'hwp_fut_shape_uri' key)
args['lulc_redd_uri'] - is a uri to a GDAL raster dataset that represents
land cover data for the REDD policy scenario (optional).
args['hwp_cur_shape_uri'] - Current shapefile uri for harvested wood
calculation (optional, include if calculating current lulc hwp)
args['hwp_fut_shape_uri'] - Future shapefile uri for harvested wood
calculation (optional, include if calculating future lulc hwp)
returns a dict with the names of all output files."""
if '_process_pool' not in args:
args['_process_pool'] = None
else:
LOGGER.debug('Found a process pool: %s', args['_process_pool'])
file_suffix = carbon_utils.make_suffix(args)
dirs = carbon_utils.setup_dirs(args['workspace_dir'],
'output', 'intermediate')
def outfile_uri(prefix, scenario_type, dirtype='output', filetype='tif'):
'''Creates the appropriate output file URI.
prefix: 'tot_C', 'sequest', or similar
scenario type: 'cur', 'fut', or 'redd'
dirtype: 'output' or 'intermediate'
'''
if scenario_type == 'fut' and args.get('lulc_redd_uri'):
# We're doing REDD analysis, so call the future scenario 'base',
# since it's the 'baseline' scenario.
scenario_type = 'base'
filename = '%s_%s%s.%s' % (prefix, scenario_type, file_suffix, filetype)
return os.path.join(dirs[dirtype], filename)
pools = _compute_carbon_pools(args)
do_uncertainty = args['do_uncertainty']
# Map total carbon for each scenario.
outputs = {}
for lulc_uri in ['lulc_cur_uri', 'lulc_fut_uri', 'lulc_redd_uri']:
if lulc_uri in args:
scenario_type = lulc_uri.split('_')[-2] #get the 'cur', 'fut', or 'redd'
LOGGER.info('Mapping carbon for %s scenario.', scenario_type)
nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(args[lulc_uri])
nodata_out = -5.0
def map_carbon_pool(lulc):
if lulc == nodata:
return nodata_out
return pools[lulc]['total']
dataset_out_uri = outfile_uri('tot_C', scenario_type)
outputs['tot_C_%s' % scenario_type] = dataset_out_uri
pixel_size_out = pygeoprocessing.geoprocessing.get_cell_size_from_uri(args[lulc_uri])
# Create a raster that models total carbon storage per pixel.
try:
pygeoprocessing.geoprocessing.vectorize_datasets(
[args[lulc_uri]], map_carbon_pool, dataset_out_uri,
gdal.GDT_Float32, nodata_out, pixel_size_out,
"intersection", dataset_to_align_index=0,
process_pool=args['_process_pool'])
except KeyError:
raise MapCarbonPoolError('There was a KeyError when mapping '
'land cover ids to carbon pools. This can happen when '
'there is a land cover id that does not exist in the '
'carbon pool data file.')
if do_uncertainty:
def map_carbon_pool_variance(lulc):
if lulc == nodata:
return nodata_out
return pools[lulc]['variance']
variance_out_uri = outfile_uri(
'variance_C', scenario_type, dirtype='intermediate')
outputs['variance_C_%s' % scenario_type] = variance_out_uri
# Create a raster that models variance in carbon storage per pixel.
pygeoprocessing.geoprocessing.vectorize_datasets(
[args[lulc_uri]], map_carbon_pool_variance, variance_out_uri,
gdal.GDT_Float32, nodata_out, pixel_size_out,
"intersection", dataset_to_align_index=0,
process_pool=args['_process_pool'])
#Add calculate the hwp storage, if it is passed as an input argument
hwp_key = 'hwp_%s_shape_uri' % scenario_type
if hwp_key in args:
LOGGER.info('Computing HWP storage.')
c_hwp_uri = outfile_uri('c_hwp', scenario_type, dirtype='intermediate')
bio_hwp_uri = outfile_uri('bio_hwp', scenario_type, dirtype='intermediate')
vol_hwp_uri = outfile_uri('vol_hwp', scenario_type, dirtype='intermediate')
if scenario_type == 'cur':
_calculate_hwp_storage_cur(
args[hwp_key], args[lulc_uri], c_hwp_uri, bio_hwp_uri,
vol_hwp_uri, args['lulc_%s_year' % scenario_type])
temp_c_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
shutil.copyfile(outputs['tot_C_cur'], temp_c_cur_uri)
hwp_cur_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(c_hwp_uri)
def add_op(tmp_c_cur, hwp_cur):
"""add two rasters and in nodata in the second, return
the first"""
return numpy.where(
hwp_cur == hwp_cur_nodata, tmp_c_cur,
tmp_c_cur + hwp_cur)
pygeoprocessing.geoprocessing.vectorize_datasets(
[temp_c_cur_uri, c_hwp_uri], add_op,
outputs['tot_C_cur'], gdal.GDT_Float32, nodata_out,
pixel_size_out, "intersection",
dataset_to_align_index=0, vectorize_op=False)
elif scenario_type == 'fut':
hwp_shapes = {}
if 'hwp_cur_shape_uri' in args:
hwp_shapes['cur'] = args['hwp_cur_shape_uri']
if 'hwp_fut_shape_uri' in args:
hwp_shapes['fut'] = args['hwp_fut_shape_uri']
_calculate_hwp_storage_fut(
hwp_shapes, args[lulc_uri], c_hwp_uri, bio_hwp_uri,
vol_hwp_uri, args['lulc_cur_year'],
args['lulc_fut_year'], args['_process_pool'])
temp_c_fut_uri = pygeoprocessing.geoprocessing.temporary_filename()
shutil.copyfile(outputs['tot_C_fut'], temp_c_fut_uri)
hwp_fut_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(c_hwp_uri)
def add_op(tmp_c_fut, hwp_fut):
return numpy.where(
hwp_fut == hwp_fut_nodata, tmp_c_fut,
tmp_c_fut + hwp_fut)
pygeoprocessing.geoprocessing.vectorize_datasets(
[temp_c_fut_uri, c_hwp_uri], add_op,
outputs['tot_C_fut'], gdal.GDT_Float32, nodata_out,
pixel_size_out, "intersection",
dataset_to_align_index=0,
vectorize_op=False)
for fut_type in ['fut', 'redd']:
fut_type_lulc_uri = 'lulc_%s_uri' % fut_type
if 'lulc_cur_uri' in args and fut_type_lulc_uri in args:
LOGGER.info('Computing sequestration for %s scenario', fut_type)
def sub_op(c_cur, c_fut):
fut_nodata = c_fut == nodata_out
cur_nodata = c_cur == nodata_out
cur_clean = numpy.where(cur_nodata, 0, c_cur)
fut_clean = numpy.where(fut_nodata, 0, c_fut)
seq = fut_clean - cur_clean
return numpy.where(fut_nodata & cur_nodata, nodata_out, seq)
pixel_size_out = pygeoprocessing.geoprocessing.get_cell_size_from_uri(args['lulc_cur_uri'])
outputs['sequest_%s' % fut_type] = outfile_uri('sequest', fut_type)
pygeoprocessing.geoprocessing.vectorize_datasets(
[outputs['tot_C_cur'], outputs['tot_C_%s' % fut_type]], sub_op,
outputs['sequest_%s' % fut_type], gdal.GDT_Float32, nodata_out,
pixel_size_out, "intersection", dataset_to_align_index=0,
process_pool=args['_process_pool'], vectorize_op=False)
if do_uncertainty:
LOGGER.info('Computing confident cells for %s scenario.', fut_type)
confidence_threshold = args['confidence_threshold']
# Returns 1 if we're confident storage will increase,
# -1 if we're confident storage will decrease,
# 0 if we're not confident either way.
def confidence_op(c_cur, c_fut, var_cur, var_fut):
if nodata_out in [c_cur, c_fut, var_cur, var_fut]:
return nodata_out
if var_cur == 0 and var_fut == 0:
# There's no variance, so we can just compare the mean estimates.
if c_fut > c_cur:
return 1
if c_fut < c_cur:
return -1
return 0
# Given two distributions (one for current storage, one for future storage),
# we use the difference distribution (current storage - future storage),
# and calculate the probability that the difference is less than 0.
# This is equal to the probability that the future storage is greater than
# the current storage.
# We calculate the standard score by beginning with 0, subtracting the mean
# of the difference distribution, and dividing by the standard deviation
# of the difference distribution.
# The mean of the difference distribution is the difference of the means of cur and fut.
# The variance of the difference distribution is the sum of the variances of cur and fut.
standard_score = (c_fut - c_cur) / math.sqrt(var_cur + var_fut)
# Calculate the cumulative distribution function for the standard normal distribution.
# This gives us the probability that future carbon storage is greater than
# current carbon storage.
# This formula is copied from http://docs.python.org/3.2/library/math.html
probability = (1.0 + math.erf(standard_score / math.sqrt(2.0))) / 2.0
# Multiply by 100 so we have probability in the same units as the confidence_threshold.
confidence = 100 * probability
if confidence >= confidence_threshold:
# We're confident carbon storage will increase.
return 1
if confidence <= 100 - confidence_threshold:
# We're confident carbon storage will decrease.
return -1
# We're not confident about whether storage will increase or decrease.
return 0
outputs['conf_%s' % fut_type] = outfile_uri('conf', fut_type)
pygeoprocessing.geoprocessing.vectorize_datasets(
[outputs[name] for name in ['tot_C_cur', 'tot_C_%s' % fut_type,
'variance_C_cur', 'variance_C_%s' % fut_type]],
confidence_op, outputs['conf_%s' % fut_type],
gdal.GDT_Float32, nodata_out,
pixel_size_out, "intersection", dataset_to_align_index=0,
process_pool=args['_process_pool'])
# Do a Monte Carlo simulation for uncertainty analysis.
# We only do this if HWP is not enabled, because the simulation
# computes carbon just by summing carbon across the
# landscape, which is wrong if we're doing HWP analysis.
if (do_uncertainty and
'hwp_cur_shape_uri' not in args and
'hwp_fut_shape_uri' not in args):
outputs['uncertainty'] = _compute_uncertainty_data(args, pools)
return outputs
def _compute_carbon_pools(args):
    """Return per-lulc-code carbon data.

    The result maps each lucode to a dict with key 'total' (carbon per
    pixel, Mg) and, when uncertainty analysis is enabled, key 'variance'
    (variance of carbon per pixel).
    """
    do_uncertainty = args['do_uncertainty']

    # The uncertain table carries mean/sd columns; the point-estimate table
    # carries the plain pool columns.
    table_key = (
        'carbon_pools_uncertain_uri' if do_uncertainty else 'carbon_pools_uri')
    pool_inputs = pygeoprocessing.geoprocessing.get_lookup_from_table(
        args[table_key], 'lucode')

    cell_area_ha = _compute_cell_area_ha(args)

    estimate_cols = ['c_above', 'c_below', 'c_soil', 'c_dead']
    if do_uncertainty:
        # Uncertain input has '<pool>_mean' and '<pool>_sd' columns.
        sd_cols = [col + '_sd' for col in estimate_cols]
        estimate_cols = [col + '_mean' for col in estimate_cols]

    pools = {}
    for lucode, row in pool_inputs.items():
        # Total carbon per pixel: sum of the pools, scaled by pixel area.
        entry = {
            'total': cell_area_ha * sum(
                row[col] for col in estimate_cols)}
        if do_uncertainty:
            # Pools are assumed independent normals, so the variance of the
            # sum is the sum of variances; area scaling enters squared.
            entry['variance'] = (cell_area_ha ** 2) * sum(
                row[sd_col] ** 2 for sd_col in sd_cols)
        pools[lucode] = entry
    return pools
def _compute_cell_area_ha(args):
    """Return the cell area in hectares, taken from the current LULC map.

    Logs a warning if any future/REDD LULC map has a different cell area
    than the current map.
    """
    cell_area_cur = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        args['lulc_cur_uri']) ** 2
    for scenario in ['fut', 'redd']:
        try:
            lulc_uri = args['lulc_%s_uri' % scenario]
        except KeyError:
            # Scenario not provided; nothing to compare.
            continue
        cell_area_in_scenario = (
            pygeoprocessing.geoprocessing.get_cell_size_from_uri(
                lulc_uri) ** 2)
        # Bug fix: warn when the areas actually DIFFER (beyond float
        # rounding). The original comparison was `<= epsilon`, which fired
        # only when the areas matched — the opposite of the message text.
        if abs(cell_area_cur - cell_area_in_scenario) > sys.float_info.epsilon:
            LOGGER.warn(
                'The LULC map for the %s scenario has a different cell area '
                'than the LULC map for the current scenario. Please '
                'ensure that all LULC maps have the same cell area.' % scenario)
    # Convert square map units (presumably m^2 — depends on projection)
    # to hectares.
    return cell_area_cur / 10000.0
def _compute_uncertainty_data(args, pools):
    """Computes the mean and std dev for carbon storage and sequestration."""
    LOGGER.info("Computing uncertainty data.")

    # Histogram of lulc code -> grid-cell count for each scenario present.
    lulc_counts = {}
    for scenario in ('cur', 'fut', 'redd'):
        uri_key = 'lulc_%s_uri' % scenario
        if uri_key not in args:
            continue
        lulc_counts[scenario] = (
            pygeoprocessing.geoprocessing.unique_raster_values_count(
                args[uri_key]))

    LOGGER.info("Beginning Monte Carlo simulation.")
    # Keys here are plain scenarios ('cur', 'fut', 'redd') plus the
    # sequestration entries ('sequest_fut', 'sequest_redd') that each run
    # also produces.
    samples_by_scenario = {}
    for _ in range(NUM_MONTE_CARLO_RUNS):
        run_results = _do_monte_carlo_run(pools, lulc_counts)
        for scenario, carbon_amount in run_results.items():
            samples_by_scenario.setdefault(scenario, []).append(carbon_amount)
    LOGGER.info("Done with Monte Carlo simulation.")

    # norm.fit returns the maximum-likelihood (mean, std dev) pair for each
    # sample set.
    return {
        scenario: norm.fit(samples)
        for scenario, samples in samples_by_scenario.items()
    }
def _do_monte_carlo_run(pools, lulc_counts):
"""Do a single Monte Carlo run for carbon storage.
Returns a dict with the results, keyed by scenario, and
# including results for sequestration.
"""
# Sample carbon-per-grid-cell from the given normal distribution.
# We sample this independently for each LULC type.
lulc_carbon_samples = {}
for lulc_id, distribution in pools.items():
if not distribution['variance']:
lulc_carbon_samples[lulc_id] = distribution['total']
else:
lulc_carbon_samples[lulc_id] = numpy.random.normal(
distribution['total'],
math.sqrt(distribution['variance']))
# Compute the amount of carbon in each scenario.
results = {}
for scenario, counts in lulc_counts.items():
# Amount of carbon is the sum across all lulc types of:
# (number of grid cells) x (carbon per grid cell)
results[scenario] = sum(
count * lulc_carbon_samples[lulc_id]
for lulc_id, count in counts.items())
# Compute sequestration.
for scenario in ['fut', 'redd']:
if scenario not in results:
continue
results['sequest_%s' % scenario] = results[scenario] - results['cur']
return results
def _calculate_hwp_storage_cur(
        hwp_shape_uri, base_dataset_uri, c_hwp_uri, bio_hwp_uri, vol_hwp_uri,
        yr_cur):
    """Calculates carbon storage, hwp biomassPerPixel and volumePerPixel due
    to harvested wood products in parcels on current landscape.

    hwp_shape_uri - URI of an OGR shapefile with the harvest map of interest
    base_dataset_uri - URI of a GDAL dataset used as the template (extent,
        projection, cell size) for the output rasters
    c_hwp_uri - output GDAL raster URI for carbon stored in harvested wood
        products under the current calculation
    bio_hwp_uri - output GDAL raster URI for biomass of harvested wood
        products
    vol_hwp_uri - output GDAL raster URI for volume of harvested wood
        products
    yr_cur - year of the current landcover map

    No return value"""

    ############### Start
    # Squared cell size gives area in map units^2 (presumably m^2 — depends
    # on the projection); /10000 converts to hectares.
    pixel_area = pygeoprocessing.geoprocessing.get_cell_size_from_uri(base_dataset_uri) ** 2 / 10000.0  #convert to Ha
    hwp_shape = ogr.Open(hwp_shape_uri)
    base_dataset = gdal.Open(base_dataset_uri)
    nodata = -5.0

    #Create a temporary in-memory copy of the shapefile to hold values of per
    #feature carbon pools, HWP biomassPerPixel and volumePerPixel; it is
    #rasterized to the output rasters later and the source file is never
    #modified.
    hwp_shape_copy = ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
    hwp_shape_layer_copy = hwp_shape_copy.GetLayer()

    #Create fields in the layer to hold the hardwood product pool,
    #biomassPerPixel and volumePerPixel
    calculated_attribute_names = ['c_hwp_pool', 'bio_hwp', 'vol_hwp']
    for x in calculated_attribute_names:
        field_def = ogr.FieldDefn(x, ogr.OFTReal)
        hwp_shape_layer_copy.CreateField(field_def)

    #Visit each feature and calculate the carbon pool, biomassPerPixel, and
    #volumePerPixel of that parcel
    for feature in hwp_shape_layer_copy:
        #This makes a helpful dictionary to access fields in the feature
        #later in the code
        field_args = _get_fields(feature)

        #If start date and/or the amount of carbon per cut is zero, it doesn't
        #make sense to do any calculation on carbon pools or
        #biomassPerPixel/volumePerPixel
        if field_args['start_date'] != 0 and field_args['cut_cur'] != 0:
            time_span = yr_cur - field_args['start_date']
            # On the current landscape the decay window and the harvest
            # window cover the same span of years.
            start_years = time_span

            #Calculate the carbon pool due to decaying HWP over the time_span
            feature_carbon_storage_per_pixel = (
                pixel_area * _carbon_pool_in_hwp_from_parcel(
                    field_args['cut_cur'], time_span, start_years,
                    field_args['freq_cur'], field_args['decay_cur']))

            #Next lines calculate biomassPerPixel and volumePerPixel of
            #harvested wood
            number_of_harvests = \
                math.ceil(time_span / float(field_args['freq_cur']))

            # Biomass = (carbon per cut * number of cuts) / carbon density.
            biomass_in_feature = field_args['cut_cur'] * number_of_harvests / \
                float(field_args['c_den_cur'])

            biomass_per_pixel = biomass_in_feature * pixel_area

            # Volume via the biomass conversion/expansion factor.
            volume_per_pixel = biomass_per_pixel / field_args['bcef_cur']

            #Copy biomass_per_pixel and carbon pools to the temporary feature
            #for rasterization of the entire layer later
            for field, value in zip(calculated_attribute_names,
                                    [feature_carbon_storage_per_pixel,
                                     biomass_per_pixel, volume_per_pixel]):
                feature.SetField(feature.GetFieldIndex(field), value)

            #This saves the changes made to feature back to the shape layer
            hwp_shape_layer_copy.SetFeature(feature)

    #Burn each computed attribute into its own output raster.
    for attribute_name, raster_uri in zip(
            calculated_attribute_names, [c_hwp_uri, bio_hwp_uri, vol_hwp_uri]):
        raster = pygeoprocessing.geoprocessing.new_raster_from_base(
            base_dataset, raster_uri, 'GTiff', nodata, gdal.GDT_Float32,
            fill_value=nodata)
        gdal.RasterizeLayer(raster, [1], hwp_shape_layer_copy,
                            options=['ATTRIBUTE=' + attribute_name])
        # Flush and drop the reference so GDAL writes the file to disk.
        raster.FlushCache()
        raster = None
def _calculate_hwp_storage_fut(
        hwp_shapes, base_dataset_uri, c_hwp_uri, bio_hwp_uri, vol_hwp_uri,
        yr_cur, yr_fut, process_pool=None):
    """Calculates carbon storage, hwp biomassPerPixel and volumePerPixel due to
    harvested wood products in parcels on the future landscape.

    hwp_shapes - a dictionary containing the current and/or future harvest
        maps (or nothing)
        hwp_shapes['cur'] - OGR shapefile indicating harvest map from the
            current landscape
        hwp_shapes['fut'] - OGR shapefile indicating harvest map from the
            future landscape
    base_dataset_uri - URI of a GDAL dataset used as the template (extent,
        projection, cell size) for the output rasters
    c_hwp_uri - output GDAL raster URI for carbon stored in harvested wood
        products
    bio_hwp_uri - output GDAL raster URI for biomass of harvested wood
        products
    vol_hwp_uri - output GDAL raster URI for volume of harvested wood
        products
    yr_cur - year of the current landcover map
    yr_fut - year of the future landcover map
    process_pool - a process pool for parallel processing (can be None)
        NOTE(review): unused in this body.

    No return value"""

    ############### Start
    # Squared cell size in map units^2, /10000 converts to hectares.
    pixel_area = pygeoprocessing.geoprocessing.get_cell_size_from_uri(base_dataset_uri) ** 2 / 10000.0  #convert to Ha
    nodata = -5.0

    # Temporary rasters holding the current-harvest contribution; the 'fut'
    # section below adds them into the final output rasters.
    c_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
    bio_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
    vol_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()

    # Pre-create the three output rasters, filled with nodata.
    pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, c_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
    pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, bio_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
    pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, vol_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)

    #Attribute names used on the temporary in-memory shapefiles that hold the
    #per-feature carbon pool, biomassPerPixel and volumePerPixel values
    #before they are rasterized.
    calculatedAttributeNames = ['c_hwp_pool', 'bio_hwp', 'vol_hwp']
    if 'cur' in hwp_shapes:
        hwp_shape = ogr.Open(hwp_shapes['cur'])
        hwp_shape_copy = \
            ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
        hwp_shape_layer_copy = \
            hwp_shape_copy.GetLayer()

        #Create fields in the layers to hold hardwood product pools,
        #biomassPerPixel and volumePerPixel
        for fieldName in calculatedAttributeNames:
            field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
            hwp_shape_layer_copy.CreateField(field_def)

        #Visit each feature and calculate the carbon pool, biomassPerPixel,
        #and volumePerPixel of that parcel
        for feature in hwp_shape_layer_copy:
            #This makes a helpful dictionary to access fields in the feature
            #later in the code
            field_args = _get_fields(feature)

            #If start date and/or the amount of carbon per cut is zero, it
            #doesn't make sense to do any calculation on carbon pools or
            #biomassPerPixel/volumePerPixel
            if field_args['start_date'] != 0 and field_args['cut_cur'] != 0:
                # Decay is evaluated to the midpoint between the two
                # landcover years; harvests span start_date..yr_fut.
                time_span = (yr_fut + yr_cur) / 2.0 - field_args['start_date']
                start_years = yr_fut - field_args['start_date']

                #Calculate the carbon pool due to decaying HWP over the
                #time_span
                feature_carbon_storage_per_pixel = (
                    pixel_area * _carbon_pool_in_hwp_from_parcel(
                        field_args['cut_cur'], time_span, start_years,
                        field_args['freq_cur'], field_args['decay_cur']))

                #Calculate biomassPerPixel and volumePerPixel of harvested wood
                numberOfHarvests = \
                    math.ceil(time_span / float(field_args['freq_cur']))
                #The measure of biomass is in terms of Mg/ha
                biomassInFeaturePerArea = field_args['cut_cur'] * \
                    numberOfHarvests / float(field_args['c_den_cur'])

                biomassPerPixel = biomassInFeaturePerArea * pixel_area
                # Volume via the biomass conversion/expansion factor.
                volumePerPixel = biomassPerPixel / field_args['bcef_cur']

                #Copy biomassPerPixel and carbon pools to the temporary
                #feature for rasterization of the entire layer later
                for field, value in zip(calculatedAttributeNames,
                                        [feature_carbon_storage_per_pixel,
                                         biomassPerPixel, volumePerPixel]):
                    feature.SetField(feature.GetFieldIndex(field), value)

                #This saves the changes made to feature back to the shape layer
                hwp_shape_layer_copy.SetFeature(feature)

        #Burn all the attribute values to rasters
        for attributeName, raster_uri in zip(calculatedAttributeNames,
                                             [c_hwp_cur_uri, bio_hwp_cur_uri, vol_hwp_cur_uri]):
            # NOTE(review): nodata is rebound to -1.0 here and keeps that
            # value for the rest of the function; the 'fut' section below
            # depends on this side effect.
            nodata = -1.0
            pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, raster_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
            raster = gdal.Open(raster_uri, gdal.GA_Update)
            gdal.RasterizeLayer(raster, [1], hwp_shape_layer_copy, options=['ATTRIBUTE=' + attributeName])
            # Flush and drop the reference so GDAL writes to disk.
            raster.FlushCache()
            raster = None

    #handle the future term
    if 'fut' in hwp_shapes:
        hwp_shape = ogr.Open(hwp_shapes['fut'])
        hwp_shape_copy = \
            ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
        hwp_shape_layer_copy = \
            hwp_shape_copy.GetLayer()

        #Create fields in the layers to hold hardwood product pools,
        #biomassPerPixel and volumePerPixel
        for fieldName in calculatedAttributeNames:
            field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
            hwp_shape_layer_copy.CreateField(field_def)

        #Visit each feature and calculate the carbon pool, biomassPerPixel,
        #and volumePerPixel of that parcel
        for feature in hwp_shape_layer_copy:
            #This makes a helpful dictionary to access fields in the feature
            #later in the code
            field_args = _get_fields(feature)

            #If the amount of carbon per cut is zero, it doesn't make sense
            #to do any calculation on carbon pools or
            #biomassPerPixel/volumePerPixel
            if field_args['cut_fut'] != 0:
                # Future harvests run from the midpoint year to yr_fut.
                time_span = yr_fut - (yr_fut + yr_cur) / 2.0
                start_years = time_span

                #Calculate the carbon pool due to decaying HWP over the
                #time_span
                feature_carbon_storage_per_pixel = pixel_area * \
                    _carbon_pool_in_hwp_from_parcel(
                        field_args['cut_fut'], time_span, start_years,
                        field_args['freq_fut'], field_args['decay_fut'])

                #Calculate biomassPerPixel and volumePerPixel of harvested wood
                numberOfHarvests = \
                    math.ceil(time_span / float(field_args['freq_fut']))

                biomassInFeaturePerArea = field_args['cut_fut'] * \
                    numberOfHarvests / float(field_args['c_den_fut'])

                biomassPerPixel = biomassInFeaturePerArea * pixel_area
                volumePerPixel = biomassPerPixel / field_args['bcef_fut']

                #Copy biomassPerPixel and carbon pools to the temporary
                #feature for rasterization of the entire layer later
                for field, value in zip(calculatedAttributeNames,
                                        [feature_carbon_storage_per_pixel,
                                         biomassPerPixel, volumePerPixel]):
                    feature.SetField(feature.GetFieldIndex(field), value)

                #This saves the changes made to feature back to the shape layer
                hwp_shape_layer_copy.SetFeature(feature)

        #Rasterize the future contribution to a temp raster, then sum it with
        #the current-harvest temp raster into the final output raster.
        for attributeName, (raster_uri, cur_raster_uri) in zip(
                calculatedAttributeNames, [(c_hwp_uri, c_hwp_cur_uri), (bio_hwp_uri, bio_hwp_cur_uri), (vol_hwp_uri, vol_hwp_cur_uri)]):

            temp_filename = pygeoprocessing.geoprocessing.temporary_filename()
            pygeoprocessing.geoprocessing.new_raster_from_base_uri(
                base_dataset_uri, temp_filename, 'GTiff',
                nodata, gdal.GDT_Float32, fill_value=nodata)
            temp_raster = gdal.Open(temp_filename, gdal.GA_Update)
            gdal.RasterizeLayer(temp_raster, [1], hwp_shape_layer_copy,
                                options=['ATTRIBUTE=' + attributeName])
            temp_raster.FlushCache()
            temp_raster = None

            #Add temp_raster and the current-harvest raster into the output
            #raster
            nodata = -1.0
            base_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
                raster_uri)
            cur_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
                cur_raster_uri)

            def add_op(base, current):
                """Add two rasters; result is nodata where either is nodata."""
                nodata_mask = (base == base_nodata) | (current == cur_nodata)
                return numpy.where(nodata_mask, nodata, base+current)

            pixel_size_out = (
                pygeoprocessing.geoprocessing.get_cell_size_from_uri(
                    raster_uri))
            pygeoprocessing.geoprocessing.vectorize_datasets(
                [cur_raster_uri, temp_filename], add_op, raster_uri,
                gdal.GDT_Float32, nodata,
                pixel_size_out, "intersection", dataset_to_align_index=0,
                vectorize_op=False)
def _get_fields(feature):
"""Return a dict with all fields in the given feature.
feature - an OGR feature.
Returns an assembled python dict with a mapping of
fieldname -> fieldvalue"""
fields = {}
for i in xrange(feature.GetFieldCount()):
field_def = feature.GetFieldDefnRef(i)
name = field_def.GetName().lower()
value = feature.GetField(i)
fields[name] = value
return fields
def _carbon_pool_in_hwp_from_parcel(carbonPerCut, start_years, timeSpan, harvestFreq,
decay):
"""This is the summation equation that appears in equations 1, 5, 6, and 7
from the user's guide
carbonPerCut - The amount of carbon removed from a parcel during a
harvest period
start_years - The number of years ago that the harvest first started
timeSpan - The number of years to calculate the harvest over
harvestFreq - How many years between harvests
decay - the rate at which carbon is decaying from HWP harvested from
parcels
returns a float indicating the amount of carbon stored from HWP
harvested in units of Mg/ha"""
carbonSum = 0.0
omega = math.log(2) / decay
#Recall that xrange is nonexclusive on the upper bound, so it corresponds
#to the -1 in the summation terms given in the user's manual
for t in xrange(int(math.ceil(float(start_years) / harvestFreq))):
carbonSum += (1 - math.exp(-omega)) / (omega *
math.exp((timeSpan - t * harvestFreq) * omega))
return carbonSum * carbonPerCut
| 47.006527
| 150
| 0.629211
|
4a05b363287b5f7f12de460f96701f8638385110
| 2,638
|
py
|
Python
|
docs_build/tutorials_templates/data_management/upload_and_manage_data_and_metadata/scripts.py
|
dataloop-ai/sdk_examples
|
422d5629df5af343d2dc275e9570bb83c4e2f49d
|
[
"MIT"
] | 3
|
2022-01-07T20:33:49.000Z
|
2022-03-22T12:41:30.000Z
|
docs_build/tutorials_templates/data_management/upload_and_manage_data_and_metadata/scripts.py
|
dataloop-ai/sdk_examples
|
422d5629df5af343d2dc275e9570bb83c4e2f49d
|
[
"MIT"
] | null | null | null |
docs_build/tutorials_templates/data_management/upload_and_manage_data_and_metadata/scripts.py
|
dataloop-ai/sdk_examples
|
422d5629df5af343d2dc275e9570bb83c4e2f49d
|
[
"MIT"
] | 3
|
2021-12-29T13:11:30.000Z
|
2022-03-22T12:25:50.000Z
|
def section1():
    """Upload a list of local image files to a dataset folder."""
    import dtlpy as dl
    if dl.token_expired():
        dl.login()
    project = dl.projects.get(project_name='project_name')
    dataset = project.datasets.get(dataset_name='dataset_name')
    # Remote path is optional; by default items land in the dataset root.
    local_files = [
        r'C:/home/project/images/John Morris.jpg',
        r'C:/home/project/images/John Benton.jpg',
        r'C:/home/project/images/Liu Jinli.jpg',
    ]
    dataset.items.upload(local_path=local_files,
                         remote_path='/folder_name')
def section2():
    """Upload an entire local directory to a dataset folder."""
    import dtlpy as dl
    if dl.token_expired():
        dl.login()
    dataset = dl.projects.get(
        project_name='project_name').datasets.get(dataset_name='dataset_name')
    # Remote path is optional; by default items land in the dataset root.
    dataset.items.upload(local_path=r'C:/home/project/images',
                         remote_path='/folder_name')
def section3():
    """Upload an item referenced by URL (a link item) to a dataset."""
    # Fix: the original snippet referenced `dl` and `project` without
    # importing/defining them, which raised NameError at call time. The
    # setup below mirrors section1/section2.
    import dtlpy as dl
    if dl.token_expired():
        dl.login()
    project = dl.projects.get(project_name='project_name')
    dataset = project.datasets.get(dataset_name='dataset_name')
    url_path = 'http://ww.some_website/beautiful_flower.jpg'
    # Create link
    link = dl.UrlLink(ref=url_path, mimetype='image', name='file_name.jpg')
    # Upload link
    item = dataset.items.upload(local_path=link)
def section4():
    """Show the uploaded item in the web UI."""
    # Fix: the original body began with the bare name `show`, which raised
    # NameError at call time; it was evidently meant as a comment marker.
    # NOTE(review): `item` is assumed to come from the upload in section3 —
    # in the rendered tutorial these snippets share a namespace; confirm.
    item.open_in_web()
def section5():
    """Batch-upload items with annotations and metadata via a pandas table."""
    import pandas
    import dtlpy as dl
    dataset = dl.datasets.get(dataset_id='id')  # Get dataset
    # Each row describes one item: local file path, matching annotations
    # file, destination folder and name on the dataset, and user metadata.
    rows = [
        {'local_path': r"E:\TypesExamples\000000000064.jpg",
         'local_annotations_path': r"E:\TypesExamples\000000000776.json",
         'remote_path': "/first",
         'remote_name': 'f.jpg',
         'item_metadata': {'user': {'dummy': 'fir'}}},
        {'local_path': r"E:\TypesExamples\000000000776.jpg",
         'local_annotations_path': r"E:\TypesExamples\000000000776.json",
         'remote_path': "/second",
         'remote_name': 's.jpg',
         'item_metadata': {'user': {'dummy': 'sec'}}},
    ]
    df = pandas.DataFrame(rows)  # Make data into table
    items = dataset.items.upload(local_path=df,
                                 overwrite=True)  # Upload table to platform
| 46.280702
| 128
| 0.619409
|
4a05b39641e2409ccbdc0cacceb67062ccd82a77
| 11,223
|
py
|
Python
|
homeassistant/components/abode/__init__.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 1
|
2021-12-07T08:14:59.000Z
|
2021-12-07T08:14:59.000Z
|
homeassistant/components/abode/__init__.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 60
|
2021-01-25T12:30:32.000Z
|
2022-03-30T08:52:14.000Z
|
homeassistant/components/abode/__init__.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 2
|
2021-11-19T23:20:40.000Z
|
2021-11-20T00:18:40.000Z
|
"""Support for the Abode Security System."""
from functools import partial
from abodepy import Abode
from abodepy.exceptions import AbodeAuthenticationException, AbodeException
import abodepy.helpers.timeline as TIMELINE
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DATE,
ATTR_DEVICE_ID,
ATTR_ENTITY_ID,
ATTR_TIME,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import ATTRIBUTION, DEFAULT_CACHEDB, DOMAIN, LOGGER
# Config-entry data key that enables polling-based updates.
CONF_POLLING = "polling"

# Names of the services this integration registers.
SERVICE_SETTINGS = "change_setting"
SERVICE_CAPTURE_IMAGE = "capture_image"
SERVICE_TRIGGER_AUTOMATION = "trigger_automation"

# Attribute keys used in service payloads and fired events.
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_TYPE = "device_type"
ATTR_EVENT_CODE = "event_code"
ATTR_EVENT_NAME = "event_name"
ATTR_EVENT_TYPE = "event_type"
ATTR_EVENT_UTC = "event_utc"
ATTR_SETTING = "setting"
ATTR_USER_NAME = "user_name"
ATTR_APP_TYPE = "app_type"
ATTR_EVENT_BY = "event_by"
ATTR_VALUE = "value"

# YAML configuration was removed; warn (don't fail) if it is still present.
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)

# Validation schemas for the services registered in setup_hass_services.
CHANGE_SETTING_SCHEMA = vol.Schema(
    {vol.Required(ATTR_SETTING): cv.string, vol.Required(ATTR_VALUE): cv.string}
)

CAPTURE_IMAGE_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})

AUTOMATION_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})

# Entity platforms forwarded from the config entry.
PLATFORMS = [
    Platform.ALARM_CONTROL_PANEL,
    Platform.BINARY_SENSOR,
    Platform.LOCK,
    Platform.SWITCH,
    Platform.COVER,
    Platform.CAMERA,
    Platform.LIGHT,
    Platform.SENSOR,
]
class AbodeSystem:
    """Abode System class.

    Runtime container stored in hass.data[DOMAIN] and shared by the
    integration's platforms and services.
    """

    def __init__(self, abode, polling):
        """Initialize the system.

        abode -- connected abodepy Abode client instance.
        polling -- truthy if the integration should poll for updates.
        """
        self.abode = abode
        self.polling = polling
        # Entity ids registered by this integration; the capture_image and
        # trigger_automation services filter their targets against this set.
        self.entity_ids = set()
        # Callback that removes the stop-event listener; set during setup
        # and invoked on unload.
        self.logout_listener = None
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up Abode integration from a config entry."""
    entry_data = config_entry.data
    cache = hass.config.path(DEFAULT_CACHEDB)

    # Older config entries were created without a unique_id; backfill it
    # from the account username.
    if config_entry.unique_id is None:
        hass.config_entries.async_update_entry(
            config_entry, unique_id=entry_data[CONF_USERNAME]
        )

    try:
        # The Abode constructor performs blocking network I/O, so run it in
        # the executor.
        abode = await hass.async_add_executor_job(
            Abode,
            entry_data.get(CONF_USERNAME),
            entry_data.get(CONF_PASSWORD),
            True,
            True,
            True,
            cache,
        )
    except AbodeAuthenticationException as ex:
        raise ConfigEntryAuthFailed(f"Invalid credentials: {ex}") from ex
    except (AbodeException, ConnectTimeout, HTTPError) as ex:
        raise ConfigEntryNotReady(f"Unable to connect to Abode: {ex}") from ex

    hass.data[DOMAIN] = AbodeSystem(abode, entry_data.get(CONF_POLLING))

    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)

    await setup_hass_events(hass)
    await hass.async_add_executor_job(setup_hass_services, hass)
    await hass.async_add_executor_job(setup_abode_events, hass)

    return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload a config entry.

    Removes the custom services, unloads all platforms, stops the Abode
    event stream, logs out, and clears the stored integration state.
    """
    hass.services.async_remove(DOMAIN, SERVICE_SETTINGS)
    hass.services.async_remove(DOMAIN, SERVICE_CAPTURE_IMAGE)
    hass.services.async_remove(DOMAIN, SERVICE_TRIGGER_AUTOMATION)
    unload_ok = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    # Stop streaming and log out via the executor (blocking client calls).
    await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.stop)
    await hass.async_add_executor_job(hass.data[DOMAIN].abode.logout)
    # Detach the EVENT_HOMEASSISTANT_STOP listener registered at setup.
    hass.data[DOMAIN].logout_listener()
    hass.data.pop(DOMAIN)
    return unload_ok
def setup_hass_services(hass: HomeAssistant) -> None:
    """Register the integration's Home Assistant services.

    Runs in the executor (called via async_add_executor_job), hence the
    synchronous ``hass.services.register`` calls.
    """
    def change_setting(call: ServiceCall) -> None:
        """Change an Abode system setting."""
        setting = call.data[ATTR_SETTING]
        value = call.data[ATTR_VALUE]
        try:
            hass.data[DOMAIN].abode.set_setting(setting, value)
        except AbodeException as ex:
            # Best-effort: log and continue rather than failing the call.
            LOGGER.warning(ex)
    def capture_image(call: ServiceCall) -> None:
        """Capture a new image."""
        entity_ids = call.data[ATTR_ENTITY_ID]
        # Only act on entity ids that this integration actually owns.
        target_entities = [
            entity_id
            for entity_id in hass.data[DOMAIN].entity_ids
            if entity_id in entity_ids
        ]
        for entity_id in target_entities:
            # Each camera entity listens on its own dispatcher signal.
            signal = f"abode_camera_capture_{entity_id}"
            dispatcher_send(hass, signal)
    def trigger_automation(call: ServiceCall) -> None:
        """Trigger an Abode automation."""
        entity_ids = call.data[ATTR_ENTITY_ID]
        target_entities = [
            entity_id
            for entity_id in hass.data[DOMAIN].entity_ids
            if entity_id in entity_ids
        ]
        for entity_id in target_entities:
            signal = f"abode_trigger_automation_{entity_id}"
            dispatcher_send(hass, signal)
    hass.services.register(
        DOMAIN, SERVICE_SETTINGS, change_setting, schema=CHANGE_SETTING_SCHEMA
    )
    hass.services.register(
        DOMAIN, SERVICE_CAPTURE_IMAGE, capture_image, schema=CAPTURE_IMAGE_SCHEMA
    )
    hass.services.register(
        DOMAIN, SERVICE_TRIGGER_AUTOMATION, trigger_automation, schema=AUTOMATION_SCHEMA
    )
async def setup_hass_events(hass: HomeAssistant) -> None:
    """Home Assistant start and stop callbacks.

    Starts the Abode event stream (unless polling is enabled) and registers
    a one-shot listener to log out when Home Assistant stops.
    """
    def logout(event):
        """Logout of Abode."""
        if not hass.data[DOMAIN].polling:
            hass.data[DOMAIN].abode.events.stop()
        hass.data[DOMAIN].abode.logout()
        LOGGER.info("Logged out of Abode")
    if not hass.data[DOMAIN].polling:
        # Streaming mode: start the event socket in the executor.
        await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.start)
    # Keep the unsubscribe handle so async_unload_entry can detach it.
    hass.data[DOMAIN].logout_listener = hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, logout
    )
def setup_abode_events(hass: HomeAssistant) -> None:
    """Event callbacks.

    Subscribes to Abode timeline event groups and re-fires each event on the
    Home Assistant event bus with a flattened attribute payload.
    """
    def event_callback(event, event_json):
        """Handle an event callback from Abode."""
        # Missing keys default to "" so the bus payload shape is stable.
        data = {
            ATTR_DEVICE_ID: event_json.get(ATTR_DEVICE_ID, ""),
            ATTR_DEVICE_NAME: event_json.get(ATTR_DEVICE_NAME, ""),
            ATTR_DEVICE_TYPE: event_json.get(ATTR_DEVICE_TYPE, ""),
            ATTR_EVENT_CODE: event_json.get(ATTR_EVENT_CODE, ""),
            ATTR_EVENT_NAME: event_json.get(ATTR_EVENT_NAME, ""),
            ATTR_EVENT_TYPE: event_json.get(ATTR_EVENT_TYPE, ""),
            ATTR_EVENT_UTC: event_json.get(ATTR_EVENT_UTC, ""),
            ATTR_USER_NAME: event_json.get(ATTR_USER_NAME, ""),
            ATTR_APP_TYPE: event_json.get(ATTR_APP_TYPE, ""),
            ATTR_EVENT_BY: event_json.get(ATTR_EVENT_BY, ""),
            ATTR_DATE: event_json.get(ATTR_DATE, ""),
            ATTR_TIME: event_json.get(ATTR_TIME, ""),
        }
        hass.bus.fire(event, data)
    # Timeline groups forwarded to the HA event bus.
    events = [
        TIMELINE.ALARM_GROUP,
        TIMELINE.ALARM_END_GROUP,
        TIMELINE.PANEL_FAULT_GROUP,
        TIMELINE.PANEL_RESTORE_GROUP,
        TIMELINE.AUTOMATION_GROUP,
        TIMELINE.DISARM_GROUP,
        TIMELINE.ARM_GROUP,
        TIMELINE.ARM_FAULT_GROUP,
        TIMELINE.TEST_GROUP,
        TIMELINE.CAPTURE_GROUP,
        TIMELINE.DEVICE_GROUP,
    ]
    for event in events:
        # partial() bakes the group name in as the bus event type.
        hass.data[DOMAIN].abode.events.add_event_callback(
            event, partial(event_callback, event)
        )
class AbodeEntity(Entity):
    """Representation of an Abode entity."""
    _attr_attribution = ATTRIBUTION
    def __init__(self, data):
        """Initialize Abode entity.

        data is the shared AbodeSystem stored in hass.data[DOMAIN].
        """
        self._data = data
        # Poll only when event streaming is disabled for this config entry.
        self._attr_should_poll = data.polling
    async def async_added_to_hass(self):
        """Subscribe to Abode connection status updates."""
        await self.hass.async_add_executor_job(
            self._data.abode.events.add_connection_status_callback,
            self.unique_id,
            self._update_connection_status,
        )
        # Track our entity id so the custom services can target it.
        self.hass.data[DOMAIN].entity_ids.add(self.entity_id)
    async def async_will_remove_from_hass(self):
        """Unsubscribe from Abode connection status updates."""
        await self.hass.async_add_executor_job(
            self._data.abode.events.remove_connection_status_callback, self.unique_id
        )
    def _update_connection_status(self):
        """Update the entity available property."""
        # Mirror the Abode event-socket connection state into availability.
        self._attr_available = self._data.abode.events.connected
        self.schedule_update_ha_state()
class AbodeDevice(AbodeEntity):
    """Representation of an Abode device."""
    def __init__(self, data, device):
        """Initialize Abode device.

        device is the underlying Abode library device object.
        """
        super().__init__(data)
        self._device = device
        self._attr_name = device.name
        self._attr_unique_id = device.device_uuid
    async def async_added_to_hass(self):
        """Subscribe to device events."""
        await super().async_added_to_hass()
        await self.hass.async_add_executor_job(
            self._data.abode.events.add_device_callback,
            self._device.device_id,
            self._update_callback,
        )
    async def async_will_remove_from_hass(self):
        """Unsubscribe from device events."""
        await super().async_will_remove_from_hass()
        await self.hass.async_add_executor_job(
            self._data.abode.events.remove_all_device_callbacks, self._device.device_id
        )
    def update(self):
        """Update device state."""
        # Blocking refresh; only used when polling is enabled.
        self._device.refresh()
    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return {
            "device_id": self._device.device_id,
            "battery_low": self._device.battery_low,
            "no_response": self._device.no_response,
            "device_type": self._device.type,
        }
    @property
    def device_info(self) -> DeviceInfo:
        """Return device registry information for this entity."""
        return DeviceInfo(
            identifiers={(DOMAIN, self._device.device_id)},
            manufacturer="Abode",
            model=self._device.type,
            name=self._device.name,
        )
    def _update_callback(self, device):
        """Update the device state."""
        # Called by the Abode event stream; push the new state into HA.
        self.schedule_update_ha_state()
class AbodeAutomation(AbodeEntity):
    """Representation of an Abode automation."""
    def __init__(self, data, automation):
        """Initialize for Abode automation.

        automation is the underlying Abode library automation object.
        """
        super().__init__(data)
        self._automation = automation
        self._attr_name = automation.name
        self._attr_unique_id = automation.automation_id
        self._attr_extra_state_attributes = {
            "type": "CUE automation",
        }
    def update(self):
        """Update automation state."""
        # Blocking refresh; only used when polling is enabled.
        self._automation.refresh()
| 31.883523
| 88
| 0.68324
|
4a05b5b7aa9b2e916d1da2477a0d98bc5ddc80dd
| 24,402
|
py
|
Python
|
tests/system_tests_delivery_abort.py
|
prvn002/qpid-dispatch
|
f277905077e164aac2d38736441be5c49d2dfaf7
|
[
"Apache-2.0"
] | null | null | null |
tests/system_tests_delivery_abort.py
|
prvn002/qpid-dispatch
|
f277905077e164aac2d38736441be5c49d2dfaf7
|
[
"Apache-2.0"
] | 157
|
2019-06-11T21:22:19.000Z
|
2022-03-30T21:02:59.000Z
|
tests/system_tests_delivery_abort.py
|
prvn002/qpid-dispatch
|
f277905077e164aac2d38736441be5c49d2dfaf7
|
[
"Apache-2.0"
] | 1
|
2017-02-06T15:13:27.000Z
|
2017-02-06T15:13:27.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton import Message
from system_test import Logger, TestCase, Qdrouterd, main_module, unittest, TIMEOUT, TestTimeout, PollTimeout
from proton.handlers import MessagingHandler
from proton.reactor import Container
from qpid_dispatch_internal.compat import BINARY
class RouterTest(TestCase):
    """System tests for aborted/truncated deliveries over message, link and
    multicast routes, on a single router and across a two-router network."""
    inter_router_port = None
    @classmethod
    def setUpClass(cls):
        """Start a router"""
        super(RouterTest, cls).setUpClass()
        def router(name, connection):
            # Build and start one interior router; 'connection' is either the
            # inter-router listener (router A) or the connector to A (router B).
            config = [
                ('router', {'mode': 'interior', 'id': name, 'allowUnsettledMulticast': 'yes'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'role': 'route-container'}),
                ('linkRoute', {'prefix': 'link', 'direction': 'in', 'containerId': 'LRC'}),
                ('linkRoute', {'prefix': 'link', 'direction': 'out', 'containerId': 'LRC'}),
                ('address', {'prefix': 'closest', 'distribution': 'closest'}),
                ('address', {'prefix': 'spread', 'distribution': 'balanced'}),
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                connection
            ]
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
        cls.routers = []
        inter_router_port = cls.tester.get_port()
        router('A', ('listener', {'role': 'inter-router', 'port': inter_router_port}))
        router('B', ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port}))
        # Block until the two routers see each other before any test runs.
        cls.routers[0].wait_router_connected('B')
        cls.routers[1].wait_router_connected('A')
    def test_01_message_route_truncated_one_router(self):
        """Truncated streamed delivery; sender and receiver on the same router."""
        test = MessageRouteTruncateTest(self.routers[0].addresses[0],
                                        self.routers[0].addresses[0],
                                        "addr_01")
        test.run()
        self.assertEqual(None, test.error)
    def test_02_message_route_truncated_two_routers(self):
        """Truncated streamed delivery crossing the inter-router link."""
        test = MessageRouteTruncateTest(self.routers[0].addresses[0],
                                        self.routers[1].addresses[0],
                                        "addr_02")
        test.run()
        self.assertEqual(None, test.error)
    def test_03_link_route_truncated_one_router(self):
        """Truncated delivery over a link route on a single router."""
        test = LinkRouteTruncateTest(self.routers[0].addresses[0],
                                     self.routers[0].addresses[1],
                                     "link.addr_03",
                                     self.routers[0].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_04_link_route_truncated_two_routers(self):
        """Truncated delivery over a link route spanning both routers."""
        test = LinkRouteTruncateTest(self.routers[1].addresses[0],
                                     self.routers[0].addresses[1],
                                     "link.addr_04",
                                     self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_05_message_route_abort_one_router(self):
        """Explicitly aborted deliveries of various sizes, single router."""
        test = MessageRouteAbortTest(self.routers[0].addresses[0],
                                     self.routers[0].addresses[0],
                                     "addr_05")
        test.run()
        if test.error:
            test.logger.dump()
        self.assertEqual(None, test.error)
    def test_06_message_route_abort_two_routers(self):
        """Explicitly aborted deliveries of various sizes, across routers."""
        test = MessageRouteAbortTest(self.routers[0].addresses[0],
                                     self.routers[1].addresses[0],
                                     "addr_06")
        test.run()
        if test.error:
            test.logger.dump()
        self.assertEqual(None, test.error)
    def test_07_multicast_truncate_one_router(self):
        """Truncated delivery fanned out to two multicast receivers."""
        test = MulticastTruncateTest(self.routers[0].addresses[0],
                                     self.routers[0].addresses[0],
                                     self.routers[0].addresses[0],
                                     "multicast.addr_07")
        test.run()
        self.assertEqual(None, test.error)
class Entity(object):
    """Decoded management-agent response record.

    Attribute access for anything other than the status fields falls
    through to the attribute map returned by the router's agent.
    """

    def __init__(self, status_code, status_description, attrs):
        self.status_code = status_code
        self.status_description = status_description
        self.attrs = attrs

    def __getattr__(self, key):
        # Note: raises KeyError (not AttributeError) for unknown names.
        return self.attrs[key]
class RouterProxy(object):
    """Builds management requests for, and decodes replies from, a router."""

    def __init__(self, reply_addr):
        self.reply_addr = reply_addr

    def response(self, msg):
        """Wrap a management reply message in an Entity."""
        props = msg.properties
        return Entity(props['statusCode'], props['statusDescription'], msg.body)

    def read_address(self, name):
        """Build a READ request for the named router address record."""
        props = {'operation': 'READ', 'type': 'org.apache.qpid.dispatch.router.address', 'name': name}
        return Message(properties=props, reply_to=self.reply_addr)

    def query_addresses(self):
        """Build a QUERY request for all router address records."""
        props = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.address'}
        return Message(properties=props, reply_to=self.reply_addr)
class MessageRouteTruncateTest(MessagingHandler):
    """Proton handler: stream a large delivery over a message route, close the
    sender mid-stream so the delivery is truncated, and verify the receiver
    sees an aborted delivery followed by the later short messages intact."""
    def __init__(self, sender_host, receiver_host, address):
        super(MessageRouteTruncateTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.address = address
        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender1 = None
        self.sender2 = None
        self.sender3 = None
        self.receiver = None
        self.streaming = False
        self.delivery = None
        # 100-byte chunk repeated to build the large streamed payload.
        self.data = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
        self.long_data = ""
        self.sent_stream = 0
        # Ordered script of send operations, popped one at a time by send().
        self.program = ['Send_Short_1', 'Send_Long_Truncated', 'Send_Short_2', 'Send_Short_3']
        self.result = []
        self.expected_result = ['Send_Short_1', 'Aborted_Delivery', '2', '2', '2', '2', '2',
                                '2', '2', '2', '2', '2', 'Send_Short_2', '3', '3', '3', '3',
                                '3', '3', '3', '3', '3', '3', 'Send_Short_3']
    def timeout(self):
        self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (self.program, self.result)
        self.sender_conn.close()
        self.receiver_conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.sender_conn = event.container.connect(self.sender_host)
        self.receiver_conn = event.container.connect(self.receiver_host)
        self.sender1 = event.container.create_sender(self.sender_conn, self.address, name="S1")
        self.sender2 = event.container.create_sender(self.sender_conn, self.address, name="S2")
        self.sender3 = event.container.create_sender(self.sender_conn, self.address, name="S3")
        self.receiver = event.container.create_receiver(self.receiver_conn, self.address)
    def stream(self):
        # Push raw body bytes; after ~1MB close the sender mid-delivery,
        # which truncates (aborts) the in-flight streamed message.
        self.sender1.stream(BINARY(self.long_data))
        self.sent_stream += len(self.long_data)
        if self.sent_stream >= 1000000:
            self.streaming = False
            self.sender1.close()
            self.send()
    def send(self):
        # Execute the next scripted operation, if any.
        next_op = self.program.pop(0) if len(self.program) > 0 else None
        if next_op == 'Send_Short_1':
            m = Message(body="%s" % next_op)
            self.sender1.send(m)
        elif next_op == 'Send_Long_Truncated':
            for i in range(100):
                self.long_data += self.data
            # Start a raw delivery (no Message framing) to be streamed.
            self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
            self.streaming = True
            self.stream()
        elif next_op == 'Send_Short_2':
            m = Message(body="2")
            for i in range(10):
                self.sender2.send(m)
            m = Message(body="Send_Short_2")
            self.sender2.send(m)
            self.sender2.close()
        elif next_op == 'Send_Short_3':
            m = Message(body="3")
            for i in range(10):
                self.sender3.send(m)
            m = Message(body="%s" % next_op)
            self.sender3.send(m)
            self.sender_conn.close()
    def on_sendable(self, event):
        if event.sender == self.sender1 and self.program[0] == 'Send_Short_1':
            self.send()
        if self.streaming:
            self.stream()
    def on_message(self, event):
        m = event.message
        self.result.append(m.body)
        # Marker messages drive the script forward; the final marker checks
        # the full received sequence against the expectation.
        if m.body == 'Send_Short_1':
            self.send()
        elif m.body == 'Send_Short_2':
            self.send()
        elif m.body == 'Send_Short_3':
            if self.result != self.expected_result:
                self.error = "Expected: %r, Actual: %r" % (self.expected_result, self.result)
            self.receiver_conn.close()
            self.timer.cancel()
    def on_aborted(self, event):
        # The truncated delivery arrives at the receiver as an abort.
        self.result.append('Aborted_Delivery')
        self.send()
    def run(self):
        Container(self).run()
class LinkRouteTruncateTest(MessagingHandler):
    """Proton handler: same truncation scenario as MessageRouteTruncateTest but
    over a link route. This handler also acts as the link-route container
    ('LRC') and polls the management agent until the route is active."""
    def __init__(self, sender_host, receiver_host, address, query_host):
        super(LinkRouteTruncateTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.address = address
        self.query_host = query_host
        self.sender_conn = None
        self.receiver_conn = None
        self.query_conn = None
        self.error = None
        self.sender1 = None
        self.receiver = None
        self.poll_timer = None
        self.streaming = False
        self.delivery = None
        # 100-byte chunk repeated to build the large streamed payload.
        self.data = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
        self.long_data = ""
        self.sent_stream = 0
        self.program = ['Send_Short_1', 'Send_Long_Truncated']
        self.result = []
        self.expected_result = ['Send_Short_1', 'Aborted_Delivery']
    def timeout(self):
        self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (self.program, self.result)
        self.sender_conn.close()
        self.receiver_conn.close()
        self.query_conn.close()
        if self.poll_timer:
            self.poll_timer.cancel()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.sender_conn = event.container.connect(self.sender_host)
        self.receiver_conn = event.container.connect(self.receiver_host)
        self.query_conn = event.container.connect(self.query_host)
        # Management plumbing used to poll for link-route readiness.
        self.reply_receiver = event.container.create_receiver(self.query_conn, dynamic=True)
        self.agent_sender = event.container.create_sender(self.query_conn, "$management")
    def setup_first_links(self, event):
        # Deferred until the link route is confirmed active by polling.
        self.sender1 = event.container.create_sender(self.sender_conn, self.address, name="S1")
    def stream(self):
        # Push raw body bytes; after ~1MB close the sender mid-delivery.
        self.sender1.stream(BINARY(self.long_data))
        self.sent_stream += len(self.long_data)
        if self.sent_stream >= 1000000:
            self.streaming = False
            self.sender1.close()
    def send(self):
        next_op = self.program.pop(0) if len(self.program) > 0 else None
        if next_op == 'Send_Short_1':
            m = Message(body="%s" % next_op)
            self.sender1.send(m)
        elif next_op == 'Send_Long_Truncated':
            for i in range(100):
                self.long_data += self.data
            self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
            self.streaming = True
            self.stream()
    def poll_timeout(self):
        self.poll()
    def poll(self):
        # Ask the router whether the link-route address 'Clink' is active.
        request = self.proxy.read_address('Clink')
        self.agent_sender.send(request)
    def on_sendable(self, event):
        if event.sender == self.sender1 and len(self.program) > 0 and self.program[0] == 'Send_Short_1':
            self.send()
        if event.sender == self.sender1 and self.streaming:
            self.stream()
    def on_link_opening(self, event):
        # We are the link-route container: accept incoming attaches.
        if event.receiver:
            self.receiver = event.receiver
            event.receiver.target.address = self.address
            event.receiver.open()
    def on_link_opened(self, event):
        if event.receiver == self.reply_receiver:
            self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
            self.poll()
    def on_message(self, event):
        if event.receiver == self.reply_receiver:
            # Management reply: start the test links once the route is up,
            # otherwise re-poll after a short delay.
            response = self.proxy.response(event.message)
            if response.status_code == 200 and (response.remoteCount + response.containerCount) > 0:
                if self.poll_timer:
                    self.poll_timer.cancel()
                    self.poll_timer = None
                self.setup_first_links(event)
            else:
                self.poll_timer = event.reactor.schedule(0.25, PollTimeout(self))
            return
        m = event.message
        self.result.append(m.body)
        if m.body == 'Send_Short_1':
            self.send()
    def on_aborted(self, event):
        self.result.append('Aborted_Delivery')
        # The abort is the last expected event; validate and shut down.
        if self.result != self.expected_result:
            self.error = "Expected: %r, Actual: %r" % (self.expected_result, self.result)
        self.sender_conn.close()
        self.receiver_conn.close()
        self.query_conn.close()
        self.timer.cancel()
    def run(self):
        container = Container(self)
        # Container id must match the linkRoute containerId in router config.
        container.container_id = "LRC"
        container.run()
class MessageRouteAbortTest(MessagingHandler):
    """Proton handler: interleave delivered ('D') and deliberately aborted
    ('A') messages of increasing sizes and verify that only the delivered
    bodies arrive, in order. 'F' sends the FINISH sentinel."""
    def __init__(self, sender_host, receiver_host, address):
        super(MessageRouteAbortTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.address = address
        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender1 = None
        self.receiver = None
        self.delivery = None
        self.logger = Logger(title="MessageRouteAbortTest")
        # (op, size) script: D=deliver, A=abort mid-flight, F=finish sentinel.
        self.program = [('D', 10), ('D', 20), ('A', 30), ('A', 40), ('D', 50), ('D', 60),
                        ('A', 100), ('D', 110),
                        ('A', 1000), ('A', 1010), ('A', 1020), ('A', 1030), ('A', 1040), ('D', 1050),
                        ('A', 10000), ('A', 10010), ('A', 10020), ('A', 10030), ('A', 10040), ('D', 10050),
                        ('A', 100000), ('A', 100010), ('A', 100020), ('A', 100030), ('A', 100040), ('D', 100050), ('F', 10)]
        self.result = []
        # Body lengths of the 'D' messages only; aborted ones must not arrive.
        self.expected_result = [10, 20, 50, 60, 110, 1050, 10050, 100050]
    def timeout(self):
        self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (self.program, self.result)
        self.logger.log(self.error)
        self.sender_conn.close()
        self.receiver_conn.close()
    def on_start(self, event):
        self.logger.log("on_start")
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.sender_conn = event.container.connect(self.sender_host)
        self.receiver_conn = event.container.connect(self.receiver_host)
        self.sender1 = event.container.create_sender(self.sender_conn, self.address, name="S1")
        self.receiver = event.container.create_receiver(self.receiver_conn, self.address)
    def send(self):
        if self.delivery:
            # An 'A' delivery is in flight; on_sendable will abort it first.
            self.logger.log("send(): Do not send - delivery to be aborted is in flight")
            return
        op, size = self.program.pop(0) if len(self.program) > 0 else (None, None)
        self.logger.log("send - op=%s, size=%s" % (str(op), str(size)))
        if op is None:
            return
        body = ""
        if op == 'F':
            body = "FINISH"
        else:
            # Build a body of exactly 'size' bytes from 10-char segments.
            bod = str(size)
            bod2 = "0000000000" + bod
            bod3 = "." + bod2[-9:]
            body = bod3 * (size // 10)
        msg = Message(body=body)
        if op in 'DF':
            self.logger.log("send(): Send message size: %d" % (size))
            delivery = self.sender1.send(msg)
        if op == 'A':
            # Stream the encoded message without settling, so the next
            # on_sendable can abort the half-sent delivery.
            self.logger.log("send(): Start aborted message size: %d" % (size))
            self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
            encoded = msg.encode()
            self.sender1.stream(encoded)
    def finish(self):
        if self.result != self.expected_result:
            self.error = "Expected: %r, Actual: %r" % (self.expected_result, self.result)
            self.logger.log(self.error)
        self.sender_conn.close()
        self.receiver_conn.close()
        self.timer.cancel()
    def on_sendable(self, event):
        self.logger.log("on_sendable")
        if event.sender == self.sender1:
            if self.delivery:
                self.delivery.abort()
                self.delivery = None
                self.logger.log("on_sendable aborts delivery")
            else:
                self.send()
    def on_message(self, event):
        m = event.message
        if m.body == "FINISH":
            self.finish()
        else:
            # Record only the length; content is synthetic filler.
            self.logger.log("on_message receives len: %d" % (len(m.body)))
            self.result.append(len(m.body))
            self.send()
    def run(self):
        Container(self).run()
class MulticastTruncateTest(MessagingHandler):
    """Proton handler: same truncation script as MessageRouteTruncateTest, but
    the address is multicast with two receivers; both must observe the
    identical sequence including the aborted delivery."""
    def __init__(self, sender_host, receiver_host1, receiver_host2, address):
        super(MulticastTruncateTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host1 = receiver_host1
        self.receiver_host2 = receiver_host2
        self.address = address
        self.r_attach_count = 0
        self.senders_created = False
        self.sender_conn = None
        self.receiver1_conn = None
        self.receiver2_conn = None
        self.error = None
        self.sender1 = None
        self.sender2 = None
        self.sender3 = None
        self.receiver1 = None
        self.receiver2 = None
        self.streaming = False
        self.delivery = None
        # 100-byte chunk repeated to build the large streamed payload.
        self.data = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
        self.long_data = ""
        self.completions = 0
        self.sent_stream = 0
        self.program = ['Send_Short_1', 'Send_Long_Truncated', 'Send_Short_2', 'Send_Short_3']
        self.result1 = []
        self.result2 = []
        self.expected_result = ['Send_Short_1', 'Aborted_Delivery', '2', '2', '2', '2', '2',
                                '2', '2', '2', '2', '2', 'Send_Short_2', '3', '3', '3', '3',
                                '3', '3', '3', '3', '3', '3', 'Send_Short_3']
    def timeout(self):
        self.error = "Timeout Expired - Unprocessed Ops: %r, Result1: %r, Result2: %r" % (self.program, self.result1, self.result2)
        self.sender_conn.close()
        self.receiver1_conn.close()
        self.receiver2_conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.sender_conn = event.container.connect(self.sender_host)
        self.receiver1_conn = event.container.connect(self.receiver_host1)
        self.receiver2_conn = event.container.connect(self.receiver_host2)
        # Senders are created later, once both receivers have attached.
        self.receiver1 = event.container.create_receiver(self.receiver1_conn, self.address)
        self.receiver2 = event.container.create_receiver(self.receiver2_conn, self.address)
    def stream(self):
        # Push raw body bytes; after ~1MB close the sender mid-delivery.
        self.sender1.stream(BINARY(self.long_data))
        self.sent_stream += len(self.long_data)
        if self.sent_stream >= 1000000:
            self.streaming = False
            self.sender1.close()
            self.send()
    def send(self):
        if self.streaming:
            self.stream()
            return
        next_op = self.program.pop(0) if len(self.program) > 0 else None
        if next_op == 'Send_Short_1':
            m = Message(body="%s" % next_op)
            self.sender1.send(m)
        elif next_op == 'Send_Long_Truncated':
            for i in range(100):
                self.long_data += self.data
            self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
            self.streaming = True
            self.stream()
        elif next_op == 'Send_Short_2':
            m = Message(body="2")
            for i in range(10):
                self.sender2.send(m)
            m = Message(body="Send_Short_2")
            self.sender2.send(m)
            self.sender2.close()
        elif next_op == 'Send_Short_3':
            m = Message(body="3")
            for i in range(10):
                self.sender3.send(m)
            m = Message(body="%s" % next_op)
            self.sender3.send(m)
            self.sender_conn.close()
    def on_sendable(self, event):
        self.send()
    def on_link_opened(self, event):
        # Count receiver attaches; create the senders only after both
        # multicast receivers are up so neither misses the fan-out.
        if event.receiver == self.receiver1:
            self.r_attach_count += 1
        if event.receiver == self.receiver2:
            self.r_attach_count += 1
        if self.r_attach_count == 2 and not self.senders_created:
            self.senders_created = True
            self.sender1 = event.container.create_sender(self.sender_conn,
                                                         self.address,
                                                         name="S1")
            self.sender2 = event.container.create_sender(self.sender_conn,
                                                         self.address,
                                                         name="S2")
            self.sender3 = event.container.create_sender(self.sender_conn,
                                                         self.address,
                                                         name="S3")
    def on_message(self, event):
        m = event.message
        # Record each body under the receiver that saw it.
        if event.receiver == self.receiver1:
            self.result1.append(m.body)
        elif event.receiver == self.receiver2:
            self.result2.append(m.body)
        if m.body == 'Send_Short_1':
            self.send()
        elif m.body == 'Send_Short_2':
            self.send()
        elif m.body == 'Send_Short_3':
            # Wait until both receivers reach the final marker.
            self.completions += 1
            if self.completions == 2:
                if self.result1 != self.expected_result or self.result2 != self.expected_result:
                    self.error = "Expected: %r, Actuals: %r, %r" % (self.expected_result, self.result1, self.result2)
                self.receiver1_conn.close()
                self.receiver2_conn.close()
                self.timer.cancel()
    def on_aborted(self, event):
        if event.receiver == self.receiver1:
            self.result1.append('Aborted_Delivery')
        elif event.receiver == self.receiver2:
            self.result2.append('Aborted_Delivery')
        self.send()
    def run(self):
        Container(self).run()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(main_module())
| 39.937807
| 132
| 0.571101
|
4a05b662f762dd551e4e593ec650d32be1eb2966
| 226
|
py
|
Python
|
src/examples_in_my_book/general_problems/modules/passing_cmd_line_args.py
|
lucidrohit/Over-100-Exercises-Python-and-Algorithms
|
62345c7d7c9cc2269f240d134189645fc96c3e80
|
[
"MIT"
] | 2
|
2022-01-07T11:46:32.000Z
|
2022-02-24T08:44:31.000Z
|
src/examples_in_my_book/general_problems/modules/passing_cmd_line_args.py
|
lucidrohit/Over-100-Exercises-Python-and-Algorithms
|
62345c7d7c9cc2269f240d134189645fc96c3e80
|
[
"MIT"
] | null | null | null |
src/examples_in_my_book/general_problems/modules/passing_cmd_line_args.py
|
lucidrohit/Over-100-Exercises-Python-and-Algorithms
|
62345c7d7c9cc2269f240d134189645fc96c3e80
|
[
"MIT"
] | 1
|
2021-10-01T15:35:05.000Z
|
2021-10-01T15:35:05.000Z
|
#!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
import sys
def main():
    """Print each command line argument, one per line.

    Reads sys.argv[1:] (everything after the script name) and writes each
    argument to stdout.
    """
    for arg in sys.argv[1:]:
        # Fixed: the original used Python 2 statement syntax `print arg`,
        # which is a SyntaxError under the file's python3 shebang.
        print(arg)
# Run only when executed as a script.
if __name__ == "__main__":
    main()
| 11.894737
| 40
| 0.610619
|
4a05b795ba81b97cf61d2fc4db0e399695da8ad9
| 586
|
py
|
Python
|
docs/support/pcontracts_example_2.py
|
pmacosta/putil
|
416cea52df8221981727e25d133e9b4e3f464798
|
[
"MIT"
] | 6
|
2015-12-15T04:09:08.000Z
|
2020-02-21T01:40:57.000Z
|
docs/support/pcontracts_example_2.py
|
pmacosta/putil
|
416cea52df8221981727e25d133e9b4e3f464798
|
[
"MIT"
] | null | null | null |
docs/support/pcontracts_example_2.py
|
pmacosta/putil
|
416cea52df8221981727e25d133e9b4e3f464798
|
[
"MIT"
] | 2
|
2016-01-21T23:29:17.000Z
|
2020-02-21T01:41:05.000Z
|
# pcontracts_example_2.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,W0702
import putil.pcontracts
@putil.pcontracts.new_contract('Only one exception')
def custom_contract_a(name):
    """Contract with a single exception: reject falsy (empty) names."""
    # get_exdesc() returns the message registered via the decorator argument.
    msg = putil.pcontracts.get_exdesc()
    if not name:
        raise ValueError(msg)
@putil.pcontracts.new_contract(ex1='Empty name', ex2='Invalid name')
def custom_contract_b(name):
    """Contract with two exceptions: empty names (ex1), names with '[' (ex2)."""
    # get_exdesc() returns the dict of messages registered via the decorator.
    exdesc = putil.pcontracts.get_exdesc()
    if not name:
        raise ValueError(exdesc['ex1'])
    if '[' in name:
        raise ValueError(exdesc['ex2'])
| 27.904762
| 68
| 0.706485
|
4a05b9430077b9f6ee381bef5379bce035ef86b8
| 8,480
|
py
|
Python
|
run_meson_command_tests.py
|
keith-packard/meson
|
ee2e06dafae756056f82bf1994c2a52c05d137ee
|
[
"Apache-2.0"
] | null | null | null |
run_meson_command_tests.py
|
keith-packard/meson
|
ee2e06dafae756056f82bf1994c2a52c05d137ee
|
[
"Apache-2.0"
] | null | null | null |
run_meson_command_tests.py
|
keith-packard/meson
|
ee2e06dafae756056f82bf1994c2a52c05d137ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2018 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import subprocess
import zipapp
from pathlib import Path
from mesonbuild.mesonlib import windows_proof_rmtree, python_command, is_windows
# Find the meson.py adjacent to us
meson_py = Path(__file__).resolve().parent / 'meson.py'
if not meson_py.is_file():
raise RuntimeError("meson.py not found: test must only run from git")
def get_pypath():
    """Return the pure-Python site dir relative to the prefix, '/'-separated."""
    import sysconfig
    raw = sysconfig.get_path('purelib', vars={'base': ''})
    # Normalise to forward slashes, then drop surrounding separators.
    return Path(raw).as_posix().strip('/')
def get_pybindir():
    """Return the scripts dir ('Scripts' on Windows, 'bin' elsewhere), no prefix."""
    import sysconfig
    scripts = sysconfig.get_path('scripts', vars={'base': ''})
    # Strip both separator styles so the result is a bare directory name.
    return scripts.strip('\\/')
class CommandTests(unittest.TestCase):
'''
Test that running meson in various ways works as expected by checking the
value of mesonlib.meson_command that was set during configuration.
'''
    def setUp(self):
        """Create a temp sandbox and enable meson's command-test reporting."""
        super().setUp()
        # Save environment and cwd so tearDown can restore them exactly.
        self.orig_env = os.environ.copy()
        self.orig_dir = os.getcwd()
        # Makes meson print its resolved meson_command on the first line.
        os.environ['MESON_COMMAND_TESTS'] = '1'
        self.tmpdir = Path(tempfile.mkdtemp()).resolve()
        self.src_root = Path(__file__).resolve().parent
        self.testdir = str(self.src_root / 'test cases/common/1 trivial')
        self.meson_args = ['--backend=ninja']
    def tearDown(self):
        """Remove the sandbox and restore the original environment and cwd."""
        try:
            windows_proof_rmtree(str(self.tmpdir))
        except FileNotFoundError:
            # A test may already have removed its own tmpdir.
            pass
        os.environ.clear()
        os.environ.update(self.orig_env)
        os.chdir(str(self.orig_dir))
        super().tearDown()
    def _run(self, command, workdir=None):
        '''
        Run a command while printing the stdout and stderr to stdout,
        and also return a copy of it.

        Raises CalledProcessError on a non-zero exit status.
        '''
        # If this call hangs CI will just abort. It is very hard to distinguish
        # between CI issue and test bug in that case. Set timeout and fail loud
        # instead.
        p = subprocess.run(command, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT, env=os.environ.copy(),
                           universal_newlines=True, cwd=workdir, timeout=60 * 5)
        print(p.stdout)
        if p.returncode != 0:
            raise subprocess.CalledProcessError(p.returncode, command)
        return p.stdout
    def assertMesonCommandIs(self, line, cmd):
        # 'line' is the first output line produced under MESON_COMMAND_TESTS;
        # it must report exactly the resolved command list 'cmd'.
        self.assertTrue(line.startswith('meson_command '), msg=line)
        self.assertEqual(line, 'meson_command is {!r}'.format(cmd))
    def test_meson_uninstalled(self):
        """Running meson from a git checkout (absolute path, relative path,
        or a PATH symlink) must resolve to `python + meson.py`."""
        # This is what the meson command must be for all these cases
        resolved_meson_command = python_command + [str(self.src_root / 'meson.py')]
        # Absolute path to meson.py
        os.chdir('/')
        builddir = str(self.tmpdir / 'build1')
        meson_py = str(self.src_root / 'meson.py')
        meson_setup = [meson_py, 'setup']
        meson_command = python_command + meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
        # ./meson.py
        os.chdir(str(self.src_root))
        builddir = str(self.tmpdir / 'build2')
        meson_py = './meson.py'
        meson_setup = [meson_py, 'setup']
        meson_command = python_command + meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
        # Symlink to meson.py
        if is_windows():
            # Symlinks require admin perms
            return
        os.chdir(str(self.src_root))
        builddir = str(self.tmpdir / 'build3')
        # Create a symlink to meson.py in bindir, and add it to PATH
        bindir = (self.tmpdir / 'bin')
        bindir.mkdir()
        (bindir / 'meson').symlink_to(self.src_root / 'meson.py')
        os.environ['PATH'] = str(bindir) + os.pathsep + os.environ['PATH']
        # See if it works!
        meson_py = 'meson'
        meson_setup = [meson_py, 'setup']
        meson_command = meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
    def test_meson_installed(self):
        """Install meson into a temporary prefix, verify the installed file
        set, then run it four ways ('meson' on PATH, absolute path,
        `python -m mesonbuild.mesonmain`, and a shell wrapper around
        meson.real) checking the resolved meson_command each time.
        """
        # Install meson
        prefix = self.tmpdir / 'prefix'
        pylibdir = prefix / get_pypath()
        bindir = prefix / get_pybindir()
        pylibdir.mkdir(parents=True)
        os.environ['PYTHONPATH'] = str(pylibdir)
        os.environ['PATH'] = str(bindir) + os.pathsep + os.environ['PATH']
        self._run(python_command + ['setup.py', 'install', '--prefix', str(prefix)])
        # Check that all the files were installed correctly
        self.assertTrue(bindir.is_dir())
        self.assertTrue(pylibdir.is_dir())
        from setup import packages
        # Extract list of expected python module files
        expect = set()
        for pkg in packages:
            expect.update([p.as_posix() for p in Path(pkg.replace('.', '/')).glob('*.py')])
        # Check what was installed, only count files that are inside 'mesonbuild'
        have = set()
        for p in Path(pylibdir).glob('**/*.py'):
            s = p.as_posix()
            if 'mesonbuild' not in s:
                continue
            # Compare path suffixes starting at 'mesonbuild' so the prefix
            # location doesn't matter
            have.add(s[s.rfind('mesonbuild'):])
        self.assertEqual(have, expect)
        # Run `meson`
        os.chdir('/')
        resolved_meson_command = [str(bindir / 'meson')]
        builddir = str(self.tmpdir / 'build1')
        meson_setup = ['meson', 'setup']
        meson_command = meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
        # Run `/path/to/meson`
        builddir = str(self.tmpdir / 'build2')
        meson_setup = [str(bindir / 'meson'), 'setup']
        meson_command = meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
        # Run `python3 -m mesonbuild.mesonmain`
        resolved_meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
        builddir = str(self.tmpdir / 'build3')
        meson_setup = ['-m', 'mesonbuild.mesonmain', 'setup']
        meson_command = python_command + meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
        if is_windows():
            # Next part requires a shell
            return
        # `meson` is a wrapper to `meson.real`
        resolved_meson_command = [str(bindir / 'meson.real')]
        builddir = str(self.tmpdir / 'build4')
        (bindir / 'meson').rename(bindir / 'meson.real')
        wrapper = (bindir / 'meson')
        with open(wrapper, 'w') as f:
            f.write('#!/bin/sh\n\nmeson.real "$@"')
        wrapper.chmod(0o755)
        meson_setup = [str(wrapper), 'setup']
        meson_command = meson_setup + self.meson_args
        stdo = self._run(meson_command + [self.testdir, builddir])
        self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
    def test_meson_exe_windows(self):
        """Placeholder for testing the Windows meson.exe entry point."""
        raise unittest.SkipTest('NOT IMPLEMENTED')
def test_meson_zipapp(self):
if is_windows():
raise unittest.SkipTest('NOT IMPLEMENTED')
source = Path(__file__).resolve().parent.as_posix()
target = self.tmpdir / 'meson.pyz'
zipapp.create_archive(source=source, target=target, interpreter=python_command[0], main=None)
self._run([target.as_posix(), '--help'])
if __name__ == '__main__':
    # buffer=True captures stdout/stderr and only shows it for failing tests
    unittest.main(buffer=True)
| 42.4
| 101
| 0.635495
|
4a05ba38bfaac58e68da68906e162115e1fdfeb6
| 2,120
|
py
|
Python
|
ws/handler/appliance/light/zone/__init__.py
|
fabaff/automate-ws
|
a9442f287692787e3f253e1ff23758bec8f3902e
|
[
"MIT"
] | null | null | null |
ws/handler/appliance/light/zone/__init__.py
|
fabaff/automate-ws
|
a9442f287692787e3f253e1ff23758bec8f3902e
|
[
"MIT"
] | 1
|
2021-12-21T11:34:47.000Z
|
2021-12-21T11:34:47.000Z
|
ws/handler/appliance/light/zone/__init__.py
|
fabaff/automate-ws
|
a9442f287692787e3f253e1ff23758bec8f3902e
|
[
"MIT"
] | 1
|
2021-12-21T10:10:13.000Z
|
2021-12-21T10:10:13.000Z
|
import home
from ws.handler.appliance.light import Handler as Parent
class Handler(Parent):
    """Web handler for zone light appliances.

    Extends the base light handler with the *alarmed* on/off states a zone
    light can enter, in addition to on/off and forced on/off.
    """

    KLASS = home.appliance.light.zone.Appliance
    LABEL_ALARMED_ON = "Alarmed On"
    LABEL_ALARMED_OFF = "Alarmed Off"
    # NOTE(review): both icons are bell glyphs differing only in the style
    # prefix ("fas" vs "fa") — confirm this is intentional.
    ICON_ALARMED_ON = "fas fa-bell"
    ICON_ALARMED_OFF = "fa fa-bell"

    def get_label(self, appliance):
        """Return the display label matching the appliance's current state."""
        if appliance.state.VALUE == home.appliance.light.zone.state.on.State.VALUE:
            return self.LABEL_ON
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.alarmed.off.State.VALUE
        ):
            return self.LABEL_ALARMED_OFF
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.alarmed.on.State.VALUE
        ):
            return self.LABEL_ALARMED_ON
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.forced.on.State.VALUE
        ):
            return self.LABEL_FORCED_ON
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.forced.off.State.VALUE
        ):
            return self.LABEL_FORCED_OFF
        else:
            return self.LABEL_OFF

    def get_icon(self, appliance):
        """Return the icon CSS class matching the appliance's current state.

        BUGFIX: the alarmed icons were swapped relative to get_label() — the
        alarmed.off state returned ICON_ALARMED_ON and alarmed.on returned
        ICON_ALARMED_OFF. Now each state maps to its own icon constant.
        """
        if appliance.state.VALUE == home.appliance.light.zone.state.on.State.VALUE:
            return self.ICON_ON
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.alarmed.off.State.VALUE
        ):
            return self.ICON_ALARMED_OFF
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.alarmed.on.State.VALUE
        ):
            return self.ICON_ALARMED_ON
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.forced.on.State.VALUE
        ):
            return self.ICON_FORCED_ON
        elif (
            appliance.state.VALUE
            == home.appliance.light.zone.state.forced.off.State.VALUE
        ):
            return self.ICON_FORCED_OFF
        else:
            return self.ICON_OFF
from ws.handler.appliance.light.zone import home_event_presence
| 31.641791
| 83
| 0.600472
|
4a05bb7142a7163fd0a9dd5bd4698fd20ce3efef
| 1,065
|
py
|
Python
|
sdk/test/test_location_other_location_category.py
|
bs-yapily/yapily-sdk-python
|
0bba45e351b674eb655425a51190f539c4e9896f
|
[
"MIT"
] | null | null | null |
sdk/test/test_location_other_location_category.py
|
bs-yapily/yapily-sdk-python
|
0bba45e351b674eb655425a51190f539c4e9896f
|
[
"MIT"
] | null | null | null |
sdk/test/test_location_other_location_category.py
|
bs-yapily/yapily-sdk-python
|
0bba45e351b674eb655425a51190f539c4e9896f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import yapily
from yapily.models.location_other_location_category import LocationOtherLocationCategory # noqa: E501
from yapily.rest import ApiException
class TestLocationOtherLocationCategory(unittest.TestCase):
    """Unit-test stubs for the LocationOtherLocationCategory model."""

    def setUp(self):
        """No fixtures are required for these stubs yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testLocationOtherLocationCategory(self):
        """Test LocationOtherLocationCategory.

        FIXME: construct the object with mandatory attributes using example
        values, e.g.::

            model = yapily.models.location_other_location_category.LocationOtherLocationCategory()
        """
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 25.97561
| 158
| 0.738028
|
4a05bbcfd6845171141d1925e2a86a96dcd0feb2
| 3,408
|
py
|
Python
|
python/curagridder/test_wst.py
|
astronomical-data-processing/curig
|
4d0e944b8c67e99106e56decda00c9c424002625
|
[
"MIT"
] | null | null | null |
python/curagridder/test_wst.py
|
astronomical-data-processing/curig
|
4d0e944b8c67e99106e56decda00c9c424002625
|
[
"MIT"
] | 1
|
2022-03-14T02:06:55.000Z
|
2022-03-14T08:50:30.000Z
|
python/curagridder/test_wst.py
|
astronomical-data-processing/curig
|
4d0e944b8c67e99106e56decda00c9c424002625
|
[
"MIT"
] | 1
|
2022-03-13T10:42:03.000Z
|
2022-03-13T10:42:03.000Z
|
from curig import ms2dirty, dirty2ms
import numpy as np
import time
import pytest
from numpy.testing import assert_, assert_allclose, assert_array_almost_equal
pmp = pytest.mark.parametrize
# Some of the functions below are adapted from NIFTY.
# Run these tests with: pytest python/curagridder/test_wst.py (after curig is installed)
def _l2error(a, b):
return np.sqrt(np.sum(np.abs(a-b)**2)/np.sum(np.abs(a)**2))
def explicit_gridder(uvw, freq, ms, nxdirty, nydirty, xpixsize, ypixsize):
speedoflight = 299792458.
x, y = np.meshgrid(*[-ss/2 + np.arange(ss) for ss in [nxdirty, nydirty]],
indexing='ij')
x *= xpixsize
y *= ypixsize
res = np.zeros((nxdirty, nydirty))
eps = x**2+y**2
nm1 = -eps/(np.sqrt(1.-eps)+1.)
n = nm1+1
for row in range(ms.shape[0]):
for chan in range(ms.shape[1]):
phase = (freq[chan]/speedoflight *
(x*uvw[row, 0] + y*uvw[row, 1] - uvw[row, 2]*nm1))
res += (ms[row, chan]*np.exp(2j*np.pi*phase)).real
return res/n
@pmp("nrow", (2, 27, 100))
@pmp("nchan", (1, ))
@pmp("nxdirty", (30, 128, 16))
@pmp("nydirty", (128, 250, 64))
@pmp("fov",(1, 10, 20))
@pmp("epsilon", (2e-1, 5e-3, 5e-5, 5e-7, 5e-12))
@pmp("use_wgt", (False,True))
def test_against_wdft(nrow, nchan, nxdirty, nydirty, fov, epsilon, use_wgt):
    """Check CURIG's ms2dirty/dirty2ms pair for adjointness, and (for small
    problems) compare ms2dirty against the explicit reference DFT gridder.
    """
    print("\n\nTesting imaging with {} rows and {} "
          "frequency channels".format(nrow, nchan))
    print("Dirty image has {}x{} pixels, "
          "FOV={} degrees".format(nxdirty, nydirty, fov))
    print("Requested accuracy: {}".format(epsilon))
    # Pixel sizes in radians derived from the field of view
    xpixsize = fov*np.pi/180/nxdirty
    ypixsize = fov*np.pi/180/nydirty
    speedoflight = 299792458.
    np.random.seed(42)
    f0 = 1e9
    freq = f0 + np.arange(nchan)*(f0/nchan)
    # Random baselines scaled by c/f0 (the wavelength at f0)
    uvw = (np.random.rand(nrow, 3)-0.5)/(f0/speedoflight)
    ms = np.random.rand(nrow, nchan)-0.5 + 1j*(np.random.rand(nrow, nchan)-0.5)
    dirty = np.random.rand(nxdirty, nydirty)-0.5
    dirty2 = np.zeros((nxdirty,nydirty),dtype=np.float64)
    wgt = np.random.rand(nrow, nchan) if use_wgt else None
    print("begin")
    start = time.time()
    dirty2 = ms2dirty(uvw,freq, ms, wgt, nxdirty, nydirty, xpixsize, ypixsize, 0, 0, epsilon, True, 4)
    end = time.time()
    print("The elapsed time {} (sec)".format(end-start))
    print("Execution finished")
    ms2 = np.zeros((nrow,1),dtype=np.complex128)
    ms2 = dirty2ms(uvw,freq, dirty, wgt, xpixsize, ypixsize, 0, 0, epsilon, True, 4)
    # ms2 = np.reshape(ms2,[nrow,1])
    # Adjointness: <ms, dirty2ms(dirty)> must equal <ms2dirty(ms), dirty>
    print("\nadjointness testing....")
    print(np.vdot(ms, ms2).real)
    print(np.vdot(dirty2, dirty).real)
    assert_allclose(np.vdot(ms, ms2).real, np.vdot(dirty2, dirty), rtol=1e-12)
    if nrow<1e4:
        # Only feasible for small problems: the explicit gridder is
        # O(nrow * nchan * npixels)
        print("Vertification begin")
        truth = explicit_gridder(uvw, freq, ms, nxdirty, nydirty, xpixsize, ypixsize)
        print("L2 error between explicit transform and CURIG:",
              _l2error(truth, dirty2))
# the first test will execute 2 times to warp up the GPU
# for i in range(10):
# test_against_wdft(1000, 1, 512, 512, 2, 1e-12)
# test_against_wdft(1000, 1, 512, 512, 2, 1e-12)
# test_against_wdft(10000, 1, 512, 512, 60, 1e-12)
# test_against_wdft(10000, 1, 1024, 1024, 2, 1e-12)
# test_against_wdft(100000000, 1, 1024, 1024, 2, 1e-12)
# test_against_wdft(100000000, 1, 2048, 2048, 2, 1e-12)
# test_against_wdft(100000000, 1, 4096, 4096, 2, 1e-12)
| 34.77551
| 102
| 0.635563
|
4a05bc38e0d858c16220b68d3227a322e3bdafe2
| 1,125
|
py
|
Python
|
mayan/apps/ocr/tests/test_views.py
|
nadwiabd/insight_edms
|
90a09d7ca77cb111c791e307b55a603e82042dfe
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/ocr/tests/test_views.py
|
nadwiabd/insight_edms
|
90a09d7ca77cb111c791e307b55a603e82042dfe
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/ocr/tests/test_views.py
|
nadwiabd/insight_edms
|
90a09d7ca77cb111c791e307b55a603e82042dfe
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.test import override_settings
from documents.tests.test_views import GenericDocumentViewTestCase
from ..permissions import permission_ocr_content_view
@override_settings(OCR_AUTO_OCR=True)
class OCRViewsTestCase(GenericDocumentViewTestCase):
    """Permission tests for the document OCR content view."""

    # PyOCR leaks file descriptors in get_available_languages and
    # image_to_string; disable the descriptor leak test until fixed upstream.
    _skip_file_descriptor_test = True

    def setUp(self):
        # Log the test user in; the tests below exercise permission checks,
        # not authentication.
        super(OCRViewsTestCase, self).setUp()
        self.login_user()

    def _document_content_view(self):
        # Helper: GET the OCR content view for the fixture document.
        return self.get(
            'ocr:document_content', args=(self.document.pk,)
        )

    def test_document_content_view_no_permissions(self):
        # Without permission_ocr_content_view the view must return 403.
        response = self._document_content_view()
        self.assertEqual(response.status_code, 403)

    def test_document_content_view_with_permission(self):
        # With the permission granted the OCR'd text must be rendered.
        self.grant(permission_ocr_content_view)
        response = self._document_content_view()
        self.assertContains(
            response, 'Mayan EDMS Documentation', status_code=200
        )
| 29.605263
| 76
| 0.743111
|
4a05be812218724a0c8721b52008a9c3af374a15
| 6,839
|
py
|
Python
|
app/main/session_interface.py
|
tassaron2/flask-template
|
a4705ee82556dba17afcbebf1dbd3d27e6c6b366
|
[
"MIT"
] | 1
|
2021-01-11T00:02:53.000Z
|
2021-01-11T00:02:53.000Z
|
app/main/session_interface.py
|
tassaron/flask-template
|
a4705ee82556dba17afcbebf1dbd3d27e6c6b366
|
[
"MIT"
] | null | null | null |
app/main/session_interface.py
|
tassaron/flask-template
|
a4705ee82556dba17afcbebf1dbd3d27e6c6b366
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from uuid import uuid4
import pickle
from flask import current_app
from flask.sessions import SessionInterface
from flask.sessions import SessionMixin
from itsdangerous import Signer, BadSignature, want_bytes
from werkzeug.datastructures import CallbackDict
class ServerSideSession(CallbackDict, SessionMixin):
    """
    Actual session object returned by the session interface.
    Original code from Flask-Session.
    """

    def __init__(self, initial=None, sid=None, permanent=None):
        # CallbackDict calls on_update (with the dict instance) whenever the
        # session contents change; flag the session dirty so it gets saved.
        def on_update(self):
            self.modified = True

        CallbackDict.__init__(self, initial, on_update)
        self.sid = sid  # session id (uuid4 hex, unsigned)
        if permanent:
            self.permanent = permanent
        # A freshly constructed session starts clean, even when seeded with
        # initial data (the CallbackDict.__init__ above set it True).
        self.modified = False
class TassaronSessionInterface(SessionInterface):
    """
    Server-side session interface adapted from Flask-Session.

    Session payloads are pickled into a SQL table (one row per session); the
    cookie only carries the signed session id.
    """

    # Serializer used for the session payload. NOTE: pickle must never be fed
    # untrusted data; here it only round-trips rows this app wrote itself.
    serializer = pickle

    def __init__(self, app, db):
        """
        Original code from Flask-Session.
        Adapted from a pull request: https://github.com/fengsp/flask-session/pull/12
        Table schema modified by tassaron.
        The user_id column is used to restore a user's server-side session
        upon login.
        """
        self.app = app
        self.db = db
        self.key_prefix = "session:"
        self.permanent = True
        table = "sessions"
        if table not in self.db.metadata:
            # Only create the Session model if it doesn't already exist.
            # Fixes the SQLAlchemy "extend_existing must be true" exception
            # during tests.
            class Session(self.db.Model):
                __tablename__ = table
                id = self.db.Column(self.db.Integer, primary_key=True)
                # Prefixed store key: "session:<sid>"
                session_id = self.db.Column(self.db.String(256), unique=True)
                # Pickled session dict
                data = self.db.Column(self.db.Text)
                expiry = self.db.Column(self.db.DateTime)
                # Optional owner; lets a user's session be restored on login
                user_id = self.db.Column(
                    self.db.Integer, self.db.ForeignKey("user.id"), nullable=True
                )

                def __init__(self, session_id, data, expiry):
                    self.session_id = session_id
                    self.data = data
                    self.expiry = expiry

                def __repr__(self):
                    return "<Session data %s>" % self.data

            self.sql_session_model = db.session_ext_session_model = Session
        else:
            self.sql_session_model = db.session_ext_session_model

    @staticmethod
    def _generate_sid():
        """Return a fresh random session id (uuid4 hex)."""
        return uuid4().hex

    @staticmethod
    def _get_signer(app):
        """Return the itsdangerous Signer used for the session-id cookie."""
        return Signer(app.secret_key, salt=f"{app.unique_name}_session", key_derivation="hmac")

    def get_user_session(self, id):
        """Given a user id, return None or tuple of (session_id, unpickled session data)"""
        session = self.sql_session_model.query.filter_by(user_id=id).first()
        if session is not None:
            return (
                # Strip the "session:" store prefix back off
                session.session_id[len(self.key_prefix) :],
                self.serializer.loads(want_bytes(session.data)),
            )

    def set_user_session(self, sid, uid):
        """Find existing session and assign a user_id to it. Can also set to None"""
        store_id = self.key_prefix + sid
        existing_session = self.sql_session_model.query.filter_by(
            session_id=store_id
        ).first()
        if existing_session is None:
            current_app.logger.error(f"The store_id {store_id} isn't valid")
        elif (
            existing_session.user_id is not None
            and uid is not None
            and existing_session.user_id != uid
        ):
            # Refuse to silently reassign a session between two users
            current_app.logger.error(
                "Session belongs to a different user. Shouldn't happen"
            )
        else:
            existing_session.user_id = uid
            self.db.session.add(existing_session)
            self.db.session.commit()

    def open_session(self, app, request):
        """Original code from Flask-Session. Modified by tassaron.

        Returns a fresh ServerSideSession when the cookie is missing, badly
        signed, expired, or unreadable; otherwise the stored session.
        """
        sid = request.cookies.get(app.session_cookie_name)
        if not sid:
            sid = self._generate_sid()
            return ServerSideSession(sid=sid, permanent=self.permanent)
        signer = self._get_signer(app)
        if signer is None:
            return None
        try:
            sid_as_bytes = signer.unsign(sid)
            sid = sid_as_bytes.decode()
        except BadSignature:
            # Tampered or stale cookie: start over with a new session id
            sid = self._generate_sid()
            return ServerSideSession(sid=sid, permanent=self.permanent)
        store_id = self.key_prefix + sid
        saved_session = self.sql_session_model.query.filter_by(
            session_id=store_id
        ).first()
        if saved_session and saved_session.expiry <= datetime.utcnow():
            # Delete expired session
            self.db.session.delete(saved_session)
            self.db.session.commit()
            saved_session = None
        if saved_session:
            try:
                val = saved_session.data
                data = self.serializer.loads(want_bytes(val))
                return ServerSideSession(data, sid=sid)
            except Exception:
                # BUGFIX: was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt. Corrupt or unpicklable
                # session data still falls back to a fresh session.
                return ServerSideSession(sid=sid, permanent=self.permanent)
        return ServerSideSession(sid=sid, permanent=self.permanent)

    def save_session(self, app, session, response):
        """Original code from Flask-Session. Modified by tassaron.

        Persists the pickled session dict and sets the signed cookie, or —
        when the session dict was emptied this request — deletes both.
        """
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)
        store_id = self.key_prefix + session.sid
        saved_session = self.sql_session_model.query.filter_by(
            session_id=store_id
        ).first()
        if not session:
            # Empty session: drop the stored row and the cookie, but only if
            # the dict was actually modified during this request.
            if session.modified:
                if saved_session:
                    self.db.session.delete(saved_session)
                    self.db.session.commit()
                response.delete_cookie(
                    app.session_cookie_name, domain=domain, path=path
                )
            return
        httponly = self.get_cookie_httponly(app)
        secure = self.get_cookie_secure(app)
        expires = self.get_expiration_time(app, session)
        val = self.serializer.dumps(dict(session))
        if saved_session:
            saved_session.data = val
            saved_session.expiry = expires
            self.db.session.commit()
        else:
            new_session = self.sql_session_model(store_id, val, expires)
            self.db.session.add(new_session)
            self.db.session.commit()
        session_id = self._get_signer(app).sign(want_bytes(session.sid))
        response.set_cookie(
            app.session_cookie_name,
            session_id,
            expires=expires,
            httponly=httponly,
            domain=domain,
            path=path,
            secure=secure,
        )
| 36.185185
| 95
| 0.605059
|
4a05bf277c760f6e9d45128503c9b9cc2540680c
| 451
|
py
|
Python
|
viewlet/api.py
|
andreif/django-viewlet
|
7931e899e064869a13fc6cd16ae7bb205b740605
|
[
"MIT"
] | null | null | null |
viewlet/api.py
|
andreif/django-viewlet
|
7931e899e064869a13fc6cd16ae7bb205b740605
|
[
"MIT"
] | null | null | null |
viewlet/api.py
|
andreif/django-viewlet
|
7931e899e064869a13fc6cd16ae7bb205b740605
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals
from viewlet.library import library
__all__ = ['viewlet', 'get', 'call', 'refresh']
# The decorator
viewlet = library._decorator
def get(viewlet_name):
    """Look up and return the viewlet registered under *viewlet_name*."""
    viewlet_obj = library.get(viewlet_name)
    return viewlet_obj
def call(viewlet_name, context, *args, **kwargs):
    """Call the named viewlet with *context* (an empty dict when falsy)."""
    ctx = context if context else {}
    return get(viewlet_name).call(ctx, *args, **kwargs)
def refresh(name, *args, **kwargs):
    """Invoke refresh() on the named viewlet with the given arguments."""
    viewlet_obj = get(name)
    return viewlet_obj.refresh(*args, **kwargs)
| 20.5
| 65
| 0.711752
|
4a05c11e257e8caaf481886d9a62aed8d1c01d1c
| 1,311
|
py
|
Python
|
deepracer_follow_the_leader_ws/build/async_web_server_cpp/test/tests_.py
|
amitjain-3/working_add
|
ddd3b10d854477e86bf7a8558b3d447ec03a8a5f
|
[
"Apache-2.0"
] | 1
|
2022-03-11T20:15:27.000Z
|
2022-03-11T20:15:27.000Z
|
deepracer_follow_the_leader_ws/build/async_web_server_cpp/test/tests_.py
|
amitjain-3/working_add
|
ddd3b10d854477e86bf7a8558b3d447ec03a8a5f
|
[
"Apache-2.0"
] | null | null | null |
deepracer_follow_the_leader_ws/build/async_web_server_cpp/test/tests_.py
|
amitjain-3/working_add
|
ddd3b10d854477e86bf7a8558b3d447ec03a8a5f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
from launch import LaunchDescription
from launch import LaunchService
from launch.actions import ExecuteProcess
from launch_testing.legacy import LaunchTestService
def main(argv=None):
    """Launch the C++ test web server plus the two Python client test scripts
    via launch_testing, and return the combined test result code.

    BUGFIX: the previous default ``argv=sys.argv[1:]`` was evaluated once at
    import time; it is now read at call time (None sentinel), which is
    backward compatible for all callers.
    """
    if argv is None:
        argv = sys.argv[1:]
    ld = LaunchDescription()
    # Web server binary under test
    web_server_action = ExecuteProcess(
        cmd=['/root/deepracer_ws/aws-deepracer-follow-the-leader-sample-project/deepracer_follow_the_leader_ws/build/async_web_server_cpp/test/test_web_server'])
    ld.add_action(web_server_action)
    test_directory = '/root/deepracer_ws/aws-deepracer-follow-the-leader-sample-project/deepracer_follow_the_leader_ws/async_web_server_cpp/test'
    # Client-side test scripts, run unbuffered (-u) so output appears live
    test1_action = ExecuteProcess(
        cmd=[
            sys.executable, '-u',
            os.path.join(test_directory, 'simple_http_requests_test.py')],
        name='simple_http_requests_test',
    )
    test2_action = ExecuteProcess(
        cmd=[
            sys.executable, '-u',
            os.path.join(test_directory, 'websocket_test.py')],
        name='websocket_test',
    )
    lts = LaunchTestService()
    lts.add_test_action(ld, test1_action)
    lts.add_test_action(ld, test2_action)
    ls = LaunchService(argv=argv)
    ls.include_launch_description(ld)
    return lts.run(ls)
if __name__ == '__main__':
    # Propagate the launch test result as the process exit code.
    sys.exit(main())
| 27.893617
| 161
| 0.715484
|
4a05c1f6c2f5b1a7a3adbb89872e6719beebfd5a
| 5,348
|
py
|
Python
|
training/training/report/new_joinees_details/new_joinees_details.py
|
vhrspvl/Minda-Training
|
6d54d44b718506d6fe460abe5796bdee9e74d0ad
|
[
"MIT"
] | null | null | null |
training/training/report/new_joinees_details/new_joinees_details.py
|
vhrspvl/Minda-Training
|
6d54d44b718506d6fe460abe5796bdee9e74d0ad
|
[
"MIT"
] | null | null | null |
training/training/report/new_joinees_details/new_joinees_details.py
|
vhrspvl/Minda-Training
|
6d54d44b718506d6fe460abe5796bdee9e74d0ad
|
[
"MIT"
] | 1
|
2020-02-14T12:56:53.000Z
|
2020-02-14T12:56:53.000Z
|
# Copyright (c) 2013, Minda Sai Pvt LTd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from calendar import monthrange
from datetime import datetime,timedelta,date
from frappe.utils import add_days
from dateutil.rrule import *
def execute(filters=None):
    """Build the New Joinees Details report.

    Returns (columns, data); each data row carries exactly one entry per
    column returned by get_columns().
    """
    if not filters:
        filters = {}
    columns = get_columns()
    data = []
    employee = employee_details(filters)
    # Departments exempt from the shop-floor tests (color blindness,
    # practical knowledge, induction)
    exempt_departments = ("HR", "Accounts", "Finance & Accounts", "Purchase")
    for e in employee:
        row = [e.biometric_id, e.employee_name, e.date_of_joining]
        att = att_details(e.biometric_id)
        ita = ita_details(e.biometric_id)
        itm = itm_details(e.biometric_id)
        st = st_details(e.biometric_id)
        cb = cb_details(e.biometric_id)
        pk = pk_details(e.biometric_id)
        # Line / shift may still be unassigned for fresh joinees
        row += [e.line if e.line is not None else "Pending"]
        row += [e.shift if e.shift in ("A", "B", "C", "G") else "Pending"]
        # att is truthy when attendance exists in the last three days
        row += ["Active" if att == 1 else "Continuous Absent"]
        exempt = e.department in exempt_departments
        # Color blindness test (one column)
        if exempt:
            row += ["NA"]
        else:
            row += ["Completed" if cb == 1 else "Pending"]
        # Selection test applies to everyone (one column)
        row += ["Completed" if st == 1 else "Pending"]
        # Practical knowledge verification (one column)
        if exempt:
            row += ["NA"]
        else:
            row += ["Completed" if pk == 1 else "Pending"]
        # Induction tests occupy TWO columns (assembly area, machine area)
        if exempt:
            # BUGFIX: previously appended a single "NA" here, leaving the
            # row one column short of get_columns().
            row += ["NA", "NA"]
        elif ita == 1:
            row += ["Completed", "NA"]
        elif itm == 1:
            row += ["NA", "Completed"]
        else:
            row += ["Pending", "Pending"]
        data.append(row)
    return columns, data
def get_columns():
    """Return the report column definitions as "Label:Fieldtype:Width" strings."""
    specs = (
        ("Employee", "Link/Employee", 50),
        ("Employee Name", "Data", 100),
        ("Date of Joining", "Date", 100),
        ("Line", "Link/Line", 100),
        ("Shift", "Link/Shift", 100),
        ("Status", "Select", 100),
        ("Color Blindness Test", "Link/Color Blindness Test", 100),
        ("Selection Test", "Link/Selection Test", 100),
        ("Practical Knowledge Verification", "Link/New Joinees Practical Knowledge Verification", 100),
        ("Induction Test Assy", "Link/Induction Training Assembly Area", 100),
        ("Induction Test Machine", "Link/Induction Training Machine Area Crimping", 100),
    )
    return [
        _(label) + ":{0}:{1}".format(fieldtype, width)
        for label, fieldtype, width in specs
    ]
# def get_conditions(filters):
# conditions = ""
# # if filters.get("employee"):conditions += "AND att.employee = '%s'" % filters["employee"]
# if filters.get("from_date"): conditions += "and c.date_of_skill_evaluatation >= %(from_date)s"
# if filters.get("to_date"): conditions += " and c.date_of_skill_evaluatation <= %(to_date)s"
# return conditions, filters
def employee_details(filters):
    """Fetch active employees whose joining date lies in the filter range."""
    date_range = (
        filters.get("date_of_joining_from"),
        filters.get("date_of_joining_to"),
    )
    return frappe.db.sql(
        """select biometric_id,employee_name,shift,status,department,designation,date_of_joining,line from `tabEmployee` where date_of_joining between %s and %s and status = 'Active' """,
        date_range,
        as_dict=1,
    )
def ita_details(emp_id):
    """True if an Induction Training Assembly Area record exists for *emp_id*;
    False if none; None when *emp_id* is falsy."""
    if not emp_id:
        return None
    rows = frappe.db.sql(
        """select * from `tabInduction Training Assembly Area` where employee_code=%s """,
        (emp_id), as_dict=1)
    return bool(rows)
def itm_details(emp_id):
    """True if an Induction Training Machine Area Crimping record exists for
    *emp_id*; False if none; None when *emp_id* is falsy."""
    if not emp_id:
        return None
    rows = frappe.db.sql(
        """select * from `tabInduction Training Machine Area Crimping` where employee_code=%s """,
        (emp_id), as_dict=1)
    return bool(rows)
def att_details(emp_id):
    """True if *emp_id* has an Attendance record within the last three days
    (today, yesterday, or the day before); None when *emp_id* is falsy."""
    if not emp_id:
        return None
    today = date.today()
    yesterday = add_days(today, -1)
    day_before_yesterday = add_days(today, -2)
    rows = frappe.db.sql(
        """select attendance_date from `tabAttendance` where employee = %s and attendance_date in (%s,%s,%s) """,
        (emp_id, today, yesterday, day_before_yesterday), as_dict=1)
    return bool(rows)
def st_details(emp_id):
    """True if a Selection Test record exists for *emp_id*; False if none;
    None when *emp_id* is falsy."""
    if not emp_id:
        return None
    rows = frappe.db.sql(
        """select * from `tabSelection Test` where employee_code=%s """,
        (emp_id), as_dict=1)
    return bool(rows)
def cb_details(emp_id):
    """True if a Color Blindness Test record exists for *emp_id*; False if
    none; None when *emp_id* is falsy."""
    if not emp_id:
        return None
    rows = frappe.db.sql(
        """select * from `tabColor Blindness Test` where employee_code=%s """,
        (emp_id), as_dict=1)
    return bool(rows)
def pk_details(emp_id):
    """True if a New Joinees Practical Knowledge Verification record exists
    for *emp_id*; False if none; None when *emp_id* is falsy."""
    if not emp_id:
        return None
    rows = frappe.db.sql(
        """select * from `tabNew Joinees Practical Knowledge Verification` where employee_code=%s """,
        (emp_id), as_dict=1)
    return bool(rows)
| 33.217391
| 271
| 0.56077
|
4a05c217de9e9ff00299566bcc0044dc7da7c0ba
| 16,348
|
py
|
Python
|
talent/google/cloud/talent_v4beta1/proto/company_pb2.py
|
TheNeuralBit/google-cloud-python
|
226cdf12f5dd69afb0ef665bb9e897d32d56f4b6
|
[
"Apache-2.0"
] | null | null | null |
talent/google/cloud/talent_v4beta1/proto/company_pb2.py
|
TheNeuralBit/google-cloud-python
|
226cdf12f5dd69afb0ef665bb9e897d32d56f4b6
|
[
"Apache-2.0"
] | null | null | null |
talent/google/cloud/talent_v4beta1/proto/company_pb2.py
|
TheNeuralBit/google-cloud-python
|
226cdf12f5dd69afb0ef665bb9e897d32d56f4b6
|
[
"Apache-2.0"
] | 1
|
2021-07-21T17:59:33.000Z
|
2021-07-21T17:59:33.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/talent_v4beta1/proto/company.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.talent_v4beta1.proto import (
common_pb2 as google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_common__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/talent_v4beta1/proto/company.proto",
package="google.cloud.talent.v4beta1",
syntax="proto3",
serialized_options=_b(
"\n\037com.google.cloud.talent.v4beta1B\024CompanyResourceProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/talent/v4beta1;talent\242\002\003CTS"
),
serialized_pb=_b(
'\n/google/cloud/talent_v4beta1/proto/company.proto\x12\x1bgoogle.cloud.talent.v4beta1\x1a\x1cgoogle/api/annotations.proto\x1a.google/cloud/talent_v4beta1/proto/common.proto"\xe4\x03\n\x07\x43ompany\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x65xternal_id\x18\x03 \x01(\t\x12\x36\n\x04size\x18\x04 \x01(\x0e\x32(.google.cloud.talent.v4beta1.CompanySize\x12\x1c\n\x14headquarters_address\x18\x05 \x01(\t\x12\x15\n\rhiring_agency\x18\x06 \x01(\x08\x12\x10\n\x08\x65\x65o_text\x18\x07 \x01(\t\x12\x13\n\x0bwebsite_uri\x18\x08 \x01(\t\x12\x17\n\x0f\x63\x61reer_site_uri\x18\t \x01(\t\x12\x11\n\timage_uri\x18\n \x01(\t\x12\x30\n(keyword_searchable_job_custom_attributes\x18\x0b \x03(\t\x12\x46\n\x0c\x64\x65rived_info\x18\x0c \x01(\x0b\x32\x30.google.cloud.talent.v4beta1.Company.DerivedInfo\x12\x11\n\tsuspended\x18\r \x01(\x08\x1aS\n\x0b\x44\x65rivedInfo\x12\x44\n\x15headquarters_location\x18\x01 \x01(\x0b\x32%.google.cloud.talent.v4beta1.LocationB\x82\x01\n\x1f\x63om.google.cloud.talent.v4beta1B\x14\x43ompanyResourceProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/talent/v4beta1;talent\xa2\x02\x03\x43TSb\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_common__pb2.DESCRIPTOR,
],
)
_COMPANY_DERIVEDINFO = _descriptor.Descriptor(
name="DerivedInfo",
full_name="google.cloud.talent.v4beta1.Company.DerivedInfo",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="headquarters_location",
full_name="google.cloud.talent.v4beta1.Company.DerivedInfo.headquarters_location",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=560,
serialized_end=643,
)
_COMPANY = _descriptor.Descriptor(
name="Company",
full_name="google.cloud.talent.v4beta1.Company",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.talent.v4beta1.Company.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.cloud.talent.v4beta1.Company.display_name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="external_id",
full_name="google.cloud.talent.v4beta1.Company.external_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="size",
full_name="google.cloud.talent.v4beta1.Company.size",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="headquarters_address",
full_name="google.cloud.talent.v4beta1.Company.headquarters_address",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="hiring_agency",
full_name="google.cloud.talent.v4beta1.Company.hiring_agency",
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="eeo_text",
full_name="google.cloud.talent.v4beta1.Company.eeo_text",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="website_uri",
full_name="google.cloud.talent.v4beta1.Company.website_uri",
index=7,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="career_site_uri",
full_name="google.cloud.talent.v4beta1.Company.career_site_uri",
index=8,
number=9,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="image_uri",
full_name="google.cloud.talent.v4beta1.Company.image_uri",
index=9,
number=10,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="keyword_searchable_job_custom_attributes",
full_name="google.cloud.talent.v4beta1.Company.keyword_searchable_job_custom_attributes",
index=10,
number=11,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="derived_info",
full_name="google.cloud.talent.v4beta1.Company.derived_info",
index=11,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="suspended",
full_name="google.cloud.talent.v4beta1.Company.suspended",
index=12,
number=13,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_COMPANY_DERIVEDINFO],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=159,
serialized_end=643,
)
_COMPANY_DERIVEDINFO.fields_by_name[
"headquarters_location"
].message_type = (
google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_common__pb2._LOCATION
)
_COMPANY_DERIVEDINFO.containing_type = _COMPANY
_COMPANY.fields_by_name[
"size"
].enum_type = (
google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_common__pb2._COMPANYSIZE
)
_COMPANY.fields_by_name["derived_info"].message_type = _COMPANY_DERIVEDINFO
DESCRIPTOR.message_types_by_name["Company"] = _COMPANY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Company = _reflection.GeneratedProtocolMessageType(
"Company",
(_message.Message,),
dict(
DerivedInfo=_reflection.GeneratedProtocolMessageType(
"DerivedInfo",
(_message.Message,),
dict(
DESCRIPTOR=_COMPANY_DERIVEDINFO,
__module__="google.cloud.talent_v4beta1.proto.company_pb2",
__doc__="""Derived details about the company.
Attributes:
headquarters_location:
A structured headquarters location of the company, resolved
from [Company.headquarters\_address][google.cloud.talent.v4bet
a1.Company.headquarters\_address] if provided.
""",
# @@protoc_insertion_point(class_scope:google.cloud.talent.v4beta1.Company.DerivedInfo)
),
),
DESCRIPTOR=_COMPANY,
__module__="google.cloud.talent_v4beta1.proto.company_pb2",
__doc__="""A Company resource represents a company in the service. A company is the
entity that owns job postings, that is, the hiring entity responsible
for employing applicants for the job position.
Attributes:
name:
Required during company update. The resource name for a
company. This is generated by the service when a company is
created. The format is "projects/{project\_id}/tenants/{tenan
t\_id}/companies/{company\_id}", for example, "projects/api-
test-project/tenants/foo/companies/bar". Tenant id is
optional and the default tenant is used if unspecified, for
example, "projects/api-test-project/companies/bar".
display_name:
Required. The display name of the company, for example,
"Google, LLC".
external_id:
Required. Client side company identifier, used to uniquely
identify the company. The maximum number of allowed
characters is 255.
size:
Optional. The employer's company size.
headquarters_address:
Optional. The street address of the company's main
headquarters, which may be different from the job location.
The service attempts to geolocate the provided address, and
populates a more specific location wherever possible in [Deriv
edInfo.headquarters\_location][google.cloud.talent.v4beta1.Com
pany.DerivedInfo.headquarters\_location].
hiring_agency:
Optional. Set to true if it is the hiring agency that post
jobs for other employers. Defaults to false if not provided.
eeo_text:
Optional. Equal Employment Opportunity legal disclaimer text
to be associated with all jobs, and typically to be displayed
in all roles. The maximum number of allowed characters is
500.
website_uri:
Optional. The URI representing the company's primary web site
or home page, for example, "https://www.google.com". The
maximum number of allowed characters is 255.
career_site_uri:
Optional. The URI to employer's career site or careers page
on the employer's web site, for example,
"https://careers.google.com".
image_uri:
Optional. A URI that hosts the employer's company logo.
keyword_searchable_job_custom_attributes:
Optional. A list of keys of filterable [Job.custom\_attribute
s][google.cloud.talent.v4beta1.Job.custom\_attributes], whose
corresponding ``string_values`` are used in keyword searches.
Jobs with ``string_values`` under these specified field keys
are returned if any of the values match the search keyword.
Custom field values with parenthesis, brackets and special
symbols are not searchable as-is, and those keyword queries
must be surrounded by quotes.
derived_info:
Output only. Derived details about the company.
suspended:
Output only. Indicates whether a company is flagged to be
suspended from public availability by the service when job
content appears suspicious, abusive, or spammy.
""",
# @@protoc_insertion_point(class_scope:google.cloud.talent.v4beta1.Company)
),
)
_sym_db.RegisterMessage(Company)
_sym_db.RegisterMessage(Company.DerivedInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.737079
| 1,184
| 0.61769
|
4a05c2a1723c65317a1dac8d210ce79285780f1e
| 413
|
py
|
Python
|
old/park_prise.py
|
archu2020/python-2
|
19c626ca9fd37168db8a7ac075fd80c8e2971313
|
[
"Apache-2.0"
] | 48
|
2017-12-24T12:19:55.000Z
|
2022-02-26T13:14:27.000Z
|
old/park_prise.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 3
|
2018-12-05T08:48:14.000Z
|
2020-07-29T01:56:16.000Z
|
old/park_prise.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 113
|
2017-08-09T03:10:04.000Z
|
2022-03-26T16:05:01.000Z
|
class Park:
    """Compute park admission prices for a group over a number of days.

    Kids are charged at half the adult rate; weekend prices carry a
    20% surcharge over weekday prices.
    """

    # Weekend prices are 20% higher than weekday prices.
    WEEKEND_SURCHARGE = 1.2

    def __init__(self, weekdays, adult, kid):
        """
        :param weekdays: number of days the tickets cover
        :param adult: adult ticket price units per day
        :param kid: kid ticket price units per day (billed at half rate)
        """
        self.weekdays = int(weekdays)
        self.adult = float(adult)
        self.kid = float(kid)

    def weekday(self):
        """Return the total weekday price for the configured group.

        Bug fix: the original assigned the result to ``self.weekday``,
        shadowing this method so a second call raised ``TypeError``.
        The value is now simply returned.
        """
        return (self.adult + self.kid * 0.5) * self.weekdays

    def weekend(self):
        """Return the total weekend price (weekday price + 20% surcharge)."""
        return (self.adult + self.kid * 0.5) * self.weekdays * self.WEEKEND_SURCHARGE
| 34.416667
| 74
| 0.588378
|
4a05c2adc90d8d28c4aa207d668a4de564199d60
| 135
|
py
|
Python
|
app.py
|
mboudet/askosite
|
69c19bc5d1b494339fb61fc1133b9206ac2a4db5
|
[
"MIT"
] | null | null | null |
app.py
|
mboudet/askosite
|
69c19bc5d1b494339fb61fc1133b9206ac2a4db5
|
[
"MIT"
] | null | null | null |
app.py
|
mboudet/askosite
|
69c19bc5d1b494339fb61fc1133b9206ac2a4db5
|
[
"MIT"
] | null | null | null |
# WSGI entry point: build the Flask-style application via the project's
# application factory, using a config file relative to this script.
from askosite.app import create_app

application = create_app(config='../local.cfg')

if __name__ == '__main__':
    # Run the built-in development server when executed directly.
    application.run()
| 19.285714
| 47
| 0.725926
|
4a05c4305e66f61dd057d2ee15ee3993e5ff4a15
| 210
|
py
|
Python
|
abuse_whois/schemas/api_model.py
|
t4d/abuse_whois
|
32c897da124f6404199484a7395c0f96a7a344f8
|
[
"MIT"
] | 28
|
2018-11-24T09:00:04.000Z
|
2022-02-17T01:31:40.000Z
|
abuse_whois/schemas/api_model.py
|
t4d/abuse_whois
|
32c897da124f6404199484a7395c0f96a7a344f8
|
[
"MIT"
] | 5
|
2018-11-24T04:41:09.000Z
|
2021-10-31T23:36:35.000Z
|
abuse_whois/schemas/api_model.py
|
t4d/abuse_whois
|
32c897da124f6404199484a7395c0f96a7a344f8
|
[
"MIT"
] | 7
|
2019-06-07T16:26:29.000Z
|
2021-11-15T19:38:26.000Z
|
from humps import camelize
from pydantic import BaseModel
class APIModel(BaseModel):
    """Shared pydantic base model for API schemas.

    Exposes camelCase aliases for snake_case field names and supports
    construction from ORM objects.
    """

    class Config:
        # Allow building instances from ORM objects (attribute access).
        orm_mode = True
        # Generate camelCase aliases for the snake_case field names.
        alias_generator = camelize
        # Accept either the field name or its camelCase alias on input.
        allow_population_by_field_name = True
| 21
| 45
| 0.72381
|
4a05c6d24e48834b5ef82ccbb5bde0cd8989e08c
| 2,381
|
py
|
Python
|
text_processing/tf_idf_cosine_similarity.py
|
pri1311/PodMatcher
|
a9f614152e57f97885804d1762eac5f65025ed5c
|
[
"MIT"
] | 4
|
2022-02-05T14:11:57.000Z
|
2022-02-06T09:54:42.000Z
|
text_processing/tf_idf_cosine_similarity.py
|
pri1311/PodMatcher
|
a9f614152e57f97885804d1762eac5f65025ed5c
|
[
"MIT"
] | null | null | null |
text_processing/tf_idf_cosine_similarity.py
|
pri1311/PodMatcher
|
a9f614152e57f97885804d1762eac5f65025ed5c
|
[
"MIT"
] | 2
|
2022-02-05T15:22:09.000Z
|
2022-02-06T22:14:08.000Z
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords as stp
from preprocessing import tf_idf_lemmetizer as tf_idf_lemma
def get_tf_idf_cosine_similarity(compare_doc, doc_corpus):
    """Score each document in *doc_corpus* against *compare_doc* with TF-IDF.

    The vectorizer is fitted on *compare_doc* only, so the vocabulary is
    restricted to terms appearing in the reference document; corpus terms
    outside it are ignored.

    :param compare_doc: reference text the corpus is compared against
    :param doc_corpus: iterable of document strings to score
    :return: list of cosine similarities, one per corpus document

    Changes from the original: removed the blocks of commented-out dead
    code, dropped the needless ``.todense()`` conversions (``cosine_similarity``
    accepts sparse matrices), and replaced the per-document Python loop of
    pairwise calls with a single vectorized ``cosine_similarity`` call.
    """
    tf_idf_vect = TfidfVectorizer(analyzer=tf_idf_lemma.stemmed_words)
    reference_vector = tf_idf_vect.fit_transform([compare_doc])
    corpus_vectors = tf_idf_vect.transform(doc_corpus)
    # One (1 x N) similarity matrix; its first row holds all the scores.
    return list(cosine_similarity(reference_vector, corpus_vectors)[0])
def get_tf_cosine_similarity(compare_doc, doc_corpus):
    """Score each document in *doc_corpus* against *compare_doc* with plain TF.

    Identical to :func:`get_tf_idf_cosine_similarity` except that inverse
    document frequency weighting is disabled (``use_idf=False``), so scores
    reflect raw (normalized) term frequencies only.

    :param compare_doc: reference text the corpus is compared against
    :param doc_corpus: iterable of document strings to score
    :return: list of cosine similarities, one per corpus document

    Changes from the original: removed commented-out dead code, dropped the
    needless ``.todense()`` conversions, and replaced the per-document loop
    of pairwise calls with one vectorized ``cosine_similarity`` call.
    """
    tf_vect = TfidfVectorizer(use_idf=False, analyzer=tf_idf_lemma.stemmed_words)
    reference_vector = tf_vect.fit_transform([compare_doc])
    corpus_vectors = tf_vect.transform(doc_corpus)
    # One (1 x N) similarity matrix; its first row holds all the scores.
    return list(cosine_similarity(reference_vector, corpus_vectors)[0])
| 47.62
| 105
| 0.767325
|
4a05c711df01d45cac268700058ac09a160b2aa2
| 24,258
|
py
|
Python
|
salt/modules/dockercompose.py
|
tschmittni/salt
|
ccfcd5ed1272576799797ec7f259b676fd130585
|
[
"Apache-2.0"
] | 2
|
2018-11-08T02:59:24.000Z
|
2021-01-04T00:30:50.000Z
|
salt/modules/dockercompose.py
|
The-Loeki/salt
|
8ff8212cc1eacfe409eb9cc017b21250f28dd305
|
[
"Apache-2.0"
] | 4
|
2020-09-04T10:19:34.000Z
|
2020-11-09T12:55:59.000Z
|
salt/modules/dockercompose.py
|
The-Loeki/salt
|
8ff8212cc1eacfe409eb9cc017b21250f28dd305
|
[
"Apache-2.0"
] | 5
|
2017-06-16T23:48:13.000Z
|
2021-04-08T17:43:48.000Z
|
# -*- coding: utf-8 -*-
'''
Module to import docker-compose via saltstack
.. versionadded:: 2016.3.0
:maintainer: Jean Praloran <jeanpralo@gmail.com>
:maturity: new
:depends: docker-compose>=1.5
:platform: all
Introduction
------------
This module allows one to deal with docker-compose file in a directory.
This is a first version only, the following commands are missing at the moment
but will be built later on if the community is interested in this module:
- run
- logs
- port
- scale
Installation Prerequisites
--------------------------
This execution module requires at least version 1.4.0 of both docker-compose_ and
Docker_. docker-compose can easily be installed using :py:func:`pip.install
<salt.modules.pip.install>`:
.. code-block:: bash
salt myminion pip.install docker-compose>=1.5.0
.. _docker-compose: https://pypi.python.org/pypi/docker-compose
.. _Docker: https://www.docker.com/
How to use this module?
-----------------------
In order to use the module if you have no docker-compose file on the server you
can issue the command create, it takes two arguments the path where the
docker-compose.yml will be stored and the content of this latter:
.. code-block:: text
# salt-call -l debug dockercompose.create /tmp/toto '
database:
image: mongo:3.0
command: mongod --smallfiles --quiet --logpath=/dev/null
'
Then you can execute a list of method defined at the bottom with at least one
argument (the path where the docker-compose.yml will be read) and an optional
python list which corresponds to the services names:
.. code-block:: bash
# salt-call -l debug dockercompose.up /tmp/toto
# salt-call -l debug dockercompose.restart /tmp/toto '[database]'
# salt-call -l debug dockercompose.stop /tmp/toto
# salt-call -l debug dockercompose.rm /tmp/toto
Docker-compose method supported
-------------------------------
- up
- restart
- stop
- start
- pause
- unpause
- kill
- rm
- ps
- pull
- build
Functions
---------
- docker-compose.yml management
- :py:func:`dockercompose.create <salt.modules.dockercompose.create>`
- :py:func:`dockercompose.get <salt.modules.dockercompose.get>`
- Manage containers
- :py:func:`dockercompose.restart <salt.modules.dockercompose.restart>`
- :py:func:`dockercompose.stop <salt.modules.dockercompose.stop>`
- :py:func:`dockercompose.pause <salt.modules.dockercompose.pause>`
- :py:func:`dockercompose.unpause <salt.modules.dockercompose.unpause>`
- :py:func:`dockercompose.start <salt.modules.dockercompose.start>`
- :py:func:`dockercompose.kill <salt.modules.dockercompose.kill>`
- :py:func:`dockercompose.rm <salt.modules.dockercompose.rm>`
- :py:func:`dockercompose.up <salt.modules.dockercompose.up>`
- Manage containers image:
- :py:func:`dockercompose.pull <salt.modules.dockercompose.pull>`
- :py:func:`dockercompose.build <salt.modules.dockercompose.build>`
- Gather information about containers:
- :py:func:`dockercompose.ps <salt.modules.dockercompose.ps>`
Detailed Function Documentation
-------------------------------
'''
from __future__ import absolute_import, print_function, unicode_literals
import inspect
import logging
import os
import re
import salt.utils.files
import salt.utils.stringutils
from salt.ext import six
from operator import attrgetter
try:
import compose
from compose.cli.command import get_project
from compose.service import ConvergenceStrategy
HAS_DOCKERCOMPOSE = True
except ImportError:
HAS_DOCKERCOMPOSE = False
try:
from compose.project import OneOffFilter
USE_FILTERCLASS = True
except ImportError:
USE_FILTERCLASS = False
MIN_DOCKERCOMPOSE = (1, 5, 0)
VERSION_RE = r'([\d.]+)'
log = logging.getLogger(__name__)
debug = False
__virtualname__ = 'dockercompose'
DEFAULT_DC_FILENAMES = ('docker-compose.yml', 'docker-compose.yaml')
def __virtual__():
    '''
    Load this execution module only when a sufficiently recent
    docker-compose python library is importable.
    '''
    not_loaded = (False, 'The dockercompose execution module not loaded: '
                  'compose python library not available.')
    if not HAS_DOCKERCOMPOSE:
        return not_loaded
    match = re.match(VERSION_RE, six.text_type(compose.__version__))
    if not match:
        return not_loaded
    version = tuple(int(part) for part in match.group(1).split('.'))
    return __virtualname__ if version >= MIN_DOCKERCOMPOSE else not_loaded
def __standardize_result(status, message, data=None, debug_msg=None):
    '''
    Build the standard response dictionary shared by every function in
    this module.

    :param status: boolean success flag
    :param message: human readable summary
    :param data: optional payload stored under the ``return`` key
    :param debug_msg: optional debug payload, kept only when the
        module-level ``debug`` flag is set
    :return: response dictionary
    '''
    response = dict(status=status, message=message)
    if data is not None:
        response['return'] = data
    if debug_msg is not None and debug:
        response['debug'] = debug_msg
    return response
def __get_docker_file_path(path):
    '''
    Resolve *path* to an existing docker-compose file.

    ``path`` may point directly at a file, or at a directory containing
    one of the default compose file names.  Returns ``None`` when no
    suitable file exists.
    '''
    if os.path.isfile(path):
        return path
    candidates = (os.path.join(path, name) for name in DEFAULT_DC_FILENAMES)
    return next((candidate for candidate in candidates
                 if os.path.isfile(candidate)), None)
def __read_docker_compose_file(file_path):
    '''
    Read the compose file at *file_path* and wrap its content in the
    standard result dictionary.

    :param file_path: path of the compose file
    :return: standard result; on success ``return`` maps the file's
        basename to the full file text
    '''
    if not os.path.isfile(file_path):
        return __standardize_result(False,
                                    'Path {} is not present'.format(file_path),
                                    None, None)
    try:
        with salt.utils.files.fopen(file_path, 'r') as fl:
            file_name = os.path.basename(file_path)
            # Read the file in one call instead of the original quadratic
            # line-by-line string concatenation loop.
            result = {file_name: salt.utils.stringutils.to_unicode(fl.read())}
    except EnvironmentError:
        return __standardize_result(False,
                                    'Could not read {0}'.format(file_path),
                                    None, None)
    return __standardize_result(True,
                                'Reading content of {0}'.format(file_path),
                                result, None)
def __write_docker_compose(path, docker_compose):
    '''
    Write *docker_compose* content to disk and validate it by loading it
    as a compose project.

    path
        Either the full path of the compose file to write (ending in
        ``.yml``/``.yaml``) or a directory in which the default compose
        file name will be used.

    docker_compose
        Content of the docker-compose file.

    :return: the written file path on success, or a standard error
        dictionary when writing or validation fails
    '''
    if path.lower().endswith(('.yml', '.yaml')):
        file_path = path
        dir_name = os.path.dirname(path)
    else:
        dir_name = path
        file_path = os.path.join(dir_name, DEFAULT_DC_FILENAMES[0])
    # Idiom fix: use ``not`` rather than comparing with ``is False``.
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)
    try:
        with salt.utils.files.fopen(file_path, 'w') as fl:
            fl.write(salt.utils.stringutils.to_str(docker_compose))
    except EnvironmentError:
        return __standardize_result(False,
                                    'Could not write {0}'.format(file_path),
                                    None, None)
    # Validate by loading the project; remove the file again if invalid.
    project = __load_project_from_file_path(file_path)
    if isinstance(project, dict):
        os.remove(file_path)
        return project
    return file_path
def __load_project(path):
    '''
    Load a docker-compose project from *path*.

    :return: the compose project object, or a standard error dictionary
        when no compose file can be found at *path*
    '''
    file_path = __get_docker_file_path(path)
    if file_path is not None:
        return __load_project_from_file_path(file_path)
    return __standardize_result(
        False,
        'Could not find docker-compose file at {0}'.format(path),
        None, None)
def __load_project_from_file_path(file_path):
    '''
    Build a compose project object for the compose file at *file_path*.

    :return: the project object, or a standard error dictionary when the
        compose library raises
    '''
    try:
        return get_project(project_dir=os.path.dirname(file_path),
                           config_path=[os.path.basename(file_path)])
    except Exception as inst:
        return __handle_except(inst)
def __handle_except(inst):
    '''
    Convert an exception raised by docker-compose into the standard
    error dictionary.  The failing command name is read from the direct
    caller's frame, so this must be called from the failing function.
    '''
    # Frame 1 is the caller; index 3 is its function name.
    caller_name = inspect.stack()[1][3]
    return __standardize_result(False,
                                'Docker-compose command {0} failed'.format(caller_name),
                                '{0}'.format(inst), None)
def _get_convergence_plans(project, service_names):
    '''
    Describe the action docker-compose would take for each container.

    :param project: compose project object
    :param service_names: list of service names, or None for all services
    :return: dict mapping service name to a human readable action
    '''
    # Dispatch table instead of an if/elif chain; unknown actions are
    # omitted from the result, matching the original behavior.
    descriptions = {
        'create': 'Creating container',
        'recreate': 'Re-creating container',
        'start': 'Starting container',
        'noop': 'Container is up to date',
    }
    plans = project._get_convergence_plans(project.get_services(service_names),
                                           ConvergenceStrategy.changed)
    ret = {}
    for name, (action, _container) in plans.items():
        if action in descriptions:
            ret[name] = descriptions[action]
    return ret
def get(path):
    '''
    Get the content of the docker-compose file into a directory

    path
        Path where the docker-compose file is stored on the server

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.get /path/where/docker-compose/stored
    '''
    file_path = __get_docker_file_path(path)
    if file_path is None:
        return __standardize_result(False,
                                    'Path {} is not present'.format(path),
                                    None, None)
    salt_result = __read_docker_compose_file(file_path)
    if not salt_result['status']:
        return salt_result
    # The file is considered valid when it loads as a compose project
    # (a dict return from __load_project signals an error).
    project = __load_project(path)
    salt_result['return']['valid'] = not isinstance(project, dict)
    return salt_result
def create(path, docker_compose):
    '''
    Create and validate a docker-compose file into a directory

    path
        Path where the docker-compose file will be stored on the server

    docker_compose
        docker_compose file

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.create /path/where/docker-compose/stored content
    '''
    # Guard clause: reject empty content up front.
    if not docker_compose:
        return __standardize_result(
            False,
            'Creating a docker-compose project failed, you must send a valid docker-compose file',
            None, None)
    written = __write_docker_compose(path, docker_compose)
    if isinstance(written, dict):
        # Writing or validation failed; propagate the error result.
        return written
    return __standardize_result(True,
                                'Successfully created the docker-compose file',
                                {'compose.base_dir': path},
                                None)
def pull(path, service_names=None):
    '''
    Pull image for containers in the docker-compose file, service_names is a
    python list, if omitted pull all images

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will pull only the image for the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.pull /path/where/docker-compose/stored
        salt myminion dockercompose.pull /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    try:
        project.pull(service_names)
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True,
                                'Pulling containers images via docker-compose succeeded',
                                None, None)
def build(path, service_names=None):
    '''
    Build image for containers in the docker-compose file, service_names is a
    python list, if omitted build images for all containers. Please note
    that at the moment the module does not allow you to upload your Dockerfile,
    nor any other file you could need with your docker-compose.yml, you will
    have to make sure the files you need are actually in the directory specified
    in the `build` keyword

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will pull only the image for the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.build /path/where/docker-compose/stored
        salt myminion dockercompose.build /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    try:
        project.build(service_names)
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True,
                                'Building containers images via docker-compose succeeded',
                                None, None)
def restart(path, service_names=None):
    '''
    Restart container(s) in the docker-compose file, service_names is a python
    list, if omitted restart all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will restart only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.restart /path/where/docker-compose/stored
        salt myminion dockercompose.restart /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    result = {}
    try:
        project.restart(service_names)
        if debug:
            # Per-container inspection data is gathered only in debug mode.
            for container in project.containers():
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
                    result[name] = 'restarted'
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Restarting containers via docker-compose',
                                result, debug_ret)
def stop(path, service_names=None):
    '''
    Stop running containers in the docker-compose file, service_names is a python
    list, if omitted stop all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will stop only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.stop /path/where/docker-compose/stored
        salt myminion dockercompose.stop /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    result = {}
    try:
        project.stop(service_names)
        if debug:
            # Include stopped containers so the just-stopped ones appear.
            for container in project.containers(stopped=True):
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
                    result[name] = 'stopped'
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Stopping containers via docker-compose',
                                result, debug_ret)
def pause(path, service_names=None):
    '''
    Pause running containers in the docker-compose file, service_names is a python
    list, if omitted pause all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will pause only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.pause /path/where/docker-compose/stored
        salt myminion dockercompose.pause /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    result = {}
    try:
        project.pause(service_names)
        if debug:
            # Per-container inspection data is gathered only in debug mode.
            for container in project.containers():
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
                    result[name] = 'paused'
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Pausing containers via docker-compose',
                                result, debug_ret)
def unpause(path, service_names=None):
    '''
    Un-Pause containers in the docker-compose file, service_names is a python
    list, if omitted unpause all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will un-pause only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.unpause /path/where/docker-compose/stored
        salt myminion dockercompose.unpause /path/where/docker-compose/stored '[janus]'
    '''
    # Doc fix: the CLI examples above previously showed ``dockercompose.pause``
    # (copy-paste error from the pause function).
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    result = {}
    try:
        project.unpause(service_names)
        if debug:
            # Per-container inspection data is gathered only in debug mode.
            for container in project.containers():
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
                    result[name] = 'unpaused'
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Un-Pausing containers via docker-compose',
                                result, debug_ret)
def start(path, service_names=None):
    '''
    Start containers in the docker-compose file, service_names is a python
    list, if omitted start all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will start only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.start /path/where/docker-compose/stored
        salt myminion dockercompose.start /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    result = {}
    try:
        project.start(service_names)
        if debug:
            # Per-container inspection data is gathered only in debug mode.
            for container in project.containers():
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
                    result[name] = 'started'
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Starting containers via docker-compose',
                                result, debug_ret)
def kill(path, service_names=None):
    '''
    Kill containers in the docker-compose file, service_names is a python
    list, if omitted kill all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will kill only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.kill /path/where/docker-compose/stored
        salt myminion dockercompose.kill /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    result = {}
    try:
        project.kill(service_names)
        if debug:
            # Include stopped containers so the just-killed ones appear.
            for container in project.containers(stopped=True):
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
                    result[name] = 'killed'
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Killing containers via docker-compose',
                                result, debug_ret)
def rm(path, service_names=None):
    '''
    Remove stopped containers in the docker-compose file, service_names is a python
    list, if omitted remove all stopped containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will remove only the specified stopped services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.rm /path/where/docker-compose/stored
        salt myminion dockercompose.rm /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    try:
        project.remove_stopped(service_names)
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True,
                                'Removing stopped containers via docker-compose',
                                None, None)
def ps(path):
    '''
    List all running containers and report some information about them

    path
        Path where the docker-compose file is stored on the server

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.ps /path/where/docker-compose/stored
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    # Newer compose versions filter one-off containers with a class; older
    # ones take a boolean keyword argument.
    if USE_FILTERCLASS:
        one_off = project.containers(None, OneOffFilter.only)
    else:
        one_off = project.containers(None, one_off=True)
    containers = sorted(project.containers(None, stopped=True) + one_off,
                        key=attrgetter('name'))
    result = {}
    for container in containers:
        command = container.human_readable_command
        if len(command) > 30:
            # Truncate long commands for readable CLI output.
            command = '{0} ...'.format(command[:26])
        result[container.name] = {
            'id': container.id,
            'name': container.name,
            'command': command,
            'state': container.human_readable_state,
            'ports': container.human_readable_ports,
        }
    return __standardize_result(True, 'Listing docker-compose containers',
                                result, None)
def up(path, service_names=None):
    '''
    Create and start containers defined in the docker-compose.yml file
    located in path, service_names is a python list, if omitted create and
    start all containers

    path
        Path where the docker-compose file is stored on the server

    service_names
        If specified will create and start only the specified services

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.up /path/where/docker-compose/stored
        salt myminion dockercompose.up /path/where/docker-compose/stored '[janus]'
    '''
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    debug_ret = {}
    try:
        # Record the planned action per container before bringing them up.
        result = _get_convergence_plans(project, service_names)
        started = project.up(service_names)
        if debug:
            for container in started:
                name = container.get('Name')
                if service_names is None or name[1:] in service_names:
                    container.inspect_if_not_inspected()
                    debug_ret[name] = container.inspect()
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(True, 'Adding containers via docker-compose',
                                result, debug_ret)
| 31.42228
| 122
| 0.632286
|
4a05c7f72f2f5e03db78c75818f0e881e812c6b5
| 9,908
|
py
|
Python
|
sovrin_client/client/wallet/link.py
|
TechWritingWhiz/sovrin-client
|
b5633dd7767b4aaf08f622181f3937a104b290fb
|
[
"Apache-2.0"
] | 13
|
2017-02-16T11:45:50.000Z
|
2017-06-13T20:07:51.000Z
|
sovrin_client/client/wallet/link.py
|
TechWritingWhiz/sovrin-client
|
b5633dd7767b4aaf08f622181f3937a104b290fb
|
[
"Apache-2.0"
] | 20
|
2017-06-29T17:59:57.000Z
|
2017-07-13T23:20:44.000Z
|
sovrin_client/client/wallet/link.py
|
TechWritingWhiz/sovrin-client
|
b5633dd7767b4aaf08f622181f3937a104b290fb
|
[
"Apache-2.0"
] | 57
|
2017-01-21T22:29:27.000Z
|
2017-06-29T10:24:19.000Z
|
from typing import List
from plenum.common.constants import NAME, NONCE
from plenum.common.signer_did import DidIdentity
from plenum.common.types import f
from plenum.common.util import prettyDateDifference, friendlyToRaw
from plenum.common.verifier import DidVerifier
from sovrin_client.client.wallet.types import AvailableClaim
from sovrin_common.exceptions import InvalidLinkException, \
RemoteEndpointNotFound, NotFound
class constant:
    """String constants used when rendering link/invitation details."""

    # Labels for the local (signer) side of the link.
    TRUST_ANCHOR = "Trust Anchor"
    SIGNER_IDENTIFIER = "Identifier"
    SIGNER_VER_KEY = "Verification Key"
    SIGNER_VER_KEY_EMPTY = '<empty>'

    # Labels for the remote side of the link.
    REMOTE_IDENTIFIER = "Remote"
    REMOTE_VER_KEY = "Remote Verification Key"
    REMOTE_VER_KEY_SAME_AS_ID = '<same as Remote>'
    REMOTE_END_POINT = "Remote endpoint"
    SIGNATURE = "Signature"
    CLAIM_REQUESTS = "Claim Requests"
    AVAILABLE_CLAIMS = "Available Claims"
    RECEIVED_CLAIMS = "Received Claims"

    # Labels for link state and sync bookkeeping.
    LINK_NONCE = "Nonce"
    LINK_STATUS = "Invitation status"
    LINK_LAST_SYNCED = "Last Synced"
    LINK_LAST_SEQ_NO = "Last Sync no"
    LINK_STATUS_ACCEPTED = "Accepted"

    # Placeholder strings shown before a link is synchronized/accepted.
    LINK_NOT_SYNCHRONIZED = "<this link has not yet been synchronized>"
    UNKNOWN_WAITING_FOR_SYNC = "<unknown, waiting for sync>"
    LINK_ITEM_PREFIX = '\n '
    NOT_AVAILABLE = "Not Available"
    NOT_ASSIGNED = "not yet assigned"
class Link:
    """A pairwise connection between a local identity and a remote party.

    Holds the local/remote identifiers and verification keys, the
    invitation nonce/status, and the proof requests and claims exchanged
    over the link.  NOTE(review): ``constant``, ``prettyDateDifference``,
    ``DidIdentity`` and the exception types are imported elsewhere in this
    module -- confirm availability before reuse.
    """
    def __init__(self,
                 name,
                 localIdentifier=None,
                 localVerkey=None,
                 trustAnchor=None,
                 remoteIdentifier=None,
                 remoteEndPoint=None,
                 remotePubkey=None,
                 invitationNonce=None,
                 proofRequests=None,
                 internalId=None,
                 remote_verkey=None):
        self.name = name
        self.localIdentifier = localIdentifier
        self.localVerkey = localVerkey
        self.trustAnchor = trustAnchor
        self.remoteIdentifier = remoteIdentifier
        self.remoteEndPoint = remoteEndPoint
        self.remotePubkey = remotePubkey
        self.invitationNonce = invitationNonce
        # for optionally storing a reference to an identifier in another system
        # for example, a college may already have a student ID for a particular
        # person, and that student ID can be put in this field
        self.internalId = internalId
        self.proofRequests = proofRequests or []  # type: List[ProofRequest]
        self.verifiedClaimProofs = []
        self.availableClaims = []  # type: List[AvailableClaim]
        # goes through the remoteVerkey property setter below
        self.remoteVerkey = remote_verkey
        # sync-related state, populated lazily after a sync
        self.linkStatus = None
        self.linkLastSynced = None
        self.linkLastSyncNo = None
    def __repr__(self):
        return self.key
    @property
    def key(self):
        # the link name uniquely identifies a link within a wallet
        return self.name
    @property
    def isRemoteEndpointAvailable(self):
        return self.remoteEndPoint and self.remoteEndPoint != \
            constant.NOT_AVAILABLE
    @property
    def isAccepted(self):
        return self.linkStatus == constant.LINK_STATUS_ACCEPTED
    def __str__(self):
        """Render a human-readable, indented multi-line summary of the link."""
        localIdr = self.localIdentifier if self.localIdentifier \
            else constant.NOT_ASSIGNED
        trustAnchor = self.trustAnchor or ""
        trustAnchorStatus = '(not yet written to Sovrin)'
        if self.remoteVerkey is not None:
            if self.remoteIdentifier == self.remoteVerkey:
                remoteVerKey = constant.REMOTE_VER_KEY_SAME_AS_ID
            else:
                remoteVerKey = self.remoteVerkey
        else:
            remoteVerKey = constant.UNKNOWN_WAITING_FOR_SYNC
        remoteEndPoint = self.remoteEndPoint or \
            constant.UNKNOWN_WAITING_FOR_SYNC
        if isinstance(remoteEndPoint, tuple):
            remoteEndPoint = "{}:{}".format(*remoteEndPoint)
        linkStatus = 'not verified, remote verkey unknown'
        linkLastSynced = prettyDateDifference(self.linkLastSynced) or \
            constant.LINK_NOT_SYNCHRONIZED
        # an endpoint still unknown after a sync means it is simply not set
        if linkLastSynced != constant.LINK_NOT_SYNCHRONIZED and \
                remoteEndPoint == constant.UNKNOWN_WAITING_FOR_SYNC:
            remoteEndPoint = constant.NOT_AVAILABLE
        if self.isAccepted:
            trustAnchorStatus = '(confirmed)'
            if self.remoteVerkey is None:
                remoteVerKey = constant.REMOTE_VER_KEY_SAME_AS_ID
            linkStatus = self.linkStatus
        # TODO: The verkey would be same as the local identifier until we
        # support key rotation
        # TODO: This should be set as verkey in case of DID but need it from
        # wallet
        verKey = self.localVerkey if self.localVerkey else constant.SIGNER_VER_KEY_EMPTY
        fixedLinkHeading = "Link"
        if not self.isAccepted:
            fixedLinkHeading += " (not yet accepted)"
        # TODO: Refactor to use string interpolation
        # try:
        # NOTE(review): assumes self.name / self.invitationNonce are strings;
        # a None nonce would raise TypeError here -- confirm callers.
        fixedLinkItems = \
            '\n' \
            'Name: ' + self.name + '\n' \
            'Identifier: ' + localIdr + '\n' \
            'Trust anchor: ' + trustAnchor + ' ' + trustAnchorStatus + '\n' \
            'Verification key: ' + verKey + '\n' \
            'Signing key: <hidden>' '\n' \
            'Remote: ' + (self.remoteIdentifier or
                          constant.UNKNOWN_WAITING_FOR_SYNC) + '\n' \
            'Remote Verification key: ' + remoteVerKey + '\n' \
            'Remote endpoint: ' + remoteEndPoint + '\n' \
            'Invitation nonce: ' + self.invitationNonce + '\n' \
            'Invitation status: ' + linkStatus + '\n'
        optionalLinkItems = ""
        if len(self.proofRequests) > 0:
            optionalLinkItems += "Proof Request(s): {}". \
                format(", ".join([cr.name for cr in self.proofRequests])) \
                + '\n'
        if self.availableClaims:
            optionalLinkItems += self.avail_claims_str()
        if self.linkLastSyncNo:
            # NOTE(review): assumes linkLastSyncNo is already a string; a
            # numeric value would raise TypeError on concatenation -- confirm.
            optionalLinkItems += 'Last sync seq no: ' + self.linkLastSyncNo \
                + '\n'
        fixedEndingLines = 'Last synced: ' + linkLastSynced
        linkItems = fixedLinkItems + optionalLinkItems + fixedEndingLines
        indentedLinkItems = constant.LINK_ITEM_PREFIX.join(
            linkItems.splitlines())
        return fixedLinkHeading + indentedLinkItems
    def avail_claims_str(self):
        """One-line, comma-separated summary of the available claim names."""
        claim_names = [name for name, _, _ in self.availableClaims]
        return "Available Claim(s): {}".\
            format(", ".join(claim_names)) + '\n'
    @staticmethod
    def validate(invitationData):
        """Raise InvalidLinkException if required invitation fields are missing.

        Requires a top-level 'sig' and 'link-invitation', and the
        identifier/name/nonce fields inside the link-invitation.
        """
        def checkIfFieldPresent(msg, searchInName, fieldName):
            if not msg.get(fieldName):
                raise InvalidLinkException(
                    "Field not found in {}: {}".format(searchInName, fieldName))
        checkIfFieldPresent(invitationData, 'given input', 'sig')
        checkIfFieldPresent(invitationData, 'given input', 'link-invitation')
        linkInvitation = invitationData.get("link-invitation")
        linkInvitationReqFields = [f.IDENTIFIER.nm, NAME, NONCE]
        for fn in linkInvitationReqFields:
            checkIfFieldPresent(linkInvitation, 'link-invitation', fn)
    def getRemoteEndpoint(self, required=False):
        """Return the remote endpoint as an (ip, port) tuple, or None.

        Accepts either a tuple or an "ip:port" string; raises
        RemoteEndpointNotFound when absent and *required* is True.
        """
        if not self.remoteEndPoint and required:
            raise RemoteEndpointNotFound
        if isinstance(self.remoteEndPoint, tuple):
            return self.remoteEndPoint
        elif isinstance(self.remoteEndPoint, str):
            ip, port = self.remoteEndPoint.split(":")
            return ip, int(port)
        elif self.remoteEndPoint is None:
            return None
        else:
            raise ValueError('Cannot convert endpoint {} to HA'.
                             format(self.remoteEndPoint))
    @property
    def remoteVerkey(self):
        # tolerate instances restored without the backing attribute
        if not hasattr(self, '_remoteVerkey'):
            return None
        if self._remoteVerkey is None:
            return None
        # This property should be used to fetch verkey compared to
        # remoteVerkey, its a more consistent name and takes care of
        # abbreviated verkey
        i = DidIdentity(self.remoteIdentifier, verkey=self._remoteVerkey)
        return i.verkey
    @property
    def full_remote_verkey(self):
        """Expanded (non-abbreviated) remote verkey, or None if unknown."""
        verkey = self.remoteVerkey
        if verkey is None:
            return None
        i = DidIdentity(self.remoteIdentifier, verkey=verkey)
        full_verkey = i.full_verkey
        return full_verkey
    @remoteVerkey.setter
    def remoteVerkey(self, new_val):
        self._remoteVerkey = new_val
    def find_available_claims(self, name=None, version=None, origin=None):
        """Filter availableClaims; None arguments match anything."""
        return [ac for ac in self.availableClaims
                if (not name or name == ac.name) and
                (not version or version == ac.version) and
                (not origin or origin == ac.origin)]
    def find_available_claim(self, name=None, version=None, origin=None,
                             max_one=True, required=True):
        """Return a single matching available claim (or None).

        Asserts uniqueness when *max_one*; raises NotFound when *required*
        and nothing matches.
        """
        _ = self.find_available_claims(name, version, origin)
        assert not max_one or len(_) <= 1, \
            'more than one matching available claim found'
        if required and len(_) == 0:
            raise NotFound
        return _[0] if _ else None
    def find_proof_requests(self, name=None, version=None):
        """Filter proofRequests; None arguments match anything."""
        return [pr for pr in self.proofRequests
                if (not name or name == pr.name) and
                (not version or version == pr.version)]
    def find_proof_request(self, name=None, version=None,
                           max_one=True, required=True):
        """Return a single matching proof request (or None); see
        find_available_claim for the max_one/required semantics."""
        _ = self.find_proof_requests(name, version)
        assert not max_one or len(_) <= 1, \
            'more than one matching available claim found'
        if required and len(_) == 0:
            raise NotFound
        return _[0] if _ else None
| 37.388679
| 96
| 0.617885
|
4a05c8e9b615bf612f9fc1e2e6299244a1879ae1
| 343
|
py
|
Python
|
src/base/network_manger.py
|
charlestian/NetEaseMusic
|
5d8fa4747fcecabb5a09174ff6838718d62c2b31
|
[
"MIT"
] | 19
|
2015-04-18T15:16:58.000Z
|
2021-04-12T06:19:48.000Z
|
src/base/network_manger.py
|
charlestian/NetEaseMusic
|
5d8fa4747fcecabb5a09174ff6838718d62c2b31
|
[
"MIT"
] | null | null | null |
src/base/network_manger.py
|
charlestian/NetEaseMusic
|
5d8fa4747fcecabb5a09174ff6838718d62c2b31
|
[
"MIT"
] | 12
|
2015-04-18T15:16:59.000Z
|
2016-04-20T17:22:58.000Z
|
# -*- coding:utf8 -*-
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
from base.common import singleton
@singleton
class NetworkManager(QNetworkAccessManager):
    """Application-wide network access manager.

    A single QNetworkAccessManager is sufficient for the whole Qt
    application, hence the ``@singleton`` decorator.
    """

    def __init__(self, parent=None):
        # Delegate directly to Qt's base-class initialiser.  The explicit
        # base-class call avoids relying on the decorated class name.
        QNetworkAccessManager.__init__(self, parent)
| 26.384615
| 75
| 0.740525
|
4a05c939b32756afbfdc9c582b5714ff570621a7
| 18,767
|
py
|
Python
|
tests/keras_contrib/layers/test_normalization.py
|
WiproOpenSourcePractice/keras-contrib
|
3e77ba234f46b82997271996946b731bc774fb9f
|
[
"MIT"
] | 7
|
2017-07-22T09:05:44.000Z
|
2019-04-30T02:08:04.000Z
|
tests/keras_contrib/layers/test_normalization.py
|
WiproOpenSourcePractice/keras-contrib
|
3e77ba234f46b82997271996946b731bc774fb9f
|
[
"MIT"
] | 1
|
2017-12-26T02:59:59.000Z
|
2017-12-26T02:59:59.000Z
|
tests/keras_contrib/layers/test_normalization.py
|
WiproOpenSourcePractice/keras-contrib
|
3e77ba234f46b82997271996946b731bc774fb9f
|
[
"MIT"
] | 11
|
2017-07-06T14:11:51.000Z
|
2021-08-21T23:18:20.000Z
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.layers import Dense, Activation, Input
from keras import regularizers
from keras.utils.test_utils import layer_test, keras_test
from keras_contrib.layers import normalization
from keras.models import Sequential, Model
from keras import backend as K
from keras_contrib import backend as KC
# Module-level fixtures.  NOTE(review): input_1/input_2/input_3 and
# input_shapes are not referenced by any test below -- candidates for removal.
input_1 = np.arange(10)
input_2 = np.zeros(10)
input_3 = np.ones((10))
input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]
@keras_test
def basic_instancenorm_test():
    """Smoke-test InstanceNormalization construction with various kwargs.

    NOTE(review): the name lacks the ``test_`` prefix, so pytest does not
    collect it automatically; renaming would change the public name.
    """
    # The redundant function-local `from keras import regularizers` was
    # removed; the module-level import already provides the name.
    layer_test(normalization.InstanceNormalization,
               kwargs={'epsilon': 0.1,
                       'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
    layer_test(normalization.InstanceNormalization,
               kwargs={'gamma_initializer': 'ones',
                       'beta_initializer': 'ones'},
               input_shape=(3, 4, 2))
    layer_test(normalization.InstanceNormalization,
               kwargs={'scale': False, 'center': False},
               input_shape=(3, 3))
@keras_test
def test_instancenorm_correctness_rank2():
    """InstanceNormalization on rank-2 (per-feature) input should yield
    approximately zero mean and unit std after training."""
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10, 1), axis=-1)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 1))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # undo the learned affine transform before checking the statistics
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
@keras_test
def test_instancenorm_correctness_rank1():
    """InstanceNormalization with axis=None should handle rank-1 input."""
    # make sure it works with rank1 input tensor (batched)
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10,), axis=None)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # undo the learned affine transform before checking the statistics
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
@keras_test
def test_instancenorm_training_argument():
    """The ``training`` call argument should be accepted, and training
    should actually change the layer's output."""
    bn1 = normalization.InstanceNormalization(input_shape=(10,))
    x1 = Input(shape=(10,))
    y1 = bn1(x1, training=True)
    model1 = Model(x1, y1)
    np.random.seed(123)
    x = np.random.normal(loc=5.0, scale=10.0, size=(20, 10))
    output_a = model1.predict(x)
    model1.compile(loss='mse', optimizer='rmsprop')
    model1.fit(x, x, epochs=1, verbose=0)
    output_b = model1.predict(x)
    # training must have changed the output
    assert np.abs(np.sum(output_a - output_b)) > 0.1
    assert_allclose(output_b.mean(), 0.0, atol=1e-1)
    assert_allclose(output_b.std(), 1.0, atol=1e-1)
    # training=False must also be accepted at call time
    bn2 = normalization.InstanceNormalization(input_shape=(10,))
    x2 = Input(shape=(10,))
    bn2(x2, training=False)
@keras_test
def test_instancenorm_convnet():
    """InstanceNormalization over the channel axis of conv-style input."""
    model = Sequential()
    norm = normalization.InstanceNormalization(axis=1, input_shape=(3, 4, 4))
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # undo the per-channel affine transform before checking the statistics
    out -= np.reshape(K.eval(norm.beta), (1, 3, 1, 1))
    out /= np.reshape(K.eval(norm.gamma), (1, 3, 1, 1))
    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_test
def test_shared_instancenorm():
    '''Test that a IN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.InstanceNormalization(input_shape=(10,))
    x1 = Input(shape=(10,))
    bn(x1)
    x2 = Input(shape=(10,))
    y2 = bn(x2)
    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)
    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
@keras_test
def test_instancenorm_perinstancecorrectness():
    """Each instance in a batch must be normalized independently."""
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10,))
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # bimodal distribution
    z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
    x = np.append(z, y)
    x = np.reshape(x, (4, 10))
    # verbose=0 for consistency with every other test in this module
    # (was verbose=1, which spams the test log with training output).
    model.fit(x, x, epochs=4, batch_size=4, verbose=0)
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)
    # verify that each instance in the batch is individually normalized
    for i in range(4):
        instance = out[i]
        assert_allclose(instance.mean(), 0.0, atol=1e-1)
        assert_allclose(instance.std(), 1.0, atol=1e-1)
    # if each instance is normalized, so should the batch
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
@keras_test
def test_instancenorm_perchannel_correctness():
    """axis=None normalizes per-instance only; axis=1 also per-channel."""
    # have each channel with a different average and std
    x = np.random.normal(loc=5.0, scale=2.0, size=(10, 1, 4, 4))
    y = np.random.normal(loc=10.0, scale=3.0, size=(10, 1, 4, 4))
    z = np.random.normal(loc=-5.0, scale=5.0, size=(10, 1, 4, 4))
    batch = np.append(x, y, axis=1)
    batch = np.append(batch, z, axis=1)
    # this model does not provide a normalization axis
    model = Sequential()
    norm = normalization.InstanceNormalization(axis=None, input_shape=(3, 4, 4), center=False, scale=False)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    model.fit(batch, batch, epochs=4, verbose=0)
    out = model.predict(batch)
    # values will not be normalized per-channel
    for instance in range(10):
        for channel in range(3):
            activations = out[instance, channel]
            assert abs(activations.mean()) > 1e-2
            assert abs(activations.std() - 1.0) > 1e-6
        # but values are still normalized per-instance
        activations = out[instance]
        assert_allclose(activations.mean(), 0.0, atol=1e-1)
        assert_allclose(activations.std(), 1.0, atol=1e-1)
    # this model sets the channel as a normalization axis
    model = Sequential()
    norm = normalization.InstanceNormalization(axis=1, input_shape=(3, 4, 4), center=False, scale=False)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    model.fit(batch, batch, epochs=4, verbose=0)
    out = model.predict(batch)
    # values are now normalized per-channel
    for instance in range(10):
        for channel in range(3):
            activations = out[instance, channel]
            assert_allclose(activations.mean(), 0.0, atol=1e-1)
            assert_allclose(activations.std(), 1.0, atol=1e-1)
@keras_test
def basic_batchrenorm_test():
    """Smoke-test BatchRenormalization construction.

    NOTE(review): the name lacks the ``test_`` prefix, so pytest does not
    collect it automatically; renaming would change the public name.
    """
    # The redundant function-local `from keras import regularizers` was
    # removed; the module-level import already provides the name.
    layer_test(normalization.BatchRenormalization,
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchRenormalization,
               kwargs={'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
@keras_test
def test_batchrenorm_mode_0_or_2():
    """BatchRenormalization should normalize regardless of the ``training``
    argument value (True, False or unspecified)."""
    for training in [1, 0, None]:
        ip = Input(shape=(10,))
        norm_m0 = normalization.BatchRenormalization(momentum=0.8)
        out = norm_m0(ip, training=training)
        model = Model(ip, out)
        model.compile(loss='mse', optimizer='sgd')
        # centered on 5.0, std-dev 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, epochs=4, verbose=0)
        out = model.predict(X)
        # undo the learned affine transform before checking the statistics
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)
        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
@keras_test
def test_batchrenorm_mode_0_or_2_twice():
    # This is a regression test for issue #4881 with the old
    # batch normalization functions in the Theano backend.
    model = Sequential()
    model.add(normalization.BatchRenormalization(input_shape=(10, 5, 5), axis=1))
    model.add(normalization.BatchRenormalization(input_shape=(10, 5, 5), axis=1))
    model.compile(loss='mse', optimizer='sgd')
    X = np.random.normal(loc=5.0, scale=10.0, size=(20, 10, 5, 5))
    model.fit(X, X, epochs=1, verbose=0)
    model.predict(X)
@keras_test
def test_batchrenorm_mode_0_convnet():
    """BatchRenormalization over the channel axis of conv-style input."""
    model = Sequential()
    norm_m0 = normalization.BatchRenormalization(axis=1, input_shape=(3, 4, 4), momentum=0.8)
    model.add(norm_m0)
    model.compile(loss='mse', optimizer='sgd')
    # centered on 5.0, std-dev 10.0
    X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(X, X, epochs=4, verbose=0)
    out = model.predict(X)
    # undo the per-channel affine transform before checking the statistics
    out -= np.reshape(K.eval(norm_m0.beta), (1, 3, 1, 1))
    out /= np.reshape(K.eval(norm_m0.gamma), (1, 3, 1, 1))
    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_test
def test_shared_batchrenorm():
    '''Test that a BN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.BatchRenormalization(input_shape=(10,))
    x1 = Input(shape=(10,))
    bn(x1)
    x2 = Input(shape=(10,))
    y2 = bn(x2)
    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    # the layer registers 5 update ops (moving stats and renorm state)
    assert len(model.updates) == 5
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)
    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    # reuse must not duplicate the update ops
    assert len(model.updates) == 5
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
@keras_test
def test_batchrenorm_clipping_schedule():
    '''Test that the clipping schedule isn't fixed at r_max=1, d_max=0'''
    inp = Input(shape=(10,))
    bn = normalization.BatchRenormalization(t_delta=1.)
    out = bn(inp)
    model = Model(inp, out)
    model.compile('sgd', 'mse')
    x = np.random.normal(5, 10, size=(2, 10))
    y = np.random.normal(5, 10, size=(2, 10))
    # before training the clip bounds are at their initial values
    r_max, d_max = K.get_value(bn.r_max), K.get_value(bn.d_max)
    assert r_max == 1
    assert d_max == 0
    for i in range(10):
        model.train_on_batch(x, y)
    # after 10 steps the schedule should have moved them to ~(3, 5)
    r_max, d_max = K.get_value(bn.r_max), K.get_value(bn.d_max)
    assert_allclose([r_max, d_max], [3, 5], atol=1e-1)
@keras_test
def test_batchrenorm_get_config():
    '''Test that get_config works on a model with a batchrenorm layer.'''
    x = Input(shape=(10,))
    y = normalization.BatchRenormalization()(x)
    model = Model(x, y)
    model.get_config()
@keras_test
def test_basic_groupnorm():
    """Smoke-test GroupNormalization construction with various kwargs."""
    layer_test(normalization.GroupNormalization,
               kwargs={'groups': 2,
                       'epsilon': 0.1,
                       'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
    layer_test(normalization.GroupNormalization,
               kwargs={'groups': 2,
                       'epsilon': 0.1,
                       'axis': 1},
               input_shape=(3, 4, 2))
    layer_test(normalization.GroupNormalization,
               kwargs={'groups': 2,
                       'gamma_initializer': 'ones',
                       'beta_initializer': 'ones'},
               input_shape=(3, 4, 2, 4))
    # the no-center/no-scale variant is known to misbehave on Theano
    if K.backend() != 'theano':
        layer_test(normalization.GroupNormalization,
                   kwargs={'groups': 2,
                           'axis': 1,
                           'scale': False,
                           'center': False},
                   input_shape=(3, 4, 2, 4))
@keras_test
def test_groupnorm_correctness_1d():
    """GroupNormalization on rank-1 input should yield ~zero mean, unit std."""
    model = Sequential()
    norm = normalization.GroupNormalization(input_shape=(10,), groups=2)
    model.add(norm)
    model.compile(loss='mse', optimizer='rmsprop')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
    model.fit(x, x, epochs=5, verbose=0)
    out = model.predict(x)
    # undo the learned affine transform before checking the statistics
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
@keras_test
def test_groupnorm_correctness_2d():
    """GroupNormalization along axis=1 of rank-2 (plus batch) input."""
    model = Sequential()
    norm = normalization.GroupNormalization(axis=1, input_shape=(10, 6), groups=2)
    model.add(norm)
    model.compile(loss='mse', optimizer='rmsprop')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 6))
    model.fit(x, x, epochs=5, verbose=0)
    out = model.predict(x)
    # undo the per-axis affine transform before checking the statistics
    out -= np.reshape(K.eval(norm.beta), (1, 10, 1))
    out /= np.reshape(K.eval(norm.gamma), (1, 10, 1))
    assert_allclose(out.mean(axis=(0, 2)), 0.0, atol=1.1e-1)
    assert_allclose(out.std(axis=(0, 2)), 1.0, atol=1.1e-1)
def _assert_groupnorm_normalizes(norm):
    """Fit *norm* on random (1000, 10, 6) data and check that, after undoing
    the learned affine transform, each axis-1 slice has ~zero mean, unit std."""
    model = Sequential()
    model.add(norm)
    model.compile(loss='mse', optimizer='rmsprop')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 6))
    model.fit(x, x, epochs=5, verbose=0)
    out = model.predict(x)
    out -= np.reshape(K.eval(norm.beta), (1, 10, 1))
    out /= np.reshape(K.eval(norm.gamma), (1, 10, 1))
    assert_allclose(out.mean(axis=(0, 2)), 0.0, atol=1.1e-1)
    assert_allclose(out.std(axis=(0, 2)), 1.0, atol=1.1e-1)


@keras_test
def test_groupnorm_correctness_2d_different_groups():
    """GroupNormalization should normalize for group counts 2, 1 and 10.

    The original body repeated the same fit-and-check sequence verbatim
    three times; it is now factored into _assert_groupnorm_normalizes.
    """
    for groups in (2, 1, 10):
        _assert_groupnorm_normalizes(
            normalization.GroupNormalization(axis=1, input_shape=(10, 6),
                                             groups=groups))
@keras_test
def test_groupnorm_mode_twice():
    # This is a regression test for issue #4881 with the old
    # batch normalization functions in the Theano backend.
    model = Sequential()
    model.add(normalization.GroupNormalization(input_shape=(10, 5, 5), axis=1, groups=2))
    model.add(normalization.GroupNormalization(input_shape=(10, 5, 5), axis=1, groups=2))
    model.compile(loss='mse', optimizer='sgd')
    x = np.random.normal(loc=5.0, scale=10.0, size=(20, 10, 5, 5))
    model.fit(x, x, epochs=1, verbose=0)
    model.predict(x)
@keras_test
def test_groupnorm_convnet():
    """GroupNormalization over the channel axis of conv-style input."""
    model = Sequential()
    norm = normalization.GroupNormalization(axis=1, input_shape=(3, 4, 4), groups=3)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # undo the per-channel affine transform before checking the statistics
    out -= np.reshape(K.eval(norm.beta), (1, 3, 1, 1))
    out /= np.reshape(K.eval(norm.gamma), (1, 3, 1, 1))
    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_test
@pytest.mark.skipif((K.backend() == 'theano'),
                    reason='Bug with theano backend')
def test_groupnorm_convnet_no_center_no_scale():
    """Without center/scale there is no affine transform to undo; the raw
    output should already be normalized."""
    model = Sequential()
    norm = normalization.GroupNormalization(axis=-1, center=False, scale=False,
                                            input_shape=(3, 4, 4), groups=2)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # centered on 5.0, std-dev 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_test
def test_shared_groupnorm():
    '''Test that a GN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.GroupNormalization(input_shape=(10,), groups=2)
    x1 = Input(shape=(10,))
    bn(x1)
    x2 = Input(shape=(10,))
    y2 = bn(x2)
    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    # group norm keeps no moving statistics, hence no update ops
    assert len(model.updates) == 0
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)
    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    assert len(model.updates) == 0
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
@keras_test
def test_that_trainable_disables_updates():
    """Setting ``trainable = False`` (on the model or the layer) must freeze
    the layer: training then leaves its output unchanged."""
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))
    a = Input(shape=(4,))
    layer = normalization.GroupNormalization(input_shape=(4,), groups=2)
    b = layer(a)
    model = Model(a, b)
    model.trainable = False
    assert len(model.updates) == 0
    model.compile('sgd', 'mse')
    assert len(model.updates) == 0
    # frozen model: training must not change predictions
    x1 = model.predict(val_a)
    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert_allclose(x1, x2, atol=1e-7)
    # unfrozen model: training must change predictions
    model.trainable = True
    model.compile('sgd', 'mse')
    assert len(model.updates) == 0
    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert np.abs(np.sum(x1 - x2)) > 1e-5
    # freezing just the layer has the same effect as freezing the model
    layer.trainable = False
    model.compile('sgd', 'mse')
    assert len(model.updates) == 0
    x1 = model.predict(val_a)
    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert_allclose(x1, x2, atol=1e-7)
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == '__main__':
    pytest.main([__file__])
| 32.752182
| 107
| 0.629722
|
4a05c9c431be1226afea5ed0a249451881d71f32
| 1,784
|
py
|
Python
|
algorithms/q_learning_to_policy.py
|
punk95/Continual-Learning-With-Curiosity
|
af0c507040e1352beb8740b6b3a7849417fc879a
|
[
"MIT"
] | 2
|
2021-07-12T17:11:35.000Z
|
2021-07-13T05:56:30.000Z
|
algorithms/q_learning_to_policy.py
|
punk95/Continual-Learning-With-Curiosity
|
af0c507040e1352beb8740b6b3a7849417fc879a
|
[
"MIT"
] | null | null | null |
algorithms/q_learning_to_policy.py
|
punk95/Continual-Learning-With-Curiosity
|
af0c507040e1352beb8740b6b3a7849417fc879a
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from q_learning import Q_learning
class Q_learner_Policy():
    """Deterministic greedy policy derived from a learned Q-function.

    The "distribution" over actions places all probability mass on the
    argmax action of ``q_function`` for each state, so the probabilities
    returned below are one-hot vectors.
    """
    def __init__(self, q_function, nn_param):
        """
        q_function -- object exposing ``get_value(state, format="torch")``.
        nn_param   -- config object with ``device`` and ``action_dim``.
        """
        self.nn_params = nn_param
        self.Q = q_function

    def sample(self, state, format="torch"):
        """Return one-hot greedy actions for a batch of states.

        Also caches the batch size on ``self.batch_size``, which
        ``get_probability`` relies on.
        """
        state = torch.Tensor(state).to(self.nn_params.device)
        self.batch_size = state.size()[0]
        q_values = self.Q.get_value(state, format="torch")
        actions = q_values.max(1)[1]
        # Vectorized one-hot encoding (replaces the original per-row Python
        # loop): zeros everywhere, 1.0 at each row's greedy action index.
        sample_hot_vec = torch.zeros(
            self.batch_size, self.nn_params.action_dim,
            device=self.nn_params.device)
        sample_hot_vec.scatter_(1, actions.unsqueeze(1), 1.0)
        if format == "torch":
            return sample_hot_vec
        elif format == "numpy":
            return sample_hot_vec.cpu().detach().numpy()

    def get_probabilities(self, state, format="torch"):
        """Action 'probabilities', i.e. the one-hot greedy encoding."""
        # sample() already handles both output formats.
        return self.sample(state, format=format)

    def get_probability(self, state, action_no, format="torch"):
        """Probability of ``action_no`` in each state, shape (batch, 1)."""
        probabilities = self.sample(state)
        prob = torch.reshape(probabilities[:, action_no], shape=(self.batch_size, 1))
        if format == "torch":
            return prob
        else:
            return prob.cpu().detach().numpy()

    def get_log_probability(self, state, action_no, format="torch"):
        """Log-probability of ``action_no``; the 1e-8 term guards log(0)
        for non-greedy actions."""
        if format == "torch":
            return torch.log(1e-8 + self.get_probability(state, action_no, format="torch")).to(self.nn_params.device)
        elif format == "numpy":
            return np.log(1e-8 + self.get_probability(state, action_no, format="numpy"))
| 32.436364
| 117
| 0.609865
|
4a05ca4c0111bdc5c2ad722cf38f13bc9beb467c
| 1,685
|
py
|
Python
|
tests/tests_assemble_workflow/test_assemble_args.py
|
lobdelle/opensearch-build
|
d2d77fb4282cc3f3c0f938f0bfc83b640621a0f6
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_assemble_workflow/test_assemble_args.py
|
lobdelle/opensearch-build
|
d2d77fb4282cc3f3c0f938f0bfc83b640621a0f6
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_assemble_workflow/test_assemble_args.py
|
lobdelle/opensearch-build
|
d2d77fb4282cc3f3c0f938f0bfc83b640621a0f6
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import logging
import os
import unittest
from unittest.mock import patch
from assemble_workflow.assemble_args import AssembleArgs
class TestAssembleArgs(unittest.TestCase):
    """Unit tests for AssembleArgs command-line parsing."""

    ASSEMBLE_PY = "./src/run_assembly.py"

    # Absolute path of a real manifest checked into the repository.
    OPENSEARCH_MANIFEST = os.path.realpath(
        os.path.join(
            os.path.dirname(__file__),
            "..",
            "..",
            "manifests",
            "1.1.0",
            "opensearch-1.1.0.yml",
        )
    )

    @patch("argparse._sys.argv", [ASSEMBLE_PY, OPENSEARCH_MANIFEST])
    def test_manifest(self):
        self.assertEqual(AssembleArgs().manifest.name, TestAssembleArgs.OPENSEARCH_MANIFEST)

    @patch("argparse._sys.argv", [ASSEMBLE_PY, OPENSEARCH_MANIFEST])
    def test_keep_default(self):
        self.assertFalse(AssembleArgs().keep)

    @patch("argparse._sys.argv", [ASSEMBLE_PY, OPENSEARCH_MANIFEST, "--keep"])
    def test_keep_true(self):
        self.assertTrue(AssembleArgs().keep)

    @patch("argparse._sys.argv", [ASSEMBLE_PY, OPENSEARCH_MANIFEST])
    def test_verbose_default(self):
        self.assertEqual(AssembleArgs().logging_level, logging.INFO)

    @patch("argparse._sys.argv", [ASSEMBLE_PY, OPENSEARCH_MANIFEST, "--verbose"])
    def test_verbose_true(self):
        # Bug fix: was assertTrue(a, b), which treats the second argument as
        # the failure *message* and so never compared against DEBUG.
        self.assertEqual(AssembleArgs().logging_level, logging.DEBUG)

    @patch("argparse._sys.argv", [ASSEMBLE_PY, OPENSEARCH_MANIFEST, "--base-url", "url"])
    def test_base_url(self):
        self.assertEqual(AssembleArgs().base_url, "url")
| 31.792453
| 92
| 0.688427
|
4a05caebb525446303f174113a466a00f0d5b800
| 7,279
|
py
|
Python
|
d3rlpy/algos/torch/crr_impl.py
|
aiueola/d3rlpy
|
6058d4dab7484d1d38103210081711f6e05b0c1e
|
[
"MIT"
] | null | null | null |
d3rlpy/algos/torch/crr_impl.py
|
aiueola/d3rlpy
|
6058d4dab7484d1d38103210081711f6e05b0c1e
|
[
"MIT"
] | null | null | null |
d3rlpy/algos/torch/crr_impl.py
|
aiueola/d3rlpy
|
6058d4dab7484d1d38103210081711f6e05b0c1e
|
[
"MIT"
] | null | null | null |
from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from ...gpu import Device
from ...models.builders import create_squashed_normal_policy
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import SquashedNormalPolicy, squash_action
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, hard_sync
from .ddpg_impl import DDPGBaseImpl
class CRRImpl(DDPGBaseImpl):
_beta: float
_n_action_samples: int
_advantage_type: str
_weight_type: str
_max_weight: float
_policy: Optional[SquashedNormalPolicy]
_targ_policy: Optional[SquashedNormalPolicy]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
beta: float,
n_action_samples: int,
advantage_type: str,
weight_type: str,
max_weight: float,
n_critics: int,
tau: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._beta = beta
self._n_action_samples = n_action_samples
self._advantage_type = advantage_type
self._weight_type = weight_type
self._max_weight = max_weight
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(batch.observations)
# unnormalize action via inverse tanh function
clipped_actions = batch.actions.clamp(-0.999999, 0.999999)
unnormalized_act_t = torch.atanh(clipped_actions)
# compute log probability
_, log_probs = squash_action(dist, unnormalized_act_t)
weight = self._compute_weight(batch.observations, batch.actions)
return -(log_probs * weight).mean()
def _compute_weight(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
advantages = self._compute_advantage(obs_t, act_t)
if self._weight_type == "binary":
return (advantages > 0.0).float()
elif self._weight_type == "exp":
return (advantages / self._beta).exp().clamp(0.0, self._max_weight)
raise ValueError(f"invalid weight type: {self._weight_type}.")
def _compute_advantage(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._q_func is not None
assert self._policy is not None
with torch.no_grad():
batch_size = obs_t.shape[0]
# (batch_size, N, action)
policy_actions = self._policy.sample_n(
obs_t, self._n_action_samples
)
flat_actions = policy_actions.reshape(-1, self._action_size)
# repeat observation
# (batch_size, obs_size) -> (batch_size, 1, obs_size)
reshaped_obs_t = obs_t.view(batch_size, 1, *obs_t.shape[1:])
# (batch_sie, 1, obs_size) -> (batch_size, N, obs_size)
repeated_obs_t = reshaped_obs_t.expand(
batch_size, self._n_action_samples, *obs_t.shape[1:]
)
# (batch_size, N, obs_size) -> (batch_size * N, obs_size)
flat_obs_t = repeated_obs_t.reshape(-1, *obs_t.shape[1:])
flat_values = self._q_func(flat_obs_t, flat_actions)
reshaped_values = flat_values.view(obs_t.shape[0], -1, 1)
if self._advantage_type == "mean":
values = reshaped_values.mean(dim=1)
elif self._advantage_type == "max":
values = reshaped_values.max(dim=1).values
else:
raise ValueError(
f"invalid advantage type: {self._advantage_type}."
)
return self._q_func(obs_t, act_t) - values
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_q_func is not None
assert self._targ_policy is not None
with torch.no_grad():
action = self._targ_policy.sample(batch.next_observations)
return self._targ_q_func.compute_target(
batch.next_observations,
action.clamp(-1.0, 1.0),
reduction="min",
)
    def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
        """Select an action per observation via critic-weighted re-sampling
        (CWP): sample N candidate actions from the policy, score them with
        the Q-function, and draw one per observation from a softmax over
        the scores.

        Returns a tensor of shape (batch_size, action_size).
        """
        assert self._policy is not None
        assert self._q_func is not None
        # compute CWP
        actions = self._policy.onnx_safe_sample_n(x, self._n_action_samples)
        # (batch_size, N, action_size) -> (batch_size * N, action_size)
        flat_actions = actions.reshape(-1, self._action_size)
        # repeat observation
        # (batch_size, obs_size) -> (batch_size, 1, obs_size)
        reshaped_obs_t = x.view(x.shape[0], 1, *x.shape[1:])
        # (batch_size, 1, obs_size) -> (batch_size, N, obs_size)
        repeated_obs_t = reshaped_obs_t.expand(
            x.shape[0], self._n_action_samples, *x.shape[1:]
        )
        # (batch_size, N, obs_size) -> (batch_size * N, obs_size)
        flat_obs_t = repeated_obs_t.reshape(-1, *x.shape[1:])
        # (batch_size * N, 1)
        flat_values = self._q_func(flat_obs_t, flat_actions)
        # (batch_size * N, 1) -> (batch_size, N)
        reshaped_values = flat_values.view(x.shape[0], -1)
        # re-sampling: one candidate index per observation, weighted by Q
        probs = F.softmax(reshaped_values, dim=1)
        indices = torch.multinomial(probs, 1, replacement=True)
        return actions[torch.arange(x.shape[0]), indices.view(-1)]
def sync_critic_target(self) -> None:
assert self._targ_q_func is not None
assert self._q_func is not None
hard_sync(self._targ_q_func, self._q_func)
def sync_actor_target(self) -> None:
assert self._targ_policy is not None
assert self._policy is not None
hard_sync(self._targ_policy, self._policy)
| 36.949239
| 79
| 0.638137
|
4a05cb27bf9419408a7b9d26578c37769c83d0df
| 9,739
|
py
|
Python
|
config/settings/base.py
|
parkgeonhu/vople-server
|
55d1dd5f37800266878f47f7d3bc643ef9e99379
|
[
"MIT"
] | 1
|
2020-06-16T09:09:33.000Z
|
2020-06-16T09:09:33.000Z
|
config/settings/base.py
|
parkgeonhu/vople-server
|
55d1dd5f37800266878f47f7d3bc643ef9e99379
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
parkgeonhu/vople-server
|
55d1dd5f37800266878f47f7d3bc643ef9e99379
|
[
"MIT"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (vople/config/settings/base.py - 3 = vople/)
APPS_DIR = ROOT_DIR.path('vople')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', True)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'Asia/Seoul'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'ko-kr'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
#DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
]
LOCAL_APPS = [
'vople.users.apps.UsersAppConfig',
'vople.sounds.apps.SoundsConfig',
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'vople.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
#LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
#LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = r'^admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Jaemin Park""", 'sjsssjs29@gmail.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = False
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'none'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'vople.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'vople.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
REST_USE_JWT = True
ACCOUNT_LOGOUT_ON_GET = True
| 38.494071
| 98
| 0.629428
|
4a05cb2e7c10b14bad5b4aad177b25f6905c8163
| 52,276
|
py
|
Python
|
lib/requests/test_requests.py
|
clayz/crazy-quiz-web
|
7601809ad521d95ae251a026f171b9ec6939c55f
|
[
"Apache-2.0"
] | 3
|
2018-03-27T16:20:47.000Z
|
2019-04-15T12:18:25.000Z
|
lib/requests/test_requests.py
|
clayz/crazy-quiz-web
|
7601809ad521d95ae251a026f171b9ec6939c55f
|
[
"Apache-2.0"
] | null | null | null |
lib/requests/test_requests.py
|
clayz/crazy-quiz-web
|
7601809ad521d95ae251a026f171b9ec6939c55f
|
[
"Apache-2.0"
] | 2
|
2016-03-18T05:30:32.000Z
|
2021-05-26T20:06:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Requests."""
from __future__ import division
import json
import os
import pickle
import unittest
import collections
import io
import requests
import pytest
from requests.adapters import HTTPAdapter
from requests.auth import HTTPDigestAuth, _basic_auth_str
from requests.compat import (
Morsel, cookielib, getproxies, str, urljoin, urlparse, is_py3, builtin_str)
from requests.cookies import cookiejar_from_dict, morsel_to_cookie
from requests.exceptions import (ConnectionError, ConnectTimeout,
InvalidSchema, InvalidURL, MissingSchema,
ReadTimeout, Timeout)
from requests.models import PreparedRequest
from requests.structures import CaseInsensitiveDict
from requests.sessions import SessionRedirectMixin
from requests.models import urlencode
from requests.hooks import default_hooks
try:
import StringIO
except ImportError:
import io as StringIO
if is_py3:
    def u(s):
        # Python 3: string literals are already unicode — identity.
        return s
else:
    def u(s):
        # Python 2: decode escape sequences in byte-string literals to unicode.
        return s.decode('unicode-escape')
# Requests to this URL should always fail with a connection timeout (nothing
# listening on that port; 10.255.255.1 is a non-routable private address)
TARPIT = "http://10.255.255.1"
# Base URL of the httpbin test service; override via HTTPBIN_URL env var.
HTTPBIN = os.environ.get('HTTPBIN_URL', 'http://httpbin.org/')
# Issue #1483: Make sure the URL always has a trailing slash
HTTPBIN = HTTPBIN.rstrip('/') + '/'
def httpbin(*suffix):
    """Build the full URL for an HTTPBIN resource from path components."""
    path = '/'.join(suffix)
    return urljoin(HTTPBIN, path)
class RequestsTestCase(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
"""Create simple data set with headers."""
pass
def tearDown(self):
"""Teardown."""
pass
def test_entry_points(self):
requests.session
requests.session().get
requests.session().head
requests.get
requests.head
requests.put
requests.patch
requests.post
def test_invalid_url(self):
with pytest.raises(MissingSchema):
requests.get('hiwpefhipowhefopw')
with pytest.raises(InvalidSchema):
requests.get('localhost:3128')
with pytest.raises(InvalidSchema):
requests.get('localhost.localdomain:3128/')
with pytest.raises(InvalidSchema):
requests.get('10.122.1.1:3128/')
with pytest.raises(InvalidURL):
requests.get('http://')
def test_basic_building(self):
req = requests.Request()
req.url = 'http://kennethreitz.org/'
req.data = {'life': '42'}
pr = req.prepare()
assert pr.url == req.url
assert pr.body == 'life=42'
def test_no_content_length(self):
get_req = requests.Request('GET', httpbin('get')).prepare()
assert 'Content-Length' not in get_req.headers
head_req = requests.Request('HEAD', httpbin('head')).prepare()
assert 'Content-Length' not in head_req.headers
def test_override_content_length(self):
headers = {
'Content-Length': 'not zero'
}
r = requests.Request('POST', httpbin('post'), headers=headers).prepare()
assert 'Content-Length' in r.headers
assert r.headers['Content-Length'] == 'not zero'
def test_path_is_not_double_encoded(self):
request = requests.Request('GET', "http://0.0.0.0/get/test case").prepare()
assert request.path_url == '/get/test%20case'
def test_params_are_added_before_fragment(self):
request = requests.Request('GET',
"http://example.com/path#fragment", params={"a": "b"}).prepare()
assert request.url == "http://example.com/path?a=b#fragment"
request = requests.Request('GET',
"http://example.com/path?key=value#fragment", params={"a": "b"}).prepare()
assert request.url == "http://example.com/path?key=value&a=b#fragment"
def test_mixed_case_scheme_acceptable(self):
s = requests.Session()
s.proxies = getproxies()
parts = urlparse(httpbin('get'))
schemes = ['http://', 'HTTP://', 'hTTp://', 'HttP://',
'https://', 'HTTPS://', 'hTTps://', 'HttPs://']
for scheme in schemes:
url = scheme + parts.netloc + parts.path
r = requests.Request('GET', url)
r = s.send(r.prepare())
assert r.status_code == 200, 'failed for scheme {0}'.format(scheme)
def test_HTTP_200_OK_GET_ALTERNATIVE(self):
r = requests.Request('GET', httpbin('get'))
s = requests.Session()
s.proxies = getproxies()
r = s.send(r.prepare())
assert r.status_code == 200
def test_HTTP_302_ALLOW_REDIRECT_GET(self):
r = requests.get(httpbin('redirect', '1'))
assert r.status_code == 200
assert r.history[0].status_code == 302
assert r.history[0].is_redirect
# def test_HTTP_302_ALLOW_REDIRECT_POST(self):
# r = requests.post(httpbin('status', '302'), data={'some': 'data'})
# self.assertEqual(r.status_code, 200)
def test_HTTP_200_OK_GET_WITH_PARAMS(self):
heads = {'User-agent': 'Mozilla/5.0'}
r = requests.get(httpbin('user-agent'), headers=heads)
assert heads['User-agent'] in r.text
assert r.status_code == 200
def test_HTTP_200_OK_GET_WITH_MIXED_PARAMS(self):
heads = {'User-agent': 'Mozilla/5.0'}
r = requests.get(httpbin('get') + '?test=true', params={'q': 'test'}, headers=heads)
assert r.status_code == 200
def test_set_cookie_on_301(self):
s = requests.session()
url = httpbin('cookies/set?foo=bar')
s.get(url)
assert s.cookies['foo'] == 'bar'
def test_cookie_sent_on_redirect(self):
s = requests.session()
s.get(httpbin('cookies/set?foo=bar'))
r = s.get(httpbin('redirect/1')) # redirects to httpbin('get')
assert 'Cookie' in r.json()['headers']
def test_cookie_removed_on_expire(self):
s = requests.session()
s.get(httpbin('cookies/set?foo=bar'))
assert s.cookies['foo'] == 'bar'
s.get(
httpbin('response-headers'),
params={
'Set-Cookie':
'foo=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT'
}
)
assert 'foo' not in s.cookies
def test_cookie_quote_wrapped(self):
s = requests.session()
s.get(httpbin('cookies/set?foo="bar:baz"'))
assert s.cookies['foo'] == '"bar:baz"'
def test_cookie_persists_via_api(self):
s = requests.session()
r = s.get(httpbin('redirect/1'), cookies={'foo': 'bar'})
assert 'foo' in r.request.headers['Cookie']
assert 'foo' in r.history[0].request.headers['Cookie']
def test_request_cookie_overrides_session_cookie(self):
s = requests.session()
s.cookies['foo'] = 'bar'
r = s.get(httpbin('cookies'), cookies={'foo': 'baz'})
assert r.json()['cookies']['foo'] == 'baz'
# Session cookie should not be modified
assert s.cookies['foo'] == 'bar'
def test_request_cookies_not_persisted(self):
s = requests.session()
s.get(httpbin('cookies'), cookies={'foo': 'baz'})
# Sending a request with cookies should not add cookies to the session
assert not s.cookies
def test_generic_cookiejar_works(self):
cj = cookielib.CookieJar()
cookiejar_from_dict({'foo': 'bar'}, cj)
s = requests.session()
s.cookies = cj
r = s.get(httpbin('cookies'))
# Make sure the cookie was sent
assert r.json()['cookies']['foo'] == 'bar'
# Make sure the session cj is still the custom one
assert s.cookies is cj
def test_param_cookiejar_works(self):
cj = cookielib.CookieJar()
cookiejar_from_dict({'foo': 'bar'}, cj)
s = requests.session()
r = s.get(httpbin('cookies'), cookies=cj)
# Make sure the cookie was sent
assert r.json()['cookies']['foo'] == 'bar'
def test_requests_in_history_are_not_overridden(self):
resp = requests.get(httpbin('redirect/3'))
urls = [r.url for r in resp.history]
req_urls = [r.request.url for r in resp.history]
assert urls == req_urls
def test_history_is_always_a_list(self):
"""
Show that even with redirects, Response.history is always a list.
"""
resp = requests.get(httpbin('get'))
assert isinstance(resp.history, list)
resp = requests.get(httpbin('redirect/1'))
assert isinstance(resp.history, list)
assert not isinstance(resp.history, tuple)
def test_headers_on_session_with_None_are_not_sent(self):
"""Do not send headers in Session.headers with None values."""
ses = requests.Session()
ses.headers['Accept-Encoding'] = None
req = requests.Request('GET', 'http://httpbin.org/get')
prep = ses.prepare_request(req)
assert 'Accept-Encoding' not in prep.headers
def test_user_agent_transfers(self):
heads = {
'User-agent': 'Mozilla/5.0 (github.com/kennethreitz/requests)'
}
r = requests.get(httpbin('user-agent'), headers=heads)
assert heads['User-agent'] in r.text
heads = {
'user-agent': 'Mozilla/5.0 (github.com/kennethreitz/requests)'
}
r = requests.get(httpbin('user-agent'), headers=heads)
assert heads['user-agent'] in r.text
def test_HTTP_200_OK_HEAD(self):
r = requests.head(httpbin('get'))
assert r.status_code == 200
def test_HTTP_200_OK_PUT(self):
r = requests.put(httpbin('put'))
assert r.status_code == 200
def test_BASICAUTH_TUPLE_HTTP_200_OK_GET(self):
auth = ('user', 'pass')
url = httpbin('basic-auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert r.status_code == 200
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = auth
r = s.get(url)
assert r.status_code == 200
def test_connection_error(self):
"""Connecting to an unknown domain should raise a ConnectionError"""
with pytest.raises(ConnectionError):
requests.get("http://fooobarbangbazbing.httpbin.org")
with pytest.raises(ConnectionError):
requests.get("http://httpbin.org:1")
def test_basicauth_with_netrc(self):
auth = ('user', 'pass')
wrong_auth = ('wronguser', 'wrongpass')
url = httpbin('basic-auth', 'user', 'pass')
def get_netrc_auth_mock(url):
return auth
requests.sessions.get_netrc_auth = get_netrc_auth_mock
# Should use netrc and work.
r = requests.get(url)
assert r.status_code == 200
# Given auth should override and fail.
r = requests.get(url, auth=wrong_auth)
assert r.status_code == 401
s = requests.session()
# Should use netrc and work.
r = s.get(url)
assert r.status_code == 200
# Given auth should override and fail.
s.auth = wrong_auth
r = s.get(url)
assert r.status_code == 401
def test_DIGEST_HTTP_200_OK_GET(self):
auth = HTTPDigestAuth('user', 'pass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert r.status_code == 200
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = HTTPDigestAuth('user', 'pass')
r = s.get(url)
assert r.status_code == 200
def test_DIGEST_AUTH_RETURNS_COOKIE(self):
url = httpbin('digest-auth', 'auth', 'user', 'pass')
auth = HTTPDigestAuth('user', 'pass')
r = requests.get(url)
assert r.cookies['fake'] == 'fake_value'
r = requests.get(url, auth=auth)
assert r.status_code == 200
def test_DIGEST_AUTH_SETS_SESSION_COOKIES(self):
url = httpbin('digest-auth', 'auth', 'user', 'pass')
auth = HTTPDigestAuth('user', 'pass')
s = requests.Session()
s.get(url, auth=auth)
assert s.cookies['fake'] == 'fake_value'
def test_DIGEST_STREAM(self):
auth = HTTPDigestAuth('user', 'pass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth, stream=True)
assert r.raw.read() != b''
r = requests.get(url, auth=auth, stream=False)
assert r.raw.read() == b''
def test_DIGESTAUTH_WRONG_HTTP_401_GET(self):
auth = HTTPDigestAuth('user', 'wrongpass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert r.status_code == 401
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = auth
r = s.get(url)
assert r.status_code == 401
def test_DIGESTAUTH_QUOTES_QOP_VALUE(self):
auth = HTTPDigestAuth('user', 'pass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert '"auth"' in r.request.headers['Authorization']
def test_POSTBIN_GET_POST_FILES(self):
url = httpbin('post')
post1 = requests.post(url).raise_for_status()
post1 = requests.post(url, data={'some': 'data'})
assert post1.status_code == 200
with open('requirements.txt') as f:
post2 = requests.post(url, files={'some': f})
assert post2.status_code == 200
post4 = requests.post(url, data='[{"some": "json"}]')
assert post4.status_code == 200
with pytest.raises(ValueError):
requests.post(url, files=['bad file data'])
def test_POSTBIN_GET_POST_FILES_WITH_DATA(self):
url = httpbin('post')
post1 = requests.post(url).raise_for_status()
post1 = requests.post(url, data={'some': 'data'})
assert post1.status_code == 200
with open('requirements.txt') as f:
post2 = requests.post(url,
data={'some': 'data'}, files={'some': f})
assert post2.status_code == 200
post4 = requests.post(url, data='[{"some": "json"}]')
assert post4.status_code == 200
with pytest.raises(ValueError):
requests.post(url, files=['bad file data'])
def test_conflicting_post_params(self):
url = httpbin('post')
with open('requirements.txt') as f:
pytest.raises(ValueError, "requests.post(url, data='[{\"some\": \"data\"}]', files={'some': f})")
pytest.raises(ValueError, "requests.post(url, data=u('[{\"some\": \"data\"}]'), files={'some': f})")
def test_request_ok_set(self):
r = requests.get(httpbin('status', '404'))
assert not r.ok
def test_status_raising(self):
r = requests.get(httpbin('status', '404'))
with pytest.raises(requests.exceptions.HTTPError):
r.raise_for_status()
r = requests.get(httpbin('status', '500'))
assert not r.ok
def test_decompress_gzip(self):
r = requests.get(httpbin('gzip'))
r.content.decode('ascii')
def test_unicode_get(self):
url = httpbin('/get')
requests.get(url, params={'foo': 'føø'})
requests.get(url, params={'føø': 'føø'})
requests.get(url, params={'føø': 'føø'})
requests.get(url, params={'foo': 'foo'})
requests.get(httpbin('ø'), params={'foo': 'foo'})
def test_unicode_header_name(self):
requests.put(
httpbin('put'),
headers={str('Content-Type'): 'application/octet-stream'},
data='\xff') # compat.str is unicode.
def test_pyopenssl_redirect(self):
requests.get('https://httpbin.org/status/301')
def test_urlencoded_get_query_multivalued_param(self):
r = requests.get(httpbin('get'), params=dict(test=['foo', 'baz']))
assert r.status_code == 200
assert r.url == httpbin('get?test=foo&test=baz')
def test_different_encodings_dont_break_post(self):
r = requests.post(httpbin('post'),
data={'stuff': json.dumps({'a': 123})},
params={'blah': 'asdf1234'},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
def test_unicode_multipart_post(self):
r = requests.post(httpbin('post'),
data={'stuff': u('ëlïxr')},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
r = requests.post(httpbin('post'),
data={'stuff': u('ëlïxr').encode('utf-8')},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
r = requests.post(httpbin('post'),
data={'stuff': 'elixr'},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
r = requests.post(httpbin('post'),
data={'stuff': 'elixr'.encode('utf-8')},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
def test_unicode_multipart_post_fieldnames(self):
filename = os.path.splitext(__file__)[0] + '.py'
r = requests.Request(method='POST',
url=httpbin('post'),
data={'stuff'.encode('utf-8'): 'elixr'},
files={'file': ('test_requests.py',
open(filename, 'rb'))})
prep = r.prepare()
assert b'name="stuff"' in prep.body
assert b'name="b\'stuff\'"' not in prep.body
def test_unicode_method_name(self):
files = {'file': open('test_requests.py', 'rb')}
r = requests.request(
method=u('POST'), url=httpbin('post'), files=files)
assert r.status_code == 200
def test_custom_content_type(self):
r = requests.post(
httpbin('post'),
data={'stuff': json.dumps({'a': 123})},
files={'file1': ('test_requests.py', open(__file__, 'rb')),
'file2': ('test_requests', open(__file__, 'rb'),
'text/py-content-type')})
assert r.status_code == 200
assert b"text/py-content-type" in r.request.body
def test_hook_receives_request_arguments(self):
def hook(resp, **kwargs):
assert resp is not None
assert kwargs != {}
requests.Request('GET', HTTPBIN, hooks={'response': hook})
def test_session_hooks_are_used_with_no_request_hooks(self):
hook = lambda x, *args, **kwargs: x
s = requests.Session()
s.hooks['response'].append(hook)
r = requests.Request('GET', HTTPBIN)
prep = s.prepare_request(r)
assert prep.hooks['response'] != []
assert prep.hooks['response'] == [hook]
def test_session_hooks_are_overriden_by_request_hooks(self):
hook1 = lambda x, *args, **kwargs: x
hook2 = lambda x, *args, **kwargs: x
assert hook1 is not hook2
s = requests.Session()
s.hooks['response'].append(hook2)
r = requests.Request('GET', HTTPBIN, hooks={'response': [hook1]})
prep = s.prepare_request(r)
assert prep.hooks['response'] == [hook1]
def test_prepared_request_hook(self):
def hook(resp, **kwargs):
resp.hook_working = True
return resp
req = requests.Request('GET', HTTPBIN, hooks={'response': hook})
prep = req.prepare()
s = requests.Session()
s.proxies = getproxies()
resp = s.send(prep)
assert hasattr(resp, 'hook_working')
def test_prepared_from_session(self):
class DummyAuth(requests.auth.AuthBase):
def __call__(self, r):
r.headers['Dummy-Auth-Test'] = 'dummy-auth-test-ok'
return r
req = requests.Request('GET', httpbin('headers'))
assert not req.auth
s = requests.Session()
s.auth = DummyAuth()
prep = s.prepare_request(req)
resp = s.send(prep)
assert resp.json()['headers'][
'Dummy-Auth-Test'] == 'dummy-auth-test-ok'
def test_prepare_request_with_bytestring_url(self):
req = requests.Request('GET', b'https://httpbin.org/')
s = requests.Session()
prep = s.prepare_request(req)
assert prep.url == "https://httpbin.org/"
def test_links(self):
r = requests.Response()
r.headers = {
'cache-control': 'public, max-age=60, s-maxage=60',
'connection': 'keep-alive',
'content-encoding': 'gzip',
'content-type': 'application/json; charset=utf-8',
'date': 'Sat, 26 Jan 2013 16:47:56 GMT',
'etag': '"6ff6a73c0e446c1f61614769e3ceb778"',
'last-modified': 'Sat, 26 Jan 2013 16:22:39 GMT',
'link': ('<https://api.github.com/users/kennethreitz/repos?'
'page=2&per_page=10>; rel="next", <https://api.github.'
'com/users/kennethreitz/repos?page=7&per_page=10>; '
' rel="last"'),
'server': 'GitHub.com',
'status': '200 OK',
'vary': 'Accept',
'x-content-type-options': 'nosniff',
'x-github-media-type': 'github.beta',
'x-ratelimit-limit': '60',
'x-ratelimit-remaining': '57'
}
assert r.links['next']['rel'] == 'next'
def test_cookie_parameters(self):
key = 'some_cookie'
value = 'some_value'
secure = True
domain = 'test.com'
rest = {'HttpOnly': True}
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value, secure=secure, domain=domain, rest=rest)
assert len(jar) == 1
assert 'some_cookie' in jar
cookie = list(jar)[0]
assert cookie.secure == secure
assert cookie.domain == domain
assert cookie._rest['HttpOnly'] == rest['HttpOnly']
def test_cookie_as_dict_keeps_len(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
d1 = dict(jar)
d2 = dict(jar.iteritems())
d3 = dict(jar.items())
assert len(jar) == 2
assert len(d1) == 2
assert len(d2) == 2
assert len(d3) == 2
def test_cookie_as_dict_keeps_items(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
d1 = dict(jar)
d2 = dict(jar.iteritems())
d3 = dict(jar.items())
assert d1['some_cookie'] == 'some_value'
assert d2['some_cookie'] == 'some_value'
assert d3['some_cookie1'] == 'some_value1'
def test_cookie_as_dict_keys(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
keys = jar.keys()
assert keys == list(keys)
# make sure one can use keys multiple times
assert list(keys) == list(keys)
def test_cookie_as_dict_values(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
values = jar.values()
assert values == list(values)
# make sure one can use values multiple times
assert list(values) == list(values)
def test_cookie_as_dict_items(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
items = jar.items()
assert items == list(items)
# make sure one can use items multiple times
assert list(items) == list(items)
def test_time_elapsed_blank(self):
r = requests.get(httpbin('get'))
td = r.elapsed
total_seconds = ((td.microseconds + (td.seconds + td.days * 24 * 3600)
* 10**6) / 10**6)
assert total_seconds > 0.0
def test_response_is_iterable(self):
    """A Response wrapping a raw file-like object can be iterated."""
    r = requests.Response()
    io = StringIO.StringIO('abc')
    read_ = io.read

    def read_mock(amt, decode_content=None):
        # Mimic the urllib3 read() signature that iter_content expects.
        return read_(amt)
    setattr(io, 'read', read_mock)
    r.raw = io
    assert next(iter(r))
    io.close()
def test_response_decode_unicode(self):
    """
    When called with decode_unicode, Response.iter_content should always
    return unicode.
    """
    # Case 1: content already consumed and cached on the response.
    r = requests.Response()
    r._content_consumed = True
    r._content = b'the content'
    r.encoding = 'ascii'
    chunks = r.iter_content(decode_unicode=True)
    assert all(isinstance(chunk, str) for chunk in chunks)

    # also for streaming
    r = requests.Response()
    r.raw = io.BytesIO(b'the content')
    r.encoding = 'ascii'
    chunks = r.iter_content(decode_unicode=True)
    assert all(isinstance(chunk, str) for chunk in chunks)
def test_request_and_response_are_pickleable(self):
    """Both PreparedRequest and Response round-trip through pickle."""
    r = requests.get(httpbin('get'))

    # verify we can pickle the original request
    assert pickle.loads(pickle.dumps(r.request))

    # verify we can pickle the response and that we have access to
    # the original request.
    pr = pickle.loads(pickle.dumps(r))
    assert r.request.url == pr.request.url
    assert r.request.headers == pr.request.headers
def test_get_auth_from_url(self):
    """Plain user:pass credentials are extracted from a URL."""
    url = 'http://user:pass@complex.url.com/path?query=yes'
    assert ('user', 'pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_encoded_spaces(self):
    """%20 in the password is decoded to a space."""
    url = 'http://user:pass%20pass@complex.url.com/path?query=yes'
    assert ('user', 'pass pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_not_encoded_spaces(self):
    """A literal (unencoded) space in the password survives extraction."""
    url = 'http://user:pass pass@complex.url.com/path?query=yes'
    assert ('user', 'pass pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_percent_chars(self):
    """%25 in the username is decoded to a percent sign."""
    url = 'http://user%25user:pass@complex.url.com/path?query=yes'
    assert ('user%user', 'pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_encoded_hashes(self):
    """%23 in the password is decoded to a hash character."""
    url = 'http://user:pass%23pass@complex.url.com/path?query=yes'
    assert ('user', 'pass#pass') == requests.utils.get_auth_from_url(url)
def test_cannot_send_unprepared_requests(self):
    """Session.send rejects a Request that was never prepare()d."""
    r = requests.Request(url=HTTPBIN)
    with pytest.raises(ValueError):
        requests.Session().send(r)
def test_http_error(self):
    """HTTPError stores the optional message and response it is built with."""
    error = requests.exceptions.HTTPError()
    assert not error.response
    response = requests.Response()
    error = requests.exceptions.HTTPError(response=response)
    assert error.response == response
    error = requests.exceptions.HTTPError('message', response=response)
    assert str(error) == 'message'
    assert error.response == response
def test_session_pickling(self):
    """A Session still works after a pickle round-trip."""
    r = requests.Request('GET', httpbin('get'))
    s = requests.Session()
    s = pickle.loads(pickle.dumps(s))
    s.proxies = getproxies()
    r = s.send(r.prepare())
    assert r.status_code == 200
def test_fixes_1329(self):
    """
    Ensure that header updates are done case-insensitively.
    """
    s = requests.Session()
    s.headers.update({'ACCEPT': 'BOGUS'})
    # The second update must overwrite the first despite the case change.
    s.headers.update({'accept': 'application/json'})
    r = s.get(httpbin('get'))
    headers = r.request.headers
    assert headers['accept'] == 'application/json'
    assert headers['Accept'] == 'application/json'
    assert headers['ACCEPT'] == 'application/json'
def test_uppercase_scheme_redirect(self):
    """Redirecting to an uppercase-scheme URL ('HTTP://') still succeeds."""
    parts = urlparse(httpbin('html'))
    url = "HTTP://" + parts.netloc + parts.path
    r = requests.get(httpbin('redirect-to'), params={'url': url})
    assert r.status_code == 200
    assert r.url.lower() == url.lower()
def test_transport_adapter_ordering(self):
    """Mounted adapters are kept sorted longest-prefix-first so the most
    specific mount wins during adapter lookup."""
    s = requests.Session()
    order = ['https://', 'http://']
    assert order == list(s.adapters)
    s.mount('http://git', HTTPAdapter())
    s.mount('http://github', HTTPAdapter())
    s.mount('http://github.com', HTTPAdapter())
    s.mount('http://github.com/about/', HTTPAdapter())
    order = [
        'http://github.com/about/',
        'http://github.com',
        'http://github',
        'http://git',
        'https://',
        'http://',
    ]
    assert order == list(s.adapters)
    # Interleave a second family of prefixes; ordering stays by length.
    s.mount('http://gittip', HTTPAdapter())
    s.mount('http://gittip.com', HTTPAdapter())
    s.mount('http://gittip.com/about/', HTTPAdapter())
    order = [
        'http://github.com/about/',
        'http://gittip.com/about/',
        'http://github.com',
        'http://gittip.com',
        'http://github',
        'http://gittip',
        'http://git',
        'https://',
        'http://',
    ]
    assert order == list(s.adapters)
    # Replacing the adapters dict wholesale and mounting still works.
    s2 = requests.Session()
    s2.adapters = {'http://': HTTPAdapter()}
    s2.mount('https://', HTTPAdapter())
    assert 'http://' in s2.adapters
    assert 'https://' in s2.adapters
def test_header_remove_is_case_insensitive(self):
    """Setting a header to None removes it regardless of case."""
    # From issue #1321
    s = requests.Session()
    s.headers['foo'] = 'bar'
    r = s.get(httpbin('get'), headers={'FOO': None})
    assert 'foo' not in r.request.headers
def test_params_are_merged_case_sensitive(self):
    """Unlike headers, query params with differing case are both sent."""
    s = requests.Session()
    s.params['foo'] = 'bar'
    r = s.get(httpbin('get'), params={'FOO': 'bar'})
    assert r.json()['args'] == {'foo': 'bar', 'FOO': 'bar'}
def test_long_authinfo_in_url(self):
    """Very long userinfo/host components survive URL preparation intact."""
    url = 'http://{0}:{1}@{2}:9000/path?query#frag'.format(
        'E8A3BE87-9E3F-4620-8858-95478E385B5B',
        'EA770032-DA4D-4D84-8CE9-29C6D910BF1E',
        'exactly-------------sixty-----------three------------characters',
    )
    r = requests.Request('GET', url).prepare()
    assert r.url == url
def test_header_keys_are_native(self):
    """Unicode and bytes header names both end up as native strings."""
    headers = {u('unicode'): 'blah', 'byte'.encode('ascii'): 'blah'}
    r = requests.Request('GET', httpbin('get'), headers=headers)
    p = r.prepare()

    # This is testing that they are builtin strings. A bit weird, but there
    # we go.
    assert 'unicode' in p.headers.keys()
    assert 'byte' in p.headers.keys()
def test_can_send_nonstring_objects_with_files(self):
    """Non-string data values are accepted in a multipart upload."""
    data = {'a': 0.0}
    files = {'b': 'foo'}
    r = requests.Request('POST', httpbin('post'), data=data, files=files)
    p = r.prepare()
    assert 'multipart/form-data' in p.headers['Content-Type']
def test_autoset_header_values_are_native(self):
    """Auto-computed headers (Content-Length) are native strings."""
    data = 'this is a string'
    length = '16'
    req = requests.Request('POST', httpbin('post'), data=data)
    p = req.prepare()
    assert p.headers['Content-Length'] == length
def test_nonhttp_schemes_dont_check_URLs(self):
    """Non-HTTP scheme URLs bypass validation and pass through unchanged."""
    test_urls = (
        'data:image/gif;base64,R0lGODlhAQABAHAAACH5BAUAAAAALAAAAAABAAEAAAICRAEAOw==',
        'file:///etc/passwd',
        'magnet:?xt=urn:btih:be08f00302bc2d1d3cfa3af02024fa647a271431',
    )
    for test_url in test_urls:
        req = requests.Request('GET', test_url)
        preq = req.prepare()
        assert test_url == preq.url
def test_auth_is_stripped_on_redirect_off_host(self):
    """The Authorization header is dropped when redirected to another host."""
    r = requests.get(
        httpbin('redirect-to'),
        params={'url': 'http://www.google.co.uk'},
        auth=('user', 'pass'),
    )
    assert r.history[0].request.headers['Authorization']
    assert not r.request.headers.get('Authorization', '')
def test_auth_is_retained_for_redirect_on_host(self):
    """The Authorization header is kept for a same-host redirect."""
    r = requests.get(httpbin('redirect/1'), auth=('user', 'pass'))
    h1 = r.history[0].request.headers['Authorization']
    h2 = r.request.headers['Authorization']
    assert h1 == h2
def test_manual_redirect_with_partial_body_read(self):
    """resolve_redirects works whether the previous body was partially or
    fully consumed before advancing to the next hop."""
    s = requests.Session()
    r1 = s.get(httpbin('redirect/2'), allow_redirects=False, stream=True)
    assert r1.is_redirect
    rg = s.resolve_redirects(r1, r1.request, stream=True)

    # read only the first eight bytes of the response body,
    # then follow the redirect
    r1.iter_content(8)
    r2 = next(rg)
    assert r2.is_redirect

    # read all of the response via iter_content,
    # then follow the redirect
    for _ in r2.iter_content():
        pass
    r3 = next(rg)
    assert not r3.is_redirect
def _patch_adapter_gzipped_redirect(self, session, url):
    """Wrap the adapter's build_response so only the FIRST response built
    claims a gzip content-encoding, simulating a server that lies about
    encoding on a redirect."""
    adapter = session.get_adapter(url=url)
    org_build_response = adapter.build_response
    self._patched_response = False

    def build_response(*args, **kwargs):
        resp = org_build_response(*args, **kwargs)
        if not self._patched_response:
            resp.raw.headers['content-encoding'] = 'gzip'
            self._patched_response = True
        return resp
    adapter.build_response = build_response
def test_redirect_with_wrong_gzipped_header(self):
    """Following a redirect whose response falsely claims gzip must not raise."""
    s = requests.Session()
    url = httpbin('redirect/1')
    self._patch_adapter_gzipped_redirect(s, url)
    s.get(url)
def test_basic_auth_str_is_always_native(self):
    """_basic_auth_str returns a native str with the expected base64 value."""
    s = _basic_auth_str("test", "test")
    assert isinstance(s, builtin_str)
    assert s == "Basic dGVzdDp0ZXN0"
def test_requests_history_is_saved(self):
    """Each response in the redirect chain carries the history before it."""
    r = requests.get('https://httpbin.org/redirect/5')
    total = r.history[-1].history
    for i, item in enumerate(r.history):
        assert item.history == total[0:i]
def test_json_param_post_content_type_works(self):
    """The json= parameter serializes the body and sets the content type."""
    r = requests.post(
        httpbin('post'),
        json={'life': 42}
    )
    assert r.status_code == 200
    assert 'application/json' in r.request.headers['Content-Type']
    assert {'life': 42} == r.json()['json']
class TestContentEncodingDetection(unittest.TestCase):
    """Tests for requests.utils.get_encodings_from_content."""

    def test_none(self):
        """Content with no charset declaration yields no encodings."""
        encodings = requests.utils.get_encodings_from_content('')
        assert not len(encodings)

    def test_html_charset(self):
        """HTML5 meta charset attribute"""
        content = '<meta charset="UTF-8">'
        encodings = requests.utils.get_encodings_from_content(content)
        assert len(encodings) == 1
        assert encodings[0] == 'UTF-8'

    def test_html4_pragma(self):
        """HTML4 pragma directive"""
        content = '<meta http-equiv="Content-type" content="text/html;charset=UTF-8">'
        encodings = requests.utils.get_encodings_from_content(content)
        assert len(encodings) == 1
        assert encodings[0] == 'UTF-8'

    def test_xhtml_pragma(self):
        """XHTML 1.x served with text/html MIME type"""
        content = '<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />'
        encodings = requests.utils.get_encodings_from_content(content)
        assert len(encodings) == 1
        assert encodings[0] == 'UTF-8'

    def test_xml(self):
        """XHTML 1.x served as XML"""
        content = '<?xml version="1.0" encoding="UTF-8"?>'
        encodings = requests.utils.get_encodings_from_content(content)
        assert len(encodings) == 1
        assert encodings[0] == 'UTF-8'

    def test_precedence(self):
        """HTML5 charset beats the HTML4 pragma, which beats the XML decl."""
        content = '''
        <?xml version="1.0" encoding="XML"?>
        <meta charset="HTML5">
        <meta http-equiv="Content-type" content="text/html;charset=HTML4" />
        '''.strip()
        encodings = requests.utils.get_encodings_from_content(content)
        assert encodings == ['HTML5', 'HTML4', 'XML']
class TestCaseInsensitiveDict(unittest.TestCase):
    """Tests for requests.structures.CaseInsensitiveDict: lookups ignore
    key case while iteration preserves the case of the last key written."""

    def test_mapping_init(self):
        """Construction from a plain mapping."""
        cid = CaseInsensitiveDict({'Foo': 'foo', 'BAr': 'bar'})
        assert len(cid) == 2
        assert 'foo' in cid
        assert 'bar' in cid

    def test_iterable_init(self):
        """Construction from an iterable of pairs."""
        cid = CaseInsensitiveDict([('Foo', 'foo'), ('BAr', 'bar')])
        assert len(cid) == 2
        assert 'foo' in cid
        assert 'bar' in cid

    def test_kwargs_init(self):
        """Construction from keyword arguments."""
        cid = CaseInsensitiveDict(FOO='foo', BAr='bar')
        assert len(cid) == 2
        assert 'foo' in cid
        assert 'bar' in cid

    def test_docstring_example(self):
        """The example from the class docstring works as advertised."""
        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        assert cid['aCCEPT'] == 'application/json'
        assert list(cid) == ['Accept']

    def test_len(self):
        """Keys differing only in case count once."""
        cid = CaseInsensitiveDict({'a': 'a', 'b': 'b'})
        cid['A'] = 'a'
        assert len(cid) == 2

    def test_getitem(self):
        """Lookup ignores case."""
        cid = CaseInsensitiveDict({'Spam': 'blueval'})
        assert cid['spam'] == 'blueval'
        assert cid['SPAM'] == 'blueval'

    def test_fixes_649(self):
        """__setitem__ should behave case-insensitively."""
        cid = CaseInsensitiveDict()
        cid['spam'] = 'oneval'
        cid['Spam'] = 'twoval'
        cid['sPAM'] = 'redval'
        cid['SPAM'] = 'blueval'
        assert cid['spam'] == 'blueval'
        assert cid['SPAM'] == 'blueval'
        # The last case variant written is the one that iteration shows.
        assert list(cid.keys()) == ['SPAM']

    def test_delitem(self):
        """Deletion ignores case."""
        cid = CaseInsensitiveDict()
        cid['Spam'] = 'someval'
        del cid['sPam']
        assert 'spam' not in cid
        assert len(cid) == 0

    def test_contains(self):
        """Membership tests ignore case."""
        cid = CaseInsensitiveDict()
        cid['Spam'] = 'someval'
        assert 'Spam' in cid
        assert 'spam' in cid
        assert 'SPAM' in cid
        assert 'sPam' in cid
        assert 'notspam' not in cid

    def test_get(self):
        """get() ignores case and honors the default."""
        cid = CaseInsensitiveDict()
        cid['spam'] = 'oneval'
        cid['SPAM'] = 'blueval'
        assert cid.get('spam') == 'blueval'
        assert cid.get('SPAM') == 'blueval'
        assert cid.get('sPam') == 'blueval'
        assert cid.get('notspam', 'default') == 'default'

    def test_update(self):
        """update() overwrites case-insensitively without growing the dict."""
        cid = CaseInsensitiveDict()
        cid['spam'] = 'blueval'
        cid.update({'sPam': 'notblueval'})
        assert cid['spam'] == 'notblueval'
        cid = CaseInsensitiveDict({'Foo': 'foo', 'BAr': 'bar'})
        cid.update({'fOO': 'anotherfoo', 'bAR': 'anotherbar'})
        assert len(cid) == 2
        assert cid['foo'] == 'anotherfoo'
        assert cid['bar'] == 'anotherbar'

    def test_update_retains_unchanged(self):
        """update() leaves untouched keys alone."""
        cid = CaseInsensitiveDict({'foo': 'foo', 'bar': 'bar'})
        cid.update({'foo': 'newfoo'})
        assert cid['bar'] == 'bar'

    def test_iter(self):
        """Iteration yields the keys with their stored case."""
        cid = CaseInsensitiveDict({'Spam': 'spam', 'Eggs': 'eggs'})
        keys = frozenset(['Spam', 'Eggs'])
        assert frozenset(iter(cid)) == keys

    def test_equality(self):
        """Equality comparisons (to CID or plain dict) ignore key case."""
        cid = CaseInsensitiveDict({'SPAM': 'blueval', 'Eggs': 'redval'})
        othercid = CaseInsensitiveDict({'spam': 'blueval', 'eggs': 'redval'})
        assert cid == othercid
        del othercid['spam']
        assert cid != othercid
        assert cid == {'spam': 'blueval', 'eggs': 'redval'}

    def test_setdefault(self):
        """setdefault() matches existing keys case-insensitively."""
        cid = CaseInsensitiveDict({'Spam': 'blueval'})
        assert cid.setdefault('spam', 'notblueval') == 'blueval'
        assert cid.setdefault('notspam', 'notblueval') == 'notblueval'

    def test_lower_items(self):
        """lower_items() yields lowercased keys."""
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        keyset = frozenset(lowerkey for lowerkey, v in cid.lower_items())
        lowerkeyset = frozenset(['accept', 'user-agent'])
        assert keyset == lowerkeyset

    def test_preserve_key_case(self):
        """Original key case is preserved through items()/keys()/iteration."""
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        keyset = frozenset(['Accept', 'user-Agent'])
        assert frozenset(i[0] for i in cid.items()) == keyset
        assert frozenset(cid.keys()) == keyset
        assert frozenset(cid) == keyset

    def test_preserve_last_key_case(self):
        """Rewriting a key updates the stored case to the latest spelling."""
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        cid.update({'ACCEPT': 'application/json'})
        cid['USER-AGENT'] = 'requests'
        keyset = frozenset(['ACCEPT', 'USER-AGENT'])
        assert frozenset(i[0] for i in cid.items()) == keyset
        assert frozenset(cid.keys()) == keyset
        assert frozenset(cid) == keyset
class UtilsTestCase(unittest.TestCase):
    """Tests for assorted helpers in requests.utils."""

    def test_super_len_io_streams(self):
        """ Ensures that we properly deal with different kinds of IO streams. """
        # uses StringIO or io.StringIO (see import above)
        from io import BytesIO
        from requests.utils import super_len

        assert super_len(StringIO.StringIO()) == 0
        assert super_len(
            StringIO.StringIO('with so much drama in the LBC')) == 29

        assert super_len(BytesIO()) == 0
        assert super_len(
            BytesIO(b"it's kinda hard bein' snoop d-o-double-g")) == 40

        # cStringIO only exists on Python 2; skip it elsewhere.
        try:
            import cStringIO
        except ImportError:
            pass
        else:
            assert super_len(
                cStringIO.StringIO('but some how, some way...')) == 25

    def test_get_environ_proxies_ip_ranges(self):
        """Ensures that IP addresses are correctly matched with ranges
        in the no_proxy variable."""
        from requests.utils import get_environ_proxies
        os.environ['no_proxy'] = "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1"
        assert get_environ_proxies('http://192.168.0.1:5000/') == {}
        assert get_environ_proxies('http://192.168.0.1/') == {}
        assert get_environ_proxies('http://172.16.1.1/') == {}
        assert get_environ_proxies('http://172.16.1.1:5000/') == {}
        assert get_environ_proxies('http://192.168.1.1:5000/') != {}
        assert get_environ_proxies('http://192.168.1.1/') != {}

    def test_get_environ_proxies(self):
        """Ensures that hostnames are correctly matched against the
        no_proxy variable."""
        from requests.utils import get_environ_proxies
        os.environ['no_proxy'] = "127.0.0.1,localhost.localdomain,192.168.0.0/24,172.16.1.1"
        assert get_environ_proxies(
            'http://localhost.localdomain:5000/v1.0/') == {}
        assert get_environ_proxies('http://www.requests.com/') != {}

    def test_is_ipv4_address(self):
        """is_ipv4_address accepts dotted quads only."""
        from requests.utils import is_ipv4_address
        assert is_ipv4_address('8.8.8.8')
        assert not is_ipv4_address('8.8.8.8.8')
        assert not is_ipv4_address('localhost.localdomain')

    def test_is_valid_cidr(self):
        """is_valid_cidr requires an explicit /prefix."""
        from requests.utils import is_valid_cidr
        assert not is_valid_cidr('8.8.8.8')
        assert is_valid_cidr('192.168.1.0/24')

    def test_dotted_netmask(self):
        """Prefix lengths convert to dotted-quad netmasks."""
        from requests.utils import dotted_netmask
        assert dotted_netmask(8) == '255.0.0.0'
        assert dotted_netmask(24) == '255.255.255.0'
        assert dotted_netmask(25) == '255.255.255.128'

    def test_address_in_network(self):
        """address_in_network tests CIDR membership."""
        from requests.utils import address_in_network
        assert address_in_network('192.168.1.1', '192.168.1.0/24')
        assert not address_in_network('172.16.0.1', '192.168.1.0/24')

    def test_get_auth_from_url(self):
        """Ensures that username and password in well-encoded URI as per
        RFC 3986 are correctly extracted."""
        from requests.utils import get_auth_from_url
        from requests.compat import quote
        percent_encoding_test_chars = "%!*'();:@&=+$,/?#[] "
        url_address = "request.com/url.html#test"
        url = "http://" + quote(
            percent_encoding_test_chars, '') + ':' + quote(
            percent_encoding_test_chars, '') + '@' + url_address
        (username, password) = get_auth_from_url(url)
        assert username == percent_encoding_test_chars
        assert password == percent_encoding_test_chars
class TestMorselToCookieExpires(unittest.TestCase):
    """Tests for morsel_to_cookie when morsel contains expires."""

    def test_expires_valid_str(self):
        """Test case where we convert expires from string time."""
        morsel = Morsel()
        morsel['expires'] = 'Thu, 01-Jan-1970 00:00:01 GMT'
        cookie = morsel_to_cookie(morsel)
        # One second past the epoch.
        assert cookie.expires == 1

    def test_expires_invalid_int(self):
        """Test case where an invalid type is passed for expires."""
        morsel = Morsel()
        morsel['expires'] = 100
        with pytest.raises(TypeError):
            morsel_to_cookie(morsel)

    def test_expires_invalid_str(self):
        """Test case where an invalid string is input."""
        morsel = Morsel()
        morsel['expires'] = 'woops'
        with pytest.raises(ValueError):
            morsel_to_cookie(morsel)

    def test_expires_none(self):
        """Test case where expires is None."""
        morsel = Morsel()
        morsel['expires'] = None
        cookie = morsel_to_cookie(morsel)
        assert cookie.expires is None
class TestMorselToCookieMaxAge(unittest.TestCase):
    """Tests for morsel_to_cookie when morsel contains max-age."""

    def test_max_age_valid_int(self):
        """Test case where a valid max age in seconds is passed."""
        morsel = Morsel()
        morsel['max-age'] = 60
        cookie = morsel_to_cookie(morsel)
        # max-age is converted to an absolute integer expiry.
        assert isinstance(cookie.expires, int)

    def test_max_age_invalid_str(self):
        """Test case where an invalid max age is passed."""
        morsel = Morsel()
        morsel['max-age'] = 'woops'
        with pytest.raises(TypeError):
            morsel_to_cookie(morsel)
class TestTimeout:
    """Tests for the timeout parameter, covering read, connect, tuple and
    invalid forms."""

    def test_stream_timeout(self):
        """A slow response raises Timeout with a 'Read timed out' message."""
        try:
            requests.get('https://httpbin.org/delay/10', timeout=2.0)
        except requests.exceptions.Timeout as e:
            assert 'Read timed out' in e.args[0].args[0]

    def test_invalid_timeout(self):
        """Wrong-size tuples and non-numeric timeouts raise ValueError."""
        with pytest.raises(ValueError) as e:
            requests.get(httpbin('get'), timeout=(3, 4, 5))
        assert '(connect, read)' in str(e)

        with pytest.raises(ValueError) as e:
            requests.get(httpbin('get'), timeout="foo")
        assert 'must be an int or float' in str(e)

    def test_none_timeout(self):
        """ Check that you can set None as a valid timeout value.

        To actually test this behavior, we'd want to check that setting the
        timeout to None actually lets the request block past the system default
        timeout. However, this would make the test suite unbearably slow.
        Instead we verify that setting the timeout to None does not prevent the
        request from succeeding.
        """
        r = requests.get(httpbin('get'), timeout=None)
        assert r.status_code == 200

    def test_read_timeout(self):
        """A (None, read) tuple times out on the read phase."""
        try:
            requests.get(httpbin('delay/10'), timeout=(None, 0.1))
            assert False, "The recv() request should time out."
        except ReadTimeout:
            pass

    def test_connect_timeout(self):
        """A (connect, None) tuple times out on the connect phase, and the
        exception is both a ConnectionError and a Timeout."""
        try:
            requests.get(TARPIT, timeout=(0.1, None))
            assert False, "The connect() request should time out."
        except ConnectTimeout as e:
            assert isinstance(e, ConnectionError)
            assert isinstance(e, Timeout)

    def test_total_timeout_connect(self):
        """A full (connect, read) tuple still times out on connect."""
        try:
            requests.get(TARPIT, timeout=(0.1, 0.1))
            assert False, "The connect() request should time out."
        except ConnectTimeout:
            pass

    def test_encoded_methods(self):
        """See: https://github.com/kennethreitz/requests/issues/2316"""
        r = requests.request(b'GET', httpbin('get'))
        assert r.ok
# Record of a single Session.send invocation: its positional and keyword args.
SendCall = collections.namedtuple('SendCall', ('args', 'kwargs'))
class RedirectSession(SessionRedirectMixin):
    """Minimal Session stand-in that records send() calls and replays a
    scripted sequence of redirect status codes."""

    def __init__(self, order_of_redirects):
        # Status codes to serve, in order; 200 after the list is exhausted.
        self.redirects = order_of_redirects
        self.calls = []
        self.max_redirects = 30
        self.cookies = {}
        self.trust_env = False

    def send(self, *args, **kwargs):
        """Record the call and fabricate the next scripted response."""
        self.calls.append(SendCall(args, kwargs))
        return self.build_response()

    def build_response(self):
        """Build a Response for the most recent recorded request."""
        request = self.calls[-1].args[0]
        r = requests.Response()

        try:
            r.status_code = int(self.redirects.pop(0))
        except IndexError:
            # Script exhausted: terminate the redirect chain.
            r.status_code = 200

        r.headers = CaseInsensitiveDict({'Location': '/'})
        r.raw = self._build_raw()
        r.request = request
        return r

    def _build_raw(self):
        """Return an empty raw stream with the release_conn hook expected
        by resolve_redirects."""
        string = StringIO.StringIO('')
        setattr(string, 'release_conn', lambda *args: args)
        return string
class TestRedirects:
    """Tests for SessionRedirectMixin.resolve_redirects using the scripted
    RedirectSession above."""

    # Keyword arguments resolve_redirects passes to send() on each hop.
    default_keyword_args = {
        'stream': False,
        'verify': True,
        'cert': None,
        'timeout': None,
        'allow_redirects': False,
        'proxies': {},
    }

    def test_requests_are_updated_each_time(self):
        """A 303 rewrites POST to GET and each hop sends the updated request."""
        session = RedirectSession([303, 307])
        prep = requests.Request('POST', 'http://httpbin.org/post').prepare()
        r0 = session.send(prep)
        assert r0.request.method == 'POST'
        assert session.calls[-1] == SendCall((r0.request,), {})
        redirect_generator = session.resolve_redirects(r0, prep)
        for response in redirect_generator:
            assert response.request.method == 'GET'
            send_call = SendCall((response.request,),
                                 TestRedirects.default_keyword_args)
            assert session.calls[-1] == send_call
@pytest.fixture
def list_of_tuples():
    """Sample tuple-of-pairs payloads for form-encoding tests."""
    two_pairs = (('a', 'b'), ('c', 'd'))
    two_pairs_reversed = (('c', 'd'), ('a', 'b'))
    three_pairs = (('a', 'b'), ('c', 'd'), ('e', 'f'))
    return [two_pairs, two_pairs_reversed, three_pairs]
def test_data_argument_accepts_tuples(list_of_tuples):
    """
    Ensure that the data argument will accept tuples of strings
    and properly encode them.
    """
    for data in list_of_tuples:
        p = PreparedRequest()
        p.prepare(
            method='GET',
            url='http://www.example.com',
            data=data,
            hooks=default_hooks()
        )
        # Body must match the standard urlencoding of the same pairs.
        assert p.body == urlencode(data)
def assert_copy(p, p_copy):
    """Assert that a prepared-request copy matches the original field-by-field."""
    compared_attrs = ('method', 'url', 'headers', '_cookies', 'body', 'hooks')
    for attr in compared_attrs:
        original_value = getattr(p, attr)
        copied_value = getattr(p_copy, attr)
        assert original_value == copied_value
def test_prepared_request_empty_copy():
    """An unprepared PreparedRequest copies cleanly."""
    p = PreparedRequest()
    assert_copy(p, p.copy())
def test_prepared_request_no_cookies_copy():
    """A prepared request without cookies copies cleanly."""
    p = PreparedRequest()
    p.prepare(
        method='GET',
        url='http://www.example.com',
        data='foo=bar',
        hooks=default_hooks()
    )
    assert_copy(p, p.copy())
def test_prepared_request_complete_copy():
    """A fully-populated prepared request (with cookies) copies cleanly."""
    p = PreparedRequest()
    p.prepare(
        method='GET',
        url='http://www.example.com',
        data='foo=bar',
        hooks=default_hooks(),
        cookies={'foo': 'bar'}
    )
    assert_copy(p, p.copy())
def test_prepare_unicode_url():
    """A URL containing non-ASCII characters prepares and copies cleanly."""
    p = PreparedRequest()
    p.prepare(
        method='GET',
        url=u('http://www.example.com/üniçø∂é'),
        hooks=[]
    )
    assert_copy(p, p.copy())
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 34.078227
| 112
| 0.591457
|
4a05cb35e176a19b4d37aecd2a0f922763b1997d
| 7,017
|
py
|
Python
|
entity/item_entity.py
|
Danycraft98/pythonProject
|
1b1be6576e018776be0caf869d427edde2214976
|
[
"Cube",
"Unlicense"
] | null | null | null |
entity/item_entity.py
|
Danycraft98/pythonProject
|
1b1be6576e018776be0caf869d427edde2214976
|
[
"Cube",
"Unlicense"
] | null | null | null |
entity/item_entity.py
|
Danycraft98/pythonProject
|
1b1be6576e018776be0caf869d427edde2214976
|
[
"Cube",
"Unlicense"
] | null | null | null |
import pygame as pg
from entity import equips
from settings import tools, prepare
# Sprite sheet holding all item graphics.
ITEM_SHEET = prepare.GFX["objects"]["items"]
# (column, row) cell coordinates of each item's two animation frames.
ITEM_COORDS = {"heart": [(0, 0), (1, 0)],
               "diamond": [(0, 1), (1, 1)],
               "potion": [(0, 2), (1, 2)],
               "key": [(0, 3), (1, 3)]}
RISE_SPEED = 1.5  # Pixels per update an item rises out of an opened chest.
MAX_RISE = 50  # Total rise height before a chest item disappears.
class _Item(pg.sprite.Sprite):
    """Base class for specific items."""

    def __init__(self, name, pos, duration, chest=False, ident=None, *groups):
        """
        The argument name is the type of item corresponding to the ITEMS dict;
        pos is the location on the map the item is located; if the item is in
        a treasure chest, pass chest=True; if the player can only get this item
        once, pass a unique (to the map) ident string to be stored in the
        player's identifiers attribute.  duration is the item's lifetime in
        seconds; falsy means the item never expires.
        """
        pg.sprite.Sprite.__init__(self, *groups)
        coords, size = ITEM_COORDS[name], prepare.CELL_SIZE
        self.frames = tools.strip_coords_from_sheet(ITEM_SHEET, coords, size)
        self.anim = tools.Anim(self.frames, 7)
        self.image = self.anim.get_next_frame(pg.time.get_ticks())
        # Subtract 1 from y axis to make item drop appear behind death anim.
        self.rect = pg.Rect((pos[0], pos[1] - 1), prepare.CELL_SIZE)
        self.exact_position = list(self.rect.topleft)
        self.old_position = self.exact_position[:]
        # Full cell-sized mask: the whole tile is collidable.
        self.mask = pg.Mask(prepare.CELL_SIZE)
        self.mask.fill()
        self.timer = tools.Timer(duration * 1000, 1) if duration else None
        self.from_chest = chest
        self.identifier = ident  # Used to stop respawning of unique items.
        self.height = 0  # Used when item rises from chest.
        self.sound_effect = None

    @property
    def frame_speed(self):
        """Get the total amount the object has been displaced this frame."""
        return (self.exact_position[0] - self.old_position[0],
                self.exact_position[1] - self.old_position[1])

    def collide_with_player(self, player):
        """
        Objects that aren't inside treasure chests bestow their effects and
        disappear on collision with the player.
        """
        if not self.from_chest:
            self.get_item(player)
            self.kill()

    def get_item(self, player):
        """
        Play sound effect; bestow effect of item; add unique identifier to
        player's identifiers if applicable.
        """
        if self.sound_effect:
            self.sound_effect.play()
        # process_result is implemented by each concrete item subclass.
        self.process_result(player)
        if self.identifier:
            identifiers = player.identifiers
            map_name, key = self.identifier
            identifiers.setdefault(map_name, set())
            identifiers[map_name].add(key)

    def update(self, now, *args):
        """
        If the object has a duration check to see if it has expired;
        If the item came from a chest animate it rising appropriately;
        Get next frame of animation.
        """
        self.old_position = self.exact_position[:]
        if self.timer:
            self.timer.check_tick(now)
            if self.timer.done:
                self.kill()
        if self.from_chest:
            self.height += RISE_SPEED
            self.exact_position[1] -= RISE_SPEED
            if self.height >= MAX_RISE:
                self.kill()
        self.rect.topleft = self.exact_position
        # Guarded because EquipDrop instances have no anim attribute.
        if hasattr(self, "anim"):
            self.image = self.anim.get_next_frame(now)

    def draw(self, surface, interpolate):
        """Basic draw function."""
        surface.blit(self.image, self.rect)
class Heart(_Item):
    """Fundamental healing item."""

    def __init__(self, pos, duration, chest=False, ident=None, *groups):
        _Item.__init__(self, "heart", pos, duration, chest, ident, *groups)
        self.heal = 3

    def process_result(self, player):
        """Heal the player by self.heal, clamped at prepare.MAX_HEALTH."""
        healed = player.health + self.heal
        player.health = min(healed, prepare.MAX_HEALTH)
class Diamond(_Item):
    """A currency item worth 5 units."""

    def __init__(self, pos, duration, chest=False, ident=None, *groups):
        _Item.__init__(self, "diamond", pos, duration, chest, ident, *groups)
        self.value = 5

    def process_result(self, player):
        """Add self.value to the player's money, clamped at prepare.MAX_MONEY."""
        new_total = player.inventory["money"] + self.value
        player.inventory["money"] = min(new_total, prepare.MAX_MONEY)
class Potion(_Item):
    """Cure poison effect. (not implemented)."""

    def __init__(self, pos, duration, chest=False, ident=None, *groups):
        _Item.__init__(self, "potion", pos, duration, chest, ident, *groups)

    def process_result(self, player):
        """Placeholder: the cure-poison effect is not implemented yet."""
        pass  # Insert effect here.
class Key(_Item):
    """Basic key for generic doors."""

    def __init__(self, pos, duration, chest=False, ident=None, *groups):
        _Item.__init__(self, "key", pos, duration, chest, ident, *groups)

    def process_result(self, player):
        """Grant the player one more key."""
        keys_held = player.inventory["keys"]
        player.inventory["keys"] = keys_held + 1
def make_equip_drop(GearClass):
    """Given an equipment class, return a corresponding drop item class."""
    class EquipDrop(_Item):
        """This class works for creating drops for all equipment."""

        def __init__(self, pos, dur, chest=False, ident=None, *groups):
            # Deliberately bypasses _Item.__init__: the drop uses the gear's
            # own display image and has no frame animation (so _Item.update's
            # hasattr(self, "anim") guard skips frame advancement).
            pg.sprite.Sprite.__init__(self, *groups)
            self.item = GearClass()
            self.image = self.item.display
            # Subtract 1 from y so the drop appears behind the death anim.
            self.rect = pg.Rect((pos[0], pos[1] - 1), prepare.CELL_SIZE)
            self.exact_position = list(self.rect.topleft)
            self.old_position = self.exact_position[:]
            self.mask = pg.Mask(prepare.CELL_SIZE)
            self.mask.fill()
            # NOTE(review): dur is accepted but ignored — equip drops never
            # expire; confirm this is intended.
            self.timer = None
            self.from_chest = chest
            self.identifier = ident  # Used to stop respawning of unique items.
            self.height = 0  # Used when item rises from chest.
            self.sound_effect = None

        def process_result(self, player):
            """Add the gear item to the player's inventory."""
            gear, name = self.item.sheet, self.item.name
            player.inventory[gear][name] = self.item
    return EquipDrop  # Return the class, not an instance.
# Registry of spawnable items. Plain string keys name basic pickups;
# (sheet, name) tuple keys name equipment drops built by make_equip_drop.
ITEMS = {"heart": Heart,
         "diamond": Diamond,
         "potion": Potion,
         "key": Key,
         ("head", "helm"): make_equip_drop(equips.Helm),
         ("head", "sader"): make_equip_drop(equips.Sader),
         ("head", "diver"): make_equip_drop(equips.Diver),
         ("head", "goggles"): make_equip_drop(equips.TopGoggles),
         ("body", "chain"): make_equip_drop(equips.ChainMail),
         ("shield", "tin"): make_equip_drop(equips.TinShield),
         ("weapon", "labrys"): make_equip_drop(equips.Labrys),
         ("weapon", "pitch"): make_equip_drop(equips.PitchFork)}
| 36.931579
| 79
| 0.608665
|
4a05cb8502905e5d7ae4c9a4bc32797740b27e17
| 2,715
|
py
|
Python
|
toughio/_io/_common.py
|
keurfonluu/ToughMeshio
|
9f374f5c72df4d76bf63ae4f87f2f2d4e52c81e0
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
toughio/_io/_common.py
|
keurfonluu/ToughMeshio
|
9f374f5c72df4d76bf63ae4f87f2f2d4e52c81e0
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
toughio/_io/_common.py
|
keurfonluu/ToughMeshio
|
9f374f5c72df4d76bf63ae4f87f2f2d4e52c81e0
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import numpy as np
def read_record(data, fmt):
    """Parse a fixed-width record string into a list of typed values.

    *fmt* is a comma-separated list of width/type tokens (e.g. "5d,10.3e,5s"):
    's' strips and keeps strings, 'S' keeps strings verbatim, 'd' parses
    ints, and 'f'/'e' parse floats (Fortran-style exponents allowed).
    Empty (all-blank) fields map to None.
    """
    converters = {
        "s": str,
        "S": str,
        "d": int,
        "f": to_float,
        "e": to_float,
    }
    out = []
    start = 0
    for token in fmt.split(","):
        width = int(token[:-1].split(".")[0])
        kind = token[-1]
        field = data[start : start + width]
        if kind != "S":
            field = field.strip()
        out.append(converters[kind](field) if field else None)
        start += width
    return out
def write_record(data, fmt, multi=False):
    """Format values into 80-column record strings.

    With multi=False, *data* and *fmt* are zipped one-to-one into a single
    record. With multi=True, *data* is chunked into rows of len(fmt) values,
    producing one record string per row.
    """
    if multi:
        ncol = len(fmt)
        out = []
        for start in range(0, len(data), ncol):
            row = data[start : start + ncol]
            fields = [to_str(value, spec) for value, spec in zip(row, fmt)]
            out.append("{:80}\n".format("".join(fields)))
        return out
    fields = [to_str(value, spec) for value, spec in zip(data, fmt)]
    return ["{:80}\n".format("".join(fields))]
def to_float(s):
    """Convert a Fortran-style number string to a float.

    Handles 'd'/'D' exponent markers (e.g. "1.5d2") and exponents whose
    'E' marker was dropped because the field overflowed (e.g. "0.0001-001",
    meaning 1.0e-5), which Python's float() cannot parse directly.

    Raises ValueError if *s* is not a recognizable number.
    """
    # Fortran writes double-precision exponents with 'd'/'D'.
    s = s.replace("d", "e").replace("D", "e")
    try:
        return float(s)
    except ValueError:
        # Exponent with an omitted 'E' marker: re-insert 'e' before the last
        # sign character (never position 0, which would be the mantissa's own
        # sign). This generalizes the previous fixed-width "...±XXX" split,
        # which crashed on exponents that were not exactly 3 digits wide.
        for i in range(len(s) - 1, 0, -1):
            if s[i] in "+-":
                return float("{}e{}".format(s[:i], s[i:]))
        raise
def to_str(x, fmt):
    """Format a single value according to format string *fmt*.

    None becomes an empty field; floats fall back to scientific notation
    when the fixed-point rendering would overflow the field width; strings
    reuse *fmt* with the numeric type codes stripped (keeping the width).
    """
    x = "" if x is None else x
    if not isinstance(x, str):
        # Special handling for floating point numbers
        if "f" in fmt:
            # Number of decimals is specified
            if "." in fmt:
                # Field width, e.g. 10 in '{:>10.3f}'.
                n = int(fmt[3:].split(".")[0])
                tmp = fmt.format(x)
                if len(tmp) > n:
                    # Too wide for the field: switch to scientific notation.
                    return fmt.replace("f", "e").format(x)
                else:
                    return tmp
            # Let Python decide the format
            else:
                n = int(fmt[3:].split("f")[0])
                tmp = str(float(x))
                if len(tmp) > n:
                    # n - 7 decimals leave room for sign, leading digit,
                    # decimal point and the 'e±xx' exponent suffix.
                    fmt = "{{:>{}.{}e}}".format(n, n - 7)
                    return fmt.format(x)
                else:
                    fmt = "{{:>{}}}".format(n)
                    return fmt.format(tmp)
        else:
            return fmt.format(x)
    else:
        # Strings: strip any numeric type codes, keep alignment and width.
        return fmt.replace("g", "").replace("e", "").replace("f", "").format(x)
def prune_nones_dict(data):
    """Return a copy of *data* without the keys whose value is None."""
    pruned = {}
    for key, value in data.items():
        if value is not None:
            pruned[key] = value
    return pruned
def prune_nones_list(data):
    """Return a copy of *data* with trailing None values removed."""
    pruned = list(data)
    while pruned and pruned[-1] is None:
        pruned.pop()
    return pruned
| 25.373832
| 84
| 0.47477
|
4a05cc13916a38f996aba10f2098ef887d00c755
| 8,696
|
py
|
Python
|
heat/tests/openstack/neutron/lbaas/test_pool.py
|
stackriot/heat
|
9ed612906e388eda8bf850420cbceef54e05841c
|
[
"Apache-2.0"
] | 265
|
2015-01-02T09:33:22.000Z
|
2022-03-26T23:19:54.000Z
|
heat/tests/openstack/neutron/lbaas/test_pool.py
|
stackriot/heat
|
9ed612906e388eda8bf850420cbceef54e05841c
|
[
"Apache-2.0"
] | 8
|
2015-09-01T15:43:19.000Z
|
2021-12-14T05:18:23.000Z
|
heat/tests/openstack/neutron/lbaas/test_pool.py
|
stackriot/heat
|
9ed612906e388eda8bf850420cbceef54e05841c
|
[
"Apache-2.0"
] | 295
|
2015-01-06T07:00:40.000Z
|
2021-09-06T08:05:06.000Z
|
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import yaml
from neutronclient.common import exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine.resources.openstack.neutron.lbaas import pool
from heat.tests import common
from heat.tests.openstack.neutron import inline_templates
from heat.tests import utils
class PoolTest(common.HeatTestCase):
    """Unit tests for the OS::Neutron::LBaaS::Pool Heat resource.

    The Neutron client is fully mocked. ``show_loadbalancer`` side-effect
    sequences emulate the load balancer provisioning state machine
    (PENDING_UPDATE -> ACTIVE), and ``StateInvalidClient`` side effects
    emulate a busy load balancer so the resource's retry logic is exercised.
    """
    def test_resource_mapping(self):
        # The plugin must register the Pool class under its Heat type name.
        mapping = pool.resource_mapping()
        self.assertEqual(pool.Pool,
                         mapping['OS::Neutron::LBaaS::Pool'])
    @mock.patch('heat.engine.clients.os.neutron.'
                'NeutronClientPlugin.has_extension', return_value=True)
    def _create_stack(self, ext_func, tmpl=inline_templates.POOL_TEMPLATE):
        # Parse the template, pull out the pool resource and replace its
        # Neutron client (and name->id resolution) with mocks.
        self.t = template_format.parse(tmpl)
        self.stack = utils.parse_stack(self.t)
        self.pool = self.stack['pool']
        self.neutron_client = mock.MagicMock()
        self.pool.client = mock.MagicMock(return_value=self.neutron_client)
        self.pool.client_plugin().find_resourceid_by_name_or_id = (
            mock.MagicMock(return_value='123'))
        self.pool.client_plugin().client = mock.MagicMock(
            return_value=self.neutron_client)
    def test_validate_no_cookie_name(self):
        # APP_COOKIE session persistence requires a cookie_name property.
        tmpl = yaml.safe_load(inline_templates.POOL_TEMPLATE)
        sp = tmpl['resources']['pool']['properties']['session_persistence']
        sp['type'] = 'APP_COOKIE'
        self._create_stack(tmpl=yaml.dump(tmpl))
        msg = _('Property cookie_name is required when '
                'session_persistence type is set to APP_COOKIE.')
        with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
                        'has_extension', return_value=True):
            self.assertRaisesRegex(exception.StackValidationFailed,
                                   msg, self.pool.validate)
    def test_validate_source_ip_cookie_name(self):
        # SOURCE_IP session persistence must NOT carry a cookie_name.
        tmpl = yaml.safe_load(inline_templates.POOL_TEMPLATE)
        sp = tmpl['resources']['pool']['properties']['session_persistence']
        sp['type'] = 'SOURCE_IP'
        sp['cookie_name'] = 'cookie'
        self._create_stack(tmpl=yaml.dump(tmpl))
        msg = _('Property cookie_name must NOT be specified when '
                'session_persistence type is set to SOURCE_IP.')
        with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
                        'has_extension', return_value=True):
            self.assertRaisesRegex(exception.StackValidationFailed,
                                   msg, self.pool.validate)
    def test_create(self):
        self._create_stack()
        # Two PENDING_UPDATE polls then ACTIVE: create completes on poll 3.
        self.neutron_client.show_loadbalancer.side_effect = [
            {'loadbalancer': {'provisioning_status': 'PENDING_UPDATE'}},
            {'loadbalancer': {'provisioning_status': 'PENDING_UPDATE'}},
            {'loadbalancer': {'provisioning_status': 'ACTIVE'}},
        ]
        # First create call is rejected (LB busy) and must be retried.
        self.neutron_client.create_lbaas_pool.side_effect = [
            exceptions.StateInvalidClient,
            {'pool': {'id': '1234'}}
        ]
        expected = {
            'pool': {
                'name': 'my_pool',
                'description': 'my pool',
                'session_persistence': {
                    'type': 'HTTP_COOKIE'
                },
                'lb_algorithm': 'ROUND_ROBIN',
                'listener_id': '123',
                'loadbalancer_id': 'my_lb',
                'protocol': 'HTTP',
                'admin_state_up': True
            }
        }
        props = self.pool.handle_create()
        self.assertFalse(self.pool.check_create_complete(props))
        self.neutron_client.create_lbaas_pool.assert_called_with(expected)
        self.assertFalse(self.pool.check_create_complete(props))
        self.neutron_client.create_lbaas_pool.assert_called_with(expected)
        self.assertFalse(self.pool.check_create_complete(props))
        self.assertTrue(self.pool.check_create_complete(props))
    def test_create_missing_properties(self):
        # Each of these properties is mandatory when no loadbalancer is set;
        # missing 'listener' raises a dedicated exception type.
        self.patchobject(pool.Pool, 'is_service_available',
                         return_value=(True, None))
        for prop in ('lb_algorithm', 'listener', 'protocol'):
            tmpl = yaml.safe_load(inline_templates.POOL_TEMPLATE)
            del tmpl['resources']['pool']['properties']['loadbalancer']
            del tmpl['resources']['pool']['properties'][prop]
            self._create_stack(tmpl=yaml.dump(tmpl))
            if prop == 'listener':
                self.assertRaises(exception.PropertyUnspecifiedError,
                                  self.pool.validate)
            else:
                self.assertRaises(exception.StackValidationFailed,
                                  self.pool.validate)
    def test_show_resource(self):
        self._create_stack()
        self.pool.resource_id_set('1234')
        self.neutron_client.show_lbaas_pool.return_value = {
            'pool': {'id': '1234'}
        }
        self.assertEqual(self.pool._show_resource(), {'id': '1234'})
        self.neutron_client.show_lbaas_pool.assert_called_with('1234')
    def test_update(self):
        self._create_stack()
        self.pool.resource_id_set('1234')
        self.neutron_client.show_loadbalancer.side_effect = [
            {'loadbalancer': {'provisioning_status': 'PENDING_UPDATE'}},
            {'loadbalancer': {'provisioning_status': 'PENDING_UPDATE'}},
            {'loadbalancer': {'provisioning_status': 'ACTIVE'}},
        ]
        # First update attempt fails with a busy LB, then succeeds.
        self.neutron_client.update_lbaas_pool.side_effect = [
            exceptions.StateInvalidClient, None]
        prop_diff = {
            'admin_state_up': False,
            'name': 'your_pool',
            'lb_algorithm': 'SOURCE_IP'
        }
        prop_diff = self.pool.handle_update(None, None, prop_diff)
        self.assertFalse(self.pool.check_update_complete(prop_diff))
        self.assertFalse(self.pool._update_called)
        self.neutron_client.update_lbaas_pool.assert_called_with(
            '1234', {'pool': prop_diff})
        self.assertFalse(self.pool.check_update_complete(prop_diff))
        self.assertTrue(self.pool._update_called)
        self.neutron_client.update_lbaas_pool.assert_called_with(
            '1234', {'pool': prop_diff})
        self.assertFalse(self.pool.check_update_complete(prop_diff))
        self.assertTrue(self.pool.check_update_complete(prop_diff))
    def test_delete(self):
        self._create_stack()
        self.pool.resource_id_set('1234')
        self.neutron_client.show_loadbalancer.side_effect = [
            {'loadbalancer': {'provisioning_status': 'PENDING_UPDATE'}},
            {'loadbalancer': {'provisioning_status': 'PENDING_UPDATE'}},
            {'loadbalancer': {'provisioning_status': 'ACTIVE'}},
        ]
        # First delete attempt fails with a busy LB, then succeeds.
        self.neutron_client.delete_lbaas_pool.side_effect = [
            exceptions.StateInvalidClient, None]
        self.pool.handle_delete()
        self.assertFalse(self.pool.check_delete_complete(None))
        self.assertFalse(self.pool._delete_called)
        self.assertFalse(self.pool.check_delete_complete(None))
        self.assertTrue(self.pool._delete_called)
        self.neutron_client.delete_lbaas_pool.assert_called_with('1234')
        self.assertFalse(self.pool.check_delete_complete(None))
        self.assertTrue(self.pool.check_delete_complete(None))
    def test_delete_already_gone(self):
        # A NotFound on delete means the pool is gone: treat as success.
        self._create_stack()
        self.pool.resource_id_set('1234')
        self.neutron_client.delete_lbaas_pool.side_effect = (
            exceptions.NotFound)
        self.pool.handle_delete()
        self.assertTrue(self.pool.check_delete_complete(None))
    def test_delete_failed(self):
        # Any other client error must propagate to the caller.
        self._create_stack()
        self.pool.resource_id_set('1234')
        self.neutron_client.delete_lbaas_pool.side_effect = (
            exceptions.Unauthorized)
        self.pool.handle_delete()
        self.assertRaises(exceptions.Unauthorized,
                          self.pool.check_delete_complete, None)
| 41.409524
| 78
| 0.645009
|
4a05cd915b66ef2be12b655af2302b7e7c8e280b
| 980
|
py
|
Python
|
pigeon_sort.py
|
gary-mayfield/AOTW
|
e320342f8918d2bf0352d8479d866dbc7db58e5e
|
[
"MIT"
] | null | null | null |
pigeon_sort.py
|
gary-mayfield/AOTW
|
e320342f8918d2bf0352d8479d866dbc7db58e5e
|
[
"MIT"
] | null | null | null |
pigeon_sort.py
|
gary-mayfield/AOTW
|
e320342f8918d2bf0352d8479d866dbc7db58e5e
|
[
"MIT"
] | null | null | null |
from random import randint
def pigeon_sort(array):
    """Sort a list of integers in place using pigeonhole sort; return it.

    Runs in O(n + k) time and O(k) extra space, where
    k = max(array) - min(array) + 1, so it is only appropriate when the
    value range is not much larger than the input size. Handles negative
    values and duplicates; an empty list is returned unchanged.
    """
    if not array:
        return array
    # Use the builtins instead of a manual scan (the original also
    # shadowed the builtin names min/max).
    low = min(array)
    counts = [0] * (max(array) - low + 1)
    for value in array:
        counts[value - low] += 1
    # Replay the counts back into the original list. The value for a
    # hole is recoverable as low + offset, so no parallel "holes" array
    # of stored values is needed (the original kept one, and both
    # branches of its if/else performed the identical increment).
    index = 0
    for offset, repeat in enumerate(counts):
        for _ in range(repeat):
            array[index] = low + offset
            index += 1
    return array
if __name__ == '__main__':
    # Demo: sort 25 random integers and show before/after.
    sample = [randint(1, 100) for _ in range(25)]
    print(sample)
    print(pigeon_sort(sample))
| 23.902439
| 54
| 0.537755
|
4a05ce0705d273cc1716f0d56fb9622251e2fbef
| 801
|
py
|
Python
|
test_project/urls.py
|
trotmarin/test_project_2
|
a50c588398e86f6427eb18efb3c715ab7151e697
|
[
"MIT"
] | null | null | null |
test_project/urls.py
|
trotmarin/test_project_2
|
a50c588398e86f6427eb18efb3c715ab7151e697
|
[
"MIT"
] | null | null | null |
test_project/urls.py
|
trotmarin/test_project_2
|
a50c588398e86f6427eb18efb3c715ab7151e697
|
[
"MIT"
] | null | null | null |
"""test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route table: the Django admin plus everything else delegated to testapp.
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in admin site
    path('', include('testapp.urls'))  # all remaining URLs handled by testapp
]
| 34.826087
| 77
| 0.705368
|
4a05ce19aafb13843c5f1eaf3b36eabc0646a8ca
| 6,000
|
py
|
Python
|
src/oidctest/cp/op_handler.py
|
dannysauer/oidctest
|
e7593e02af7caa71f92220ad0f5b67bb40e30f97
|
[
"Apache-2.0"
] | 53
|
2017-05-11T18:21:28.000Z
|
2021-01-04T02:17:20.000Z
|
src/oidctest/cp/op_handler.py
|
dannysauer/oidctest
|
e7593e02af7caa71f92220ad0f5b67bb40e30f97
|
[
"Apache-2.0"
] | 222
|
2017-04-14T17:45:18.000Z
|
2020-06-24T17:32:42.000Z
|
src/oidctest/cp/op_handler.py
|
dannysauer/oidctest
|
e7593e02af7caa71f92220ad0f5b67bb40e30f97
|
[
"Apache-2.0"
] | 19
|
2017-06-21T11:19:45.000Z
|
2020-09-30T23:41:47.000Z
|
import copy
import json
import os
from oic import rndstr
from oic.utils.keyio import KeyJar
from oic.utils.keyio import key_summary
from oic.utils.sdb import create_session_db
from otest.conversation import Conversation
from oidctest import UnknownTestID
def write_jwks_uri(op, op_arg, folder):
    """Write the provider's JWKS to a static file and point *op* at it.

    Serializes ``op_arg['jwks']`` into a randomly named
    ``jwks_<rnd>.json`` under ``<folder>/static`` and records both the
    public URI (``op.jwks_uri``, built from ``op_arg['baseurl']``) and
    the local file path (``op.jwks_name``) on the provider.
    """
    _name = "jwks_{}.json".format(rndstr())
    filename = os.path.join(folder, 'static', _name)
    # The with-statement closes the file; the original's explicit
    # f.close() inside the block was redundant.
    with open(filename, "w") as f:
        f.write(json.dumps(op_arg["jwks"]))
    op.jwks_uri = "{}static/{}".format(op_arg["baseurl"], _name)
    op.jwks_name = filename
def init_keyjar(op, kj, com_args):
    """Attach a fresh KeyJar to *op*, seeded from *kj*'s unowned bundles.

    Each key in the source bundles is marked active
    (``inactive_since = 0``) before a copy of the bundle is added under
    the empty-issuer slot of the new jar.
    """
    op.keyjar = KeyJar()
    if 'verify_ssl' in com_args:
        op.keyjar.verify_ssl = com_args['verify_ssl']
    for bundle in kj.issuer_keys['']:
        for key in bundle.keys():
            key.inactive_since = 0
        op.keyjar.add_kb('', copy.copy(bundle))
class OPHandler(object):
    """Create and cache one OP provider instance per (operator, test) pair.

    ``get`` returns the cached provider for ``oper_id/test_id``,
    refreshing its key material when the discovery endpoint is hit, and
    lazily builds a new provider via ``setup_op`` on first use.
    """
    def __init__(self, provider_cls, op_args, com_args, test_conf, folder,
                 check_session_iframe=''):
        # provider_cls: class used to instantiate each OP provider.
        # op_args/com_args: per-provider and common constructor arguments.
        self.provider_cls = provider_cls
        self.op_args = op_args
        self.com_args = com_args
        self.test_conf = test_conf  # elsewhere called flows
        self.folder = folder
        self.op = {}  # cache: "oper_id/test_id" -> provider instance
        self.check_session_iframe = check_session_iframe
    def get(self, oper_id, test_id, events, endpoint):
        """Return (provider, path, cache_key) for this operator and test.

        On a cache hit the provider's events are replaced and, for the
        discovery endpoint, its JWKS is rewritten (some tests need
        special key material). On a miss a new provider is built and
        test-specific logout/backchannel capabilities are applied.
        """
        # addr = get_client_address(environ)
        key = path = '{}/{}'.format(oper_id, test_id)
        try:
            _op = self.op[key]
            _op.events = events
            if endpoint == '.well-known/openid-configuration':
                if test_id == 'rp-key-rotation-op-sign-key-native':
                    # Keys must NOT be refreshed for this test.
                    pass
                elif test_id == 'rp-id_token-kid-absent-multiple-jwks':
                    # Serve the alternate multi-key JWKS from op_args['marg'].
                    setattr(_op, 'keys', self.op_args['marg']['keys'])
                    _op_args = {
                        'baseurl': self.op_args['baseurl'],
                        'jwks': self.op_args['marg']['jwks']
                    }
                    write_jwks_uri(_op, _op_args, self.folder)
                else:
                    # Default: rebuild the key jar and republish the JWKS.
                    init_keyjar(_op, self.op_args['keyjar'], self.com_args)
                    _kj = _op.keyjar.export_jwks(True, '')
                    _op.keyjar.import_jwks(_kj, _op.name)
                    write_jwks_uri(_op, self.op_args, self.folder)
        except KeyError:
            # Cache miss: build a new provider for this test.
            if test_id in ['rp-id_token-kid-absent-multiple-jwks']:
                _op_args = {}
                for param in ['baseurl', 'cookie_name', 'cookie_ttl',
                              'endpoints']:
                    _op_args[param] = self.op_args[param]
                for param in ["jwks", "keys"]:
                    _op_args[param] = self.op_args["marg"][param]
                _op = self.setup_op(oper_id, test_id, self.com_args, _op_args,
                                    self.test_conf, events)
            else:
                _op = self.setup_op(oper_id, test_id, self.com_args,
                                    self.op_args, self.test_conf, events)
            # Enable the logout capabilities the test family requires.
            if test_id.startswith('rp-init-logout-session'):
                _csi = self.check_session_iframe.replace(
                    '<PATH>', '{}/{}'.format(oper_id, test_id))
                _op.capabilities['check_session_iframe'] = _csi
            elif test_id.startswith('rp-backchannel-'):
                _op.capabilities['backchannel_logout_supported'] = True
                _op.capabilities['backchannel_logout_session_supported'] = True
            elif test_id.startswith('rp-frontchannel-'):
                _op.capabilities['frontchannel_logout_supported'] = True
                _op.capabilities['frontchannel_logout_session_supported'] = True
            _op.conv = Conversation(test_id, _op, None)
            _op.orig_keys = key_summary(_op.keyjar, '').split(', ')
            self.op[key] = _op
        return _op, path, key
    def setup_op(self, oper_id, test_id, com_args, op_arg, test_conf, events):
        """Instantiate and configure a provider for one test.

        Builds a session DB, applies op_arg values (key jars via
        init_keyjar), publishes the JWKS, scopes baseurl/logout URLs to
        ``<baseurl>/<oper_id>/<test_id>`` and applies the test's
        capability/claims/behavior overrides from *test_conf*.
        Raises UnknownTestID when the test is not configured.
        """
        _sdb = create_session_db(com_args["baseurl"], 'automover', '430X')
        op = self.provider_cls(sdb=_sdb, **com_args)
        op.events = events
        op.oper_id = oper_id
        op.test_id = test_id
        # Authentication methods need a back-reference to the provider.
        for _authn in com_args["authn_broker"]:
            _authn.srv = op
        for key, val in list(op_arg.items()):
            if key == 'keyjar':
                init_keyjar(op, val, com_args)
            else:
                setattr(op, key, val)
        if not op.cookie_path:
            op.cookie_path = '/'
        write_jwks_uri(op, op_arg, self.folder)
        if op.baseurl.endswith("/"):
            div = ""
        else:
            div = "/"
        # The provider name/baseurl is unique per (oper_id, test_id).
        op.name = op.baseurl = "{}{}{}/{}".format(op.baseurl, div, oper_id,
                                                  test_id)
        op.logout_verify_url = '{}/{}'.format(op.name, op.logout_path)
        op.post_logout_page = "{}/{}".format(op.baseurl, "post_logout_page")
        # Re-key the jar under the new provider name.
        _kj = op.keyjar.export_jwks(True, '')
        op.keyjar.import_jwks(_kj, op.name)
        _tc = test_conf[test_id]
        if not _tc:
            raise UnknownTestID(test_id)
        try:
            _capa = _tc['capabilities']
        except KeyError:
            pass
        else:
            op.capabilities.update(_capa)
            # update jwx
            for _typ in ["signing_alg", "encryption_alg", "encryption_enc"]:
                for item in ["id_token", "userinfo"]:
                    cap_param = '{}_{}_values_supported'.format(item, _typ)
                    try:
                        op.jwx_def[_typ][item] = _capa[cap_param][0]
                    except KeyError:
                        pass
        try:
            op.claims_type = _tc["claims"]
        except KeyError:
            pass
        try:
            op.behavior_type = _tc["behavior"]
            op.server.behavior_type = _tc["behavior"]
        except KeyError:
            pass
        return op
| 36.363636
| 84
| 0.539333
|
4a05d091e0eb3798b3d3e986b010f3ca21b32356
| 2,652
|
py
|
Python
|
tests/unit/utils/test_misc.py
|
satrialoka/trieste
|
b58eb924a49ad86e27fa2e082defe2d37afcc14a
|
[
"Apache-2.0"
] | 119
|
2020-10-06T16:27:05.000Z
|
2022-03-28T00:27:18.000Z
|
tests/unit/utils/test_misc.py
|
satrialoka/trieste
|
b58eb924a49ad86e27fa2e082defe2d37afcc14a
|
[
"Apache-2.0"
] | 275
|
2020-10-07T22:32:53.000Z
|
2022-03-31T15:57:44.000Z
|
tests/unit/utils/test_misc.py
|
satrialoka/trieste
|
b58eb924a49ad86e27fa2e082defe2d37afcc14a
|
[
"Apache-2.0"
] | 30
|
2020-10-08T23:00:01.000Z
|
2022-02-25T17:04:22.000Z
|
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import ShapeLike, various_shapes
from trieste.types import TensorType
from trieste.utils.misc import Err, Ok, jit, shapes_equal, to_numpy
@pytest.mark.parametrize("apply", [True, False])
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        {"autograph": False},
        {"input_signature": [tf.TensorSpec(()), tf.TensorSpec(())]},
    ],
)
def test_jit_function_behaviour_unchanged(apply: bool, kwargs: Any) -> None:
    """Wrapping with jit must not change what the function computes."""

    @jit(apply, **kwargs)
    def plus(a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
        return a + b

    expected = tf.constant(3.0)
    assert plus(tf.constant(1.0), tf.constant(2.0)) == expected
@pytest.mark.parametrize("apply", [True, False])
@pytest.mark.parametrize("kwargs", [{}, {"autograph": False}])
def test_jit_compiles_function(apply: bool, kwargs: Any) -> None:
    """jit yields a tf.function wrapper exactly when apply is True."""

    @jit(apply, **kwargs)
    def wrapped() -> tf.Tensor:
        return tf.constant(0)

    compiled_type = type(tf.function(lambda x: x))
    assert isinstance(wrapped, compiled_type) == apply
@pytest.mark.parametrize("this_shape", various_shapes())
@pytest.mark.parametrize("that_shape", various_shapes())
def test_shapes_equal(this_shape: ShapeLike, that_shape: ShapeLike) -> None:
    """shapes_equal agrees with direct shape comparison for every pair."""
    expected = this_shape == that_shape
    assert shapes_equal(tf.ones(this_shape), tf.ones(that_shape)) == expected
@pytest.mark.parametrize(
    "t, expected",
    [
        (tf.constant(0), np.array(0)),
        (np.arange(12).reshape(3, -1), np.arange(12).reshape(3, -1)),
        (tf.reshape(tf.range(12), [3, -1]), np.arange(12).reshape(3, -1)),
    ],
)
def test_to_numpy(t: TensorType, expected: np.ndarray) -> None:
    """to_numpy converts tensors, and passes ndarrays through, unchanged."""
    converted = to_numpy(t)
    npt.assert_array_equal(converted, expected)
def test_ok() -> None:
    """Ok(value) unwraps to the value and reports is_ok/is_err correctly."""
    result = Ok(1)
    assert result.unwrap() == 1
    assert result.is_ok is True
    assert result.is_err is False
def test_err() -> None:
    """Err(exc) re-raises on unwrap and reports is_ok/is_err correctly."""
    error = ValueError()
    with pytest.raises(ValueError):
        Err(error).unwrap()
    assert Err(error).is_ok is False
    assert Err(error).is_err is True
| 31.571429
| 95
| 0.688537
|
4a05d212fb6ebb36722dea75c5b3348cc5f4fd11
| 1,512
|
py
|
Python
|
disciplinesite/demo/models.py
|
pombredanne/discipline
|
f9d2041c9f1fbb5577a1682d858d487c1f4fce1b
|
[
"MIT"
] | null | null | null |
disciplinesite/demo/models.py
|
pombredanne/discipline
|
f9d2041c9f1fbb5577a1682d858d487c1f4fce1b
|
[
"MIT"
] | null | null | null |
disciplinesite/demo/models.py
|
pombredanne/discipline
|
f9d2041c9f1fbb5577a1682d858d487c1f4fce1b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db.models import *
from discipline.models import DisciplinedModel
class LanguageKey(DisciplinedModel):
    """A unique language identifier used to tag words."""

    # Short code, e.g. an ISO language code; must be unique.
    code = CharField(max_length=6, unique=True)

    def __unicode__(self):
        return self.code
class Word(DisciplinedModel):
    """A single word in a specific language."""

    def __unicode__(self):
        label = "%s (%s)"
        return label % (self.full, self.language.code)

    # Full representation
    full = CharField(max_length=70, db_index=True)
    # The language of the word
    language = ForeignKey(LanguageKey, related_name="words")
class Concept(DisciplinedModel):
    """A language-independent concept linked to words via connections."""

    def _connected_word_labels(self, exclude=None):
        # One unicode label per connected word. The optional *exclude*
        # skips a single connection; WordConceptConnection.__unicode__
        # passes itself here to avoid rendering itself recursively.
        return [con.word.__unicode__() for con
                in self.word_connections.all() if con != exclude]

    def __unicode__(self, exclude=None):
        cons = self._connected_word_labels(exclude)
        if cons:
            return "Concept: " + ", ".join(cons)
        return "Abstract Concept"

    def word_list(self):
        """Comma-separated labels of all connected words ('' if none)."""
        # exclude=None keeps every connection, matching the original
        # duplicated comprehension this helper replaces.
        cons = self._connected_word_labels()
        if cons:
            return ", ".join(cons)
        return ""
    word_list.allow_tags = True
# A connection between a concept and a word
class WordConceptConnection(DisciplinedModel):
    """Join model linking a Word to a Concept."""

    word = ForeignKey(Word, related_name="concept_connections")
    concept = ForeignKey(Concept, related_name="word_connections")

    class Meta:
        verbose_name = "word-concept connection"

    def __unicode__(self):
        # U+2194 renders as a left-right arrow: "word <-> concept".
        # Excluding self prevents the concept from rendering this
        # connection back again.
        label = u"%s \u2194 %s"
        return label % (
            self.word.__unicode__(),
            self.concept.__unicode__(exclude=self)
        )
| 27
| 66
| 0.635582
|
4a05d48f07f00d7e9a55df39c41db0a2d2ea31cb
| 12,752
|
py
|
Python
|
bearfield/document.py
|
lunixbochs/bearfield
|
1dd2f6932af900393ca764d8aa1ec6c043dd24ed
|
[
"BSD-3-Clause"
] | 1
|
2020-02-10T04:10:47.000Z
|
2020-02-10T04:10:47.000Z
|
bearfield/document.py
|
lunixbochs/bearfield
|
1dd2f6932af900393ca764d8aa1ec6c043dd24ed
|
[
"BSD-3-Clause"
] | null | null | null |
bearfield/document.py
|
lunixbochs/bearfield
|
1dd2f6932af900393ca764d8aa1ec6c043dd24ed
|
[
"BSD-3-Clause"
] | null | null | null |
"""Document and subdocument classes."""
from __future__ import absolute_import
from .cursor import Cursor
from .encoders import SortEncoder, UpdateEncoder
from .errors import OperationError, ValidationError
from .meta import DocumentBuilder
from .query import Query
from .utils import get_projection
import six
class Document(six.with_metaclass(DocumentBuilder, object)):
    """
    A document or subdocument. Document properties are defined in an optional Meta subclass. In
    order for a document to be saved to a database it must associate itself with a named connection
    by setting the 'connection' meta attribute to that connection name. The collection name is the
    connection prefix plus the snake cased class name of the document. This may be overridden by
    setting the 'collection' property to the desired name of the collection. The connection's
    prefix will still be prepended to the name.
    Fields are defined on the document by assigning Field objects to class attributes. See the
    Field class for details on field parameters.
    A document may be provided as the type to a Field. This will cause that field to be treated as
    a subdocument.
    """
    def __new__(cls, *args, **kwargs):
        """Create new instance of Document."""
        # Per-instance bookkeeping (set here so _decode can build an
        # instance without running __init__ first):
        #   _raw     -- raw MongoDB values as last read from/written to the db
        #   _attrs   -- decoded values assigned through attribute access
        #   _dirty   -- field names changed since the last save/reset
        #   _partial -- field subset for partial documents (None = full doc)
        doc = object.__new__(cls)
        doc._raw = {}
        doc._attrs = {}
        doc._dirty = set()
        doc._partial = None
        return doc
    @classmethod
    def _decode(cls, raw, fields=None):
        """
        Return a document decoded from a MongoDB record. If fields is not None then a partial
        document will be created with those field values.
        """
        if raw is None:
            return None
        # Bypass __init__: state comes straight from the raw record.
        doc = cls.__new__(cls)
        doc._raw = raw.copy()
        doc._partial = cls._meta.get_partial(fields)
        doc.__init__()
        return doc
    @classmethod
    def _validate(cls, raw, partial=None, update=False):
        """
        Validate the raw document. Raise a ValidationError if validation fails. If the raw document
        is an update document then update should be set to True.
        """
        if update:
            # For update documents only the $set values are validated.
            raw = raw.get('$set', {})
        required = []
        for name, field in six.iteritems(cls._meta.get_fields(partial)):
            value = raw.get(name)
            if value is None:
                if field.require:
                    required.append(name)
            else:
                field.validate(cls, name, value)
        # Missing required fields only fail full-document validation;
        # updates may legitimately touch a subset of fields.
        if not update and required:
            doc = cls.__name__
            required = ', '.join(sorted(required))
            raise ValidationError("{} is missing required fields: {}".format(doc, required))
    @classmethod
    def create_indexes(cls, connection=None, indexes=None, **kwargs):
        """
        Create all indexes from this document's Meta, or on the collection.
        Indexes must be a list of pymongo.IndexModel().
        Be careful about calling this for large collections.
        """
        if indexes is None:
            indexes = cls._meta.indexes
        if not indexes:
            return
        collection = cls._meta.get_collection(connection)
        return collection.create_indexes(indexes, **kwargs)
    @classmethod
    def find(cls, query=None, fields=None, connection=None, raw=None, sort=None, **options):
        """
        Query the database for documents. Return a cursor for further refining or iterating over
        the results. If fields is not None only return the field values in that list. Additional
        args are passed to pymongo's find().
        """
        collection = cls._meta.get_collection(connection)
        fields = cls._meta.get_partial(fields)
        if not raw:
            # Translate field-name sorts into encoded form unless the
            # caller supplied a raw (already-encoded) query.
            sort = SortEncoder(cls).encode(sort)
        return Cursor(cls, collection, query, fields, raw, sort=sort, **options)
    @classmethod
    def find_one(cls, query=None, fields=None, connection=None, raw=None, sort=None, **options):
        """
        Query the database for a single document. Return the document or None if not found.
        Additional args are passed to pymongo's find(). If fields is not None only return the field
        values in that list.
        """
        collection = cls._meta.get_collection(connection)
        fields = cls._meta.get_partial(fields)
        options.pop('manipulate', None)
        criteria = Query(query).encode(cls, raw)
        if not raw:
            sort = SortEncoder(cls).encode(sort)
        return cls._decode(collection.find_one(criteria, projection=get_projection(fields),
                                               sort=sort, **options), fields)
    @classmethod
    def find_and_modify(cls, query, update, fields=None, connection=None, raw=None, sort=None,
                        new=None, **options):
        """
        Query the database for a document and update it. If new is true, returns the modified
        document, otherwise returns the original document. Additional args are passed to pymongo's
        find_one_and_update() and include:
        upsert: When true, if no documents match, a new document is created.
        """
        if cls._meta.disable_update:
            msg = "updates to {} are disabled".format(cls.__class__.__name__)
            raise OperationError(msg)
        collection = cls._meta.get_collection(connection)
        fields = cls._meta.get_partial(fields)
        criteria = Query(query).encode(cls, raw)
        if new is None:
            new = False
        if not raw:
            sort = SortEncoder(cls).encode(sort)
            update = UpdateEncoder(cls).encode(update)
        if options.get('upsert'):
            # On upsert, fill in document defaults via $setOnInsert for
            # any field the update statement does not already touch, so
            # a freshly inserted document is complete.
            specified_update_fields = {fieldname
                                       for doc in update.keys()
                                       for fieldname in update[doc]}
            defaults = {}
            for name, default in six.iteritems(cls._meta.defaults):
                if default is not None and name not in specified_update_fields:
                    if hasattr(default, '__call__'):
                        # Callable defaults are evaluated and encoded now.
                        field = cls._meta.get_field(name)
                        default = field.encode(cls._meta.cls, name, default())
                    defaults[name] = default
            # Caller-provided $setOnInsert values win over defaults.
            set_on_insert = update.get('$setOnInsert', {})
            defaults.update(set_on_insert)
            update.update({
                '$setOnInsert': defaults
            })
        raw = collection.find_one_and_update(criteria, update, projection=get_projection(fields),
                                             new=new, sort=sort, **options)
        return cls._decode(raw, fields)
    @classmethod
    def count(cls, connection=None):
        """Count the number of objects in this collection."""
        collection = cls._meta.get_collection(connection)
        return collection.count()
    def __init__(self, *args, **kwargs):
        """Initialize the document with values."""
        # Keyword arguments are applied through setattr so field
        # descriptors run and dirty tracking is updated.
        for name, value in six.iteritems(kwargs):
            setattr(self, name, value)
    def _encode(self, update=False):
        """
        Return the document as a dictionary suitable for saving. If update is
        True then an update document is returned.
        """
        def modify(name, field):
            # Apply the field's modifier hook (if any) before encoding.
            if getattr(field, 'modifier', None):
                setattr(self, name, field.modifier(getattr(self, name)))
        raw = {}
        if update:
            # Build a MongoDB update doc from dirty fields only:
            # non-None dirty values go to $set, None values to $unset.
            sets = {}
            unsets = {}
            for name, field in six.iteritems(self._meta.get_fields(self._partial)):
                modify(name, field)
                if name not in self._dirty:
                    continue
                value = self._attrs.get(name)
                if value is None:
                    unsets[name] = ""
                else:
                    sets[name] = field.encode(self.__class__, name, value)
            if sets:
                raw['$set'] = sets
            if unsets:
                raw['$unset'] = unsets
        else:
            # Full-document encoding: prefer decoded attribute values,
            # falling back to the stored raw value.
            for name, field in six.iteritems(self._meta.get_fields(self._partial)):
                modify(name, field)
                if name in self._attrs:
                    value = self._attrs[name]
                    if value is not None:
                        value = field.encode(self.__class__, name, value)
                else:
                    value = self._raw.get(name)
                if value is not None:
                    raw[name] = value
        return raw
    def _reset(self, raw):
        """Reset internal field storage using the raw document."""
        # Called after a successful write: the raw record becomes the
        # new baseline and all dirty state is cleared.
        self._raw.update(raw)
        self._attrs = {}
        self._dirty = set()
    def __repr__(self):
        attrs = ['{}={}'.format(name, repr(value)) for name, value in self._encode().items()]
        return '{}({})'.format(self.__class__.__name__, ', '.join(attrs))
    def save(self, connection=None, **options):
        """
        Save the model to the database. Effectively performs an insert if the _id field is None and
        a full document update otherwise. Additional args are passed to pymongo's save().
        Returns self for assignment.
        """
        if self._meta.disable_save:
            msg = "saves to {} are disabled".format(self.__class__.__name__)
            raise OperationError(msg)
        if self._partial:
            # A partial document lacks fields; saving it would drop data.
            raise OperationError("unable to save partial document")
        collection = self._meta.get_collection(connection)
        raw = self._encode()
        self._validate(raw, self._partial)
        options.pop('manipulate', None)
        self._id = collection.save(raw, manipulate=True, **options)
        self._reset(raw)
        return self
    def insert(self, connection=None, **options):
        """
        Insert the document. This ignores the state of the _id field and forces an insert. This may
        necessitate setting _id to None prior to calling insert. Though this could be used to
        insert the same document into multiple databases. Additional args are passed to pymongo's
        insert().
        Returns self for assignment.
        """
        if self._meta.disable_insert:
            msg = "inserts to {} are disabled".format(self.__class__.__name__)
            raise OperationError(msg)
        collection = self._meta.get_collection(connection)
        raw = self._encode()
        self._validate(raw, self._partial)
        options.pop('manipulate', None)
        self._id = collection.insert(raw, manipulate=True, **options)
        self._reset(raw)
        return self
    def update(self, update=None, connection=None, raw=None, sort=None, **options):
        """
        Update the document in the database using the provided update statement. If update is None
        (the default) an update statement is created to set all of the dirty fields in the
        document. This uses the _id field to find the document to update and will raise an error if
        no _id is set. Additional args are passed to pymongo's update(). Return True if an update
        was performed or False if no update was needed.
        """
        if self._meta.disable_update:
            msg = "updates to {} are disabled".format(self.__class__.__name__)
            raise OperationError(msg)
        if not self._id:
            raise OperationError("unable to update document without an _id")
        collection = self._meta.get_collection(connection)
        if not raw:
            sort = SortEncoder(self.__class__).encode(sort)
        if not update:
            # No explicit statement: derive $set/$unset from dirty fields.
            update = self._encode(True)
        elif not raw:
            update = UpdateEncoder(self.__class__).encode(update)
        self._validate(update.get('$set', {}), self._partial, True)
        if update:
            options.pop('multi', None)
            options.pop('new', None)
            options.pop('fields', None)
            # Fetch the post-update document so local state can be reset
            # to match what is now stored.
            res = collection.find_and_modify(
                {'_id': self._id}, update, projection=get_projection(self._partial), multi=False,
                new=True, sort=sort, **options)
            self._reset(res)
            return True
        return False
    def remove(self, connection=None, **options):
        """
        Remove the document from the database. Additional args are passed to pymongo's remove().
        Return True if the document was removed or False if there was nothing to remove.
        """
        if self._meta.disable_remove:
            msg = "removal of {} is disabled".format(self.__class__.__name__)
            raise OperationError(msg)
        collection = self._meta.get_collection(connection)
        if self._id:
            res = collection.remove(self._id)
            # 'n' is the removed-document count reported by the server.
            return res.get('n', 0) > 0
        return False
| 41.268608
| 99
| 0.602651
|
4a05d5ed13016a7d19743655b5b2184982e8b05e
| 2,224
|
py
|
Python
|
util/image_pool.py
|
Huage001/Artistic-Video-Partial-Conv-Depth-Loss
|
c990b8bcc88ce0655f3ac78b526324b1ff5deb41
|
[
"BSD-3-Clause"
] | 9
|
2020-10-14T03:32:51.000Z
|
2022-01-14T20:38:05.000Z
|
util/image_pool.py
|
Huage001/Artistic-Video-Partial-Conv-Depth-Loss
|
c990b8bcc88ce0655f3ac78b526324b1ff5deb41
|
[
"BSD-3-Clause"
] | 1
|
2021-03-03T20:15:42.000Z
|
2021-08-07T12:29:22.000Z
|
util/image_pool.py
|
Huage001/Artistic-Video-Partial-Conv-Depth-Loss
|
c990b8bcc88ce0655f3ac78b526324b1ff5deb41
|
[
"BSD-3-Clause"
] | 2
|
2020-09-02T00:58:29.000Z
|
2020-11-25T16:41:51.000Z
|
import random
import torch
class ImagePool:
    """Buffer of previously generated images.

    Lets a discriminator train against a history of generator outputs
    rather than only the most recent batch.
    """

    def __init__(self, pool_size):
        """Create a pool holding at most *pool_size* images.

        A pool_size of 0 disables buffering entirely.
        """
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch drawn from the pool.

        While the buffer is filling, every incoming image is stored and
        passed through. Once full, each image is either returned
        unchanged (50% chance) or swapped with a randomly chosen stored
        image (50% chance), which is returned in its place.
        """
        if self.pool_size == 0:
            # Buffering disabled: hand back the input untouched.
            return images
        batch = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            if self.num_imgs < self.pool_size:
                # Buffer still filling: store and pass through.
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                batch.append(image)
            elif random.uniform(0, 1) > 0.5:
                # Return a random stored image; keep the new one instead.
                swap_id = random.randint(0, self.pool_size - 1)
                stored = self.images[swap_id].clone()
                self.images[swap_id] = image
                batch.append(stored)
            else:
                # Pass the current image through unchanged.
                batch.append(image)
        return torch.cat(batch, 0)
| 40.436364
| 140
| 0.607014
|
4a05d6cec22b74e37ab76aefd3932969bfd3c0bd
| 8,892
|
py
|
Python
|
fixture/contact.py
|
MakatevaElvira/python_training
|
323c7dd0b4f6316348d7a4c66085cae445b7edbe
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
MakatevaElvira/python_training
|
323c7dd0b4f6316348d7a4c66085cae445b7edbe
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
MakatevaElvira/python_training
|
323c7dd0b4f6316348d7a4c66085cae445b7edbe
|
[
"Apache-2.0"
] | null | null | null |
import re
from model.contact import Contact
class ContactHelper:
    """Page-object helper: contact CRUD and list-scraping actions in the address-book web UI."""

    def __init__(self, app):
        # app is the application fixture exposing the webdriver (app.wd) and shared helpers
        self.app = app

    def init_contact_creation(self):
        """Open the 'add new contact' form."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()

    def open_contact_page(self):
        """Navigate to the home (contact list) page."""
        self.app.navigation.home()

    def init_first_contact_edition(self):
        """Open the edit form for the first contact in the list."""
        self.init_contact_edition_by_index(0)

    def init_contact_edition_by_index(self, index):
        """Open the edit form for the contact at the given row index."""
        wd = self.app.wd
        wd.find_elements_by_xpath("//*[@title='Edit']")[index].click()

    def open_contact_view_by_index(self, index):
        """Open the read-only details view for the contact at the given row index."""
        wd = self.app.wd
        wd.find_elements_by_xpath("//tr[@name='entry']/td[7]")[index].click()

    def select_first_contact(self):
        """Tick the selection checkbox of the first contact."""
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def select_contact_by_index(self, index):
        """Tick the selection checkbox of the contact at the given row index."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def fill_contact_form(self, contact):
        """Fill the contact form fields from a Contact model object."""
        self.app.fill_field("firstname", contact.name)
        self.app.fill_field("middlename", contact.middle_name)
        self.app.fill_field("lastname", contact.last_name)
        self.app.fill_field("company", contact.company)
        self.app.fill_field("home", contact.home_phone)
        self.app.fill_field("email", contact.email)

    def return_home(self):
        """Follow the 'home page' link back to the contact list."""
        wd = self.app.wd
        wd.find_element_by_link_text("home page").click()

    def create(self, contact):
        """Create a new contact and invalidate the cached contact list."""
        self.init_contact_creation()
        self.fill_contact_form(contact)
        self.app.submit()
        self.return_home()
        self.contact_cash = None

    def edit_first(self, contact):
        """Edit the first contact in the list."""
        self.edit_by_index(contact, 0)

    def edit_by_index(self, contact, index):
        """Edit the contact at the given index and invalidate the cached contact list."""
        self.open_contact_page()
        self.init_contact_edition_by_index(index)
        self.fill_contact_form(contact)
        self.app.update()
        self.return_home()
        self.contact_cash = None

    def delete_first(self):
        """Delete the first contact in the list."""
        self.delete_by_index(0)

    def delete_by_index(self, index):
        """Delete the contact at the given index and invalidate the cached contact list."""
        self.open_contact_page()
        self.init_contact_edition_by_index(index)
        self.app.delete()
        self.app.navigation.home()
        self.contact_cash = None

    def count(self):
        """Return the number of contacts currently listed."""
        wd = self.app.wd
        self.open_contact_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cache of the expensive list scrape; reset to None by every mutating action.
    contact_cash = None

    def get_contact_list(self):
        """Scrape the contact table into Contact objects with split phone fields (cached)."""
        if self.contact_cash is None:
            wd = self.app.wd
            self.open_contact_page()
            self.contact_cash = []
            for element in wd.find_elements_by_xpath("//tr[@name='entry']"):
                first_name = element.find_element_by_xpath("./td[3]").text
                last_name = element.find_element_by_xpath("./td[2]").text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                # the phones cell renders one phone per line: home, mobile, work
                all_phones = element.find_element_by_xpath("./td[6]").text.splitlines()
                self.contact_cash.append(Contact(name=first_name, last_name=last_name, id=id,
                                                 home_phone=all_phones[0],
                                                 mobile_phone=all_phones[1],
                                                 work_phone=all_phones[2]))
        return list(self.contact_cash)

    def get_contact_list_size(self):
        """Return the number of rows in the contact table (uncached)."""
        wd = self.app.wd
        self.open_contact_page()
        elements = wd.find_elements_by_xpath("//tr[@name='entry']")
        return len(elements)

    def get_contact_list_with_allPhones(self):
        """Scrape the contact table keeping the phones cell as a single string (cached)."""
        if self.contact_cash is None:
            wd = self.app.wd
            self.open_contact_page()
            self.contact_cash = []
            for element in wd.find_elements_by_xpath("//tr[@name='entry']"):
                first_name = element.find_element_by_xpath("./td[3]").text
                last_name = element.find_element_by_xpath("./td[2]").text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                all_phones = element.find_element_by_xpath("./td[6]").text
                self.contact_cash.append(Contact(name=first_name, last_name=last_name, id=id,
                                                 all_phones=all_phones))
        return list(self.contact_cash)

    def get_contact_list_full(self):
        """Scrape the contact table including address and e-mail columns (cached)."""
        if self.contact_cash is None:
            wd = self.app.wd
            self.open_contact_page()
            self.contact_cash = []
            for element in wd.find_elements_by_xpath("//tr[@name='entry']"):
                name = element.find_element_by_xpath("./td[3]").text
                last_name = element.find_element_by_xpath("./td[2]").text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                address = element.find_element_by_xpath("./td[4]").text
                all_emails = element.find_element_by_xpath("./td[5]").text
                all_phones = element.find_element_by_xpath("./td[6]").text
                self.contact_cash.append(Contact(name=name, last_name=last_name, id=id,
                                                 address=address, all_emails=all_emails,
                                                 all_phones=all_phones))
        return list(self.contact_cash)

    def get_contact_info_from_edit_page(self, index):
        """Read name, id and phone fields of a contact from its edit form."""
        wd = self.app.wd
        self.open_contact_page()
        self.init_contact_edition_by_index(index)
        name = wd.find_element_by_name("firstname").get_attribute("value")
        last_name = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        home = wd.find_element_by_name("home").get_attribute("value")
        mobile = wd.find_element_by_name("mobile").get_attribute("value")
        work = wd.find_element_by_name("work").get_attribute("value")
        return Contact(name=name, last_name=last_name, id=id, home_phone=home,
                       mobile_phone=mobile, work_phone=work)

    def get_contact_info_from_edit_page_full(self, index):
        """Read all fields of a contact (address, phones, e-mails) from its edit form."""
        wd = self.app.wd
        self.open_contact_page()
        self.init_contact_edition_by_index(index)
        name = wd.find_element_by_name("firstname").get_attribute("value")
        last_name = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        home = wd.find_element_by_name("home").get_attribute("value")
        mobile = wd.find_element_by_name("mobile").get_attribute("value")
        work = wd.find_element_by_name("work").get_attribute("value")
        e_mail = wd.find_element_by_name("email").get_attribute("value")
        e_mail2 = wd.find_element_by_name("email2").get_attribute("value")
        e_mail3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(name=name, last_name=last_name, id=id, address=address,
                       home_phone=home, mobile_phone=mobile, work_phone=work,
                       email=e_mail, email2=e_mail2, email3=e_mail3)

    def get_contact_from_view_page(self, index):
        """Parse the phone numbers from the read-only contact view page.

        Raises AttributeError if a phone line is missing from the page,
        i.e. the regex does not match.
        """
        wd = self.app.wd
        self.open_contact_page()
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        # each phone is rendered on its own line as "<letter>: <number>"
        home_phone = re.search("H: (.*)", text).group(1)
        mobile_phone = re.search("M: (.*)", text).group(1)
        work_phone = re.search("W: (.*)", text).group(1)
        return Contact(home_phone=home_phone,
                       mobile_phone=mobile_phone,
                       work_phone=work_phone)

    def clear(self, string):
        """Strip formatting characters (parentheses, spaces, dashes) from a phone number."""
        return re.sub("[() -]", "", string)

    def merge_phones_like_on_homePage(self, contact):
        """Join the non-empty, normalized phone numbers the way the home page renders them."""
        cleaned = [self.clear(phone) for phone in
                   (contact.home_phone, contact.mobile_phone, contact.work_phone)
                   if phone is not None]
        return "\n".join(phone for phone in cleaned if phone != "")

    def merge_emails_like_on_homePage(self, contact):
        """Join the non-empty e-mail addresses the way the home page renders them."""
        return "\n".join(email for email in (contact.email, contact.email2, contact.email3)
                         if email is not None and email != "")
| 42.14218
| 109
| 0.600652
|
4a05d77ea72c1b02954db7777409a5ddf4d360d4
| 5,246
|
py
|
Python
|
tests/ti_deps/deps/test_prev_dagrun_dep.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79
|
2021-10-15T07:32:27.000Z
|
2022-03-28T04:10:19.000Z
|
tests/ti_deps/deps/test_prev_dagrun_dep.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153
|
2021-10-15T05:23:46.000Z
|
2022-02-23T06:07:10.000Z
|
tests/ti_deps/deps/test_prev_dagrun_dep.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23
|
2021-10-15T02:36:37.000Z
|
2022-03-17T02:59:27.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from unittest.mock import Mock
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.utils.state import State
class TestPrevDagrunDep(unittest.TestCase):
    """Unit tests for PrevDagrunDep using mocked tasks and task instances."""

    def _get_task(self, **kwargs):
        """Build a task bound to a throwaway test DAG."""
        return BaseOperator(task_id='test_task', dag=DAG('test_dag'), **kwargs)

    def test_not_depends_on_past(self):
        """
        If depends on past isn't set in the task then the previous dagrun should be
        ignored, even though there is no previous_ti which would normally fail the dep
        """
        task = self._get_task(depends_on_past=False,
                              start_date=datetime(2016, 1, 1),
                              wait_for_downstream=False)
        previous = Mock(task=task,
                        state=State.SUCCESS,
                        are_dependents_done=Mock(return_value=True),
                        execution_date=datetime(2016, 1, 2))
        current = Mock(task=task, previous_ti=previous,
                       execution_date=datetime(2016, 1, 3))
        context = DepContext(ignore_depends_on_past=False)
        self.assertTrue(PrevDagrunDep().is_met(ti=current, dep_context=context))

    def test_context_ignore_depends_on_past(self):
        """
        If the context overrides depends_on_past then the dep should be met,
        even though there is no previous_ti which would normally fail the dep
        """
        task = self._get_task(depends_on_past=True,
                              start_date=datetime(2016, 1, 1),
                              wait_for_downstream=False)
        previous = Mock(task=task,
                        state=State.SUCCESS,
                        are_dependents_done=Mock(return_value=True),
                        execution_date=datetime(2016, 1, 2))
        current = Mock(task=task, previous_ti=previous,
                       execution_date=datetime(2016, 1, 3))
        context = DepContext(ignore_depends_on_past=True)
        self.assertTrue(PrevDagrunDep().is_met(ti=current, dep_context=context))

    def test_first_task_run(self):
        """
        The first task run for a TI should pass since it has no previous dagrun.
        """
        task = self._get_task(depends_on_past=True,
                              start_date=datetime(2016, 1, 1),
                              wait_for_downstream=False)
        # no previous TI at all: this is the very first run
        current = Mock(task=task, previous_ti=None,
                       execution_date=datetime(2016, 1, 1))
        context = DepContext(ignore_depends_on_past=False)
        self.assertTrue(PrevDagrunDep().is_met(ti=current, dep_context=context))

    def test_prev_ti_bad_state(self):
        """
        If the previous TI did not complete execution this dep should fail.
        """
        task = self._get_task(depends_on_past=True,
                              start_date=datetime(2016, 1, 1),
                              wait_for_downstream=False)
        previous = Mock(state=State.NONE,
                        are_dependents_done=Mock(return_value=True))
        current = Mock(task=task, previous_ti=previous,
                       execution_date=datetime(2016, 1, 2))
        context = DepContext(ignore_depends_on_past=False)
        self.assertFalse(PrevDagrunDep().is_met(ti=current, dep_context=context))

    def test_failed_wait_for_downstream(self):
        """
        If the previous TI specified to wait for the downstream tasks of the
        previous dagrun then it should fail this dep if the downstream TIs of
        the previous TI are not done.
        """
        task = self._get_task(depends_on_past=True,
                              start_date=datetime(2016, 1, 1),
                              wait_for_downstream=True)
        previous = Mock(state=State.SUCCESS,
                        are_dependents_done=Mock(return_value=False))
        current = Mock(task=task, previous_ti=previous,
                       execution_date=datetime(2016, 1, 2))
        context = DepContext(ignore_depends_on_past=False)
        self.assertFalse(PrevDagrunDep().is_met(ti=current, dep_context=context))

    def test_all_met(self):
        """
        Test to make sure all of the conditions for the dep are met
        """
        task = self._get_task(depends_on_past=True,
                              start_date=datetime(2016, 1, 1),
                              wait_for_downstream=True)
        previous = Mock(state=State.SUCCESS,
                        are_dependents_done=Mock(return_value=True))
        current = Mock(task=task,
                       execution_date=datetime(2016, 1, 2),
                       **{'get_previous_ti.return_value': previous})
        context = DepContext(ignore_depends_on_past=False)
        self.assertTrue(PrevDagrunDep().is_met(ti=current, dep_context=context))
| 43.355372
| 110
| 0.695959
|
4a05d7cf7c2c31ba13402ce16e6ae6416b88cbfb
| 78
|
py
|
Python
|
kwat/density/__init__.py
|
KwatME/ccal
|
d96dfa811482eee067f346386a2181ec514625f4
|
[
"MIT"
] | 5
|
2017-05-05T17:50:28.000Z
|
2019-01-30T19:23:02.000Z
|
kwat/density/__init__.py
|
KwatME/ccal
|
d96dfa811482eee067f346386a2181ec514625f4
|
[
"MIT"
] | 5
|
2017-05-05T01:52:31.000Z
|
2019-04-20T21:06:05.000Z
|
kwat/density/__init__.py
|
KwatME/ccal
|
d96dfa811482eee067f346386a2181ec514625f4
|
[
"MIT"
] | 5
|
2017-07-17T18:55:54.000Z
|
2019-02-02T04:46:19.000Z
|
from .get_bandwidth import get_bandwidth
from .get_density import get_density
| 26
| 40
| 0.871795
|
4a05d7f5f7d8f05e899cd5385943a59a16c0e934
| 162
|
py
|
Python
|
version.py
|
fake-name/AutoOrg
|
db17a3db7a89c7bc1e9d4bd333e82f11402c69a5
|
[
"BSD-3-Clause"
] | 5
|
2021-11-07T01:20:12.000Z
|
2022-02-02T22:13:31.000Z
|
version.py
|
fake-name/AutoOrg
|
db17a3db7a89c7bc1e9d4bd333e82f11402c69a5
|
[
"BSD-3-Clause"
] | null | null | null |
version.py
|
fake-name/AutoOrg
|
db17a3db7a89c7bc1e9d4bd333e82f11402c69a5
|
[
"BSD-3-Clause"
] | null | null | null |
# Application version string (MAJOR.MINOR.PATCH).
# Do not touch the next line's formatting, it's (manually) extracted in the EXE assembly process.
# You can increment the number, if you want.
VERSION = "0.0.1"
| 32.4
| 97
| 0.728395
|
4a05d815306de3445d951ea30dce027e46841473
| 4,949
|
py
|
Python
|
models/imagenet/mobilenetv2.py
|
RuiLin0212/EZCrop
|
5cfdc4f2413e37c1338a5f85e5562a70eabf1099
|
[
"MIT"
] | 1
|
2021-11-05T09:21:34.000Z
|
2021-11-05T09:21:34.000Z
|
models/imagenet/mobilenetv2.py
|
scott-mao/CPD
|
953a6d1dfcafca44b9960aec17b65ac9d1ad576b
|
[
"MIT"
] | null | null | null |
models/imagenet/mobilenetv2.py
|
scott-mao/CPD
|
953a6d1dfcafca44b9960aec17b65ac9d1ad576b
|
[
"MIT"
] | 1
|
2021-11-05T09:21:35.000Z
|
2021-11-05T09:21:35.000Z
|
import torch.nn as nn
import math
import pdb
def conv_bn(inp, oup, stride):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU6, used as the network stem."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """1x1 pointwise conv (no bias) -> BatchNorm -> ReLU6; keeps spatial size."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def make_divisible(x, divisible_by=8):
    """Round x up to the nearest multiple of divisible_by.

    Uses math.ceil (math is already imported at the top of this file) instead
    of importing numpy inside the function on every call, which was the only
    thing numpy was used for.
    """
    return int(math.ceil(x / divisible_by) * divisible_by)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block.

    expand_ratio == 1: depthwise 3x3 -> pointwise-linear 1x1.
    expand_ratio != 1: pointwise expansion -> depthwise 3x3 -> pointwise-linear 1x1.
    A residual shortcut is used when stride == 1 and inp == oup.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers += [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
            ]
        layers += [
            # depthwise 3x3 (groups == channels)
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            # pointwise-linear projection (no activation)
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            return x + out
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone with per-stage channel pruning driven by compress_rate.

    compress_rate is a list of pruning ratios in [0, 1); index 0 appears reserved
    for the stem (it is copied but never read here) and indices 1..7 shrink the
    output width of the seven inverted-residual stages — TODO confirm against callers.
    """
    def __init__(self, compress_rate, n_class=1000, input_size=224, width_mult=1.):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # t = expansion factor, c = output channels, n = number of blocks, s = stride
        interverted_residual_setting = [
            # t-ex, c-channel, n-blocknum, s-stride
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # copy so later mutation by the caller cannot affect this model
        self.compress_rate=compress_rate[:]
        # building first layer
        assert input_size % 32 == 0
        # input_channel = make_divisible(input_channel * width_mult) # first channel is always 32!
        self.last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks; cnt indexes compress_rate per stage
        cnt=1
        for t, c, n, s in interverted_residual_setting:
            output_channel = make_divisible(c * width_mult) if t > 1 else c
            # prune this stage's output width by its compress rate
            output_channel = int((1-self.compress_rate[cnt])*output_channel)
            for i in range(n):
                if i == 0:
                    # only the first block of a stage carries the stage stride
                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
                else:
                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
                input_channel = output_channel
            cnt+=1
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier
        #self.classifier = nn.Linear(self.last_channel, n_class)
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, n_class),
        )
        self._initialize_weights()
    def forward(self, x):
        # features -> global average pool over H and W -> classifier
        x = self.features(x)
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        """He-style init for convs, constant init for BN, normal(0, 0.01) for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # fan-out He initialization
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # NOTE(review): n is computed but unused in this branch
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenet_v2(compress_rate):
    """Factory: build a width-multiplier-1 MobileNetV2 with the given compress rates."""
    return MobileNetV2(compress_rate=compress_rate, width_mult=1)
| 35.099291
| 108
| 0.53728
|
4a05db535329e1a08150be11eca216738e02794a
| 932
|
py
|
Python
|
dashboard/urls.py
|
fiterace/slamapp
|
a921b07f02fba6efbdf391afbca3fb4ecfe62a56
|
[
"MIT"
] | 2
|
2020-09-10T16:52:33.000Z
|
2021-06-27T07:15:58.000Z
|
dashboard/urls.py
|
bhu800/slamapp
|
aaeb6af2e4799b1089f74e69dea56caa92f518fb
|
[
"MIT"
] | 19
|
2020-07-03T22:29:58.000Z
|
2022-03-12T00:39:12.000Z
|
dashboard/urls.py
|
bhu800/slamapp
|
aaeb6af2e4799b1089f74e69dea56caa92f518fb
|
[
"MIT"
] | 2
|
2020-06-30T12:04:58.000Z
|
2020-06-30T12:39:42.000Z
|
from django.urls import path
from dashboard import views
from .views import *
# Template URL namespacing: lets templates reverse routes as {% url 'myapp:name' %}.
app_name = 'myapp'

# Route table for the dashboard app; view callables and CBVs come from .views.
urlpatterns = [
    path('', HomePageView, name='home'),
    path('dashboard', dashboardPageView, name="dashboard"),
    path('base', basePageView, name="base"),
    path('user', userPageView, name="user"),
    path('showSlambooks_all', showSlambooksAll.as_view(), name="showSlambooks_all"),
    path('showSlambooks_my', showSlambooksMyListView.as_view(), name="showSlambooks_my"),
    path('showentry/<int:pk>', showSlambookMyDetailView.as_view(), name="showentry"),  # detail view takes the entry pk
    path('fillSlambook/<int:pk>', fillSlambook_PageView, name='fillSlambook'),  # pk identifies the slambook to fill
    # path('accounts/login/',HomePageView), # <--- This is removed temporarily for development purpose!
    path('letter', letterPageView, name="letter"),
    path("hiddenAdmin", hiddenAdminPageView, name="hiddenAdmin"),
]
| 42.363636
| 147
| 0.683476
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.