blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3129c685b29c51fb57e952d3b3a7fe68f9796ef0 | 950863fecff0b39ca7afa77f0df4a8e5b1d4702a | /bin/bin.py | ce18e798d42c42e4331c06d660d0db0e5f621531 | [] | no_license | CkyBlue/ADT_Wise | b8fc9487ed4ed4529fca069077a912d10db318f7 | 9aa885911aa5d46d6fc09287016cdc6d51b7ef68 | refs/heads/master | 2021-07-06T03:19:49.740118 | 2019-04-04T05:37:48 | 2019-04-04T05:37:48 | 142,835,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | class func():
def __init__(self, parameters):
self.p = parameters
self.log = []
self.log.append("Firing")
return self.fn1
def fn1(self):
self.log.append("All done")
return lambda x: False
class Queue():
	"""Stand-in ADT; its single operation delegates to the `func` helper."""
	def doSmth(self, par):
		# Builds a `func` object from the given parameters.
		return func(par)
class Model():
	"""Placeholder for the model layer (not implemented yet)."""
	pass
class Prompt():
	"""Placeholder for the prompt abstraction (not implemented yet)."""
	pass
class View():
	"""Console view: on construction it fires the hard-coded "Call1"
	command and prompts the user via input()."""
	def __init__(self):
		# NOTE(review): relies on a module-level `myController` existing
		# before any View is constructed (it is set in the __main__ block).
		self.controller = myController
		self.fireCommand("Call1")
	def fireCommand(self, cmd):
		# `cmd` is currently ignored; the prompt is hard-wired to "Call1".
		self.setPrompt(self.controller.qns["Call1"])
	def setPrompt(self, para):
		x = input(para)
	def giveValInPrompt(self):
		# The original source ended with a bare `def giveValInPrompt`
		# (a SyntaxError); completed here as a no-op stub so the module
		# is at least importable.
		pass
class Controller():
	"""Wires together the ADT instance, the known call names, their
	prompt texts, and the handler functions."""
	adt = Queue()
	calls = ["Call1"]
	qns = {"Call1": "Do you?"}
	funcs = [Queue.doSmth]
if __name__ == "__main__":
	# `myController` must be bound before View() runs, because View reads
	# it as a module-level global in its __init__.
	myController = Controller()
	myView = View()
| [
"sakrit.karmacharya77@gmail.com"
] | sakrit.karmacharya77@gmail.com |
0688a52f8ae4c05f203b5eebe0b2d75375d55d27 | 0ba42fcdc96edcbae4682b52c2c919e56e12db6f | /qurator/sbb_ner/models/tokenization.py | d3ebf20096fc8df9d136989fbc9f59cb54a60efc | [
"Apache-2.0"
] | permissive | qurator-spk/sbb_ner | f9b4e8807befa4db959dfe867dcc4fa57278e1bc | d7d6d612803819c44942295f0b4e34f0925c1a52 | refs/heads/master | 2023-02-07T13:43:54.886989 | 2023-02-01T08:00:31 | 2023-02-01T08:00:31 | 204,686,076 | 14 | 1 | Apache-2.0 | 2022-06-10T08:08:27 | 2019-08-27T11:11:58 | Python | UTF-8 | Python | false | false | 17,276 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from pytorch_pretrained_bert.file_utils import cached_path
logger = logging.getLogger(__name__)
# Canonical download URLs for the vocabulary files of Google's released
# BERT checkpoints, keyed by shortcut model name.
PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
# Positional-embedding capacity (maximum sequence length) of each released
# model; used by from_pretrained() to cap the tokenizer's max_len.
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
}
# File name used when a vocabulary is saved to a directory.
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
    """Read a one-token-per-line vocabulary file into an OrderedDict
    mapping each (stripped) token to its 0-based line index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.strip()] = index
    return vocab
def whitespace_tokenize(text):
    """Trim *text* and split it on runs of whitespace; returns [] for
    empty or whitespace-only input."""
    stripped = text.strip()
    return stripped.split() if stripped else []
class BertTokenizer(object):
    """Runs end-to-end tokenization: punctuation splitting + wordpiece"""
    def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BertTokenizer.
        Args:
          vocab_file: Path to a one-wordpiece-per-line vocabulary file
          do_lower_case: Whether to lower case the input
                 Only has an effect when do_wordpiece_only=False
          do_basic_tokenize: Whether to do basic tokenization before wordpiece.
          max_len: An artificial maximum length to truncate tokenized sequences to;
                 Effective maximum length is always the minimum of this
                 value (if specified) and the underlying BERT model's
                 sequence length.
          never_split: List of tokens which will never be split during tokenization.
                 Only has an effect when do_wordpiece_only=False
        Raises:
          ValueError: if *vocab_file* does not point to an existing file.
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping id -> token, used by convert_ids_to_tokens().
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                  never_split=never_split)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        # int(1e12) acts as "effectively unlimited" when max_len is absent.
        self.max_len = max_len if max_len is not None else int(1e12)
    def tokenize(self, text):
        """Split *text* into wordpiece tokens (running the basic tokenizer
        first unless do_basic_tokenize was disabled)."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab.

        Raises KeyError for tokens missing from the vocabulary; only warns
        (does not truncate) when the sequence exceeds max_len."""
        ids = []
        for token in tokens:
            ids.append(self.vocab[token])
        if len(ids) > self.max_len:
            logger.warning(
                "Token indices sequence length is longer than the specified maximum "
                " sequence length for this BERT model ({} > {}). Running this"
                " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids
    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids in wordpiece tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens
    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file.

        NOTE(review): when *vocab_path* is not a directory, `vocab_file`
        is never assigned before the `with open(...)` -- this looks like it
        would raise; confirm callers always pass a directory."""
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Tokens are written in id order; a gap means the vocab dict
            # was corrupted relative to its original file.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                                   " Please check that the vocabulary is not corrupted!".format(vocab_file))
                    index = token_index
                writer.write(token + u'\n')
                index += 1
        return vocab_file
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.

        Returns None (rather than raising) when the vocab file cannot be
        downloaded or resolved.
        """
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
            # Force do_lower_case to match the cased/uncased nature of the
            # requested checkpoint, warning when the caller's choice is overridden.
            if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                               "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                               "you may want to check this behavior.")
                kwargs['do_lower_case'] = False
            elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                               "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                               "but you may want to check this behavior.")
                kwargs['do_lower_case'] = True
        else:
            vocab_file = pretrained_model_name_or_path
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
            # than the number of positional embeddings
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
    def __init__(self,
                 do_lower_case=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BasicTokenizer.
        Args:
          do_lower_case: Whether to lower case the input.
          never_split: tokens that bypass lower-casing and punctuation splitting.
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split
    def tokenize(self, text):
        """Tokenizes a piece of text: cleans control chars, isolates CJK
        characters, optionally lower-cases/strips accents, splits punctuation."""
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case and token not in self.never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        # Re-join and re-split so punctuation fragments become tokens.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text (drops Unicode Mn marks
        after NFD decomposition)."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text; each punctuation char
        becomes its own token. never_split tokens pass through whole."""
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the replacement char, and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization.

    NOTE: this variant deviates from upstream BERT: when a character cannot
    be matched, it emits [UNK] for that single character and keeps scanning,
    instead of replacing the whole word with [UNK] (the upstream behavior is
    kept below as commented-out code)."""
    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        # Retained for interface compatibility; the length check that used
        # it is commented out in tokenize() below.
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]
        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
        Returns:
          A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            # if len(chars) > self.max_input_chars_per_word:
            # output_tokens.append(self.unk_token)
            # continue
            # is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedy longest-match: shrink `end` until chars[start:end]
                # (with "##" prefix for non-initial pieces) is in the vocab.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No match even for the single char: emit [UNK] for it
                    # and advance by one (see class NOTE).
                    # is_bad = True
                    # break
                    sub_tokens.append(self.unk_token)
                    start += 1
                else:
                    sub_tokens.append(cur_substr)
                    start = end
            # if is_bad:
            # output_tokens.append(self.unk_token)
            # else:
            output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| [
"Kai.Labusch@sbb.spk-berlin.de"
] | Kai.Labusch@sbb.spk-berlin.de |
4f119652f96bb3fe74c40c0185c5a5b58e1155f7 | 41e9d6edf49701c62471bddf2a5ccd041ec5c31e | /libraries/website/docs/snippets/ml/zero_profiling.py | b0ea002fadbe1a65cea32d0b23b49d5850a9c526 | [
"MIT",
"Apache-2.0"
] | permissive | Guillemdb/mathy | b0fc5537aa267c4e9f386d36d3c17ff74fd4d0ab | 06204396ac39445b732c199078076b18fd1bbccc | refs/heads/master | 2021-05-22T18:57:59.097614 | 2020-04-02T19:25:55 | 2020-04-02T19:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #!pip install gym
import os
import shutil
import tempfile
from mathy.agents.zero import SelfPlayConfig, self_play_runner
from mathy.cli import setup_tf_env
# Scratch directory for the model; removed again at the bottom.
model_folder = tempfile.mkdtemp()
# Minimal self-play configuration whose only purpose is to produce a
# profiler dump (profile=True) for a single worker.
args = SelfPlayConfig(
    num_workers=1,
    profile=True,
    model_dir=model_folder,
    # All options below here can be deleted if you're actually training
    max_eps=1,
    self_play_problems=1,
    training_iterations=1,
    epochs=1,
    mcts_sims=3,
)
self_play_runner(args)
# profile=True makes each worker write a <name>.profile file on exit.
assert os.path.isfile(os.path.join(args.model_dir, "worker_0.profile"))
# Comment this out to keep your model
shutil.rmtree(model_folder)
| [
"justin@dujardinconsulting.com"
] | justin@dujardinconsulting.com |
2b43980f401fc20884576fe5b39260203c3a7de9 | ab79f8297105a7d412303a8b33eaa25038f38c0b | /imersia/vit_product/stock.py | d8ccbb76ecfe1daf893e0694292440f3b1ff45a0 | [] | no_license | adahra/addons | 41a23cbea1e35079f7a9864ade3c32851ee2fb09 | c5a5678379649ccdf57a9d55b09b30436428b430 | refs/heads/master | 2022-06-17T21:22:22.306787 | 2020-05-15T10:51:14 | 2020-05-15T10:51:14 | 264,167,002 | 1 | 0 | null | 2020-05-15T10:39:26 | 2020-05-15T10:39:26 | null | UTF-8 | Python | false | false | 5,172 | py | import time
from openerp.osv import fields
from openerp.osv import osv
from openerp.osv import orm  # used by the except clauses in the _*_default helpers
from openerp.tools.translate import _
from openerp.osv import osv
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare
class StockMove(osv.osv):
_inherit = 'stock.move'
    def _src_id_default(self, cr, uid, ids, context=None):
        """Default source location: the stock module's 'Production' location,
        or False when it is missing or unreadable by the current user."""
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'location_production')
            self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        # NOTE(review): `orm` must be importable from openerp.osv for this
        # handler; the original file only imported `osv` -- verify.
        except (orm.except_orm, ValueError):
            location_id = False
        return location_id
    def _dest_id_default(self, cr, uid, ids, context=None):
        """Default destination location: the stock module's 'Stock' location,
        or False when it is missing or unreadable by the current user."""
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
            self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        # NOTE(review): `orm` must be importable from openerp.osv for this
        # handler; the original file only imported `osv` -- verify.
        except (orm.except_orm, ValueError):
            location_id = False
        return location_id
_columns = {
'custom_production_id': fields.many2one('mrp.production.custom', 'Production Order for Produced Products', select=True, copy=False),
'waste_qty':fields.float('Waste (%)'),
# 'raw_material_production_id': fields.many2one('mrp.production', 'Production Order for Raw Materials', select=True),
'consumed_for': fields.many2one('stock.move', 'Consumed for', help='Technical field used to make the traceability of produced products'),
}
    def action_consume_custom(self, cr, uid, ids, product_qty, location_id=False, restrict_lot_id=False, restrict_partner_id=False,
                              consumed_for=False, context=None):
        """ Consumed product with specific quantity from specific source location.
        @param product_qty: Consumed/produced product quantity (= in quantity of UoM of product)
        @param location_id: Source location
        @param restrict_lot_id: optional parameter that allows to restrict the choice of quants on this specific lot
        @param restrict_partner_id: optional parameter that allows to restrict the choice of quants to this specific partner
        @param consumed_for: optional parameter given to this function to make the link between raw material consumed and produced product, for a better traceability
        @return: New lines created if not everything was consumed for this line
        """
        if context is None:
            context = {}
        res = []
        production_obj = self.pool.get('mrp.production.custom')
        if product_qty <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
        #because of the action_confirm that can create extra moves in case of phantom bom, we need to make 2 loops
        ids2 = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'draft':
                ids2.extend(self.action_confirm(cr, uid, [move.id], context=context))
            else:
                ids2.append(move.id)
        prod_orders = set()
        for move in self.browse(cr, uid, ids2, context=context):
            prod_orders.add(move.custom_production_id.id)
        print"Total Qty>>>",product_qty
        move_qty = product_qty
        if move_qty <= 0.00:
            raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
        # NOTE(review): quantity_rest is always 0 here because move_qty was
        # just set to product_qty -- confirm whether the split below is
        # ever meant to receive a non-zero remainder.
        quantity_rest = move_qty - product_qty
        print"Rest Qty>>>",quantity_rest
        # Compare with numbers of move uom as we want to avoid a split with 0 qty
        # NOTE(review): `move` below is the loop variable left over from the
        # prod_orders loop, i.e. the *last* browsed move -- verify this is
        # intentional for multi-id calls.
        quantity_rest_uom = move.product_uom_qty - self.pool.get("product.uom")._compute_qty_obj(cr, uid, move.product_id.uom_id, product_qty, move.product_uom)
        if float_compare(quantity_rest_uom, 0, precision_rounding=move.product_uom.rounding) != 0:
            new_mov = self.split(cr, uid, move, quantity_rest, context=context)
            print"New Move>>>",new_mov
            res.append(new_mov)
        vals = {'restrict_lot_id': restrict_lot_id,
                'restrict_partner_id': restrict_partner_id,
                'consumed_for': consumed_for}
        if location_id:
            vals.update({'location_id': location_id})
        self.write(cr, uid, [move.id], vals, context=context)
        # Original moves will be the quantities consumed, so they need to be done
        self.action_done(cr, uid, ids2, context=context)
        if res:
            self.action_assign(cr, uid, res, context=context)
        if prod_orders:
            production_obj.action_in_production(cr, uid, list(prod_orders), context=None)
        #production_obj.signal_workflow(cr, uid, list(prod_orders), 'button_produce')
        return res
_defaults = {
'location_id': _src_id_default,
'location_dest_id': _dest_id_default
} | [
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] | prog1@381544ba-743e-41a5-bf0d-221725b9d5af |
6343752dd269bc7d88d0650fcc42fd02bb191453 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/1760.py | 751381d98be1f23c884db62b177e89e978aaa771 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 512 | py | ii = [('CookGHP3.py', 1), ('CoolWHM2.py', 1), ('SadlMLP.py', 5), ('ShawHDE.py', 1), ('UnitAI.py', 1), ('PeckJNG.py', 1), ('AubePRP.py', 13), ('AdamWEP.py', 1), ('FitzRNS3.py', 1), ('ClarGE2.py', 53), ('CookGHP2.py', 4), ('CrokTPS.py', 1), ('ClarGE.py', 16), ('LyelCPG.py', 13), ('WestJIT2.py', 3), ('WadeJEB.py', 12), ('SoutRD2.py', 1), ('MereHHB3.py', 4), ('HogaGMM.py', 3), ('BabbCEM.py', 1), ('SomeMMH.py', 8), ('ClarGE3.py', 30), ('DibdTRL.py', 1), ('HogaGMM2.py', 10), ('EvarJSP.py', 1), ('SadlMLP2.py', 4)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
b39b9496da392e9f5dd5eabdfe28dd6fba4660d3 | c3d162c4c88e86844f7768c15ff59f464e5ae773 | /src/get_stats.py | f33067bba8a86e7894d5150887d85fe6d4a805e3 | [
"MIT"
] | permissive | chipgwyn/xfinity_gateway_stats | 4a623d0b5ffc9178fd0cec0bb8a3279393fc39b0 | 6575ff8b4eca193898d3c711885b9a066c51bc2a | refs/heads/main | 2022-12-26T10:41:51.021476 | 2020-10-11T16:16:55 | 2020-10-11T16:16:55 | 301,253,967 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | #!/usr/bin/env python3
import os
import xfg
import json
# Gateway admin password; falls back to the device default when unset.
password = os.getenv('XFG_PASSWD', "password")
# Log in to the Xfinity gateway and pretty-print its network stats.
connx = xfg.Session(password=password)
print(json.dumps(connx.xfinity_network(), indent=2))
| [
"chip@verizonmedia.com"
] | chip@verizonmedia.com |
e766b2d9fcef852a0135fd9e7ea6a38a3e1fa9d3 | 7477988df9650b20503812d4199eb217085f0e89 | /abramov/ABR0006.py | 782ac550b987d6b6635767d6cfe2748f62a71bec | [] | no_license | nyamba/algorithm | 2aa97f2ff1d2b0a6ff50066ebeeca6871eab99b3 | 9d9577442d84d3f7cf30df5cb5899409356c6680 | refs/heads/master | 2021-01-01T18:03:24.406987 | 2014-02-11T04:49:28 | 2014-02-11T04:49:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | def calc(a, b):
'''
>>> calc(1.6, 2.74)
3.2
2.2
'''
print '%.1f' % ((a**2 + b**2)**0.5)
print '%.1f' % ((a*b)/2.0)
if __name__ == '__main__':
    # Python 2 script: reads two whitespace-separated floats from stdin.
    calc(*[float(i) for i in raw_input().split()])
| [
"t.nyambayar@gmail.com"
] | t.nyambayar@gmail.com |
7ecbe0b308cb8371f7ee5198762f1a81ddafae19 | fca80c6a22bcce507a81e05cd31e0d5ebbc43a57 | /Chapter_05/samples/guestPicnic.py | cfe00f043c0535219fe766ef773df1d474944cd1 | [
"MIT"
] | permissive | GSantos23/Automate_Python | 6b1ce29f1ee5a22b53ef6c1d45fef56d8d8e0b06 | 4bf3eadb5a330d5f22329bdcd08d37ab01a9454f | refs/heads/master | 2021-06-29T04:12:32.910835 | 2020-12-26T22:28:31 | 2020-12-26T22:28:31 | 197,512,449 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | allGuests = {'Alice': {'apples': 5, 'pretzels': 12},
'Bob': {'ham sandwiches': 3, 'apples': 2},
'Carol': {'cups': 3, 'apple pies': 1}}
def totalBrought(guests, item):
    """Return the total quantity of *item* brought across all guests.

    Args:
        guests: mapping of guest name -> {item name: quantity} dict.
        item: the item name to total up.

    Returns:
        Sum of each guest's quantity for *item* (0 for guests who don't
        bring it, and 0 overall for an empty guest mapping).
    """
    # Only the per-guest item dicts matter, so iterate values() directly
    # instead of items() with an unused key.
    return sum(brought.get(item, 0) for brought in guests.values())
# Summary report: one line per item of interest.
print('Number of things being brought:')
print(' - Apples ' + str(totalBrought(allGuests, 'apples')))
print(' - Cups ' + str(totalBrought(allGuests, 'cups')))
print(' - Cakes ' + str(totalBrought(allGuests, 'cakes')))
print(' - Ham sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))
print(' - Apple Pies ' + str(totalBrought(allGuests, 'apple pies')))
| [
"santosgerson64@gmail.com"
] | santosgerson64@gmail.com |
a8161c8a1801653604c64a4f70918d8ca1a7016e | 42165f51c0c7f467c49794e3f6ca09ef13f6ab1e | /SMSpoll/login/views.py | 2e471e771b2e66c11602341f259128b1eb4a66ee | [] | no_license | kedark3/SMSpoll | e4969cca0cc6645fd7f091b297c33e2e34e0326c | dcfd08326c826bfe33a2fcd780bd322b3edfb888 | refs/heads/master | 2021-01-10T08:53:40.899448 | 2018-12-05T02:33:43 | 2018-12-05T02:33:43 | 44,923,741 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,274 | py | # Create your views here.
from __future__ import division
from django.http import HttpResponse,HttpResponseRedirect
from django.template import Template, Context
from file_read import read
from csv_write import csv_write
import MySQLdb as msdb
from models import InstReg,StudReg,InstCourse,Course, ClassTest
from twilio.rest import TwilioRestClient
from django.core.mail import send_mail
import random,string
from datetime import datetime
from django.utils import timezone
from dateutil import parser
import csv
#connecting to twilio account
import os
from urlparse import urlparse
from django.core.urlresolvers import resolve
from django.conf import settings
from twilio.rest.resources import Connection
from twilio.rest.resources.connection import PROXY_TYPE_HTTP
from twilio import twiml
from django.views.decorators.csrf import csrf_exempt
from django import forms
#for ajax trial
from django.template import RequestContext
# NOTE(review): if the http_proxy environment variable is unset this
# raises at import time (urlparse(None)); the module assumes a proxy.
proxy_url = os.environ.get("http_proxy")
host, port = urlparse(proxy_url).netloc.split(":")
Connection.set_proxy_info(host, int(port), proxy_type=PROXY_TYPE_HTTP)
# SECURITY NOTE(review): Twilio credentials are hard-coded in source;
# they should live in environment variables / settings.
ACCOUNT_SID = "AC5d22427eb1a348f92d96e38ac7f77b6f"
AUTH_TOKEN = "de92cc787190562f371eebf5971d0a2a"
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
class NameForm(forms.Form):
    # Simple one-field form; appears unused (its only instantiation in
    # home() is commented out).
    your_name = forms.CharField(label='Your name', max_length=100)
class CourseList(object):
    """Plain record for one row of the instructor's course list:
    CRN, course id, and course name."""
    def __init__(self, crn, c_id, c_name):
        self.crn, self.c_id, self.c_name = crn, c_id, c_name
class StudList(object):
    """Plain record pairing a student id with the phone number they
    registered from."""
    def __init__(self, s_id, phone):
        self.s_id, self.phone = s_id, phone
class TestNameList(object):
    """Plain record pairing a CRN with one of its class-test identifiers."""
    def __init__(self, crn, test_id):
        self.crn, self.test_id = crn, test_id
def connect():
    """Open and return a new MySQL connection to the application database.

    The original swallowed connection errors and returned an HttpResponse
    object from this non-view helper; every caller then immediately called
    .cursor() on it and crashed with AttributeError. Letting the exception
    propagate instead gives callers' existing except-blocks (e.g. in
    home()) a chance to handle the failure properly.
    """
    return msdb.connect('mysql.server', 'ssdiprojectfall2', 'smspoll', 'ssdiprojectfall2$default')
questions=()
def home(request):
    """Instructor home page: list the logged-in instructor's courses.

    Any failure (no session email, DB error, template error) falls through
    to re-rendering the login page with a "Please login first!" message.
    """
    try:
        mail=request.session['email2']
        conn=connect()
        cur=conn.cursor()
        # SECURITY NOTE(review): the email is interpolated into the SQL
        # string with % -- should use a parameterized query.
        cur.execute("select i.crn,c.c_id,c.course_name from login_instcourse as i, login_course as c where i.c_id_id=c.id and email_id='%s'"%mail)
        results=[]
        for row in cur.fetchall():
            results.append(CourseList(row[0],row[1],row[2]))
        conn.close()
        code = read('/home/ssdiprojectfall2015/SMSpoll/templates/InstructorHome.html')
        t= Template(code)
        #form = NameForm()
        c = Context({'courses':results})
        return HttpResponse(t.render(c))
    except Exception as e:
        # Broad catch: any error is treated as "not logged in".
        error='Please login first!'
        code = read('/home/ssdiprojectfall2015/SMSpoll/templates/login.html')
        t= Template(code)
        c = Context({'error':error,'questions':questions})
        return HttpResponse(t.render(c))
def logout(request):
    """Clear the instructor's session email and redirect to the login page."""
    request.session['email2']=''
    return HttpResponseRedirect("/auth/login")
def login(request):
    """Render the login page.

    The original declared `global error` without ever assigning or reading
    it in this function; the useless declaration was removed.
    """
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/login.html')
    t = Template(code)
    c = Context()
    return HttpResponse(t.render(c))
def login_check(request):
    """Validate instructor credentials from the login form.

    Redirects to the home page on success; otherwise re-renders the login
    page with an error. Fixes a fall-through bug in the original: when the
    email existed but the password was wrong, the function returned None,
    which Django turns into a server error instead of an error page.
    """
    mail = request.POST['email2']
    pswd = request.POST['pswd2']
    try:
        inst = InstReg.objects.get(email=mail)
        if inst.password == pswd:
            request.session['email2'] = mail
            return HttpResponseRedirect("/auth")
    except Exception:
        # Unknown email (DoesNotExist) -- treated the same as a bad password.
        pass
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/login.html')
    t = Template(code)
    error = "Wrong email or password"
    c = Context({'error': error})
    return HttpResponse(t.render(c))
def signup(request):
    """Create an instructor account from the signup form.

    On success, redirects to the login page; on password mismatch,
    re-renders the login page with an error message.
    SECURITY NOTE(review): the password is stored in plain text.
    """
    global error
    fname= request.POST['fname']
    lname= request.POST['lname']
    email= request.POST['email']
    password = request.POST['pswd']
    cnfm_pswd=request.POST['cnfm_pswd']
    if password == cnfm_pswd:
        # NOTE(review): InstReg is constructed positionally
        # (fname, lname, email, password) -- verify against the model.
        i = InstReg(fname,lname,email,password)
        i.save()
        # This message is never shown: the redirect below returns first.
        error="Signed up Successfully,You may login now!!"
        return HttpResponseRedirect('/auth/login')
    else:
        error="Password and Confirm password didn't match!!"
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/login.html')
    t= Template(code)
    c = Context({'error': error})
    return HttpResponse(t.render(c))
def recover_pswd(request):
    """Email an instructor their password when name and email both match.

    SECURITY NOTE(review): the password is stored and emailed in plain
    text. An email with no matching InstReg row raises DoesNotExist here
    (uncaught), producing a server error rather than the friendly message.
    """
    pname=request.POST['pname']
    pmail=request.POST['pmail']
    l=InstReg.objects.get(email=pmail)
    if pname == (l.fname+" "+l.lname):
        send_mail('Your Password for SMS Poll' , 'Here is your password for your account on SMSpoll:'+ l.password, 'ssdiprojectfall2015@gmail.com',[pmail], fail_silently=False)
        error="Please Check your email for Password."
    else:
        error="Please check your name or email you entered."
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/login.html')
    t= Template(code)
    c = Context({'error': error})
    return HttpResponse(t.render(c))
def contact_us(request):
    """Forward a visitor's message to the project mailbox and confirm.

    Fixes the broken message strings in the original: the subject rendered
    as "Message fromName" and the body as "Here is message:text" because
    the concatenations were missing separator spaces.
    """
    cname = request.POST['cname']
    cmail = request.POST['cmail']
    message = request.POST['message']
    send_mail('Message from ' + cname, 'Here is message: ' + message, cmail,
              ['ssdiprojectfall2015@gmail.com'], fail_silently=False)
    error = "Thank you for Contacting us, we will get back to you shortly!"
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/login.html')
    t = Template(code)
    c = Context({'error': error})
    return HttpResponse(t.render(c))
@csrf_exempt
def student_reg(request):
    """Twilio inbound-SMS webhook: register a student for a course.

    Expects the SMS body to be "<student-id> <crn>" (two integers). On
    success the student's phone number is saved against the CRN and a
    confirmation SMS is returned as TwiML.
    """
    r = twiml.Response()
    try:
        sid=int(request.POST['Body'].split()[0])
        crnn=int(request.POST['Body'].split()[1])
        inst= InstCourse(crn=crnn)
        s= StudReg(s_id=sid,phone_no=request.POST['From'],crn=inst)
        s.save()
        r.message('Registered successfully!')
    except Exception as e:
        # NOTE(review): failures (bad format, duplicate registration) are
        # swallowed silently -- the sender gets no reply at all; the error
        # message below was deliberately commented out.
        #r.message('Registration error, format should be your 800 ID and CRN. E.g. 800891239 25145. Or already registered!')
        sid=0
    return HttpResponse(r.toxml(), content_type='text/xml')
def after_course(request):
    """Course dashboard: registered students and past tests for one course.

    Expects GET params 'crn' and 'c-id'.
    """
    crn = request.GET['crn']
    cid = request.GET['c-id']
    course_details = list(Course.objects.filter(c_id=cid))
    conn = connect()
    cur = conn.cursor()
    # Parameterized queries: 'crn' comes straight from the query string, so
    # the previous string concatenation was an SQL-injection hole.
    # (Assumes connect() returns a '%s'-paramstyle driver such as MySQLdb —
    # TODO confirm.)
    cur.execute(
        "select s_id, phone_no from login_studreg as s, login_instcourse as c "
        "where s.crn_id=c.crn and crn=%s", (crn,))
    students = [StudList(row[0], row[1]) for row in cur.fetchall()]
    cur.execute(
        "select distinct crn_id,test_id from login_classtest where crn_id=%s",
        (crn,))
    tests = [TestNameList(row[0], row[1]) for row in cur.fetchall()]
    conn.close()
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/AfterCourseSelection.html')
    t = Template(code)
    c = Context({'course': course_details, 'students': students,
                 'count': len(students), 'crn': crn, 'tests': tests})
    return HttpResponse(t.render(c))
def attendace_string(request):
    """Start an attendance poll: show a random 5-char code and a countdown.

    Expects GET param 'count' (poll window in seconds).  The code and the
    window are kept in the session for show_attendance() to evaluate.
    """
    random_string = ''.join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
    request.session['random_string'] = random_string
    # Bug fix: store the window as an int.  It was stored as the raw GET
    # string, and show_attendance() compares elapsed seconds (an int)
    # against it, which raises TypeError on Python 3.
    request.session['count'] = int(request.GET['count'])
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/AttendaceCounter.html')
    t = Template(code)
    c = Context({'string': random_string, 'count': request.GET['count']})
    return HttpResponse(t.render(c))
def show_attendance(request):
    """List the phone numbers that texted the attendance code in time.

    A message counts when it was received, its body equals the session's
    random code, and it arrived within the configured window (seconds).
    """
    request.session['time2'] = datetime.now(timezone.utc).replace(microsecond=0)
    # Bug fix: the session value was stored from request.GET and may be a
    # str; comparing int seconds against a str raises TypeError on Python 3.
    window = int(request.session['count'])
    code_word = request.session['random_string']
    messages = client.messages.list()
    numbers = []
    for m in messages:
        elapsed = (request.session['time2'] - parser.parse(m.date_sent)).seconds
        if m.status == "received" and elapsed <= window and m.body == code_word:
            numbers.append(m.from_)
    numbers = list(set(numbers))  # remove duplicates
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/Attendance.html')
    t = Template(code)
    c = Context({'numbers': numbers})
    return HttpResponse(t.render(c))
def download_attendance(request):
    """Stream the collected phone numbers as a timestamped CSV attachment."""
    phone_numbers = request.GET.getlist('numbers')
    response = HttpResponse(content_type='text/csv')
    disposition = 'attachment; filename="attendance' + str(datetime.now()) + '.csv"'
    response['Content-Disposition'] = disposition
    # One number per row, written straight into the response body.
    csv.writer(response).writerows([number] for number in phone_numbers)
    return response
# View for the Add/Remove Classes page.
def addrem(request):
    """Render the add/remove-classes page with every course listed."""
    template = Template(read('/home/ssdiprojectfall2015/SMSpoll/templates/AddRemove.html'))
    context = Context({
        'courses': Course.objects.all(),
        'email': request.session['email2'],
    })
    return HttpResponse(template.render(context))
def add_course(request):
    """Attach a course/CRN to the logged-in instructor, then redirect home.

    Only performs the insert when the 'email' GET param matches the
    session user; redirects either way.
    """
    cid = request.GET['cid']
    crn = request.GET['crn']
    email = request.GET['email']
    if email == request.session['email2']:
        conn = connect()
        cur = conn.cursor()
        # Parameterized insert: crn/cid/email come straight from the query
        # string, so the previous string concatenation was an SQL-injection
        # hole.  (Assumes a '%s'-paramstyle driver such as MySQLdb — TODO
        # confirm.)
        cur.execute("Insert into login_instcourse values (%s, %s, %s)",
                    (crn, cid, email))
        conn.commit()
        conn.close()
    return HttpResponseRedirect("http://ssdiprojectfall2015.pythonanywhere.com/auth")
def remove_course(request):
    """Detach a CRN from the logged-in instructor, then redirect home.

    The delete only happens when the 'email' GET param matches the
    session user.
    """
    requested_email = request.GET['email']
    session_email = request.session['email2']
    if requested_email == session_email:
        InstCourse.objects.filter(crn=request.GET['crn']).delete()
    return HttpResponseRedirect("http://ssdiprojectfall2015.pythonanywhere.com/auth")
#-------------------------Test related views-------------------------------------------------------
def create_test(request):
    """Render the question-entry form for a new test on a course.

    Expects GET params 'crn', 'test_id' and 'qid'; looks up the course id
    belonging to the CRN for display.
    """
    conn = connect()
    cur = conn.cursor()
    # Parameterized sub-select: 'crn' is user input; the previous string
    # concatenation was an SQL-injection hole.  (Assumes a '%s'-paramstyle
    # driver — TODO confirm.)
    cur.execute(
        "select c_id from login_course where id="
        "(select c_id_id from login_instcourse where crn=%s)",
        (request.GET['crn'],))
    cid = cur.fetchone()[0]
    conn.close()
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/CreateTest.html')
    t = Template(code)
    c = Context({'TestName': request.GET['test_id'], 'crn': request.GET['crn'],
                 'cid': cid, 'qid': request.GET['qid']})
    return HttpResponse(t.render(c))
def add_question(request):
    """Persist one test question from the form, then reload the form for
    the next question of the same test."""
    crn = request.GET['crn']
    test_id = request.GET['test_id']
    qid = int(request.GET['qid'])
    conn = connect()
    cur = conn.cursor()
    # Hand-rolled auto-increment: the next row id is current max + 1
    # (0-based when the table is empty).
    cur.execute("select max(id) from login_classtest")
    max_id = cur.fetchone()[0]
    if max_id is None:
        max_id = 0
    entry = ClassTest(max_id + 1, crn, test_id, qid,
                      request.GET['question'],
                      request.GET['A'], request.GET['B'],
                      request.GET['C'], request.GET['D'],
                      request.GET['correct'], request.GET['timer'])
    entry.save()
    conn.close()
    next_qid = str(qid + 1)
    return HttpResponseRedirect(
        '/auth/create-test/?crn=' + crn + '&test_id=' + test_id + '&qid=' + next_qid)
def conduct_test(request):
    """Show question 'qid' of a test; past the last question, fall back to
    the course dashboard.

    The per-test random code is created once and kept in the session so
    students prefix their SMS answers with it.
    """
    request.session['time3'] = datetime.now(timezone.utc).replace(microsecond=0)
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/Test.html')
    # Bug fix: this previously read 'request.sesssion' (typo), so the lookup
    # always raised and a fresh code was generated for every single question.
    if 'random_string' not in request.session:
        random_string = ''.join(
            random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
        request.session['random_string'] = random_string
    qid = str(int(request.GET['qid']))
    conn = connect()
    cur = conn.cursor()
    try:
        # Parameterized: qid/test_id/crn are user input; the previous string
        # concatenation was an SQL-injection hole.  (Assumes a
        # '%s'-paramstyle driver — TODO confirm.)
        cur.execute(
            "SELECT * FROM login_classtest WHERE qid=%s AND test_id=%s AND crn_id=%s",
            (qid, request.GET['test_id'], request.GET['crn']))
        question_data = cur.fetchone()
        request.session['test_id'] = request.GET['test_id']
        request.session['timer'] = question_data[10]
        request.session['ans'] = question_data[9]
        conn.close()
        t = Template(code)
        c = Context({'question': question_data[4], 'A': question_data[5],
                     'B': question_data[6], 'C': question_data[7],
                     'D': question_data[8], 'timer': question_data[10],
                     'qid': qid, 'crn': request.GET['crn'],
                     'random': request.session['random_string']})
        return HttpResponse(t.render(c))
    except Exception:
        # No such question means the test is over: send the instructor back
        # to the course dashboard.
        cur.execute(
            "select c_id from login_course where id="
            "(select c_id_id from login_instcourse where crn=%s)",
            (request.GET['crn'],))
        cid = cur.fetchone()[0]
        conn.close()
        return HttpResponseRedirect(
            "/auth/after-course/?crn=" + request.GET['crn'] + "&c-id=" + str(cid))
def show_stats(request):
    """Tally SMS answers for the current question and render the stats page.

    A reply counts when it arrived within the question's timer window and is
    exactly 7 characters: the 5-char session code, a separator, and the
    answer letter (A-D).  Numbers that answered correctly are appended to
    the per-test CSV result file via csv_write().
    """
    request.session['time4']= datetime.now(timezone.utc).replace(microsecond=0)
    messages = client.messages.list()
    # NOTE(review): sorting Twilio message resources relies on them being
    # comparable — on Python 3 this raises TypeError unless the client's
    # message objects define ordering; confirm against the twilio library
    # version in use.
    messages.sort()
    numbers=[]
    A=[]
    B=[]
    C=[]
    D=[]
    for m in messages:
        # Only bodies of exactly 7 chars can be "<code><sep><letter>".
        if len(m.body)==7:
            # assumes session['timer'] is numeric (it comes from the DB
            # timer column) — TODO confirm, otherwise the <= raises on Py3
            if m.status == "received" and ((request.session['time4']-parser.parse(m.date_sent)).seconds)<=request.session['timer']:
                if m.body[0:5]== request.session['random_string']:
                    # Bucket the sender's number by the answer letter;
                    # m.body[5] is the separator character.
                    if m.body[6]=='A':
                        A.append(m.from_)
                    if m.body[6]=='B':
                        B.append(m.from_)
                    if m.body[6]=='C':
                        C.append(m.from_)
                    if m.body[6]=='D':
                        D.append(m.from_)
                    # Track correct answers separately for the result file.
                    if m.body[6]==request.session['ans']:
                        numbers.append(m.from_)
    numbers=list(set(numbers))
    csv_write(numbers,str(request.GET['qid']),str(request.session['test_id']),str(request.GET['crn'])) #Writing to csv file
    # 'count' counts answers (a number may appear in several buckets);
    # 'total' counts distinct responders.
    count=len(A)+len(B)+len(C)+len(D)
    totalStudents= []
    totalStudents.append(A)
    totalStudents.append(B)
    totalStudents.append(C)
    totalStudents.append(D)
    totalStudents= [item for sublist in totalStudents for item in sublist]
    total= len(list(set(totalStudents)))
    percentA=0
    percentB=0
    percentC=0
    percentD=0
    if count>0:
        # NOTE(review): '/' is true division on Python 3 (float percents)
        # but integer division on Python 2 — confirm which the templates
        # expect.
        percentA=(len(A)*100/count)
        percentB=(len(B)*100/count)
        percentC=(len(C)*100/count)
        percentD=(len(D)*100/count)
    code = read('/home/ssdiprojectfall2015/SMSpoll/templates/Stats.html')
    t= Template(code)
    # 'qid' is advanced by one so the template links to the next question.
    c = Context({'numbers':numbers,'countA':percentA,'countB':percentB,'countC':percentC,'countD':percentD,'count':total,
                 'crn':request.GET['crn'],'qid':int(request.GET['qid'])+1,'test_id':request.session['test_id']})
    return HttpResponse(t.render(c))
# Download results view ===================================
def download(request):
    """Redirect to the result CSV for a test, or back to the course page
    when no result file exists yet."""
    result_name = 'result' + request.GET['test_id'] + request.GET['crn'] + '.csv'
    # Explicit existence check instead of os.stat inside a broad
    # try/except Exception, which could also mask unrelated errors.
    if os.path.exists(settings.MEDIA_ROOT + '/result/' + result_name):
        return HttpResponseRedirect('/media/result/' + result_name)
    conn = connect()
    cur = conn.cursor()
    # Parameterized: 'crn' is user input; the previous string concatenation
    # was an SQL-injection hole.  (Assumes a '%s'-paramstyle driver — TODO
    # confirm.)
    cur.execute(
        "select c_id from login_course where id="
        "(select c_id_id from login_instcourse where crn=%s)",
        (request.GET['crn'],))
    cid = cur.fetchone()[0]
    conn.close()
    return HttpResponseRedirect(
        "/auth/after-course/?crn=" + request.GET['crn'] + "&c-id=" + str(cid))
def like_category(request):
    """Stub endpoint: echo a 'like' acknowledgement for a category id."""
    # Dead code removed: the original also built a RequestContext, read the
    # login template and created a Context, but never rendered any of them —
    # the view always returned the plain string below.
    likes = "I like it" + request.GET['category_id']
    return HttpResponse(likes)
| [
"kedar.kulkarni0@gmail.com"
] | kedar.kulkarni0@gmail.com |
49a444041a14c567034fd6e3f60a77a4c2427966 | 0ee2d0a08f08997a2cfa7d65549a08bf7b82bbd2 | /accounts/views.py | 72db3d94bf3cab0bb255d9626ede9d8bb29e27a1 | [] | no_license | drogon98/tutorial | 9f4742efbdf0d70c7756197c98fb5f0fc3b7362a | 9eb486ed23279c30e734bd52e1bc982ddf669bbe | refs/heads/master | 2022-07-08T11:21:05.912755 | 2019-01-10T12:28:46 | 2019-01-10T12:28:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,829 | py | from django.shortcuts import render,reverse
from django.http import HttpResponse,HttpResponseRedirect
from .forms import (
RegistrationForm,
EditProfileForm,
)
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
def register(request):
    """Show the signup form; on a valid POST, create the user and go to login.

    An invalid POST re-renders the bound form so its errors are shown.
    """
    if request.method != "POST":
        form = RegistrationForm()
    else:
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('accounts:login'))
    return render(request, "accounts/register.html", {'form': form})
def profile(request, pk=None):
    """Render a user's profile page.

    Shows the user with primary key *pk* when given, otherwise the
    requesting user (the request object carries the logged-in user).
    """
    target = User.objects.get(pk=pk) if pk else request.user
    return render(request, "accounts/profile.html", {'user': target})
def edit_profile(request):
    """Let the logged-in user update their own profile details.

    A valid POST saves and redirects to the profile page; a GET or an
    invalid POST renders the form (with errors, when bound).
    """
    is_post = request.method == "POST"
    form = (EditProfileForm(request.POST, instance=request.user)
            if is_post else EditProfileForm(instance=request.user))
    if is_post and form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse("accounts:profile"))
    return render(request, "accounts/prof_edit.html", {"form": form})
def change_password(request):
    """Let the logged-in user change their password.

    On success the session auth hash is refreshed so the user stays
    logged in, then they are redirected to their profile.  On failure the
    bound form is re-rendered so its validation errors are displayed
    (previously the view redirected back to itself, which discarded the
    error messages entirely).
    """
    if request.method == "POST":
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Keep the current session valid after the password change.
            update_session_auth_hash(request, form.user)
            return HttpResponseRedirect(reverse("accounts:profile"))
    else:
        form = PasswordChangeForm(user=request.user)
    return render(request, "accounts/pass_change.html", {"form": form})
| [
"ndegwaharryxon1998@gmail.com"
] | ndegwaharryxon1998@gmail.com |
c694b3c710fa1d7d763f6b5b2c107b194a665936 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations/_route_tables_operations.py | adc44f5b31718ed15851b48a3e9e38cdbd048214 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 25,722 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
# Type of the optional 'cls' callback accepted by every operation: it
# receives the pipeline response, the deserialized model, and the response
# headers, and its return value replaces the operation's result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: this class is generated by AutoRest (see the file header); manual
# edits will be lost on regeneration.
class RouteTablesOperations:
    """RouteTablesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    # Initial request of the delete long-running operation; the returned
    # response is then polled to completion by begin_delete.
    async def _delete_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        **kwargs
    ) -> None:
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all acceptable for an async delete (done, accepted,
        # or already gone).
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        route_table_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved
        # continuation token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        # Choose the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'},  **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        route_table_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> "models.RouteTable":
        """Gets the specified route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteTable, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.RouteTable
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteTable', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    # Initial request of the create-or-update long-running operation; polled
    # to completion by begin_create_or_update.
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "models.RouteTable",
        **kwargs
    ) -> "models.RouteTable":
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'RouteTable')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing table, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('RouteTable', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('RouteTable', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "models.RouteTable",
        **kwargs
    ) -> AsyncLROPoller["models.RouteTable"]:
        """Create or updates a route table in a specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param parameters: Parameters supplied to the create or update route table operation.
        :type parameters: ~azure.mgmt.network.v2019_11_01.models.RouteTable
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.RouteTable]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteTable"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved
        # continuation token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('RouteTable', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Choose the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'},  **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def update_tags(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "models.TagsObject",
        **kwargs
    ) -> "models.RouteTable":
        """Updates a route table tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param parameters: Parameters supplied to update route table tags.
        :type parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteTable, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.RouteTable
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        # Tags-only updates use PATCH (no long-running operation needed).
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteTable', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["models.RouteTableListResult"]:
        """Gets all route tables in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.RouteTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteTableListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Builds the GET request: the full listing URL for the first page, or
        # the server-provided next link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}  # type: ignore

    def list_all(
        self,
        **kwargs
    ) -> AsyncIterable["models.RouteTableListResult"]:
        """Gets all route tables in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.RouteTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteTableListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Builds the GET request: the full listing URL for the first page, or
        # the server-provided next link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}  # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
26d639c4fbab5876c769b3ea6ae7da455fd84403 | 1f7847055332e16614f5358f0ec39b39bb9a66a7 | /exercises/12_oop_inheritance/test_task_12_4.py | ec440e80a177b6ac47dabd01f370487663a50659 | [] | no_license | satperm/advpyneng-examples-exercises | 6641dae31fa7f44db7e99547bc70d740988f21b9 | 6b12c320cace1d303dae38ddba9b19550a8708ec | refs/heads/master | 2022-12-14T09:28:48.255804 | 2020-09-06T14:14:42 | 2020-09-06T14:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | import pytest
import task_12_4
import sys
sys.path.append('..')
from common_functions import check_class_exists, check_attr_or_method
def test_class_created():
check_class_exists(task_12_4, 'OrderingMixin')
def test_special_methods_created():
class IntTest(task_12_4.OrderingMixin):
def __init__(self, number):
self._number = number
def __eq__(self, other):
return self._number == other._number
def __lt__(self, other):
return self._number < other._number
int1 = IntTest(5)
check_attr_or_method(int1, method='__ge__')
check_attr_or_method(int1, method='__ne__')
check_attr_or_method(int1, method='__le__')
check_attr_or_method(int1, method='__gt__')
def test_methods():
class IntTest(task_12_4.OrderingMixin):
def __init__(self, number):
self._number = number
def __eq__(self, other):
return self._number == other._number
def __lt__(self, other):
return self._number < other._number
int1 = IntTest(5)
int2 = IntTest(3)
assert int1 != int2
assert int1 >= int2
assert int1 > int2
assert not int1 < int2
def test_methods():
class DoThing(task_12_4.OrderingMixin):
def __init__(self, num):
self.num = num
def __eq__(self, other):
return self.num == other.num
def __lt__(self, other):
return self.num < other.num
small_num = DoThing(1)
big_num = DoThing(100)
assert small_num < big_num
assert small_num <= big_num
assert not small_num > big_num
assert not small_num >= big_num
assert small_num != big_num
small_num = DoThing(1)
big_num = DoThing(100)
assert not big_num < small_num
assert not big_num <= small_num
assert big_num > small_num
assert big_num >= small_num
assert big_num != small_num
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
cd9bb1eb10be89931f7564472027e88621ad041e | 8143bfdbda6fdbef40bc570f48773edd365fcb62 | /project/Kyb-TestProject/businessView/loginView.py | 895c338f89df61341cca470210d9b35b905c1f74 | [] | no_license | CaptainJi/Kyb-TestProject | 199caef0f1e58d6bb45273114596daf6ebdc424c | 38d200d4d8436d4ad699682c3606f035446093cc | refs/heads/master | 2022-10-16T15:36:20.499879 | 2020-06-06T07:06:22 | 2020-06-06T07:06:22 | 259,554,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,798 | py | import logging
from common.commonFun import Common, NoSuchElementException
from common.desiredCaps import appium_desired
from selenium.webdriver.common.by import By
# 封装登录业务逻辑类
class LoginView(Common):
# 获取用户名、密码输入框元素
username_type = (By.ID, 'com.tal.kaoyan:id/login_email_edittext')
password_type = (By.ID, 'com.tal.kaoyan:id/login_password_edittext')
# 获取登录按钮元素
loginBtn = (By.ID, 'com.tal.kaoyan:id/login_login_btn')
tip_commit = (By.ID, 'com.tal.kaoyan:id/tip_commit')
# 获取“我的”按钮元素
button_mysefl = (By.ID, 'com.tal.kaoyan:id/mainactivity_button_mysefl')
usercenter_username = (By.ID, 'com.tal.kaoyan:id/activity_usercenter_username')
right_button = (By.ID, 'com.tal.kaoyan:id/myapptitle_RightButton_textview')
# 获取退出元素
logout = (By.ID, 'com.tal.kaoyan:id/setting_logout_text')
def login_action(self, username, password):
# 取消升级
self.check_cancel_btn()
# 跳过
self.check_skipBtn()
logging.info('开始登录')
logging.info('用户名:%s' % username)
self.driver.find_element(*self.username_type).send_keys(username)
logging.info('密码:%s' % password)
self.driver.find_element(*self.password_type).send_keys(password)
logging.info('点击登录按钮')
self.driver.find_element(*self.loginBtn).click()
def check_account_alert(self):
logging.info('检查登录警告信息')
try:
element = self.driver.find_element(*self.tip_commit)
except NoSuchElementException:
pass
else:
logging.info('跳过登录警告信息')
element.click()
def check_login_status(self):
logging.info('检查登录状态')
self.check_market_ad()
self.check_account_alert()
try:
self.driver.find_element(*self.button_mysefl).click()
self.driver.find_element(*self.usercenter_username)
except NoSuchElementException:
logging.error('登陆失败')
self.getScreenShot('登陆失败')
return False
else:
logging.info('登陆成功')
self.getScreenShot('登陆成功')
self.logout_action()
return True
def logout_action(self):
logging.info('退出登录')
self.driver.find_element(*self.right_button).click()
self.driver.find_element(*self.logout).click()
self.driver.find_element(*self.tip_commit).click()
if __name__ == '__main__':
driver = appium_desired()
l = LoginView(driver)
l.check_cancel_btn()
l.check_skipBtn()
l.login_action('', '')
l.check_login_status()
| [
"jiqing19861123@163.com"
] | jiqing19861123@163.com |
b6f6432a451ac396f4378d34ae642e68e475e1e3 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/machinelearningservices/v20210101/get_aks_service.py | d436cd9c6fca64cd385942d716820e2980e1cc9c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,922 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAKSServiceResult',
'AwaitableGetAKSServiceResult',
'get_aks_service',
]
@pulumi.output_type
class GetAKSServiceResult:
"""
Machine Learning service object wrapped into ARM resource envelope.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Service properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetAKSServiceResult(GetAKSServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAKSServiceResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_aks_service(expand: Optional[bool] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAKSServiceResult:
"""
Machine Learning service object wrapped into ARM resource envelope.
:param bool expand: Set to True to include Model details.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str service_name: Name of the Azure Machine Learning service.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20210101:getAKSService', __args__, opts=opts, typ=GetAKSServiceResult).value
return AwaitableGetAKSServiceResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
| [
"noreply@github.com"
] | noreply@github.com |
76c03caa2147554d79216e2095e861e2d45ac88e | 570c6633045d84ab681319acd41fedfd2ad04d95 | /pages/login_page.py | 17b5aed411139154def5a4bde1af627939bedf28 | [] | no_license | mlozowska/automationpractice | 60351d3398286cc1529b374fd3e426c5919593ef | 5a6f2d6e40fd438c2c0a070f3206c18bd6d380cc | refs/heads/main | 2023-02-10T18:32:50.246882 | 2021-01-08T20:20:09 | 2021-01-08T20:20:09 | 316,590,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pages.base_page import BasePage
class LoginPage(BasePage):
email_selector = (By.ID, "email")
password_selector = (By.ID, "passwd")
sign_in_button_selector = (By.ID, "SubmitLogin")
def login(self):
WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(self.email_selector)).send_keys(
"seleniumwsh@gmail.com")
self.driver.find_element(*self.password_selector).send_keys("test123")
self.driver.find_element(*self.sign_in_button_selector).click() | [
"noreply@github.com"
] | noreply@github.com |
a17ff98c13deb171611269acdc6d017228a1a35c | 3ce4e436ea3b124543b0b9102f86cbeb7f5ec0a3 | /demo/NotebookUtils.py | a03c4cf2f7b864c15fa44cd413e0badea1751504 | [] | no_license | EQ4/mir_utils | 01feb1835e4b118a223196da59e3529d443fa005 | 6d2153f466c831a715579b721b6c78abbe9c5990 | refs/heads/master | 2020-12-03T05:11:48.247160 | 2015-07-08T12:23:38 | 2015-07-08T12:23:38 | 44,550,947 | 1 | 0 | null | 2015-10-19T17:19:08 | 2015-10-19T17:19:08 | null | UTF-8 | Python | false | false | 8,731 | py |
from IPython.display import HTML
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import io
import base64
# soundcloud player
embedded_player_soundcloud = '<iframe width="100%" height="{1}" scrolling="no" frameborder="no"' + \
'src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/{0}&' + \
'auto_play=false&hide_related=true&show_comments=false&show_user=false&' + \
'show_reposts=false&visual={2}"></iframe>'
# free music archive player
embedded_player_fma = "<object width='600' height='60'><param name='movie' value='http://freemusicarchive.org/swf/trackplayer.swf'/>" + \
"<param name='flashvars' value='track=http://freemusicarchive.org/services/playlists/embed/track/{0}.xml'/>" + \
"<param name='allowscriptaccess' value='sameDomain'/>" + \
"<embed type='application/x-shockwave-flash' src='http://freemusicarchive.org/swf/trackplayer.swf'" + \
"width='500' height='80' flashvars='track=http://freemusicarchive.org/services/playlists/embed/track/{0}.xml'" + \
"allowscriptaccess='sameDomain' /></object>"
class SoundcloudTracklist(list):
def __init__(self, *args, **kwargs):
super(SoundcloudTracklist, self).__init__(args[0])
self.width = kwargs['width']
self.height = kwargs['height']
if kwargs['visual']:
self.visual = "true"
else:
self.visual = "false"
def _repr_html_(self):
html = ["<table width='{0}%' style='border:none'>".format(self.width)]
for row in self:
html.append("<tr style='border:none'>")
html.append("<td style='border:none'>{0}</td>".format(embedded_player_soundcloud.format(row,
self.height,
self.visual)))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
class FMATracklist(list):
def __init__(self, width=100, height=120, visual=False):
super(SoundcloudTracklist, self).__init__()
self.width = width
self.height = height
if visual:
self.visual = "true"
else:
self.visual = "false"
def _repr_html_(self):
html = ["<table width='{0}%' style='border:none'>".format()]
for row in self:
html.append("<tr style='border:none'>")
html.append("<td style='border:none'>{0}</td>".format(embedded_player_soundcloud.format(row),
self.height,
self.visual))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
class compareSimilarityResults(list):
def __init__(self, *args, **kwargs):
super(compareSimilarityResults, self).__init__(args[0])
self.width = kwargs['width']
self.height = kwargs['height']
self.columns = kwargs['columns']
if kwargs['visual']:
self.visual = "true"
else:
self.visual = "false"
def _repr_html_(self):
data = np.asarray(self).T.tolist()
html = ["<table width='{0}%' style='border:none'>".format(self.width)]
# === titles ===
html.append("<tr style='border:none'>")
for col_name in self.columns:
html.append("<td style='border:none'><center><b>{0}</b></center></td>".format(col_name))
html.append("</tr>")
for row in data:
html.append("<tr style='border:none'>")
for col in row:
html.append("<td style='border:none'>{0}</td>".format(embedded_player_soundcloud.format(col,
self.height,
self.visual)))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
def get_rp_as_imagebuf(features, width=493, height=352, dpi=72, cmap='jet'):
features = features.reshape(24,60,order='F')
plt.ioff()
fig = plt.figure(figsize=(int(width/dpi), int(height/dpi)), dpi=dpi);
ax = fig.add_subplot(111)
fig.suptitle('Rhythm Patterns')
ax.imshow(features, origin='lower', aspect='auto',interpolation='nearest',cmap=cmap);
ax.set_xlabel('Mod. Frequency Index');
ax.set_ylabel('Frequency [Bark]');
img_buffer = io.BytesIO()
plt.savefig(img_buffer, format = 'png');
img_buffer.seek(0)
plt.close()
plt.ion()
return base64.b64encode(img_buffer.getvalue())
def get_rh_as_imagebuf(hist, width=493, height=352, dpi=72, normalize=True):
if len(hist.shape) == 2:
hist = hist[0]
if normalize:
hist /= np.sum(hist)
plt.ioff()
fig = plt.figure(figsize=(int(width/dpi), int(height/dpi)), dpi=dpi);
#plt.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
ax = fig.add_subplot(111)
fig.suptitle('Rhythm Histogram')
ax.bar(np.arange(0,60) / 6.0,hist);
ax.set_xlim([0.0, 10.0])
ax.set_xlabel('Mod. Frequency Index');
img_buffer = io.BytesIO()
plt.savefig(img_buffer, format = 'png');
img_buffer.seek(0)
plt.close()
plt.ion()
return base64.b64encode(img_buffer.getvalue())
def get_ssd_as_imagebuf(features, width=493, height=352, dpi=72, cmap='jet', std=False):
features = features.reshape(24,7,order='F')
if std:
features[:,1] = np.sqrt(features[:,1])
plt.ioff()
fig = plt.figure(figsize=(int(width/dpi), int(height/dpi)), dpi=dpi);
ax = fig.add_subplot(111)
fig.suptitle('Statistical Spectrum Descriptors')
ax.imshow(features, origin='lower', aspect='auto',interpolation='nearest',cmap=cmap);
ax.set_xticklabels(['','mean', 'var', 'skew', 'kurt', 'median', 'min', 'max'])
ax.set_ylabel('Frequency [Bark]')
img_buffer = io.BytesIO()
plt.savefig(img_buffer, format = 'png');
img_buffer.seek(0)
plt.close()
plt.ion()
return base64.b64encode(img_buffer.getvalue())
def show_rp_features_with_souncloud_player(scds, soundcloud_ids, feature_sets, width=900, margin=10):
img_width = np.min([430, int((width - 2*margin) / len(soundcloud_ids))])
img_height = int(img_width * (288.0/432.0))
supported_features = ['rp', 'rh', 'ssd']
html = ["<table width='100%' style='border:none'>"]
if img_width <= 300:
html.append("<tr style='border:none'>")
for scid in soundcloud_ids:
html.append("<td style='border:none;text-align:center'><center><b>{0}</b></center></td>".format(scds.getNameByID(scid)))
html.append("</tr>")
# === Soundcloud Players ===
html.append("<tr style='border:none'>")
for scid in soundcloud_ids:
html.append("<td style='border:none;text-align:center'>{0}</td>".format(scds.getPlayerHTMLForID(scid, width=90, visual=False)))
html.append("</tr>")
# === feature-Plots ===
for f_set in feature_sets:
html.append("<tr style='border:none'>")
for scid in soundcloud_ids:
if f_set in supported_features:
if f_set == 'rp':
features = scds.getFeaturesForID(scid, 'rp')
img_tag = "<img src='data:image/png;base64," + get_rp_as_imagebuf(features, width=img_width, height=img_height) + "'/>"
elif f_set == 'rh':
features = scds.getFeaturesForID(scid, 'rh')
img_tag = "<img src='data:image/png;base64," + get_rh_as_imagebuf(features, width=img_width, height=img_height) + "'/>"
elif f_set == 'ssd':
features = scds.getFeaturesForID(scid, 'ssd')
img_tag = "<img src='data:image/png;base64," + get_ssd_as_imagebuf(features, width=img_width, height=img_height, std=True) + "'/>"
html.append("<td align='center' style='border:none;text-align:center'>{0}</td>".format(img_tag))
else:
html.append("<td align='center' style='border:none;text-align:center'>Featureset '{0}' not supported!</td>".format(f_set))
html.append("</tr>")
html.append("</table>")
result = ''.join(html)
return HTML(result) | [
"alexander.schindler@slychief.com"
] | alexander.schindler@slychief.com |
93d2f68025d7de1391004e92dc70bd485daaf7c6 | cc909ddfeef7104f9ab8312ec55062ae5f880cc2 | /algorithm-study/study/bigNumber.py | 9af48f163edd687f064269759ded9d9e51184202 | [
"MIT"
] | permissive | Seongkyun-Yu/TIL | 2faebc82932a039a52e892d415e265eb04ea9c32 | 0098a38d32b8c40e75f4173254d2c23fff62a4ec | refs/heads/master | 2023-09-02T04:02:53.767439 | 2023-09-01T16:04:43 | 2023-09-01T16:04:43 | 234,017,423 | 2 | 0 | MIT | 2023-03-04T15:22:10 | 2020-01-15T06:51:10 | JavaScript | UTF-8 | Python | false | false | 273 | py | n, m, k = map(int, input().split())
numbers = list(map(int, input().split()))
numbers.sort(reverse=True)
result = 0
count = 0
index = 0
for _ in range(m):
count += 1
index = 0
if count > k:
index = 1
count = 0
result += numbers[index]
print(result) | [
"ysungkyun@gmail.com"
] | ysungkyun@gmail.com |
4c15bf757d374d5e5c239580434f90d6b8c3c0d3 | c3cbafe782e44b5c514e65bc57f7366e78354474 | /ex_script2.py | 55d5b143e892c75f1005e4b1d90ca43bcd76f5e5 | [] | no_license | michaelbell1011/bootcamp_hwk3_python | bf37da17b2b2ebcfcbf1227a5c4382051c338215 | 51ceca30e8613ebe7115fac7980920bb0607a424 | refs/heads/master | 2020-03-24T12:00:29.622835 | 2019-01-27T23:30:41 | 2019-01-27T23:30:41 | 142,700,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # * Create a dictionary to store the following:
# * Your name
# * Your age
# * A list of a few of your hobbies
# * A dictionary of a few times you wake up during the week
# * Print out your name, how many hobbies you have and a time you get up during the week
personal_info = {
"name": "Michael Bell",
"age": 28,
"hobbies": ["data", "Rocket League", "Cooking", "Walking Maggie"],
"wake_up_times" : {
"M": 5,
"T": 6,
"W": 7
}
}
print(personal_info)
print(personal_info ["name"])
print(str(len(personal_info ["hobbies"])) + " hobbies")
print(str(personal_info ["wake_up_times"]["M"]) + " AM") | [
"michaelbell1011@gmail.com"
] | michaelbell1011@gmail.com |
b6c38dbb18271645e5e811b27aa5e9b3529daa0d | fed4606ed87fb67214b96c17679731ff9799de72 | /envFilm/Scripts/django-admin.py | 550bd7161434a30001b104c13cae2dbaa0aedfc2 | [] | no_license | xKaterina/films | 8f3dd2c46a901e2075738ca1551dc726f675ebc2 | 8105ba5697ea4ad1c6e5a5abcfd2795208111668 | refs/heads/master | 2020-03-07T19:28:13.273194 | 2018-04-05T08:27:13 | 2018-04-05T08:27:13 | 127,671,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!c:\pythonproject\filmsenv\envfilm\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"ekaterina.kalachnikova@gmail.com"
] | ekaterina.kalachnikova@gmail.com |
ce3aea9787e33c1377f9a0450ca3aa11540c041c | 6abc8ca8ea3dd4bcb58d44ef5d9b10a222898d8c | /Lecture3/examplelec3.py | c67d3213f46350ce3fc75796aeed0e950963dc7d | [] | no_license | addisonv/SI364 | ed31167a9a67162f25ba8cc669cec38102728a93 | 313e1150cd495e82a48e2e11370e45f0d5c092b2 | refs/heads/master | 2021-05-13T17:44:35.708613 | 2018-02-19T02:04:58 | 2018-02-19T02:04:58 | 116,834,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from flask import Flask, request, render_template, url_for
app = Flask(__name__)
@app.route('/oranges')
def lemons():
title_var = "My Ice Cream Form"
options = ['Chocolate', 'Vanilla', 'Superman', 'Pistachio', 'Butter Pecan']
return render_template('seeform.html',title=title_var, lst_stuff=options)
@app.route('/apples', methods = ['GET'])
def plants():
if request.method == 'GET':
name = request.args.get('name','')
name_len = len(name)
flavor_options = []
form_items = dict(request.args)
for x in form_items:
if x != 'name':
flavor_options.append(x)
return render_template('results.html',flavors=flavor_options, name_len=name_len, name=name)
if __name__ == "__main__":
app.run(use_reloader=True,debug=True)
| [
"addisonv@umich.edu"
] | addisonv@umich.edu |
b65f96bee6c891e742a26f9d3d76f59dec94b3e2 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/cdn/v20200331/_enums.py | 656075cbb5232d664bcd6a85457bace0d55ba8c4 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 10,641 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ActionType',
'CacheBehavior',
'CacheType',
'CookiesOperator',
'CustomRuleEnabledState',
'DeliveryRuleAction',
'DestinationProtocol',
'GeoFilterActions',
'HeaderAction',
'HealthProbeRequestType',
'HttpVersionOperator',
'IsDeviceOperator',
'ManagedRuleEnabledState',
'MatchVariable',
'Operator',
'OptimizationType',
'PolicyEnabledState',
'PolicyMode',
'PostArgsOperator',
'ProbeProtocol',
'QueryStringBehavior',
'QueryStringCachingBehavior',
'QueryStringOperator',
'RedirectType',
'RemoteAddressOperator',
'RequestBodyOperator',
'RequestHeaderOperator',
'RequestMethodOperator',
'RequestUriOperator',
'ResponseBasedDetectedErrorTypes',
'SkuName',
'Transform',
'TransformType',
'UrlFileExtensionOperator',
'UrlFileNameOperator',
'UrlPathOperator',
]
class ActionType(str, Enum):
"""
Describes what action to be applied when rule matches
"""
ALLOW = "Allow"
BLOCK = "Block"
LOG = "Log"
REDIRECT = "Redirect"
class CacheBehavior(str, Enum):
"""
Caching behavior for the requests
"""
BYPASS_CACHE = "BypassCache"
OVERRIDE = "Override"
SET_IF_MISSING = "SetIfMissing"
class CacheType(str, Enum):
"""
The level at which the content needs to be cached.
"""
ALL = "All"
class CookiesOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class CustomRuleEnabledState(str, Enum):
"""
Describes if the custom rule is in enabled or disabled state. Defaults to Enabled if not specified.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class DeliveryRuleAction(str, Enum):
"""
The name of the action for the delivery rule.
"""
CACHE_EXPIRATION = "CacheExpiration"
CACHE_KEY_QUERY_STRING = "CacheKeyQueryString"
MODIFY_REQUEST_HEADER = "ModifyRequestHeader"
MODIFY_RESPONSE_HEADER = "ModifyResponseHeader"
URL_REDIRECT = "UrlRedirect"
URL_REWRITE = "UrlRewrite"
URL_SIGNING = "UrlSigning"
class DestinationProtocol(str, Enum):
"""
Protocol to use for the redirect. The default value is MatchRequest
"""
MATCH_REQUEST = "MatchRequest"
HTTP = "Http"
HTTPS = "Https"
class GeoFilterActions(str, Enum):
"""
Action of the geo filter, i.e. allow or block access.
"""
BLOCK = "Block"
ALLOW = "Allow"
class HeaderAction(str, Enum):
"""
Action to perform
"""
APPEND = "Append"
OVERWRITE = "Overwrite"
DELETE = "Delete"
class HealthProbeRequestType(str, Enum):
"""
The type of health probe request that is made.
"""
NOT_SET = "NotSet"
GET = "GET"
HEAD = "HEAD"
class HttpVersionOperator(str, Enum):
"""
Describes operator to be matched
"""
EQUAL = "Equal"
class IsDeviceOperator(str, Enum):
"""
Describes operator to be matched
"""
EQUAL = "Equal"
class ManagedRuleEnabledState(str, Enum):
"""
Describes if the managed rule is in enabled or disabled state. Defaults to Disabled if not specified.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class MatchVariable(str, Enum):
"""
Match variable to compare against.
"""
REMOTE_ADDR = "RemoteAddr"
SOCKET_ADDR = "SocketAddr"
REQUEST_METHOD = "RequestMethod"
REQUEST_HEADER = "RequestHeader"
REQUEST_URI = "RequestUri"
QUERY_STRING = "QueryString"
REQUEST_BODY = "RequestBody"
COOKIES = "Cookies"
POST_ARGS = "PostArgs"
class Operator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
IP_MATCH = "IPMatch"
GEO_MATCH = "GeoMatch"
EQUAL = "Equal"
CONTAINS = "Contains"
LESS_THAN = "LessThan"
GREATER_THAN = "GreaterThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
REG_EX = "RegEx"
class OptimizationType(str, Enum):
"""
Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. With this information, CDN can apply scenario driven optimization.
"""
GENERAL_WEB_DELIVERY = "GeneralWebDelivery"
GENERAL_MEDIA_STREAMING = "GeneralMediaStreaming"
VIDEO_ON_DEMAND_MEDIA_STREAMING = "VideoOnDemandMediaStreaming"
LARGE_FILE_DOWNLOAD = "LargeFileDownload"
DYNAMIC_SITE_ACCELERATION = "DynamicSiteAcceleration"
class PolicyEnabledState(str, Enum):
"""
describes if the policy is in enabled state or disabled state
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class PolicyMode(str, Enum):
"""
Describes if it is in detection mode or prevention mode at policy level.
"""
PREVENTION = "Prevention"
DETECTION = "Detection"
class PostArgsOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class ProbeProtocol(str, Enum):
"""
Protocol to use for health probe.
"""
NOT_SET = "NotSet"
HTTP = "Http"
HTTPS = "Https"
class QueryStringBehavior(str, Enum):
"""
Caching behavior for the requests
"""
INCLUDE = "Include"
INCLUDE_ALL = "IncludeAll"
EXCLUDE = "Exclude"
EXCLUDE_ALL = "ExcludeAll"
class QueryStringCachingBehavior(str, Enum):
"""
Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL.
"""
IGNORE_QUERY_STRING = "IgnoreQueryString"
BYPASS_CACHING = "BypassCaching"
USE_QUERY_STRING = "UseQueryString"
NOT_SET = "NotSet"
class QueryStringOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class RedirectType(str, Enum):
"""
The redirect type the rule will use when redirecting traffic.
"""
MOVED = "Moved"
FOUND = "Found"
TEMPORARY_REDIRECT = "TemporaryRedirect"
PERMANENT_REDIRECT = "PermanentRedirect"
class RemoteAddressOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
IP_MATCH = "IPMatch"
GEO_MATCH = "GeoMatch"
class RequestBodyOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class RequestHeaderOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class RequestMethodOperator(str, Enum):
"""
Describes operator to be matched
"""
EQUAL = "Equal"
class RequestUriOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class ResponseBasedDetectedErrorTypes(str, Enum):
"""
Type of response errors for real user requests for which origin will be deemed unhealthy
"""
NONE = "None"
TCP_ERRORS_ONLY = "TcpErrorsOnly"
TCP_AND_HTTP_ERRORS = "TcpAndHttpErrors"
class SkuName(str, Enum):
"""
Name of the pricing tier.
"""
STANDARD_VERIZON = "Standard_Verizon"
PREMIUM_VERIZON = "Premium_Verizon"
CUSTOM_VERIZON = "Custom_Verizon"
STANDARD_AKAMAI = "Standard_Akamai"
STANDARD_CHINA_CDN = "Standard_ChinaCdn"
STANDARD_MICROSOFT = "Standard_Microsoft"
PREMIUM_CHINA_CDN = "Premium_ChinaCdn"
class Transform(str, Enum):
"""
Describes what transforms are applied before matching
"""
LOWERCASE = "Lowercase"
UPPERCASE = "Uppercase"
class TransformType(str, Enum):
    """
    Describes the transforms that were applied to the match value before matching.
    """

    LOWERCASE = "Lowercase"
    UPPERCASE = "Uppercase"
    TRIM = "Trim"
    URL_DECODE = "UrlDecode"
    URL_ENCODE = "UrlEncode"
    REMOVE_NULLS = "RemoveNulls"
class UrlFileExtensionOperator(str, Enum):
    """
    Describes the operator to be matched against the URL file extension.
    """

    ANY = "Any"
    EQUAL = "Equal"
    CONTAINS = "Contains"
    BEGINS_WITH = "BeginsWith"
    ENDS_WITH = "EndsWith"
    LESS_THAN = "LessThan"
    LESS_THAN_OR_EQUAL = "LessThanOrEqual"
    GREATER_THAN = "GreaterThan"
    GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class UrlFileNameOperator(str, Enum):
    """
    Describes the operator to be matched against the URL file name.
    """

    ANY = "Any"
    EQUAL = "Equal"
    CONTAINS = "Contains"
    BEGINS_WITH = "BeginsWith"
    ENDS_WITH = "EndsWith"
    LESS_THAN = "LessThan"
    LESS_THAN_OR_EQUAL = "LessThanOrEqual"
    GREATER_THAN = "GreaterThan"
    GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class UrlPathOperator(str, Enum):
    """
    Describes the operator to be matched against the URL path.
    """

    ANY = "Any"
    EQUAL = "Equal"
    CONTAINS = "Contains"
    BEGINS_WITH = "BeginsWith"
    ENDS_WITH = "EndsWith"
    LESS_THAN = "LessThan"
    LESS_THAN_OR_EQUAL = "LessThanOrEqual"
    GREATER_THAN = "GreaterThan"
    GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
    WILDCARD = "Wildcard"
| [
"noreply@github.com"
] | noreply@github.com |
9deed2e10501ba1a8d6f3c0f052412d7cbb1bb3d | dd097c7ae744227b0312d762ee0482a3380ff8c6 | /plot_tg.py | 9f751bebfaa64b4b76be445e5325e06e65df06b0 | [] | no_license | moflaher/workspace_python | 0d6e98274d923a721db2b345f65c20b02ca59d08 | 6551e3602ead3373eafce10d11ce7b96bdcb106f | refs/heads/master | 2023-03-06T02:15:01.945481 | 2023-03-01T19:15:51 | 2023-03-01T19:15:51 | 20,814,932 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,079 | py | from __future__ import division,print_function
import matplotlib as mpl
import scipy as sp
from folderpath import *
from datatools import *
from gridtools import *
from plottools import *
from projtools import *
from stattools import *
import interptools as ipt
import matplotlib.tri as mplt
import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import os as os
import sys
np.set_printoptions(precision=8,suppress=True,threshold=sys.maxsize)
import pandas as pd
import netCDF4 as n4
import copy
import matplotlib.dates as dates
import argparse
# ttide is optional: tidal-constituent analysis and residual plots are
# skipped when it is missing.
# NOTE(review): bare 'except:' also swallows KeyboardInterrupt/SystemExit;
# 'except ImportError:' would be safer.
try:
    import ttide
    tide=True
except:
    print('No ttide')
    tide=False
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("grid", help="name of the grid", type=str)
parser.add_argument("name", help="name of the run", type=str,default=None, nargs='?')
parser.add_argument("--station", help="switch to station output instead of fvcom output", default=False,action='store_true')
parser.add_argument("-dates", help="specify start and end date",type=str,nargs=2,default=None)
parser.add_argument("-snr", help="signal to noise ratio value used for constituent cutoff", type=float,default=2.0)
parser.add_argument("-skipdays", help="number of days to skip at start of timeseries", type=float,default=14.0)
args = parser.parse_args()
print("The current commandline arguments being used are")
print(args)
name=args.name
grid=args.grid
# Model output flavour: station files or full fvcom output files.
if args.station:
    tag='station'
else:
    tag='fvcom'
# find tg ncfiles
# Month tick locator/formatter shared by all time-axis plots.
months = dates.MonthLocator()
monthsFmt = dates.DateFormatter('%b')
# Output folders for figures and csv tables (created if absent).
# figpath/datapath/obspath come from folderpath via the star import above.
savepath='{}png/{}/tg/{}/'.format(figpath,grid,name)
if not os.path.exists(savepath): os.makedirs(savepath)
savepath2='{}png/{}/tg/{}/csv/'.format(figpath,grid,name)
if not os.path.exists(savepath2): os.makedirs(savepath2)
inpath='{}{}/tg/{}/'.format(datapath,grid,name)
# Tide-gauge model netCDF files for this run.
filenames=glob.glob('{}tg_*_{}.nc'.format(inpath,tag))
filenames.sort()
#tg_*.nc'.format(obspath)
# Main loop: for each tide-gauge model file, load the matching observed
# gauge record, plot modelled vs. observed elevation, and (if ttide is
# available) fit tidal constituents, plot residuals, and dump csv tables.
for i,filename in enumerate(filenames):
    print('='*80)
    print(i)
    print(filename)
    # tgm = model timeseries; tgo = observed gauge record with same number.
    tgm = loadnc('',filename,False)
    tgo = loadnc('{}east/all/'.format(obspath),'tg_{:05d}.nc'.format(tgm['tgnumber'][0]),False)
    # Analysis window: either the user-supplied date range, or the whole
    # model record minus the first 'skipdays' days of spin-up.
    if args.dates is not None:
        din=dates.datestr2num(args.dates)
        figstr='{}{}_{}_tg_{:05d}_{}_to_{}.png'.format(savepath,grid,name,tgm['tgnumber'][0],args.dates[0],args.dates[1])
        figstr2='{}{}_{}_tg_{:05d}_residual_{}_to_{}.png'.format(savepath,grid,name,tgm['tgnumber'][0],args.dates[0],args.dates[1])
        figstr3='{}{}_{}_tg_{:05d}_{}_to_{}'.format(savepath2,grid,name,tgm['tgnumber'][0],args.dates[0],args.dates[1])
    else:
        din=np.array([tgm['time'][0]+args.skipdays,tgm['time'][-1]])
        figstr='{}{}_{}_tg_{:05d}.png'.format(savepath,grid,name,tgm['tgnumber'][0])
        figstr2='{}{}_{}_tg_{:05d}_residual.png'.format(savepath,grid,name,tgm['tgnumber'][0])
        figstr3='{}{}_{}_tg_{:05d}'.format(savepath2,grid,name,tgm['tgnumber'][0])
    # Observed samples inside the window; interpolate model and obs onto a
    # common time base, rejecting values outside [-500, 500].
    idx=np.argwhere((tgo['time']>=din[0]) & (tgo['time']<=din[1]))
    idx=np.ravel(idx)
    time1,data1,data2=interp_clean_common(tgo['time'][idx],tgo['zeta'][idx],tgm['time'],tgm['zeta'],500,-500)
    # Residual statistics between demeaned model and demeaned observations.
    stats=residual_stats(data2-np.mean(data2), data1-np.mean(data1))
    a=pd.DataFrame(stats,index=[0]).round(2).T[0]
    # Elevation comparison figure.
    f=plt.figure(figsize=(15,5));
    ax=f.add_axes([.125,.1,.775,.8]);
    ax.plot(time1,data1-np.mean(data1),'k',label='TG: {:05d}'.format(tgm['tgnumber'][0]))
    ax.plot(time1,data2-np.mean(data2),'r',lw=.5,label='{}'.format(name))
    ax.xaxis.set_major_locator(months)
    ax.xaxis.set_major_formatter(monthsFmt)
    ax.legend()
    ax.set_ylabel('Elevation (m)')
    f.suptitle('Removed TG means - Obs: {} Model: {}\n Bias: {} Std: {} RMSE: {} RAE: {} Corr: {} Skew: {} Skill: {}'.format(np.mean(data1),np.mean(data2),a[0],a[1],a[2],a[3],a[4],a[5],a[6]))
    f.savefig(figstr,dpi=600)
    if tide:
        # Resample both series onto an hourly grid for the harmonic fits.
        # NOTE(review): this rebinds the name 'time' (shadows any time module
        # pulled in by the star imports) for the rest of the loop body.
        time=np.arange(time1[0],time1[-1]+1/24.0,1/24.0)
        tgm_int=ipt.interp1d(tgm['time'],tgm['zeta'],time)
        tgonan=tgo['zeta'][idx]
        tgonan[tgonan>500]=np.nan
        tgo_int=ipt.interp1d(tgo['time'][idx],tgonan,time)
        # First pass finds significant constituents; second pass refits using
        # only those with signal-to-noise >= args.snr.
        tgm_tcon_pre=ttide.t_tide(tgm_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,out_style=None)
        tgo_tcon_pre=ttide.t_tide(tgo_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,out_style=None)
        tgm_tcon=ttide.t_tide(tgm_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,constitnames=tgm_tcon_pre['nameu'][tgm_tcon_pre['snr']>=args.snr],out_style=None)
        tgo_tcon=ttide.t_tide(tgo_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,constitnames=tgo_tcon_pre['nameu'][tgo_tcon_pre['snr']>=args.snr],out_style=None)
        # Residual (de-tided) elevation comparison figure.
        f=plt.figure(figsize=(15,5));
        ax=f.add_axes([.125,.1,.775,.8]);
        ax.plot(time[:len(tgo_tcon['xres'])],tgo_tcon['xres']-np.nanmean(tgo_tcon['xres']),'k',label='TG: {:05d}'.format(tgm['tgnumber'][0]))
        ax.plot(time[:len(tgm_tcon['xres'])],tgm_tcon['xres']-np.nanmean(tgm_tcon['xres']),'r',lw=.5,label='{}'.format(name))
        ax.xaxis.set_major_locator(months)
        ax.xaxis.set_major_formatter(monthsFmt)
        ax.legend()
        ax.set_ylabel('Residual Elevation (m)')
        o,m=remove_common_nan(tgo_tcon['xres']-np.nanmean(tgo_tcon['xres']), tgm_tcon['xres']-np.nanmean(tgm_tcon['xres']))
        stats=residual_stats(o,m)
        a=pd.DataFrame(stats,index=[0]).round(2).T[0]
        f.suptitle('Removed TG means - Obs: {} Model: {}\n Bias: {} Std: {} RMSE: {} RAE: {} Corr: {} Skew: {} Skill: {}'.format(np.nanmean(tgo_tcon['xres']),np.nanmean(tgm_tcon['xres']),a[0],a[1],a[2],a[3],a[4],a[5],a[6]))
        f.savefig(figstr2,dpi=600)
        # Full constituent tables (amplitude/phase and their errors).
        df=pd.DataFrame(tgm_tcon['tidecon'],columns=['Amp','AmpE','Phase','PhaseE'],index=tgm_tcon['nameu']).round(2).sort_values('Amp',ascending=False)
        df.to_csv('{}_model_full.csv'.format(figstr3))
        df=pd.DataFrame(tgo_tcon['tidecon'],columns=['Amp','AmpE','Phase','PhaseE'],index=tgo_tcon['nameu']).round(2).sort_values('Amp',ascending=False)
        df.to_csv('{}_obs_full.csv'.format(figstr3))
        # Constituents resolved in BOTH fits -> obs/model difference table.
        namesm=tgm_tcon['nameu']
        cnames=np.array([])
        for namea in namesm:
            if namea in tgo_tcon['nameu']:
                cnames=np.append(cnames,namea)
        oidx=np.in1d(tgo_tcon['nameu'],cnames)
        midx=np.in1d(tgm_tcon['nameu'],cnames)
        diff=np.vstack([tgo_tcon['tidecon'][oidx,0],tgm_tcon['tidecon'][midx,0],tgo_tcon['tidecon'][oidx,0]-tgm_tcon['tidecon'][midx,0],
                tgo_tcon['tidecon'][oidx,2],tgm_tcon['tidecon'][midx,2],tgo_tcon['tidecon'][oidx,2]-tgm_tcon['tidecon'][midx,2]]).T
        df=pd.DataFrame(diff,columns=['AmpObs','AmpMod','AmpDiff','PhaseObs','PhaseMod','PhaseDiff'],index=cnames).round(2).sort_values('AmpObs',ascending=False)
        df.to_csv('{}_obsmod_common_diff.csv'.format(figstr3))
#kill
| [
"073208o@acadiau.ca"
] | 073208o@acadiau.ca |
965aa37a6c591e7b8311628628acd353092d440d | 8ef7e34b930e4a5e3e0913d41471c6affaa7e416 | /papier.py | 2f23bd8efc523a298d9ed2069c9a5fc02c685d20 | [] | no_license | Lanranyan/Hello-World | a4ecb46bb535d149a842cfa3b0817e5846a383e2 | 726341a17812e2371ecff58a1f3bba540cff8415 | refs/heads/master | 2020-04-05T00:35:27.398201 | 2019-03-07T15:45:57 | 2019-03-07T15:45:57 | 156,403,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py |
import random
from textlist import text
from textlist import word_w
def gallows(penal):
    """Return the ASCII gallows drawing for a given penalty count.

    Stages 0-4 progressively add the scaffold, head, body and an arm; any
    penalty of 5 or more returns the final drawing (the game loop lets
    ``penalty`` reach 7, so the last stage must cover that range).

    Bug fix: the original ``penal == 5`` branch was a syntax error (a string
    continuation immediately followed by ``return gal``), and penalties
    above 5 left ``gal`` unbound.  The orphaned string literal that followed
    the broken line is used here as the final stage.
    """
    if penal == 0:
        gal = " _______| \n"\
              " | \n"\
              " | \n"\
              " | \n"\
              " | \n"\
              " | \n"\
              " / \ \n"\
              " / \ \n"
    elif penal == 1:
        gal = " _______|\n" \
              " | |\n" \
              " |\n" \
              " |\n" \
              " |\n" \
              " |\n" \
              " / \ \n" \
              " / \ \n"
    elif penal == 2:
        gal = " -------|\n" \
              " | |\n" \
              " O |\n" \
              " |\n" \
              " |\n" \
              " |\n" \
              " / \ \n" \
              " / \ \n"
    elif penal == 3:
        gal = " -------|\n" \
              " | |\n" \
              " O |\n" \
              " | |\n" \
              " | |\n" \
              " |\n" \
              " / \ \n" \
              " / \ \n"
    elif penal == 4:
        gal = " -------|\n" \
              " | |\n" \
              " O |\n" \
              " | / |\n" \
              " | |\n" \
              " |\n" \
              " / \ \n" \
              " / \ \n"
    else:
        # penal >= 5: final stage (recovered from the original's orphaned
        # literal; the intended completed figure was lost to the syntax error).
        gal = " -------|\n" \
              " | |\n" \
              " O |\n" \
              " | / |\n" \
              " | |\n" \
              " |\n" \
              " / \ \n" \
              " / \ \n"
    return gal
# Main game loop: pick a random word, let the player guess letters until the
# word is revealed or 7 penalties accumulate, then offer another round.
# Bug fixes vs. original: random.radint -> random.randint; undefined 'o' ->
# 0; undefined 'text_w' -> the imported word_w list; len(...) -> len(...) - 1
# (randint is inclusive at both ends); 'wordw'/'word' name mismatch unified.
again = 'y'
while again == 'y':
    ### choose a random word from the imported word list
    index = random.randint(0, len(word_w) - 1)
    word = word_w[index]
    secret_word = list(word)  # list the letters of the word
    print(secret_word)  # NOTE(review): this reveals the answer (debug output?)
    new_word = ['*' for i in range(len(secret_word))]  # Replaces every letter with * asterisk
    print(new_word)
    penalty = 0
    letters_used = []
    while '*' in new_word and penalty < 7:  # as long as there are *s, the word isn't fully guessed
        guess = input("Enter a letter \n")
        if guess not in secret_word or guess in letters_used:  # wrong or repeated letter
            penalty += 1
        else:
            for i in range(len(secret_word)):  # reveal every position matching the guess
                if guess == secret_word[i]:
                    new_word[i] = secret_word[i]
        letters_used.append(guess)  # record the letter regardless of outcome
        print(letters_used)
        print(new_word)  # word's current state
        print('penalty', penalty)
        print(gallows(penalty))
    print("End")
    again = input('If you want to keep playing, press y, else press n')
print("Thank you for playing hangman! See you next time!")
exit()
| [
"noreply@github.com"
] | noreply@github.com |
edf73b1370276c264df1373158286010c38bb3be | 705867264a495a465caabecf3a9cf0b63b2f66de | /madagascarBuild/SConstruct | d35831b811128c6119f4b69a12c3ad8c6892dd35 | [] | no_license | AtilaSaraiva/Simple-Madagascar-Curvelet-Example | ab39d352333937a9ad8d675a85a42f61bca9b3ae | 99f76a5d079c35d9716ee78b1faea9a9d535a845 | refs/heads/main | 2023-04-17T09:34:24.766989 | 2021-04-30T00:12:50 | 2021-04-30T00:12:50 | 362,979,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | from rsf.proj import *
Flow("aux1", None,
"""
spike n1=400 n2=400 d1=10 d2=10 mag=200
""")
Flow("aux2", None,
"""
spike n1=400 n2=400 d1=10 d2=10 mag=400 k1=200 l1=400
""")
Flow("testInputData", ["aux1", "aux2"],
"""
add mode=a ${SOURCES[1]}
""")
Flow("dadoFiltrado","testInputData.rsf",
"""
../sfcurvelet nbs=4 nba=16 ac=1 d1=${SOURCES[0]}
""")
Result("dadoFiltrado",
"""
grey gainpanel=a
""")
End()
| [
"atilasaraiva@gmail.com"
] | atilasaraiva@gmail.com | |
e9f00fdd384785ab473b0134aad8cfffa1a927f6 | cc5cbad34c7e26353078f1748b6f3990adc16802 | /02 Keithley SourceMeter 2450/Triple Linear Resistance Measurement/v3.0 Triple Linear Resistance Measurement/Sub_Scripts/Sweep.py | a2abefce5f30fd7099fcc4e64207593505be02df | [] | no_license | Brucewanghahei/QMD_Old_Programs | a51c8520f2b13b0e2ae9a57c053a9032cd4684ee | c67e7e95a4f3237bd5688c0b97a206d5c516335b | refs/heads/master | 2021-08-31T16:05:08.548584 | 2017-12-22T01:00:18 | 2017-12-22T01:00:18 | 115,060,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,101 | py | import visa
import numpy
import time
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# NOTE(review): module-level leftovers -- never read or written below;
# analyze() builds its own scale lists locally.
resistance_first_scale = []
resistance_second_scale = []
def _display_scale(values, base_unit):
    """Pick a display scale ``[multiplier, label]`` for a list of SI values.

    Returns e.g. ``[1E3, "mVolts"]`` so that ``values * multiplier`` are in
    the labelled unit.  Uses the largest magnitude in ``values`` (the
    original used ``abs(max(values))``, which picks the wrong scale for
    negative-going sweeps).  Falls back to the base unit below 1E-12.
    """
    peak = max(abs(v) for v in values)
    for factor, prefix in ((1, ""), (1E3, "m"), (1E6, "u"), (1E9, "n"), (1E12, "p")):
        if peak >= 1.0 / factor:
            return [factor, prefix + base_unit]
    return [1, base_unit]


def _resistance_scale(resistance):
    """Pick a display scale ``[multiplier, label]`` for a resistance in Ohms.

    Fixes the original's misspelled ``resistance_*_sclae`` assignments,
    which made the mOhms branch dead code, and uses consistent ``>=``
    boundaries throughout.
    """
    if resistance >= 1E9:
        return [1E-9, "GOhms"]
    if resistance >= 1E6:
        return [1E-6, "MOhms"]
    if resistance >= 1E3:
        return [1E-3, "kOhms"]
    if resistance >= 1:
        return [1, "Ohms"]
    if resistance >= 1E-3:
        return [1E3, "mOhms"]
    return [1, "Ohms"]


def analyze(data, sweep, time_difference, length_first, length_second):
    """Fit three linear I-V segments and scale everything for display.

    data            -- flat list of alternating (source, reading) strings or
                       numbers as returned by the instrument; converted to
                       float in place.
    sweep           -- "VOLTAGE" (sourced V, measured I) or "CURRENT"
                       (sourced I, measured V); anything else raises
                       ValueError (the original crashed with NameError).
    time_difference -- elapsed sweep time, passed through to the result.
    length_first/length_second -- number of points in the first and second
                       fit segments; the remainder forms the third.

    Returns the same 16-element list as the original implementation:
    [voltage_values, current_values, r1, r2, r3, fit1, fit2, fit3,
     voltage_scale, current_scale, r1_scale, r2_scale, r3_scale,
     time, length_first, length_second]
    where the value arrays and fits are in the scaled display units.
    """
    # Preserve the original's in-place float conversion of the caller's list.
    for i in range(0, len(data)):
        data[i] = float(data[i])
    sources = data[0::2]
    readings = data[1::2]
    if sweep == "VOLTAGE":
        voltage_values, current_values = sources, readings
    elif sweep == "CURRENT":
        current_values, voltage_values = sources, readings
    else:
        raise ValueError("sweep must be 'VOLTAGE' or 'CURRENT', got {!r}".format(sweep))

    segments = (slice(0, length_first),
                slice(length_first, length_first + length_second),
                slice(length_first + length_second, None))

    # Segment resistances from first-degree polynomial fits (raw units).
    resistances = []
    for seg in segments:
        if sweep == "VOLTAGE":
            # slope of I vs. V is conductance -> invert for resistance
            resistances.append(1 / numpy.polyfit(voltage_values[seg], current_values[seg], 1)[0])
        else:
            # slope of V vs. I is resistance directly
            resistances.append(numpy.polyfit(current_values[seg], voltage_values[seg], 1)[0])

    # Scale voltages/currents into convenient display units.
    voltage_scale = _display_scale(voltage_values, "Volts")
    voltage_values = numpy.array(voltage_values, dtype = 'float') * voltage_scale[0]
    current_scale = _display_scale(current_values, "Amps")
    current_values = numpy.array(current_values, dtype = 'float') * current_scale[0]

    resistance_scales = [_resistance_scale(r) for r in resistances]
    resistances = [r * s[0] for r, s in zip(resistances, resistance_scales)]

    # Re-fit on the scaled arrays for plotting in display units.
    scaled_fits = []
    for seg in segments:
        if sweep == "CURRENT":
            scaled_fits.append(numpy.polyfit(current_values[seg], voltage_values[seg], 1))
        else:
            scaled_fits.append(numpy.polyfit(voltage_values[seg], current_values[seg], 1))

    time = time_difference
    return [voltage_values, current_values,
            resistances[0], resistances[1], resistances[2],
            scaled_fits[0], scaled_fits[1], scaled_fits[2],
            voltage_scale, current_scale,
            resistance_scales[0], resistance_scales[1], resistance_scales[2],
            time, length_first, length_second]
class sweep_stop():
    """Point-by-point source/measure sweeps on a Keithley 2450 SourceMeter.

    Each method steps through a list of source values one point at a time
    (so a GUI can interrupt the sweep via ``self.continue_check``), records
    the source read-back and the measured quantity, live-updates the
    supplied Qt ``ui`` widgets, and finally hands the collected points to
    the module-level ``analyze()`` for the three-segment resistance fits.
    """
    def voltage_sweep_stop(self, inst, Voltage, voltage_first, voltage_second, voltage_lim, current_lim, wait_time, probe = "4", ui = False, emit = None):
        """Source each value in ``Voltage`` and measure current.

        inst     -- pyvisa handle for the 2450.
        Voltage  -- full list of source voltages (all three segments).
        voltage_first/voltage_second -- first/second segment point lists;
                    only their lengths are used, to split the fits.
        voltage_lim, current_lim -- caller-supplied limits.
                    NOTE(review): current_lim is ignored; the current limit
                    is hard-coded to 1.05 A below.
        wait_time -- settling delay between points, seconds.
        probe    -- '4' for 4-wire (remote sense on) or '2' for 2-wire.
        ui, emit -- Qt widget container and signal emitter for live plots.
                    NOTE(review): ui is used unconditionally in places
                    despite the ``ui != False`` guards elsewhere.
        Returns analyze()'s 16-element result list.
        NOTE(review): if self.continue_check is set False externally the
        while-loop spins forever -- there is no else branch here.
        """
        length_first = len(voltage_first)
        length_second = len(voltage_second)
        length_whole = len(Voltage)
        time_difference = 0
        item = 0
        datalist = []
        x_value = []
        y_value = []
        x_double_value = []
        Time = [0.001]
        if probe == '4':
            contact = 'ON'
        elif probe == '2':
            contact = 'OFF'
        self.continue_check = True
        ui.mplwidget_allplot.figure.clear()
        while True:
            if self.continue_check == True:
                axes_allplot = ui.mplwidget_allplot.figure.add_subplot(111)
                axes_allplot.set_title('Current vs.Voltage')
                axes_allplot.set_ylabel('Current (uA)')
                axes_allplot.set_xlabel('Voltage (mV)')
                axes_real_time = ui.mplwidget_real_time.figure.add_subplot(111)
                axes_real_time.set_title('Voltage vs.Time')
                axes_real_time.set_ylabel('Voltage (mV)')
                axes_real_time.set_xlabel('Time (s)')
                number = 0
                start_time = time.time()
                # Configure the 2450 for a single sourced-voltage point.
                # Select the front-panel terminals for the measurement
                inst.write('ROUT:TERM FRONT')
                # Set the instrument to measure the current
                inst.write('SENS:FUNC "CURR"')
                inst.write("SOUR:VOLT:ILIM 1.05")
                # Set the voltage range to be auto
                inst.write('SOUR:VOLT:RANG:AUTO ON')
                # Set to source voltage
                inst.write('SOUR:FUNC VOLT')
                # Turn on the source read back
                inst.write('SOUR:VOLT:READ:BACK 1')
                # Input the individual voltage to start the measurement
                inst.write("SOUR:VOLT " + str(Voltage[item]))
                inst.write('SENS:CURR:RSEN ' + contact)
                inst.write('OUTP ON')
                # Sourced voltage read-back (plotted in mV).
                voltage = inst.query('READ? "defbuffer1", SOUR')
                x_value.append(float(voltage) * 1E3)
                x_double_value.append(float(voltage) * 1E3)
                x_double_value.append(float(voltage) * 1E3)
                if ui != False:
                    ui.lineEdit_source.setText(str(round(float(voltage)*1000, 3)))
                    ui.label_source_unit.setText('mV')
                datalist.append(voltage)
                # Measured current (plotted in uA).
                current = inst.query('READ? "defbuffer1", READ')
                y_value.append(float(current) * 1E6)
                if ui != False:
                    ui.lineEdit_measurement.setText(str(round(float(current)*1E6, 3)))
                    ui.label_measurement_unit.setText('uA')
                datalist.append(current)
                end_time = time.time()
                time_difference += (end_time - start_time)
                Time.append(time_difference)
                length_whole -= 1
                if length_whole == 0:
                    break
                item += 1
                # Redraw both live plots with the data collected so far.
                ui.mplwidget_allplot.figure.clear()
                axes_allplot = ui.mplwidget_allplot.figure.add_subplot(111)
                axes_allplot.set_title('Current vs.Voltage')
                axes_allplot.set_ylabel('Current (uA)')
                axes_allplot.set_xlabel('Voltage (mV)')
                axes_allplot.plot(x_value, y_value, marker = '.', linestyle = '')
                ui.mplwidget_real_time.figure.clear()
                axes_real_time = ui.mplwidget_real_time.figure.add_subplot(111)
                axes_real_time.set_title('Voltage vs.Time')
                axes_real_time.set_ylabel('Voltage (mV)')
                axes_real_time.set_xlabel('Time (s)')
                axes_real_time.plot(Time, x_double_value, marker = '.', linestyle = '-')
                if emit != None:
                    emit(SIGNAL("plot"))
                time.sleep(float(wait_time))
                Time.append(time_difference + 0.001)
        inst.write("OUTP OFF")
        return_data = analyze(datalist, 'VOLTAGE', time_difference, length_first, length_second)
        return return_data

    def current_sweep_stop(self, inst, Current, current_first, current_second, current_lim, voltage_lim, wait_time, probe = '4', ui = False, emit = None):
        """Source each value in ``Current`` and measure voltage.

        Parameters mirror voltage_sweep_stop (with current and voltage roles
        swapped).  Bug fix: the original referenced ``emit`` without ever
        declaring it, so reaching either ``if emit != None`` raised
        NameError; it is now a backward-compatible keyword parameter.
        NOTE(review): wait_time is accepted but never used here (no settling
        delay between points), and in the else branch below ``number`` is
        read before assignment if continue_check is False on the very first
        iteration.
        """
        length_whole = len(Current)
        length_first = len(current_first)
        length_second = len(current_second)
        item = 0
        time_difference = 0
        datalist = []
        x_value = []
        y_value = []
        if probe == '4':
            contact = 'ON'
        elif probe == '2':
            contact = 'OFF'
        self.continue_check = True
        while True:
            if self.continue_check == True:
                #ui.mplwidget_allplot.figure.clear()
                axes_allplot = ui.mplwidget_allplot.figure.add_subplot(111)
                axes_allplot.plot(x_value, y_value, marker = '.', linestyle = '')
                axes_allplot.set_title('All Points of Voltage vs. Current')
                axes_allplot.set_xlabel('Current (uA)')
                axes_allplot.set_ylabel('Voltage (mV)')
                number = 0
                start_time = time.time()
                # Configure the 2450 for a single sourced-current point.
                # Select the front-panel terminals for the measurement
                inst.write('ROUT:TERM FRONT')
                # Set the instrument to measure the voltage
                inst.write('SENS:FUNC "VOLT"')
                # Set the current range to be auto
                inst.write('SOUR:CURR:RANG:AUTO ON')
                # Set to source current
                inst.write('SOUR:FUNC CURR')
                # Turn on the source read back
                inst.write('SOUR:VOLT:READ:BACK 1')
                # Input the individual current to start the measurement
                inst.write("SOUR:CURR " + str(Current[item]))
                inst.write('SENS:VOLT:RSEN ' + contact)
                inst.write('OUTP ON')
                # Sourced current read-back (plotted in uA).
                current = inst.query('READ? "defbuffer1", SOUR')
                x_value.append(round(float(current) * 1E6, 3))
                if ui != False:
                    ui.lineEdit_source.setText(str(round(float(current)*1E6, 3)))
                    ui.label_source_unit.setText('uA')
                datalist.append(current)
                # Measured voltage (plotted in mV).
                voltage = inst.query('READ? "defbuffer1", READ')
                y_value.append(round(float(voltage) * 1E3, 3))
                if ui != False:
                    ui.lineEdit_measurement.setText(str(round(float(voltage)*1E3, 3)))
                    ui.label_measurement_unit.setText('mV')
                datalist.append(voltage)
                end_time = time.time()
                time_difference += (end_time - start_time)
                length_whole -= 1
                if length_whole == 0:
                    break
                item += 1
                ui.mplwidget_allplot.figure.clear()
                axes_allplot = ui.mplwidget_allplot.figure.add_subplot(111)
                axes_allplot.plot(x_value, y_value, marker = '.', linestyle = '')
                axes_allplot.set_title('All Points of Voltage vs. Current')
                axes_allplot.set_xlabel('Current (uA)')
                axes_allplot.set_ylabel('Voltage (mV)')
                if emit != None:
                    emit(SIGNAL("plot"))
            else:
                # Sweep paused: redraw once, then idle until resumed.
                if number == 0:
                    ui.mplwidget_allplot.figure.clear()
                    axes_allplot = ui.mplwidget_allplot.figure.add_subplot(111)
                    axes_allplot.plot(x_value, y_value, marker = '.', linestyle = '')
                    axes_allplot.set_title('All Points of Voltage vs. Current')
                    axes_allplot.set_xlabel('Current (uA)')
                    axes_allplot.set_ylabel('Voltage (mV)')
                    #ui.mplwidget_allplot.draw()
                    if emit != None:
                        emit(SIGNAL("plot"))
                    number += 1
        inst.write("OUTP OFF")
        return_data = analyze(datalist, 'CURRENT', time_difference, length_first, length_second)
        return return_data
| [
"qw68@duke.edu"
] | qw68@duke.edu |
6c994eefb57372a1ba505e2542327a5df12e8340 | 84968116f79f6de461815134bdd6a516e5d5137f | /Challenge_3.py | 4aaa279d19a78acd942103400b3ba0ad2c0e7cbb | [
"Unlicense"
] | permissive | BradRiley15/Chapter_6 | ba51150c2ee6dcfa47fe247faa59a3312aa6c9e9 | dfaf1d378e6cb41b542487867f3a209e44c7d60b | refs/heads/master | 2021-01-21T09:49:45.664649 | 2014-12-09T17:57:11 | 2014-12-09T17:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # Guess My Number
#
# The computer picks a random number between 1 and 100
# The player tries to guess it and the computer lets
# the player know if the guess is too high, too low
# or right on the money
import random
print("\tWelcome to 'Guess My Number'!")
print("\nI'm thinking of a number between 1 and 100.")
print("Try to guess it in as few attempts as possible.\n")
# set the initial values
the_number = random.randint(1, 100)  # secret number the player must find
question = 'Take a guess: '          # prompt reused for every guess
tries = 1                            # guess counter (updated as a global by main)
low = 1                              # inclusive lower bound for valid guesses
high = 101                           # exclusive upper bound (range() semantics)
def ask_number(question, low, high, step = 1):
    """Keep prompting until the player enters an integer in [low, high)."""
    while True:
        response = int(input(question))
        if response in range(low, high):
            return response
# Read the player's first guess; later guesses are read inside main().
guess = ask_number(question, low, high, step = 1)
# guessing function loop
def main(the_number, guess):
    """Hint loop: tell the player higher/lower until the_number is guessed.

    Relies on the module-level ``question``, ``low`` and ``high`` and on the
    global ``tries`` counter.

    Bug fix: the original recursed into main() from inside the while loop,
    so after the correct guess was entered every outer recursion level
    re-entered its own loop and prompted for further guesses.  The while
    loop alone is sufficient.
    """
    global tries
    while guess != the_number:
        if guess > the_number:
            print("Lower...")
        else:
            print("Higher...")
        tries += 1
        guess = ask_number(question, low, high, step = 1)
# Run one game, then report the result.
main(the_number, guess)
print("You guessed it! The number was", the_number)
print("And it only took you", tries, "tries!\n")
input("\n\nPress the enter key to exit.")  # keep the console window open
| [
"briley@stcharlessd.org"
] | briley@stcharlessd.org |
af141e3ca97b2e8d1bed73aa747531ee5683400d | 638d957ed8f3c5a054752510e5d01b2bd86adc25 | /cluster/util/PropertyUtil.py | d3e5d0ef79bc87a4faf90cf76a45ac1f69e53e68 | [] | no_license | tuan08/datatp | 3bfed7c6175fe3950659443ebc1a580b510e466a | 7d7ff6bed36199627b143d37dd254cdb6dbf269c | refs/heads/master | 2020-04-12T06:22:53.010673 | 2016-12-07T03:19:53 | 2016-12-07T03:19:53 | 62,194,565 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | class PropertyUtil(object):
  @staticmethod
  def traverseAndResolvePropertyExpression(obj, properties):
    # Recursively walk obj's attribute tree and replace every string
    # containing a ${key} expression with its value from `properties`.
    # Python 2 code: uses `long` and `basestring`.
    # Scalars carry no attributes to resolve.
    if isinstance(obj, (int, long, float, complex, bool, basestring)):
      return
    for name in obj.__dict__.keys():
      val = getattr(obj, name);
      if type(val) in (int, long, float, complex, bool):
        continue
      elif isinstance(val, (basestring)):
        # String attribute: substitute in place.
        val = PropertyUtil.resolvePropertyExpression(val, properties)
        setattr(obj, name, val)
      elif isinstance(val, list):
        # List attribute: resolve string elements, recurse into objects,
        # then rebuild the list in place so external references stay valid.
        valList = val
        newList = [];
        for sel in valList:
          if isinstance(sel, (basestring)):
            sel = PropertyUtil.resolvePropertyExpression(sel, properties)
          else:
            PropertyUtil.traverseAndResolvePropertyExpression(sel, properties)
          newList.append(sel)
        del valList[:]
        valList.extend(newList)
      else:
        # Nested object: recurse.
        PropertyUtil.traverseAndResolvePropertyExpression(val, properties)
  @staticmethod
  def resolvePropertyExpression(obj, properties):
    # Replace every '${key}' occurrence in the string `obj` with the
    # corresponding attribute value of `properties`; non-strings pass
    # through unchanged.  Python 2 code (`basestring`).
    if not isinstance(obj, basestring): return obj
    for key in properties.__dict__.keys():
      # Stop early once no '${' remains to substitute.
      if obj.find('${') >= 0:
        exp = '${' + key + '}'
        obj = obj.replace(exp, getattr(properties, key))
      else:
        break
    return obj
| [
"tuan08@gmail.com"
] | tuan08@gmail.com |
d39c0d09a7cdb6fc684b67bd725ff45268bda5d2 | c2e1531e81ea8dbc4b611578e7c88657fcee6f28 | /connect.py | f974cee08bfdd50d52ce984ec7503c335c174a98 | [] | no_license | edumigsoft/level_sensor_8 | 93035dc1e89e1b58382b0fe5fdac765d94ce6536 | ddc848fc6104ca347856a8aa6e50b0704b5d1204 | refs/heads/master | 2020-05-15T14:37:55.936362 | 2019-04-20T01:05:30 | 2019-04-20T01:05:30 | 182,343,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | #
#
#
import const
import network
import time
def do_connect():
    """Connect the station (STA) WiFi interface using the credentials in
    const.WIFI_SSID / const.WIFI_PASS, print the interface config, and
    return sta_if.isconnected().

    NOTE(review): `network` and `time.sleep_ms` are MicroPython-specific --
    presumably this runs on an ESP board; confirm target platform.
    """
    sta_if = network.WLAN(network.STA_IF)
    #cont = 10
    if not sta_if.isconnected():
        print("Connecting to network")
        sta_if.active(True)
        sta_if.connect(const.WIFI_SSID, const.WIFI_PASS)
        # Busy-wait until association completes.
        # NOTE(review): loops forever on bad credentials; the commented-out
        # 'cont' countdown below was an abandoned retry limit.
        while not sta_if.isconnected():
            #if cont == 0:
            #    cont = -1
            #    break;
            #cont--
            time.sleep_ms(5)
            #print('.')
    #if cont == -1:
    #print("\r\n")
    print("Network config: " + str( sta_if.ifconfig()))
    print("\r\n")
    return sta_if.isconnected()
| [
"asa.sousa@gmail.com"
] | asa.sousa@gmail.com |
65bae85e5f211f68dac062b36b9bdcdef73b06a0 | c84fcf07fd247a4db23ee3265be9d4dd07898cbd | /python/ossid/models/dtoid/anchors.py | 3af0efba6b0bbd9f10677628b3512261dacbae95 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | r-pad/OSSID_code | df526724a59e6a99be089155a184a29301688f99 | b80d429a3fa4464a69a78dc2112d52b4f05d0dfe | refs/heads/master | 2023-05-23T12:40:09.736043 | 2022-04-03T17:37:52 | 2022-04-03T17:37:52 | 461,322,063 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,167 | py | import numpy as np
import torch
import torch.nn as nn
class Anchors(nn.Module):
    """
    Forward pass will return all anchor boxes for one batch.

    Configuration (defaults follow the FPN/RetinaNet convention):
      pyramid_levels -- feature pyramid levels to generate anchors for
      strides        -- pixel stride of each level (default 2**level)
      sizes          -- base anchor size per level (default 2**(level + 2))
      ratios         -- aspect ratios per cell
      scales         -- scale multipliers per cell
    """
    def __init__(self, pyramid_levels=None, strides=None, sizes=None, ratios=None, scales=None):
        super(Anchors, self).__init__()
        self.pyramid_levels = pyramid_levels
        self.strides = strides
        self.sizes = sizes
        self.ratios = ratios
        self.scales = scales

        # Fill in defaults; strides/sizes are derived from pyramid_levels,
        # so the pyramid_levels default must be applied first (it is).
        if pyramid_levels is None:
            self.pyramid_levels = [3, 4, 5, 6, 7]
        if strides is None:
            self.strides = [2 ** x for x in self.pyramid_levels]
        if sizes is None:
            self.sizes = [2 ** (x + 2) for x in self.pyramid_levels]
        if ratios is None:
            self.ratios = np.array([0.5, 1, 2])
        if scales is None:
            self.scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])

    def forward(self, image_shapes):
        # image_shapes: per-level feature map (rows, cols) sizes.
        # compute anchors over all pyramid levels
        all_anchors = np.zeros((0, 4)).astype(np.float32)

        for idx, p in enumerate(self.pyramid_levels):
            anchors         = generate_anchors(base_size=self.sizes[idx], ratios=self.ratios, scales=self.scales)
            shifted_anchors = shift(image_shapes[idx], self.strides[idx], anchors)
            all_anchors     = np.append(all_anchors, shifted_anchors, axis=0)

        # Add a leading batch dimension of 1.
        # NOTE(review): .cuda() hard-codes GPU placement -- this module
        # cannot run on a CPU-only host as written.
        all_anchors = np.expand_dims(all_anchors, axis=0)

        return torch.from_numpy(all_anchors.astype(np.float32)).cuda()
def generate_anchors(base_size=16, ratios=None, scales=None):
    """
    Enumerate reference anchor boxes for a single feature-map cell.

    One box is produced per (ratio, scale) pair, each with area
    (base_size * scale)**2, centred on the origin, returned as
    (x1, y1, x2, y2) rows in an array of shape
    (len(ratios) * len(scales), 4).
    """
    if ratios is None:
        ratios = np.array([0.5, 1, 2])

    if scales is None:
        scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])

    # Lay out the (ratio, scale) grid: scales cycle fastest, ratios slowest.
    side = base_size * np.tile(scales, len(ratios))
    aspect = np.repeat(ratios, len(scales))

    # Start from square boxes, then reshape each to its aspect ratio while
    # preserving its area.
    areas = side * side
    widths = np.sqrt(areas / aspect)
    heights = widths * aspect

    boxes = np.zeros((len(ratios) * len(scales), 4))
    boxes[:, 0] = -widths * 0.5
    boxes[:, 1] = -heights * 0.5
    boxes[:, 2] = widths * 0.5
    boxes[:, 3] = heights * 0.5
    return boxes
def compute_shape(image_shape, pyramid_levels):
    """Return the feature-map (rows, cols) size at each pyramid level.

    Level p downsamples the image by 2**p, rounding up.

    :param image_shape: image shape; only the first two entries are used
    :param pyramid_levels: iterable of integer pyramid levels
    :return: list of length-2 integer arrays, one per level
    """
    spatial = np.array(image_shape[:2])
    return [(spatial + 2 ** level - 1) // (2 ** level) for level in pyramid_levels]
def anchors_for_shape(
    image_shape,
    pyramid_levels=None,
    ratios=None,
    scales=None,
    strides=None,
    sizes=None,
    shapes_callback=None,
):
    """Compute every anchor box for an image of ``image_shape``.

    ``pyramid_levels``, ``strides`` and ``sizes`` must be supplied in
    parallel; ``shapes_callback`` is accepted for interface compatibility
    but unused (as in the original).  Returns an (N, 4) array of
    (x1, y1, x2, y2) anchors concatenated over all levels.
    """
    level_shapes = compute_shape(image_shape, pyramid_levels)

    # compute anchors over all pyramid levels
    collected = np.zeros((0, 4))
    for idx, p in enumerate(pyramid_levels):
        cell_anchors = generate_anchors(base_size=sizes[idx], ratios=ratios, scales=scales)
        placed = shift(level_shapes[idx], strides[idx], cell_anchors)
        collected = np.concatenate((collected, placed), axis=0)

    return collected
def shift(shape, stride, anchors):
shift_x = (np.arange(0, shape[1]) + 0.5) * stride
shift_y = (np.arange(0, shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel()
)).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = anchors.shape[0]
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
return all_anchors | [
"georgegu1997@gmail.com"
] | georgegu1997@gmail.com |
584e264356b6fd4ecd85aae9114043516fb0c2cd | da816fd4e6759d9e71b4ed0e5bd5e50e9a2717fc | /main.spec | 5ce28285703559dd6a949d00cb7ca74aea914e2d | [] | no_license | bjfishman/SENCO-AI | 51b33f238a4a25f7f725036bcb6145abe106e7cc | 64b6783d064a4d8e4ebaa309f0537c63c5d6beef | refs/heads/main | 2023-06-05T09:35:21.144403 | 2021-06-25T16:38:38 | 2021-06-25T16:38:38 | 380,298,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['GPT3_test_main.py'],
pathex=['A:\\Surfdrive\\Shared\\Fishman, Ben\\Working folder\\03_Prototype'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='GPT3_test_main',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True , icon='icon2.ico')
| [
"86485504+bjfishman@users.noreply.github.com"
] | 86485504+bjfishman@users.noreply.github.com |
2e878fe21ec78d6737e56f3b4727b5c424bd919d | 7556d479f6edce45437dba96ab3b8a2becf72f92 | /oasis.py | ba76191896be707fdbbb1dfc248744e0c85f6197 | [] | no_license | kckennylau/Oasis | 593901da4dc8f5f018925d841db4b4e18f023ea7 | 8af972b5835040d0a213696690070f38b5b61b3d | refs/heads/master | 2020-12-14T07:19:37.127185 | 2016-08-31T13:17:27 | 2016-08-31T13:17:27 | 67,039,988 | 0 | 0 | null | 2016-08-31T13:29:57 | 2016-08-31T13:29:57 | null | UTF-8 | Python | false | false | 8,043 | py | import ast
import argparse
import sys
import math
from primes import *
from commands import *
code = ""
elements = []
selector = []
sys.setrecursionlimit(5000)
def func_a(n):
stack_len = 0
if DEBUG and selector:
print("selector >> " + str(selector))
stack = []
def pop_stack(num_s=1, n2=n, s2=stack):
if s2:
return s2.pop()
else:
if DEBUG:
print("using a(" + str(n2 - num_s) + ") = " + str(func_a(n2 - num_s)))
return func_a(n2 - num_s)
result = None
has_calculated = False
try:
has_calculated = elements[n] is not None
except:
has_calculated = False
if has_calculated:
if DEBUG: print("already initialized: " + str(elements[n]) + " at n = " + str(n))
return elements[n]
if len(elements) != 0 and n > len(elements) + 100:
func_a(n - 100)
if DEBUG:
print("\n --- a(" + str(n) + ") --- ")
pointer_position = -1
while pointer_position < len(code) - 1:
pointer_position += 1
command = code[pointer_position]
if DEBUG: print("command > " + command)
stack_len = len(stack)
if command == "+":
b = pop_stack()
a = pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "+"))
elif command == "-":
b = pop_stack()
if stack:
a = pop_stack()
else:
a, b = b, pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "-"))
elif command == "*":
b = pop_stack()
a = pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "*"))
elif command == "/":
b = pop_stack()
a = pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "/"))
elif command == "m":
b = pop_stack()
a = pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "**"))
elif command == "%":
b = pop_stack()
a = pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "%"))
elif command == "\u00f7":
b = pop_stack()
a = pop_stack(2 - stack_len)
stack.append(regular_arithmetic(a, b, "//"))
elif command == "\u00b2":
a = pop_stack()
stack.append(single_arithmetic(a, "** 2"))
elif command == ">":
a = pop_stack()
stack.append(single_arithmetic(a, "+ 1"))
elif command == "<":
a = pop_stack()
stack.append(single_arithmetic(a, "- 1"))
elif command == "!":
a = pop_stack()
stack.append(math.factorial(a))
elif command == "n":
stack.append(n)
elif command == "a":
x = pop_stack()
stack.append(func_a(x))
elif command == "b":
stack.append(func_a(n - 1))
elif command == "c":
stack.append(func_a(n - 2))
elif command == "d":
stack.append(func_a(n - 3))
elif command == "e":
x = pop_stack()
stack.append(func_a(n - x))
elif command == "j":
a = pop_stack()
stack.append(largest_divisor(a))
elif command == "p":
a = pop_stack()
stack.append(is_prime(a))
elif command == "q":
a = pop_stack()
if -1 < a < 9999:
stack.append(primes_100000[a])
else:
if a < 0:
stack.append(0)
else:
current_num = 104729
prime_count = 10000
while prime_count < a + 1:
current_num += 2
if is_prime(current_num):
prime_count += 1
stack.append(current_num)
elif command == "s":
a = pop_stack()
b = pop_stack(2 - stack_len)
stack.append(b)
stack.append(a)
elif command == "x":
a = pop_stack()
stack.append(single_arithmetic(a, "* 2"))
elif command == "y":
a = pop_stack()
stack.append(single_arithmetic(a, "* 3"))
elif command == "z":
a = pop_stack()
stack.append(single_arithmetic(a, "* 4"))
elif command == "\"":
temp_string = ""
temp_string_2 = ""
temp_position = pointer_position
while temp_position < len(code) - 1:
temp_position += 1
try:
current_command = code[temp_position]
except:
break
if current_command == "\"":
break
elif current_command == "\u00ff":
temp_string += str(pop_stack(1))
pointer_position += 1
else:
temp_string += current_command
pointer_position += 1
pointer_position += 1
stack.append(temp_string)
elif command == "\u00ab":
a = pop_stack()
stack.append(single_arithmetic(a, "- 2"))
elif command == "\u00bb":
a = pop_stack()
stack.append(single_arithmetic(a, "+ 2"))
elif command.isnumeric():
temp_number = ""
temp_number += command
temp_position = pointer_position
while temp_position < len(code) - 1:
temp_position += 1
try:
current_command = code[temp_position]
except:
break
if is_digit_value(current_command):
temp_number += current_command
pointer_position += 1
else:
break
stack.append(int(temp_number))
if DEBUG:
print("stack >> " + str(stack))
if stack:
if DEBUG: print(" --- fin ---")
while True:
try:
elements[n] = stack[-1]
break
except:
elements.append(None)
return stack[-1]
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', help="Debug mode", action="store_true")
parser.add_argument('-s', '--safe', help="Safe mode", action="store_true")
parser.add_argument('-c', '--cp1252', help="Encode from CP-1252", action="store_true")
parser.add_argument('-t', '--time', help="Time the program", action="store_true")
parser.add_argument("program_path", help="Program path", type=str)
args, num = parser.parse_known_args()
filename = args.program_path
DEBUG = args.debug
SAFE_MODE = args.safe
ENCODE_CP1252 = args.cp1252
TIME_IT = args.time
if ENCODE_CP1252:
code = open(filename, "r", encoding="cp1252").read()
else:
code = open(filename, "r", encoding="utf-8").read()
code = code.replace("T", "10")
code = code.replace("U", "00")
code = code.replace("V", "11")
code = code.replace("W", "000")
code = code.replace("X", "01")
while is_digit_value(code[-1]) or code[-1] == "N":
if code[-1] == "N":
elements.append(None)
else:
elements.append(int(code[-1]))
code = code[:-1]
try:
n_num = int(num[0])
except:
try:
n_num = str(num[0])
selector = range(0, len(n_num))
except:
n_num = 0
if TIME_IT:
import time
start_time = time.time()
print(func_a(n_num))
end_time = time.time()
print()
print("Elapsed: " + str(end_time - start_time) + " seconds")
else:
print(func_a(n_num)) | [
"Adriandmen@users.noreply.github.com"
] | Adriandmen@users.noreply.github.com |
0f024db601cead40864a278ca35e5a1dd113bda3 | 07feb97d13fbb77ccebf42fd5995fbcc0845846f | /CK_CLJK/AZZLGL/test_AZZLGL.py | 64107abdfc052328d97121c9b78dd5b906096973 | [] | no_license | DebugChen/Web-UI | da80be200c6722b189533106dbd9617d6b685266 | df3920e97047b94ddcb948e9a70ccb6aa2b803cf | refs/heads/master | 2023-03-05T07:13:33.358133 | 2021-02-19T09:28:29 | 2021-02-19T09:28:29 | 340,285,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,276 | py | import os
from BeautifulReport import BeautifulReport
from selenium import webdriver
import time
import unittest
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
class MyTestCase_CLJK_AZZLGL(unittest.TestCase):
'''车辆监控平台-安装资料管理模块-测试集'''
# 自定义截图方法
def save_img(self, img_name):
"""
传入一个img_name, 并存储到默认的文件路径下
:param img_name:
:return:
"""
# os.path.abspath(r"G:\Test_Project\img")截图存放路径
self.dr.get_screenshot_as_file('{}/{}.png'.format(os.path.abspath(
r"E:\UIZDH\CK_CLJK\AZZLGL\img"), img_name))
# 启动测试用例方法
@classmethod
def setUpClass(cls):
# 启动Chrome浏览器
cls.dr=webdriver.Chrome()
# 最大化浏览器
cls.dr.maximize_window()
# 输入登录网址
cls.dr.get("https://ebeta.starlinkware.com/")
# 输入用户名
cls.dr.find_element_by_id("username").send_keys("chenkai")
time.sleep(1)
# 输入密码
cls.dr.find_element_by_id("password").send_keys("Xd0020110")
time.sleep(1)
# 输入验证码
cls.dr.find_element_by_id("code").send_keys("1234")
time.sleep(1)
# 点击登录按钮
cls.dr.find_element_by_xpath('//input[@value="登录"]').click()
time.sleep(2)
# 鼠标悬停在“客户资料管理”链接上
link = cls.dr.find_element_by_xpath(
'//div[@class="happy-scroll-content"]/div/ul/li[2]')
ActionChains(cls.dr).move_to_element(link).perform()
time.sleep(2)
# 鼠标悬停在“安装资料管理”链接上
link2 = cls.dr.find_element_by_xpath('/html/body/div[2]/ul/li[3]/ul')
ActionChains(cls.dr).move_to_element(link2).perform()
time.sleep(2)
# 点击安装资料管理菜单
cls.dr.find_element_by_xpath('/html/body/div[2]/ul/li[3]/ul').click()
time.sleep(2)
# 关闭测试用例方法
@classmethod
def tearDownClass(cls):
# # 报错截图
# cls.dr.get_screenshot_as_file(
# 'C:\\Users\starlinkware\PycharmProjects\\CK_CLJK\AZZLGL\image\AZZLGL_Cx.png')
# 关闭浏览器
cls.dr.quit()
# @unittest.skip('跳过用例')
# 装饰器,当你没有报错也要截图的话,那么你需要在用例里面调用save_img('001')方法
@BeautifulReport.add_test_img('test_CLJK_AZZLGL_Cx')
def test_CLJK_AZZLGL_Cx(self):
'''安装资料管理模块-查询测试用例'''
# #按IMEI精确查询
# 输入IMEI号
self.dr.find_element_by_xpath(
'//input[@placeholder="请输入需要搜索IMEI号"]').send_keys('861097041206321')
time.sleep(1)
# 点击查询按钮
self.dr.find_element_by_xpath('//button[@title="搜索"]').click()
time.sleep(3)
# 获取页面标签,保存到变量
div = self.dr.find_element_by_xpath(
'//table[@class="el-table__body"]/tbody/tr[1]/td[2]/div').text
# 判断预期结果与实际结果是否一致
self.assertEqual(div, '861097041206321', '按IMEI查询安装信息失败')
time.sleep(2)
# 点击清除按钮
self.dr.find_element_by_xpath('//button[@title="清空搜索条件"]').click()
time.sleep(3)
# #按IMEI模糊查询
# 输入IMEI号
self.dr.find_element_by_xpath(
'//input[@placeholder="请输入需要搜索IMEI号"]').send_keys('182166')
time.sleep(1)
# 点击查询按钮
self.dr.find_element_by_xpath('//button[@title="搜索"]').click()
time.sleep(3)
# 获取页面标签,保存到变量
div = self.dr.find_element_by_xpath(
'//table[@class="el-table__body"]/tbody/tr[1]/td[2]/div').text
# 判断预期结果与实际结果是否一致
self.assertEqual(div, '808029900182166', '按IMEI查询安装资料失败')
time.sleep(2)
# 点击清除按钮
self.dr.find_element_by_xpath('//button[@title="清空搜索条件"]').click()
time.sleep(3)
# #按安装点精确查询
# 输入安装点
self.dr.find_element_by_xpath(
'//input[@placeholder="请输入需要搜索安装点"]').send_keys('安装测试')
time.sleep(1)
# 点击查询按钮
self.dr.find_element_by_xpath('//button[@title="搜索"]').click()
time.sleep(3)
# 点击查看详情
self.dr.find_element_by_xpath(
'//table[@class="el-table__body"]/tbody/tr[1]/td[6]/div/a').click()
time.sleep(2)
# 获取页面标签,保存到变量
div = self.dr.find_element_by_xpath(
'//*[@id="app"]/div/div[6]/div[3]/div/div[2]/div[1]/div[4]/div[2]/div').text
# 判断预期结果与实际结果是否一致
self.assertEqual(div, '安装测试', '按安装点查询安装资料失败')
time.sleep(2)
# 关闭窗口
self.dr.find_element_by_xpath(
'//div[@aria-label="详情"]/div[2]/div[2]/div/div/button').click()
time.sleep(2)
# 点击清除按钮
self.dr.find_element_by_xpath('//button[@title="清空搜索条件"]').click()
time.sleep(3)
# #按安装点模糊查询
# 输入安装点
self.dr.find_element_by_xpath(
'//input[@placeholder="请输入需要搜索安装点"]').send_keys('测试')
time.sleep(1)
# 点击查询按钮
self.dr.find_element_by_xpath('//button[@title="搜索"]').click()
time.sleep(4)
# 点击查看详情
self.dr.find_element_by_xpath(
'//table[@class="el-table__body"]/tbody/tr[1]/td[6]/div/a').click()
time.sleep(2)
# 获取页面标签,保存到变量
div = self.dr.find_element_by_xpath(
'//*[@id="app"]/div/div[6]/div[3]/div/div[2]/div[1]/div[4]/div[2]/div').text
# 判断预期结果与实际结果是否一致
self.assertEqual(div, '安装测试', '按安装点查询安装资料失败')
time.sleep(2)
# 关闭窗口
self.dr.find_element_by_xpath(
'//div[@aria-label="详情"]/div[2]/div[2]/div/div/button').click()
time.sleep(2)
# 点击清除按钮
self.dr.find_element_by_xpath('//button[@title="清空搜索条件"]').click()
time.sleep(3)
# #按安装时间查询
# 点击安装时间按钮
self.dr.find_element_by_xpath('//input[@placeholder="选择日期"]').click()
time.sleep(1)
# 点击月份
self.dr.find_element_by_xpath('//div[@class="el-date-picker__header"]/span[2]').click()
time.sleep(2)
# 选中六月
self.dr.find_element_by_xpath(
'//table[@class="el-month-table"]/tbody/tr[2]/td[2]/div/a').click()
time.sleep(2)
# 选中十七日
self.dr.find_element_by_xpath(
'//table[@class="el-date-table"]/tbody/tr[4]/td[4]/div/span').click()
time.sleep(2)
# 点击查询按钮
self.dr.find_element_by_xpath('//button[@title="搜索"]').click()
time.sleep(4)
# # 获取页面标签,保存到变量
# div = self.dr.find_element_by_xpath(
# '//table[@class="el-table__body"]/tbody/tr[1]/td[3]/div').text
# # 判断预期结果与实际结果是否一致
# self.assertEqual(div, '2020-06-17 10:33:02', '按安装时间查询安装资料失败')
# time.sleep(2)
# 点击清除按钮
self.dr.find_element_by_xpath('//button[@title="清空搜索条件"]').click()
time.sleep(3)
@unittest.skip('跳过用例')
# 装饰器,当你没有报错也要截图的话,那么你需要在用例里面调用save_img('001')方法
@BeautifulReport.add_test_img('test_CLJK_AZZLGL_Fy')
def test_CLJK_AZZLGL_Fy(self):
'''安装资料管理模块-分页显示测试用例'''
# #按50条/页
# 点击分页显示
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').click()
time.sleep(2)
# 下一页
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.DOWN)
time.sleep(2)
# 下一页
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.DOWN)
time.sleep(2)
# 回撤(按50条分页显示)
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.ENTER)
time.sleep(5)
# #按200条/页
# 点击分页显示
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').click()
time.sleep(2)
# 下一页
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.DOWN)
time.sleep(2)
# 回撤(按200条分页显示)
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.ENTER)
time.sleep(5)
# #按500条/页
# 点击分页显示
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').click()
time.sleep(2)
# 下一页
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.DOWN)
time.sleep(2)
# 回撤(按500条分页显示)
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.ENTER)
time.sleep(7)
# #按20条/页
# 点击分页显示
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').click()
time.sleep(2)
# 下一页
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.DOWN)
time.sleep(2)
# 回撤(按20条分页显示)
self.dr.find_element_by_xpath('//input[@placeholder="请选择"]').send_keys(Keys.ENTER)
time.sleep(3)
# #上下页切换操作
# 点击下一页
self.dr.find_element_by_xpath('//div[@class="pagingChild"]/div/div/button[2]').click()
time.sleep(3)
# 点击上一页
self.dr.find_element_by_xpath('//div[@class="pagingChild"]/div/div/button[1]').click()
time.sleep(3)
# 点击任意一页(5页)
self.dr.find_element_by_xpath('//div[@class="pagingChild"]/div/div/ul/li[5]').click()
time.sleep(3)
# 获取到输入页码框,并清理掉当前页码号
self.dr.find_element_by_xpath(
'//div[@class="pagingChild"]/div/div/span/div/input').send_keys(Keys.BACK_SPACE)
time.sleep(2)
# 获取到输入页码框,并输入页码号“8”
self.dr.find_element_by_xpath(
'//div[@class="pagingChild"]/div/div/span/div/input').send_keys('8')
time.sleep(2)
# 点击空白处,切换到当前页码
self.dr.find_element_by_xpath('//div[@class="el-pagination is-background"]').click()
time.sleep(3)
# 点击清除按钮
self.dr.find_element_by_xpath('//button[@title="清空搜索条件"]').click()
time.sleep(3)
# @unittest.skip('跳过用例')
# 装饰器,当你没有报错也要截图的话,那么你需要在用例里面调用save_img('001')方法
@BeautifulReport.add_test_img('test_CLJK_AZZLGL_Ckxq')
def test_CLJK_AZZLGL_Ckxq(self):
'''安装资料管理模块-查看详情测试用例'''
# 查看详情
self.dr.find_element_by_link_text('查看详情').click()
time.sleep(2)
# 获取页面标签,保存到变量
div = self.dr.find_element_by_xpath(
'//*[@id="app"]/div/div[6]/div[3]/div/div[1]/span').text
# 判断预期结果与实际结果是否一致
self.assertEqual(div, '详情', '查看详情失败')
time.sleep(2)
# 关闭窗口
self.dr.find_element_by_xpath(
'//div[@aria-label="详情"]/div[2]/div[2]/div/div/button').click()
time.sleep(2)
if __name__=='__main__':
# unittest.main()
# 组装测试套件
testunit = unittest.TestSuite()
# 添加测试用例
testunit.addTest(MyTestCase_CLJK_AZZLGL('test_CLJK_AZZLGL_Cx'))
testunit.addTest(MyTestCase_CLJK_AZZLGL('test_CLJK_AZZLGL_Fy'))
testunit.addTest(MyTestCase_CLJK_AZZLGL('test_CLJK_AZZLGL_Ckxq'))
# 定义测试报告
runner = BeautifulReport(testunit)
runner.report(filename='车辆监控平台-安装资料管理模块-测试报告',
description='车辆监控平台-设备资料管理模块-测试用例执行情况',
report_dir='report',
theme='theme_default')
# # 定义测试报告存放路径
# # fp = open('../KHXXGL/image1/result2.html', 'wb')
# # 定义测试报告
# # runner = HTMLTestRunner(stream=fp, title='车辆监控平台UI自动化测试报告', description='客户信息管理模块测试用例执行情况')
# # 执行测试用例
# runner = unittest.TextTestRunner()
# runner.run(testunit)
| [
"usernameXXX@gmail.com"
] | usernameXXX@gmail.com |
14efb8cee258c21c5bcdc133aee730b69ced1881 | e9e8dc70f8d31b954719b278d2b8946cef0292ec | /Practice/Python/set_union.py | ec1d67815113552ded6f4d26da342a3969ed744e | [] | no_license | AriyanStClair/Hackerrank-Solutions | b280c328e981c0d873859cb5ad65c0725b631733 | 80547f52477b5db1d66d1922c9fa2bdc28ca0544 | refs/heads/master | 2021-06-21T06:35:28.163651 | 2021-03-16T20:28:05 | 2021-03-16T20:28:05 | 193,166,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # The students of District College have subscriptions to English and French newspapers. Some students have subscribed only to English, some have subscribed to only French and some have subscribed to both newspapers.
#You are given two sets of student roll numbers. One set has subscribed to the English newspaper,
# and the other set is subscribed to the French newspaper.
# The same student could be in both sets. Your task is to find the total number of students who have subscribed to at least one newspaper.
# Get input
e = int(input()) # number of students that subscribed to english paper
english = set(map(int,input().split())) # roll numbers of english subscribers
f = int(input()) # number of students that subscribed to french paper
french = set(map(int,input().split())) # roll numbers of french subscribers
union = len(english.union(french)) # number of students in total
print(union)
| [
"noreply@github.com"
] | noreply@github.com |
b1fb1eba5ab3fd2d6e6b1b318459347f3f6d49d6 | a3c4d13e0ce264f86632733a7e67d047e49f1e01 | /uri.py | f33c4b5b56f8e4220386d74501b5bd649a3ea3a5 | [] | no_license | pmbhumkar/cowin-pushbullet | dd5ac46af92f589b1fe4dfd805d4813e55e6f4de | 92043652ecf99bdf737596c1d3ec556cf7388c54 | refs/heads/master | 2023-04-25T11:07:50.160077 | 2021-05-06T12:09:02 | 2021-05-06T12:09:02 | 363,893,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | class URI(object):
calendar_by_pin = "/v2/appointment/sessions/public/calendarByPin" | [
"Pravin.Bhumkar@emc.com"
] | Pravin.Bhumkar@emc.com |
00bda78c7c91a7658095ea1e56b4c40bc19a33c3 | d491d39a96974fe4f56a62f23506ecb60e6c3c98 | /utils/visual.py | 3dc0fad5102a956158debfc07e13f1f5fd1ba5b8 | [] | no_license | innerlee/ganbase | a4689e9cd8f12cf6b6dd573d00f3ad18654e078a | 77b640a15512f6cc31da673e46943c5678a02779 | refs/heads/master | 2021-05-02T05:44:52.452277 | 2019-06-20T09:22:43 | 2019-06-20T09:22:43 | 120,847,644 | 2 | 2 | null | 2019-06-23T05:16:37 | 2018-02-09T02:47:55 | Python | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
import numpy as np
def show_visdom_line(vis, inputs, legend, win=1, title='loss over time', xlabel='minibatch', ylabel='loss'):
if isinstance(inputs, list):
inputs = np.array(inputs)
y_axis = inputs
x_axis = np.arange(y_axis.shape[0])
vis.line(y_axis, x_axis, win=win, opts={
'title': title,
'legend': legend,
'xlabel': xlabel,
'ylabel': ylabel
})
| [
"xinhang.leng@gmail.com"
] | xinhang.leng@gmail.com |
4b1c156a5fbd8b1083a31472220fdd8c0b7d4e3a | cc6e1cce2f0d7fa8eb16f2dc3e90d60575aeac66 | /uploader/models.py | 1671ef86d98332e6ced4177a5d9084b8f038ada0 | [] | no_license | andysitu/p_site | 84bd0fa600593a91ea9f67ca9460e0fa4b633049 | 257386bdf792ea867dbbd9905c7245695ab55a6b | refs/heads/master | 2023-06-21T16:30:21.423414 | 2019-06-26T19:21:56 | 2019-06-26T19:21:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | from django.db import models
import django, os
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
from django.conf import settings
from django.contrib.auth.models import User
class UFileManager(models.Model):
name = models.CharField(max_length = 50)
count = models.IntegerField(default=0)
class UFile(models.Model):
filename = models.CharField(max_length=50)
uploaded_date = models.DateTimeField(default=django.utils.timezone.now)
file_manager = models.ForeignKey(UFileManager, on_delete=models.CASCADE)
file_extensions = models.CharField(max_length=10, default=".txt")
def __str__(self):
return self.filename
def get_filepath(self):
folder_name = str(self.file_manager.id)
filepath = os.path.join(settings.MEDIA_ROOT, "uploader", folder_name, str(self.id) + self.file_extensions)
return filepath
@receiver(pre_delete, sender=User)
def delete_file(sender, instance, using, **kwargs):
print("HI")
try:
filepath = instance.get_filepath()
os.remove(filepath)
console.log("removed file")
except FileNotFoundError:
pass
class Note(models.Model):
text = models.TextField(max_length=200)
file_manager = models.ForeignKey(UFileManager, on_delete=models.CASCADE) | [
"and.situ@gmail.com"
] | and.situ@gmail.com |
4c3ab23c18f9d4491755f6abf41148a2ed42fc82 | c4702d1a06640555829b367852138cc93ba4a161 | /dym_bank_trf_request/wizard/bank_trf_advice_group_old.py | 6ef06d317a4c2cef785790f379608629ac9eeabb | [] | no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class BankTrfRequestGroup(osv.osv_memory):
_name = "bank.trf.request.group"
_description = "Bank Transfer Request Grup"
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
"""
Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context={}
res = super(BankTrfRequestGroup, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if context.get('active_model','') == 'bank.trf.request' and len(context['active_ids']) < 2:
raise osv.except_osv(_('Warning!'),
_('Please select multiple order to merge in the list view.'))
return res
def merge_trf_requests(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: purchase order view
"""
trf_req_obj = self.pool.get('bank.trf.request')
# proc_obj = self.pool.get('procurement.order')
mod_obj =self.pool.get('ir.model.data')
if context is None:
context = {}
result = mod_obj._get_id(cr, uid, 'dym_bank_trf_request', 'bank_trf_request_search_view')
id = mod_obj.read(cr, uid, result, ['res_id'])
# allorders = trf_req_obj.do_merge(cr, uid, context.get('active_ids',[]), context)
allorders = []
return {
'domain': "[('id','in', [" + ','.join(map(str, allorders.keys())) + "])]",
'name': _('Bank Transfer Request Group'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'bank.trf.request',
'view_id': False,
'type': 'ir.actions.act_window',
'search_view_id': id['res_id']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"rizal@portcities.net"
] | rizal@portcities.net |
2df4630185cbefd14c9b15aeea7aa3af7e99f6be | 4783d07c08dacbe5903b3ff11d0c8ae6a9a92321 | /api/serializers.py | 1ad36e194589eb745591757e559c337ed92b3e44 | [] | no_license | vollcheck/brumbrum | 577a9aa6ed064f9825c2ffdab54919bb0ee5dec7 | 69fcd01a796eabe68dd93314f457467f6f1d995e | refs/heads/master | 2023-04-22T16:08:37.201808 | 2021-04-11T22:21:28 | 2021-04-11T22:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | import json
import socket
from urllib.request import urlopen
from rest_framework import serializers
from api.models import Car, Rating
from api.validators import validate_if_car_exists, validate_new_car
VEHICLES_API = "https://vpic.nhtsa.dot.gov/api/vehicles/getmodelsformake/{}?format=json"
TIMEOUT = 5
# Review note:
# whole serializer could be inherited from `CarCreateSerializer`
# but I wanted to be sure with the Meta.fields order.
class CarCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = ["make", "model"]
@staticmethod
def get_api_data(url: str) -> dict:
try:
response = urlopen(url, timeout=TIMEOUT)
except socket.timeout as e:
raise serializers.ValidationError(e)
if response.code != 200:
raise serializers.ValidationError("Cannot connect to API.")
return json.load(response)
def validate(self, attrs):
make = attrs.get("make").lower()
model = attrs.get("model").lower()
url = VEHICLES_API.format(make)
api_data = self.get_api_data(url)
validate_if_car_exists(api_data, make, model)
validate_new_car(make, model)
return {"make": make, "model": model}
class CarListSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = ["id", "make", "model", "avg_rating"]
class CarPopularListSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = ["id", "make", "model", "rates_number"]
class RateSerializer(serializers.ModelSerializer):
car_id = serializers.PrimaryKeyRelatedField(queryset=Car.objects.all())
rating = serializers.IntegerField(min_value=1, max_value=5)
class Meta:
model = Rating
fields = ["car_id", "rating"]
| [
"j3k.walczak@gmail.com"
] | j3k.walczak@gmail.com |
a0006eed1e6a9da936a954b9db707f84ed753258 | 9bebf632a32b6d623f7141625e1732ce3951d4ab | /program_SSDG_2d_modify_tsne/tsne_val_hallu.py | b8bc3e70b0c522c849830815fc055401027bea22 | [] | no_license | r08922128/Face-Anti-Spoofing | 110a195c60bed92fe333cb65e98eaba6a0eb1ebe | b733105f95a48329d431da9e7f76c4ee4c4f3d64 | refs/heads/main | 2023-05-09T16:12:07.028694 | 2021-05-17T08:21:51 | 2021-05-17T08:21:51 | 368,108,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,114 | py | import os
import os.path
from os.path import join
import argparse
import torch
import torch.nn as nn
from torch import optim
from dataloader import RealFakeDataloader, SessionDataloader, AllDataloader
from torch.utils.data import DataLoader
#from model_1 import Extractor, Classifier, Discriminator
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
import sys, csv
import numpy as np
from utils import cal_accuracy, write_csv, cal_AUC
from hard_triplet_loss import HardTripletLoss
from DGFAS import *
from config import config
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type = str)
parser.add_argument('--data', type = str,default=None)
parser.add_argument('--load', type = int, default = 1)
parser.add_argument('--epochs', type = int, default = 30)
parser.add_argument('--lr_step', type = int, default = 10)
parser.add_argument('--h_degree', type = int, default = 1)
parser.add_argument('--train_data_path', type = str, default = '../oulu_npu_cropped/train')
parser.add_argument('--valid_data_path', type = str, default = '../oulu_npu_cropped/val')
#parser.add_argument('--test_data_path', type = str, default = '../siw_test')
return parser.parse_args()
def get_Lambda(index, start_step, total_step):
    """Gradient-reversal weight schedule.

    Maps training progress p = (index + start_step) / total_step through
    the sigmoid-style ramp 2 / (1 + exp(-10 p)) - 1, so the adversarial
    term grows smoothly from 0 toward 1 as training advances.
    """
    progress = (index + start_step) / float(total_step)
    return 2. / (1. + np.exp(-10 * progress)) - 1
def prepare_noise_features_labels(args, real_noise_features, fake_noise_features, fake_session_labels):
    """Flatten hallucinated (noise) features and build matching labels.

    Real samples are assigned class 0; fake samples keep their session id
    shifted up by 1 (classes 1..K), with each fake label duplicated
    ``args.h_degree`` times to line up with the hallucinated copies.

    Returns:
        (total_features, total_labels) with real rows stacked before fake rows.
    """
    feat_dim = real_noise_features.shape[-1]
    n_real = real_noise_features.shape[0] * args.h_degree
    # Class 0 for every hallucinated real sample.
    real_labels = torch.tensor([[0]] * n_real).cuda()
    # Shift session ids by one and repeat each h_degree times as a column vector.
    fake_labels = (fake_session_labels + 1).unsqueeze(-1).expand(-1, args.h_degree).reshape(-1, 1)
    flat_real = real_noise_features.view(-1, feat_dim)
    flat_fake = fake_noise_features.view(-1, feat_dim)
    total_labels = torch.cat((real_labels, fake_labels), dim=0)
    total_features = torch.cat((flat_real, flat_fake), dim=0)
    return total_features, total_labels
def prepare_features_labels(real_features, fake_features, fake_session_labels):
    """Stack real and fake feature batches with a flat label vector.

    Real rows get class 0; fake rows get their session id shifted by 1
    (classes 1..K). Features are concatenated real-first to match the
    label ordering.
    """
    num_real = real_features.shape[0]
    real_labels = torch.tensor([[0]] * num_real).cuda()
    shifted_fake = (fake_session_labels + 1).unsqueeze(-1)
    total_features = torch.cat((real_features, fake_features), dim=0)
    # Squeeze the trailing singleton so labels come back as a 1-D vector.
    total_labels = torch.cat((real_labels, shifted_fake), dim=0).squeeze(-1)
    return total_features, total_labels
def one_dim_expand(label, h_degree):
    """Repeat each entry of a 1-D label tensor (h_degree + 1) times.

    E.g. [a, b] with h_degree=2 -> [a, a, a, b, b, b], so labels line up
    with features duplicated once per hallucination copy plus the original.

    Args:
        label: 1-D tensor of labels.
        h_degree: number of hallucinated copies per sample.

    Returns:
        1-D tensor of length ``label.numel() * (h_degree + 1)``.
    """
    # repeat_interleave is the idiomatic equivalent of the previous
    # unsqueeze/expand/reshape sequence (which also left an unused
    # batch_size local behind).
    return label.repeat_interleave(h_degree + 1)
def two_dim_expand(feature, h_degree):
    """Repeat each row of a 2-D feature tensor (h_degree + 1) times.

    E.g. [[a, b]] with h_degree=2 -> [[a, b], [a, b], [a, b]], mirroring
    how one_dim_expand duplicates the corresponding labels.

    Args:
        feature: tensor of shape (batch, feat_dim).
        h_degree: number of hallucinated copies per sample.

    Returns:
        Tensor of shape (batch * (h_degree + 1), feat_dim).

    Bug fix: the original ``unsqueeze(2).expand(...).reshape(-1, feat_dim)``
    flattened along the wrong axis, interleaving feature values across rows
    (e.g. [[a, b]] became [[a, a], [a, b], [b, b]]) instead of duplicating
    whole rows. repeat_interleave along dim 0 keeps each row intact.
    """
    return feature.repeat_interleave(h_degree + 1, dim=0)
def train(args, device):
    """Load the SSDG encoder/discriminator checkpoints and visualise the
    validation features with t-SNE.

    Despite its name, the active code path only runs evaluation: it loads
    pretrained weights, extracts features for up to ~100 validation batches
    under ``torch.no_grad()``, and plots a 2-D t-SNE scatter coloured by
    real/fake x session. The optimizers/losses are constructed but the
    training loop itself is absent (presumably removed; the commented-out
    loss accumulation suggests an earlier training/validation loop).
    """
    torch.multiprocessing.freeze_support()
    # separate loaders for real (label 0) and fake (label 1) training data
    train_real_dataloader = RealFakeDataloader(args.mode, args.train_data_path, 0)
    train_real_data = DataLoader(train_real_dataloader, batch_size = 16, num_workers = 4, shuffle = True)
    train_fake_dataloader = RealFakeDataloader(args.mode, args.train_data_path, 1)
    train_fake_data = DataLoader(train_fake_dataloader, batch_size = 16, num_workers = 4, shuffle = True)
    train_data_len = min(len(train_real_data), len(train_fake_data))
    valid_dataloader = AllDataloader('valid', args.valid_data_path)
    valid_data = DataLoader(valid_dataloader, batch_size = 11, num_workers = 0, shuffle = False)
    valid_data_len = len(valid_data)
    print('loading model...')
    # feature extractor (ENet) and domain discriminator (DNet)
    ENet = DG_model('resnet18',args)
    total_params = sum(p.numel() for p in ENet.parameters() if p.requires_grad)
    ENet.cuda().float()
    # print(ENet)
    # print("Total number of params = ", total_params)
    # print()
    # CNet = Classifier()
    # total_params = sum(p.numel() for p in CNet.parameters() if p.requires_grad)
    # CNet.cuda().float()
    # print(CNet)
    # print("Total number of params = ", total_params)
    # print()
    DNet = Discriminator()
    total_params = sum(p.numel() for p in DNet.parameters() if p.requires_grad)
    DNet.cuda().float()
    # print(DNet)
    # print("Total number of params = ", total_params)
    # print()
    save_path = './models/'
    os.makedirs(save_path, exist_ok = True)
    # resume from epoch-29 checkpoints
    ENet.load_state_dict(torch.load(join(save_path, 'E_SSDG_new_29.pth')))
    DNet.load_state_dict(torch.load(join(save_path, 'D_SSDG_new_29.pth')))
    optimizer_SGD = optim.SGD(list(list(ENet.parameters()) + list(DNet.parameters())), lr = 1e-3, weight_decay = 0.012, momentum = 0.9)
    scheduler_SGD = optim.lr_scheduler.StepLR(optimizer_SGD, step_size = 10, gamma = 0.1)
    # optimizer_Adam = optim.Adam(list(list(ENet.parameters()) + list(CNet.parameters()) + list(DNet.parameters())), lr = 1e-3, betas = (0.5, 0.9))
    CELoss = nn.CrossEntropyLoss()
    CELoss.cuda()
    TripletLoss = HardTripletLoss()
    TripletLoss.cuda()
    MSELoss = nn.MSELoss()
    MSELoss.cuda()
    best_loss = 100.0
    with torch.no_grad():
        ENet.eval()
        #CNet.eval()
        DNet.eval()
        total_classification_loss = total_domain_loss = 0
        pred_list, label_list, folder_name_list = [], [], []
        total_DNet_output_session0=0.0
        total_DNet_output_session1=0.0
        all_features=[]
        all_realfake_label=[]
        all_session_label=[]
        for index, (images, labels, sessions, folder_name) in enumerate(tqdm(valid_data, ncols = 70, desc = 'Validate')):
            # cap the visualisation at ~100 batches
            if index>100:
                break
            images, labels, sessions = images.to(device), labels.to(device), sessions.to(device)
            aug_predict, aug_features, aug_features_no_norm, clean_predict, clean_features, clean_features_no_norm= ENet(images, True)
            real_discrimination = DNet(clean_features, 1)
            # replicate labels to line up with the per-augmentation features
            labels=one_dim_expand(labels,args.h_degree)
            sessions=one_dim_expand(sessions,args.h_degree)
            all_features.append(aug_features)
            all_realfake_label.append(labels)
            all_session_label.append(sessions)
            # total_DNet_output_session0+=(torch.argmax(real_discrimination,dim=1)==0).sum().item()
            # total_DNet_output_session1+=(torch.argmax(real_discrimination,dim=1)==1).sum().item()
            # total_classification_loss += CELoss(clean_predict, labels)
            # total_domain_loss += CELoss(real_discrimination, sessions)
            pred_list.append(clean_predict[0].view(1,-1))
            label_list.append(labels[0].view(1,-1))
            folder_name_list.append(folder_name[0])
        # avg_classification_loss = total_classification_loss / valid_data_len
        # avg_domain_loss = total_domain_loss / valid_data_len
        # print('[Valid]\navg_classification_loss: {:.5f} avg_domain_loss: {:.5f} session_0: {:.1f} session_1: {:.1f}'.format(avg_classification_loss, avg_domain_loss,total_DNet_output_session0,total_DNet_output_session1))
        # print()
        ##tsne
        all_features=torch.cat(all_features)
        all_realfake_label=torch.cat(all_realfake_label)
        all_session_label=torch.cat(all_session_label)
        #plot 2d
        # fold (real/fake, session) into a single 4-way class for colouring:
        # 0=real/s0, 1=real/s1, 2=fake/s0, 3=fake/s1
        realfake_session_label=[]
        for i in range(all_realfake_label.size(0)):
            if all_realfake_label[i].item()==0 and all_session_label[i].item()==0:
                realfake_session_label.append(0)
            elif all_realfake_label[i].item()==0 and all_session_label[i].item()==1:
                realfake_session_label.append(1)
            elif all_realfake_label[i].item()==1 and all_session_label[i].item()==0:
                realfake_session_label.append(2)
            else:
                realfake_session_label.append(3)
        label=realfake_session_label
        all_features=all_features.detach().cpu()
        tsne = TSNE(n_components=2, init='random', random_state=5, verbose=1)
        X_tsne = tsne.fit_transform(all_features)
        print(X_tsne.shape)
        data=X_tsne
        # x_min, x_max = np.min(data,axis=0), np.max(data,axis=0)
        # data = (data- x_min) / (x_max - x_min)
        x=data[:,0]
        y=data[:,1]
        ax = plt.figure().add_subplot(111)
        #nsamples=50
        #colors={0:'b',1:'r',2:'r',3:'c',4:'m',5:'y',6:'k'}
        #c=[colors[i] for i in np.round(np.random.uniform(0,6,nsamples),0)]
        c=['#52C0D4','#F6C75C','#1D99A6','#DDA230']
        for i in range(data.shape[0]):
            # '.' marks the clean sample, 'x' its augmented copies
            if i%(args.h_degree+1)==0:
                temp=ax.scatter(x[i],y[i],color=c[int(label[i])], marker='.')
            else:
                temp=ax.scatter(x[i],y[i],color=c[int(label[i])], marker='x')
            # remember one handle per class for the legend
            if int(label[i])==0:
                p0=temp
            elif int(label[i])==1:
                p1=temp
            elif int(label[i])==2:
                p2=temp
            else:
                p3=temp
        legend=plt.legend([p0,p1,p2,p3],['real session1','real session2','fake session1','fake session2'],loc='upper right',scatterpoints=1)
        plt.show()
if __name__ == '__main__':
    # parse CLI options, prefer GPU when available, then run the pipeline
    args = _parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train(args, device)
"r08922128@csie.ntu.edu.tw"
] | r08922128@csie.ntu.edu.tw |
ca3322bacd4039bef234187f11f7e7988ff9e0ca | 7a88534aea49d86bb94c1aef0c59e90f8ef08577 | /AttendanceGUI.py | 3d8348c1f7f7d995ee4e8f5f491bc1d6a5f08e9b | [] | no_license | Mohamed-Shaaban/odu-attendance-pi | 333de1f607027cec904363c7ea24c1e8ba7b50df | 3b6121f7909b0cd56056a6dc679cd2a2fb548203 | refs/heads/master | 2020-06-09T03:54:55.933628 | 2019-06-23T15:38:41 | 2019-06-23T15:38:41 | 193,365,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,374 | py | import json
import time
import pyqrcode
import requests
import logging
import logging.handlers
import os
from tkinter import *
from configapi import get_config_info as configs
from PIL import ImageTk, Image
from AnimatedGIF import AnimatedGIF
class Window(Tk):
    """Main Tk window of the ODU attendance station.

    Shows either a projector view (large QR code), a card-reader view
    (QR code + swipe feedback), or a "no active session" screen.
    """
    # buffer accumulating characters from the magnetic card reader
    cardCode = ''
    # token of the currently active attendance session ('' when none)
    theToken = ''
    # id of the pending `after` callback that resets the status label
    cancel_after_id = ''
    # directory containing this script; used to locate img/ and log/ assets
    dir_path = os.path.dirname(os.path.realpath(__file__))
    def __init__(self):
        """Initialise the window: config, rotating file logger, title,
        fullscreen mode and the F11/Escape key bindings."""
        Tk.__init__(self)
        self.confs = configs()
        self.logger = logging.getLogger('Attendace GUI')
        self.logger.setLevel(logging.DEBUG)
        # create a file handler (rotates at 10 MB, keeps 5 backups)
        handler = logging.handlers.RotatingFileHandler(
            self.dir_path + '/log/attendance_gui.log',
            mode='a',
            maxBytes=10 * 1024 * 1024,
            backupCount=5
        )
        handler.setLevel(logging.DEBUG)
        # create a logging format
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(handler)
        self.title("Old Dominion Attendance Application")
        # NOTE: `state` shadows Tk's own state() method name; it tracks
        # fullscreen on/off for toggle_fullscreen below
        self.state = True
        self.attributes("-fullscreen", True)
        self.bind("<F11>", self.toggle_fullscreen)
        self.bind("<Escape>", self.end_fullscreen)
def createMyView(self, token, title, projector_view=False):
self.theToken = token
if token:
if projector_view:
self.geometry('{}x{}'.format(1820, 980))
self.createProjectorView(token, title)
else:
self.geometry('{}x{}'.format(800, 500))
self.createReaderView(token, title)
else:
self.geometry('{}x{}'.format(1820, 980)) if projector_view else self.geometry('{}x{}'.format(800, 500))
self.createNotActiveSesstionView()
    def createProjectorView(self, token, title):
        """Build the projector layout: a large QR code on the left and the
        session title on the right."""
        # Generate the Code based on the session token
        Window.createQRCode(token)
        qr_code_frame = Frame(
            self, bg='white',
            highlightbackground="green",
            highlightcolor="red",
            highlightthickness=0,
            bd=0
        )
        label_frame = Frame(
            self, bg='white',
            highlightbackground="gray",
            highlightcolor="red",
            highlightthickness=1,
            bd=0
        )
        qr_code_frame.pack(anchor=N, fill=BOTH, expand=True, side=LEFT)
        label_frame.pack(anchor=S, fill=BOTH, expand=True, side=LEFT)
        # QR code rendered by createQRCode above, scaled up for projection
        Qrimage = self.createImage('/img/code.png', (1000, 1000))
        qr_panel = Label(qr_code_frame, image=Qrimage, borderwidth=0)
        qr_panel.pack(side=LEFT)
        # panel.grid(row=0, column=1, sticky="nw")
        # keep a reference so Tk does not garbage-collect the image
        qr_panel.image = Qrimage
        title_label = Label(label_frame, text=title)
        title_label.place(x=160, y=60, anchor="center")
        title_label.config(width=600, font=("Arial", 18), bg="white", pady=25)
        title_label.pack(side=TOP)
    def createReaderView(self, token, title):
        """Build the card-reader layout: QR code on the left, session title
        and swipe status (present/absent/late/invalid icons) on the right.

        Also binds <Key> so magnetic-stripe input is captured by get_key.
        """
        # Generate the Code based on the session token
        Window.createQRCode(token)
        # Main containers
        left = Frame(self, bg='white', highlightthickness=0)
        top_right = Frame(self, bg='white', highlightthickness=0)
        btm_right = Frame(self, bg='white', highlightthickness=0)
        self.grid_rowconfigure(1, weight=1)
        self.grid_columnconfigure(0, weight=1)
        # old frame setting
        for r in range(6):
            self.grid_rowconfigure(r, weight=1)
        for c in range(6):
            self.grid_columnconfigure(c, weight=1)
        left.grid(row=0, column=0, rowspan=6, columnspan=3, sticky=W + E + N + S)
        top_right.grid(row=0, column=3, rowspan=2, columnspan=3, padx=0, sticky=W + E + N + S)
        btm_right.grid(row=2, column=3, rowspan=4, columnspan=3, sticky=W + E + N + S)
        self.label1 = Label(top_right, text=title)
        self.label1.place(x=200, y=65, anchor="center")
        self.label1.config(width=800, font=("Arial", 12), bg="white", padx=35)
        # reading the card
        self.label = Label(btm_right, text="Please Swipe your Card")
        self.label.place(x=200, y=165, anchor="center")
        # NOTE(review): 'highlightthickn' relies on Tk's option-name
        # abbreviation; presumably intended as highlightthickness — verify
        self.canvas = Canvas(btm_right, width=250, height=210, highlightthickn=0)
        self.canvas.configure(background='white')
        self.canvas.place(x=165, y=45, anchor="center")
        # status icons shown after a swipe is submitted
        self.present_img = self.createImage('/img/present.png', size=(150, 150))
        self.absent_img = self.createImage('/img/absent.png', size=(130, 130))
        self.invalid_img = self.createImage('/img/invalid.png', size=(150, 150))
        self.late_img = self.createImage('/img/late.png', size=(150, 150))
        # self.loading_gif = AnimatedGIF(self.canvas, self.dir_path + '/img/loading.gif')
        self.status_item = self.canvas.create_image(155, 125, image=self.present_img, state=HIDDEN)
        # self.loading = self.canvas.create_window(155, 125, window=self.loading_gif, state=HIDDEN)
        self.bind('<Key>', self.get_key)
        # insert QR code
        qr_img = self.createImage('/img/code.png', size=(450, 450))
        panel = Label(left, image=qr_img, borderwidth=0)
        panel.place(x=200, y=200, anchor="center")
        panel.image = qr_img
        odu_logo = self.createImage(file_path='/img/odu-logo.gif', size=(150, 75))
        panel = Label(btm_right, image=odu_logo, borderwidth=0)
        panel.place(x=200, y=245, anchor="center")
        panel.config(highlightthickness=0)
        panel.image = odu_logo
        top_right.grid_rowconfigure(0, weight=1)
        top_right.grid_columnconfigure(1, weight=1)
def createImage(self, file_path, size):
image = Image.open(self.dir_path + file_path)
image = image.resize(size, Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
return image
    def createNotActiveSesstionView(self):
        """Build the fallback screen shown when no session token exists:
        the ODU logo above a large "No Active Session" banner."""
        main = Frame(
            self, bg='white',
            highlightbackground="green",
            highlightcolor="green",
            highlightthickness=2,
            bd=0
        )
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        main.pack(anchor=N, fill=BOTH, expand=True, side=LEFT)
        oduLogo = Image.open(self.dir_path + '/img/odu.jpg')
        # scale the logo to 400 px wide, preserving the aspect ratio
        basewidth = 400
        wpercent = (basewidth / float(oduLogo.size[0]))
        hsize = int((float(oduLogo.size[1]) * float(wpercent)))
        oduLogo = oduLogo.resize((basewidth, hsize), Image.ANTIALIAS)
        oduLogo = ImageTk.PhotoImage(oduLogo)
        panel = Label(main, image=oduLogo, borderwidth=0)
        # keep a reference so Tk does not garbage-collect the image
        panel.image = oduLogo
        panel.pack(anchor=CENTER)
        label = Label(main, text="No Active Session", borderwidth=1, relief="solid")
        label.config(width=100, font=("Arial", 60), bg="white")
        label.pack(anchor=CENTER)
    def get_key(self, event):
        """Accumulate keystrokes from the magnetic card reader.

        Digits, ';' and '=' are buffered; '?' terminates the swipe.
        A well-formed code is 16 characters; longer buffers are split on
        ';' and any 15-character fragment is re-prefixed with ';' and
        submitted (presumably recovering from two swipes merged into one
        buffer — verify against the reader's track format).
        """
        # ignore keystrokes while no session is active
        if not self.theToken:
            return None
        if event.char in '0123456789;=':
            self.cardCode += event.char
        elif event.char == '?':
            # '?' is the end-of-track sentinel: the swipe is complete
            self.cardCode += event.char
            headers = {'Authorization': self.confs['OAuthToken'], 'Content-type': 'application/json'}
            # if the card code is not proper format
            if len(self.cardCode) != 16:
                if len(self.cardCode) > 16:
                    # buffer holds more than one swipe: split and submit each
                    # plausible 15-char fragment separately
                    codes = set(self.cardCode.split(";"))
                    for code in codes:
                        if len(code) != 15:
                            continue
                        code = ";" + code
                        self.submit_code(code, headers)
                    return None
                else:
                    # too short to be a card number: show the error briefly
                    self.logger.error('Error reading card: ' + self.cardCode)
                    self.label.after_cancel(self.cancel_after_id)
                    self.clear_label()
                    self.label['text'] = "Error, Please try again!"
                    self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.invalid_img)
                    self.cancel_after_id = self.label.after(1000, self.clear_label)
                    return None
            self.submit_code(self.cardCode, headers)
        return None
    def submit_code(self, code, headers):
        """POST a swiped card code to the attendance API and show the result.

        Response codes observed here: 1=present, 2=absent, 3=tardy,
        99=invalid; anything else is treated as an unknown error. The status
        label/icon is reset one second after being shown.
        """
        self.label.after_cancel(self.cancel_after_id)
        # self.loading_gif.start_animation()
        # self.canvas.itemconfigure(self.loading, state=NORMAL)
        current_time = int(round(time.time() * 1000))
        postData = {
            "timestamp": current_time,
            "token": self.theToken,
            "identifier": {
                "type": "SWIPE",
                "identifier": code
            }
        }
        jsonData = json.dumps(postData)
        try:
            response = requests.post(self.confs['submitAttendanceAPI'], data=jsonData, headers=headers)
        except Exception as e:
            # network failure: log it and show a transient error message
            self.logger.error("Error, was not able to post the request to ESB! ")
            self.clear_label()
            self.label['text'] = "Error Submitting, Please try again!"
            self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.invalid_img)
            self.cancel_after_id = self.label.after(1000, self.clear_label)
            return
        # round-trip time in milliseconds, for the log
        after_scan = int(round(time.time() * 1000))
        if response.status_code == 200:
            result = json.loads(response.content.decode('utf-8'))
            response_code = result.get('response', 'unknown')
            self.logger.info('Attendance was submitted successfully for card number: ' + code
                             + ' -- submission time: ' + str(after_scan - current_time) + ' (ms) '
                             + ' -- result code: ' + str(response_code))
            self.clear_label()
            if response_code == 1:
                self.label['text'] = "Marked Present"
                self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.present_img)
            elif response_code == 2:
                self.label['text'] = "Marked Absent"
                self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.absent_img)
            elif response_code == 3:
                self.label['text'] = "Marked Tardy"
                self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.late_img)
            elif response_code == 99:
                self.label['text'] = "Invalid, Please try again later"
                self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.invalid_img)
            else:
                self.label['text'] = "Invalid, Unknown Error!"
                self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.invalid_img)
            self.cancel_after_id = self.label.after(1000, self.clear_label)
        else:
            self.logger.error('Error submitting card info: ' + code
                              + ' -- Error code: ' + str(response.status_code)
                              + ' -- submission time: ' + str(after_scan - current_time) + ' (ms) ')
            self.clear_label()
            self.label['text'] = "Error Submitting, Please try again!"
            self.canvas.itemconfigure(self.status_item, state=NORMAL, image=self.invalid_img)
            self.cancel_after_id = self.label.after(1000, self.clear_label)
        # raise Exception
def clear_label(self):
# self.canvas.itemconfigure(self.loading, state=HIDDEN)
# self.loading_gif.stop_animation()
self.label['text'] = "Please Swipe your Card"
self.cardCode = ""
self.canvas.itemconfigure(self.status_item, state=HIDDEN)
    def toggle_fullscreen(self, event=None):
        """Flip fullscreen on/off (bound to F11)."""
        self.state = not self.state  # Just toggling the boolean
        self.attributes("-fullscreen", self.state)
        # stop Tk from propagating the key event further
        return "break"
    def end_fullscreen(self, event=None):
        """Leave fullscreen mode (bound to Escape)."""
        self.state = False
        self.attributes("-fullscreen", False)
        # stop Tk from propagating the key event further
        return "break"
@staticmethod
def createQRCode(qr_token):
qrCode = pyqrcode.create(qr_token, error='L')
qrCode.png(Window.dir_path + '/img/code.png', scale=6, module_color=(0, 0, 0, 128),
background=(0xff, 0xff, 0xff))
| [
"mohamedshaaban@Mohameds-MacBook-Pro.local"
] | mohamedshaaban@Mohameds-MacBook-Pro.local |
c8c29d73cf53a8c88def0aa8dd6b24fe9e953cd5 | 0e1b725df08a428d9ce1b1bafbc05753fcc24962 | /business/register_business.py | e4a79121072f8040a26d55756fca64601ad7c420 | [
"Apache-2.0"
] | permissive | Hanlen520/XKAppium | e52a5acd08a6fce1f77008e32a317e917f93eec9 | 400736df8fdb6a49f8ed1594855344f557beca3e | refs/heads/main | 2023-03-18T21:40:26.324135 | 2021-03-11T05:52:24 | 2021-03-11T05:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
# @Author : xiaoke
# @Email : 976249817@qq.com
from appium import webdriver
from time import sleep
from base.base_driver import BaseDriver
class RegisterBusiness:
    """Business-layer actions for the registration screen (Appium driver)."""

    def __init__(self, i):
        # i selects which device/driver configuration to use — TODO confirm
        # against BaseDriver.get_driver
        base_driver = BaseDriver()
        self.driver = base_driver.get_driver(i)

    # Tap "already have an account, go to login"
    def go_login(self):
        self.driver.find_element_by_id("cn.com.open.mooc:id/tv_go_login").click()
| [
"noreply@github.com"
] | noreply@github.com |
278010849f6c888e86cd9237c60ee0f61c668fd9 | 9bd687b5454ca7d2b4deb0e149ec7023b2f3b89e | /ebikes/lora/rfm/ll/__init__.py | 3ca2c4bfa45a8a729a51f15a13480468039889bb | [] | no_license | AlbertoFDR/EBikes-IoT | 57132ff8b059b6d2e5185e241afe7720f96b667f | cd5da02d96ccedb57a9fd3e76d4430a11fd4f4fd | refs/heads/master | 2022-12-10T14:02:22.468032 | 2020-02-11T12:37:59 | 2020-02-11T12:37:59 | 225,611,209 | 3 | 0 | null | 2021-06-02T00:45:47 | 2019-12-03T12:11:18 | Python | UTF-8 | Python | false | false | 760 | py | """
LinkLayer submodule
"""
__author__ = """Alexander Krause <alexander.krause@ed-solutions.de>"""
__date__ = "2016-12-28"
__version__ = "0.1.0"
__license__ = "GPL"
class Prototype:
    """Base class for link-layer implementations.

    Stores the configuration and physical layer, registers the IRQ handler
    with the physical layer, then calls postInit() so subclasses can finish
    their own setup.
    """
    conf = None
    PL = None

    def __init__(self, cfg, pl):
        self.conf = cfg
        self.PL = pl
        # route the physical layer's interrupts to our handler
        self.PL.setIRQH(self._handleIRQ)
        self.postInit()

    def postInit(self):
        """Subclass hook run at the end of __init__.

        Default is a no-op; the original base class omitted this method, so
        instantiating Prototype (or a subclass without postInit) raised
        AttributeError.
        """
        pass

    def _handleIRQ(self):
        # default IRQ handler: ignore interrupts; subclasses override
        pass
def get(conf, pl=None):
    """
    Return a new LinkLayer instance chosen from the config.
    When a PhysicalLayer is given, it is passed to the LinkLayer.
    """
    supported = ("rfm9x", "rfm95", "rfm96", "rfm97", "rfm98")
    if conf["type"] not in supported:
        print("unsupported type")
        return None
    from .ll_rfm9x import LinkLayer
    return LinkLayer(conf, pl)
| [
"aratzml@opendeusto.es"
] | aratzml@opendeusto.es |
82af1793259868f2a9d4753be5804861e1781ff7 | 54a7231da06c45aa1c93283520648de8f2769e23 | /hubblestack/files/hubblestack_nova/vulners_scanner.py | 5f98962ea6a93275a47f8de3d80a39dbaae4e71d | [
"Apache-2.0"
] | permissive | cedwards/hubble-1 | 4fe80fdf855bb563f7129cd6d3dda3d3a77c1dac | e3459a9fb8424ce9774825653911c3344f834630 | refs/heads/develop | 2021-01-01T18:31:30.917445 | 2017-07-25T21:17:44 | 2017-07-25T21:17:44 | 98,355,780 | 1 | 1 | null | 2017-07-25T22:37:44 | 2017-07-25T22:37:44 | null | UTF-8 | Python | false | false | 4,549 | py | '''
HubbleStack Nova plugin for auditing installed packages.
The module gets the list of installed packages of the system and queries
the Vulners.com Linux Vulnerability Audit API.
The API is described at the link below:
https://blog.vulners.com/linux-vulnerability-audit-in-vulners/
:maintainer: HubbleStack / avb76
:maturity: 3/26/2017 (TODO: change the format when the release date is established)
:platform: Linux
:requires: SaltStack
This audit module requires a YAML file inside the hubblestack_nova_profiles directory.
The file should have the following format:
vulners_scanner: <random data>
It does not matter what `<random data>` is, as long as the top key of the file is named `vulners_scanner`.
This allows the module to run under a certain profile, as all of the other Nova modules do.
'''
from __future__ import absolute_import
import logging
import sys
import requests
log = logging.getLogger(__name__)
def __virtual__():
    # Salt loader hook: the module loads on every platform except Windows
    return not sys.platform.startswith('win')
def audit(data_list, tags, debug=False):
    """Audit the installed packages against the Vulners Linux audit API.

    Runs only for profiles whose data contains a 'vulners_scanner' key.
    Returns the Nova result dict with vulnerable packages under 'Failure'
    and a secure/total summary under 'Success'.
    """
    os_name = __grains__.get('os').lower()
    os_version = __grains__.get('osmajorrelease')
    if debug:
        # original format string was missing the colon before {1}
        log.debug("os_version: {0}, os_name: {1}".format(os_version, os_name))
    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for profile, data in data_list:
        if 'vulners_scanner' in data:
            local_packages = _get_local_packages()
            vulners_data = _vulners_query(local_packages, os=os_name, version=os_version)
            if vulners_data['result'] == 'ERROR':
                log.error(vulners_data['data']['error'])
            # reuse the response we already have: the original issued a
            # second, identical API call here
            failures = _process_vulners(vulners_data)
            total_packages = len(local_packages)
            secure_packages = total_packages - len(failures)
            ret['Success'] = [{'tag': 'Secure packages',
                               'description': '{0} out of {1}'.format(secure_packages, total_packages)}]
            ret['Failure'] = failures
    return ret
def _get_local_packages():
    '''
    Return the packages installed on the system as "<name>-<version>" strings.
    '''
    installed = __salt__['pkg.list_pkgs']()
    return ['{0}-{1}'.format(name, version) for name, version in installed.items()]
def _vulners_query(packages=None, os=None, version=None, url='https://vulners.com/api/v3/audit/audit/'):
'''
Query the Vulners.com Linux Vulnerability Audit API for the provided packages.
:param packages: The list on packages to check
:param os: The name of the operating system
:param version: The version of the operating system
:param url: The URL of the auditing API; the default value is the Vulners.com audit API
Check the following link for more details:
https://blog.vulners.com/linux-vulnerability-audit-in-vulners/
:return: A dictionary containing the JSON data returned by the HTTP request.
'''
# error dict matching the error dict returned by the requests library
error = {
'result': 'ERROR',
'data': {'error': None}
}
if not packages:
error['data']['error'] = 'Missing the list of packages.'
return error
if not os and not version:
error['data']['error'] = 'Missing the operating system name and version.'
return error
if not os:
error['data']['error'] = 'Missing the operating system name.'
return error
if not version:
error['data']['error'] = 'Missing the operating system version.'
return error
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
data = {
"os": os,
"package": packages,
"version": version
}
try:
response = requests.post(url=url, headers=headers, json=data)
return response.json()
except requests.Timeout:
error['data']['error'] = 'Request to {0} timed out'.format(url)
return error
def _process_vulners(vulners):
'''
Process the data returned by the API into the format accepted by `hubble.py`.
:param vulners: The JSON data returned by the API
:return: A list of dictionaries as hubble.py swallows
'''
packages = vulners.get('data', {}).get('packages')
if not packages:
return []
return [{'tag': pkg,
'vulnerabilities': packages[pkg],
'description': ', '.join(packages[pkg].keys())}
for pkg in packages]
| [
"colton.myers@gmail.com"
] | colton.myers@gmail.com |
15ae360082178c5a2e06f58d8d6089e457934304 | c97f6219c0858f3a901bc1a2443bf7895c688916 | /src/segnet.py | 657b66c3435b3114cfc1cae10bfb999e874bfd3f | [] | no_license | pq53ui/Project2 | a0b7124b5912ea16ca6082bb4fdb52783680dfd0 | fa293daf19e73bff0eddfd78393a2da8cf01244f | refs/heads/master | 2020-07-12T03:25:28.879912 | 2019-08-27T13:21:53 | 2019-08-27T13:21:53 | 204,704,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | # -*- coding: utf-8 -*-
"""SegNet model for Keras.
# Reference:
- [Segnet: A deep convolutional encoder-decoder architecture for image segmentation](https://arxiv.org/pdf/1511.00561.pdf)
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
from keras.layers import Input
from keras.layers.core import Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.utils import np_utils
from keras.applications import imagenet_utils
def preprocess_input(X):
    """Delegate to Keras' imagenet_utils preprocessing for input batch X."""
    return imagenet_utils.preprocess_input(X)
def to_categorical(y, nb_classes):
    """One-hot encode a per-sample label array.

    :param y: array of integer labels, shape (num_samples, ...)
    :param nb_classes: number of classes
    :return: array of shape (num_samples, labels_per_sample, nb_classes)
    """
    num_samples = len(y)
    Y = np_utils.to_categorical(y.flatten(), nb_classes)
    # use integer division: '/' yields a float on Python 3 and reshape
    # rejects non-integer dimensions
    return Y.reshape((num_samples, y.size // num_samples, nb_classes))
def SegNet(input_shape=(360, 480, 3), classes=12):
    """Build a simplified SegNet-style encoder-decoder for segmentation.

    Unlike the original paper, upsampling uses plain UpSampling2D rather
    than max-pooling indices. The output is reshaped to
    (H*W, classes) with a per-pixel softmax.
    """
    # c.f. https://github.com/alexgkendall/SegNet-Tutorial/blob/master/Example_Models/bayesian_segnet_camvid.prototxt
    img_input = Input(shape=input_shape)
    x = img_input
    # Encoder: conv-BN-ReLU blocks, each followed by 2x2 max pooling
    x = Convolution2D(64, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(128, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(256, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(512, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # Decoder: mirror of the encoder with 2x upsampling between blocks
    x = Convolution2D(512, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Convolution2D(256, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Convolution2D(128, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Convolution2D(64, 3, 3, border_mode="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # 1x1 conv to per-class scores, flattened to (H*W, classes) + softmax
    x = Convolution2D(classes, 1, 1, border_mode="valid")(x)
    x = Reshape((input_shape[0]*input_shape[1], classes))(x)
    x = Activation("softmax")(x)
    model = Model(img_input, x)
    return model
| [
"zadnikar.maja@gmail.com"
] | zadnikar.maja@gmail.com |
45f05ca089913d58bd5488016dafa80a91d9f25f | ca84fefcca1676b45d4171fc1831cf74d6d9fe36 | /Anuraag/extension.py | a2a2d7c3296d611a5b6e6927ca9341fe558dd204 | [] | no_license | aishik-rakshit/STUD-bot | fb2dab25effb3d8f7510e62f4f3436e6918f5c32 | 5fcdf17a7a481431e123507df93c466ae657f10b | refs/heads/main | 2023-05-08T18:39:47.013824 | 2021-06-02T07:28:18 | 2021-06-02T07:28:18 | 333,859,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | from flask import Flask, jsonify, request
import time
from playsound import playsound
# from pydub import AudioSegment
# from pydub.playback import play
app = Flask(__name__)
urls = {}
blacklisted = frozenset(["www.youtube.com", "www.facebook.com", "www.instagram.com"])
def url_strip(url):
    """Reduce a URL to its bare hostname.

    Removes an http(s) scheme (and stray double quotes) when present, then
    drops everything after the first '/'.

    The original only stripped the path inside the scheme branch, so a
    schemeless URL like "www.youtube.com/watch" kept its path and slipped
    past the hostname blacklist.
    """
    if "http://" in url or "https://" in url:
        url = url.replace("https://", '').replace("http://", '').replace('\"', '')
    # strip any path component regardless of whether a scheme was present
    if "/" in url:
        url = url.split('/', 1)[0]
    return url
@app.route('/send_url', methods=['POST'])
def send_url():
    """Receive a visited URL from the browser extension, play an alert
    sound when its hostname is blacklisted, and count visits per host."""
    resp_json = request.get_data()
    params = resp_json.decode()
    # request body arrives as a form-style string "url=<value>"
    url = params.replace("url=", "")
    url = url_strip(url)
    if url in blacklisted:
        # blocking call: playback finishes before the response is returned
        playsound('audio.mp3')
    print(url)
    # song = AudioSegment.from_mp3("audio.mp3")
    # play(song)
    urls[url] = urls.get(url, 0) + 1
    print(urls)
    return jsonify({'message': 'success!'}), 200

# listen on all interfaces so the extension can reach us
app.run(host='0.0.0.0', port=5000)
| [
"noreply@github.com"
] | noreply@github.com |
98637f480ca2acec508dbfcd02af43563b21ae13 | 7c8f39c335d98bf7eeaafd3078f1adf82e8de937 | /ta7291.py | ee306ddb6fed78608ab96aa9f33c0e96b5f8e078 | [] | no_license | u-ono/nyandroid | cd01a18df8fe9c3f22c380ab8e52985f210f9076 | 6de9e70e86f59819f342dd016b381c1bf425337e | refs/heads/master | 2021-05-14T12:21:49.794614 | 2018-01-08T14:18:04 | 2018-01-08T14:18:04 | 116,407,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
import sys
class TA7291:
    """Driver for a TA7291 H-bridge motor controller on Raspberry Pi GPIO.

    Pin numbers use the BCM scheme. PWM runs at 50 Hz with the duty cycle
    taken from the drive speed.
    """

    def __init__(self, pwm, in1, in2):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pwm, GPIO.OUT)
        GPIO.setup(in1, GPIO.OUT)
        GPIO.setup(in2, GPIO.OUT)
        self.in1 = in1
        self.in2 = in2
        # drive PWM on the pin the caller configured; the original
        # hard-coded BCM pin 18 here, ignoring the `pwm` argument
        self.p = GPIO.PWM(pwm, 50)

    def drive(self, speed):
        """Run the motor: positive = forward, negative = reverse, 0 = stop.

        abs(speed) is used as the PWM duty cycle (0-100).
        """
        if speed > 0:
            GPIO.output(self.in1, 1)
            GPIO.output(self.in2, 0)
            self.p.start(speed)
        elif speed < 0:
            GPIO.output(self.in1, 0)
            GPIO.output(self.in2, 1)
            self.p.start(-speed)
        else:
            # both inputs low: motor coasts
            GPIO.output(self.in1, 0)
            GPIO.output(self.in2, 0)

    def brake(self):
        """Short-brake the motor (both inputs high) and settle for 0.5 s."""
        GPIO.output(self.in1, 1)
        GPIO.output(self.in2, 1)
        time.sleep(0.5)

    def cleanup(self):
        """Brake the motor, then release all GPIO resources."""
        self.brake()
        GPIO.cleanup()
if __name__ == "__main__":
pass
| [
"oono.yuu@gmail.com"
] | oono.yuu@gmail.com |
94571bd209d76bc7305447d8753501761edf4885 | f4a43d4eead4b1ffb145a27d30eb4b3571f80fed | /moduloGranja/apps/notificaciones/models.py | 51c28d8b3d9d6dac12cf817ca2bbbf147fcc743b | [] | no_license | tom-sb/farmManage | 256dd968416c5449d52e118ad0e3d3cd8ebdaae9 | 0f24f4a3f790e30accd70bf0c7876f635e27a9ba | refs/heads/master | 2023-02-25T15:22:30.073951 | 2021-02-05T01:28:48 | 2021-02-05T01:28:48 | 298,927,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from django.db import models
# Create your models here.
class Notificacion(models.Model):
    """A notification record with a short message and a priority flag."""
    # short notification text shown to the user (max 50 chars)
    mensaje = models.CharField(max_length=50)
    # True when the notification is high priority — presumably; verify
    # against the views that set it
    prioridad = models.BooleanField()

    def __str__(self):
        return self.mensaje
| [
"fvillanuevanut@gmail.com"
] | fvillanuevanut@gmail.com |
8c81e31d33e457d0e89dc65dec99c8eeb4f81c5f | 369c1d2f392657ca61b5e7f35239dc9257588c12 | /PageLocators/gui_manage_locators.py | b188975b656b375dda69530a6f2dba4866558db2 | [] | no_license | Dake-M/boss_web_framework | 3028203f58d8408de285f8a5c8d81dd65047aa55 | b6905b765d84263439e459d6281cd2440e634cef | refs/heads/master | 2020-11-26T07:10:06.005531 | 2019-12-19T07:30:08 | 2019-12-19T07:30:08 | 228,999,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,492 | py | # -*- coding: utf-8 -*-
from selenium.webdriver.common.by import By
class ShowManageLocators:
    """
    Element locators for the guide-management pages.
    """
    # Main menu entry: "Guide Management"
    show_manage_menu = (By.XPATH, "//span[text()='指引管理']")
    # =========================== Guide management page ===========================
    # "Add module" button
    add_module_btn = (By.XPATH, "//span[text()='添加模块']")
    # Module name in the first row of the list
    module_name = (By.XPATH, "//tbody//tr[1]//div")
    # "Delete" button in the action column of the first row
    module_del_btn = (By.XPATH, "//tbody//tr[1]//span[text()='删除']")
    # Confirm button of the first row's delete dialog
    module_del_sure_btn = (By.XPATH, "//span[contains(.,'确定')]")
    # "Details" button in the action column of the first row
    information_btn = (By.XPATH, "//tbody//tr[1]//span[text()='详情']")
    # Title text in the upper-left corner
    left_up_info = (By.XPATH, "//div[@class='info-title']")
    # =========================== Add guide module page ==============================
    # Name input box
    input_name = (By.XPATH, "//label[@for='name']/following-sibling::div//input")
    # Sort-order input box
    input_index = (By.XPATH, "//label[@for='index']/following-sibling::div//input")
    # Logo upload button
    load_logo_btn = (By.XPATH, "//i[@class='el-icon-plus upload-demo-icon over']")
    # Confirm button for adding a guide module
    add_module_sure_btn = (By.XPATH, "//button[@class='el-button el-button--primary']")
    # =========================== Details page ====================================
    # "Add guide" button
    add_gui_btn = (By.XPATH, "//span[text()='添加指引']")
    # Guide item name in the first row of the list
    gui_item_name = (By.XPATH, "//tbody//tr[1]//td[2]//div")
    # "Delete" button in the action column of the first row
    gui_item_del_btn = (By.XPATH, "//tbody//tr[1]//td[4]//span[text()='删除']")
    # Delete confirm button
    del_sure_btn = (By.XPATH, "//button//span[contains(text(),'确定')]")
    # ============================ Add guide page ================================
    # Name input box
    input_gui_name = (By.XPATH, "//input[@type='text' and @class = 'el-input__inner']")
    # Sort-order input box
    input_gui_index = (By.XPATH, "//input[@type='number']")
    # Rich-text editor area (inside a KindEditor iframe)
    iframe_ele = (By.XPATH, "//iframe[@class= 'ke-edit-iframe']")
    input_gui_text = (By.XPATH, "//body[contains(@class,'ke-content')]")
    # Submit button
    submit_gui_btn = (By.XPATH, "//span[text()='提交']")
"604297158@qq.com"
] | 604297158@qq.com |
2b6c55c91181c7e97e176e24ad5f588c767be731 | 21bc908a1612e76a32f61d3e3e8865d3025e01f3 | /backend/manage.py | aed36335605846b8d24da47b5997edc90131d876 | [] | no_license | crowdbotics-apps/damp-sky-27690 | 422acfb8a5a6df8847d31c40dfcf2dc0ce7b1b7c | c6d0d7277a35d40363155773ed2e2860d5748449 | refs/heads/master | 2023-05-08T00:48:47.975234 | 2021-06-02T22:23:20 | 2021-06-02T22:23:20 | 373,317,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django management commands for the damp_sky_27690 project."""
    # default settings module; a pre-set environment variable wins
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'damp_sky_27690.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
"team@crowdbotics.com"
] | team@crowdbotics.com |
02c511b60cde23f482f156867d34247a278e9f14 | 78ed388a01610359d4554efa046e473a008ba1ae | /hdlConvertorAst/translate/verilog_to_basic_hdl_sim_model.py | 0e08a141d10a01eeda1d6dc9d129bc04cf50cc7b | [
"MIT"
] | permissive | mewais/hdlConvertorAst | f9ad85cfb2804c52a1b90642f4c9cede2ce2d3e6 | 64c8c1deee923ffae17e70e0fb1ad763cb69608c | refs/heads/master | 2022-12-09T12:01:23.150348 | 2020-09-06T04:10:15 | 2020-09-06T04:15:38 | 293,200,130 | 0 | 0 | MIT | 2020-09-06T04:03:17 | 2020-09-06T04:03:17 | null | UTF-8 | Python | false | false | 2,050 | py | from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model.\
add_unique_labels_to_all_processes import AddUniqueLabelsToAllProcesses
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.verilog_types_to_basic_hdl_sim_model import VerilogTypesToBasicHdlSimModel
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.wrap_module_statements_to_processes import wrap_module_statements_to_processes
from hdlConvertorAst.translate.common.discover_declarations import DiscoverDeclarations
from hdlConvertorAst.translate.vhdl_to_verilog import link_module_dec_def
from hdlConvertorAst.translate.common.name_scope import NameScope
from hdlConvertorAst.translate.common.resolve_names import ResolveNames
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.discover_stm_outputs import discover_stm_outputs_context
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.verilog_operands_to_basic_hdl_sim_model import BasicHdlSimModelTranslateVerilogOperands
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.assignment_to_update_assignment import AssignmentToUpdateAssignment
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model.apply_io_scope_to_signal_names import ApplyIoScopeToSignalNames
def verilog_to_basic_hdl_sim_model(context):
    """Lower a parsed Verilog AST to the BasicHdlSimModel form.

    :type context: HdlContext
    :return: tuple (context, stm_outputs, name_scope)
    """
    # Resolve module declaration/definition pairs first.
    link_module_dec_def(context)

    # Build the top name scope and resolve every identifier against it.
    scope = NameScope.make_top(False)
    DiscoverDeclarations(scope).visit_HdlContext(context)
    ResolveNames(scope).visit_HdlContext(context)

    # Normalize module bodies for the simulation model.
    wrap_module_statements_to_processes(context)
    BasicHdlSimModelTranslateVerilogOperands().visit_HdlContext(context)
    VerilogTypesToBasicHdlSimModel().visit_HdlContext(context)

    # Label processes by their outputs, then rewrite assignments and IO names.
    outputs = discover_stm_outputs_context(context)
    AddUniqueLabelsToAllProcesses(scope, outputs).context(context)
    AssignmentToUpdateAssignment().visit_HdlContext(context)
    ApplyIoScopeToSignalNames().visit_HdlContext(context)

    return context, outputs, scope
| [
"nic30@seznam.cz"
] | nic30@seznam.cz |
3b7b489e1f4e1f84e4d3437544c82841ff530a2c | c3bbceee0dc30561c633731f697772f58428ce96 | /generative_models/encoder_next.py | bd104fb38efbeccf0a75750152e10a5b619e8628 | [] | no_license | t-walker-21/cs7643_final_project | 94ad0144c4e5479b8eee3f40f7e3dc4e88396387 | 88c77f804a456c243faeba56a59c0454b4b53160 | refs/heads/master | 2020-09-13T16:21:29.449397 | 2020-01-02T22:27:23 | 2020-01-02T22:27:23 | 222,839,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | """
Class to define GAN discriminator
"""
import torch
from torchvision import models
import torch.nn as nn
import numpy
class Encoder(nn.Module):
    """CNN + LSTM video encoder/decoder.

    Each frame is pushed through a pre-trained CNN backbone, projected to
    ``input_dim``, run through an LSTM over ``sequence_len`` steps, and the
    per-step hidden states are projected and decoded back into image space
    with a bilinear upsample followed by a transposed convolution.
    """

    def __init__(self, cnn_model, input_dim, hidden_dim, lstm_layers, embedding_dim, sequence_len):
        super(Encoder, self).__init__()

        # Convolutional backbone (pre-trained on ImageNet)
        self.cnn_embedding_dim = None
        if cnn_model == "vgg":
            self.cnn = models.vgg16(pretrained=True).features
            self.cnn_embedding_dim = 25088  # 512 * 7 * 7 for 224x224 input
        elif cnn_model == "resnet":
            # Fix: torchvision exposes constructors such as resnet50();
            # `models.resnet` is a submodule (not callable as a model) and
            # ResNets have no `.features` attribute.  Strip the final FC
            # layer instead and keep the pooled feature map.
            backbone = models.resnet50(pretrained=True)
            self.cnn = nn.Sequential(*list(backbone.children())[:-1])
            self.cnn_embedding_dim = 2048  # resnet50 pooled feature size

        self.fc1 = nn.Linear(self.cnn_embedding_dim, input_dim)
        self.fc2 = nn.Linear(hidden_dim, embedding_dim)
        # NOTE(review): forward() reshapes the fc2 output to (seq, 8, 5, 5),
        # which assumes embedding_dim == 200 — confirm at call sites.
        self.unpool_1 = nn.Upsample(scale_factor=5, mode='bilinear')
        self.deconv_1 = nn.ConvTranspose2d(in_channels=8, out_channels=3, kernel_size=200, stride=1)

        # Activations
        self.relu = nn.ReLU()
        self.leaky_relu = nn.LeakyReLU(0.2)

        # LSTM over the per-frame embeddings
        self.LSTM = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=lstm_layers,
            batch_first=True  # input/output shaped (batch, time_step, input_size)
        )
        self.sequence_len = sequence_len

    def forward(self, x):
        """Encode a video tensor and decode it back to image space.

        Assumes x flattens to (sequence_len, 3, 224, 224) frames and that a
        single sequence is processed per call — TODO confirm with callers.
        """
        x = x.view(-1, 3, 224, 224)
        conv_feats = self.cnn(x).view(-1, self.cnn_embedding_dim)
        embedding = self.relu(self.fc1(conv_feats))

        # Zero-initialized hidden state (None lets the LSTM allocate it).
        out, _ = self.LSTM(embedding.view(1, self.sequence_len, -1), None)

        out = self.relu(self.fc2(out))
        out = out.view(self.sequence_len, 8, 5, 5)
        out = self.unpool_1(out)
        return self.deconv_1(out)
"twalker81@gatech.edu"
] | twalker81@gatech.edu |
1b7bdd2662345090df8af628e14e3b2cb6ddf1be | e9a9dc57a57c60f1d91d88decc1a01e49435f6a4 | /traveller/migrations/0001_initial.py | 7e12232b5b7b9be0b156086414338381f72e2e3c | [] | no_license | saurabh1498/Website | ec9bf4ee77746fc3d1b38d474d1cf5180be2a033 | d3e3a10c94a0c6a541f6974e775f0f8fcf01a905 | refs/heads/master | 2022-12-22T02:59:30.923213 | 2020-09-30T15:38:29 | 2020-09-30T15:38:29 | 299,319,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # Generated by Django 3.1 on 2020-08-07 18:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the traveller app: creates the Destination table."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Destination: a travel package with image, description, price and
        # an "offer" flag (default False) used to mark discounted packages.
        migrations.CreateModel(
            name='Destination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('img', models.ImageField(upload_to='pics')),
                ('desc', models.TextField()),
                ('price', models.IntegerField()),
                ('offer', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
0faaab6550929001312d1c69b5cd7335b0237141 | e262e64415335060868e9f7f73ab8701e3be2f7b | /test_api2/api/test_wen.py | 30802898bb6b9118e58bbed576fa44ca15d6289a | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,273 | py | # from json import loads
# def read_line():
# with open("/Users/yeahmobi/Desktop/work/python/developer/test_api2/api/wenjian.yaml",'r') as f:
# n = f.readlines()
# count =0
# flag = True
# for i in n:
# i = i.strip() #去除行前后的空格
# if i.startswith("#"):
# continue
# elif i == "":
# continue
# elif i[0:3] == '"""':
# continue
# elif i.startswith("'''"):
# continue
# elif i.startswith('"""') and i.endswith('"""'):
# continue
# else:
# count += 1
#
# if i == "'''" or i == '"""':
# if flag == True:
# flag = False
# continue
# else:
# flag = True
# continue
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# if flag == True:
# flag = False
# continue
# else:
# flag = True
# continue
# else:
# count += 1
# print(count)
#
#
# read_line()
# def count_line_core(file_name): ##传入单个文件,统计行数,之后返回该文件的实际代码行数;区分utf-8、gbk有待优化
# # print('core_file_name:',file_name)
# lines_count=0
# flag=True
# # try:
# # with open(file_name,'r',encoding='gbk') as fp:
# # # print('gbk file_name:',file_name)
# # for i in fp:
# # i=i.strip()
# # if i=="'''" or i=='"""':
# # if flag==True:
# # flag=False
# # continue
# # else:
# # flag=True
# # continue
# # elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# # continue
# # elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# # if flag==True:
# # flag=False
# # continue
# # else:
# # flag=True
# # continue
# # if flag==True and i!='' and not i.startswith('#'):
# # lines_count+=1
# # #print(i)
# # if i.startswith('#-*-') or i.startswith('#coding') or i.startswith('#encoding'):
# # lines_count+=1
# # #print(i)
# # except:
# with open(file_name,'r',encoding='utf-8') as fp:
# # print('utf-8 file_name:',file_name)
# for i in fp:
# i=i.strip()
# if i=="'''" or i=='"""':
# if flag==True:
# flag=False
# continue
# else:
# flag=True
# continue
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# if flag==True:
# flag=False
# continue
# else:
# flag=True
# continue
# if flag==True and i!='' and not i.startswith('#'):
# lines_count+=1
# #print(i)
# if i.startswith('#-*-') or i.startswith('#coding') or i.startswith('#encoding'):
# lines_count+=1
# #print(i)
# return lines_count
# def count_line_core(file_name): ##传入单个文件,统计行数,之后返回该文件的实际代码行数;区分utf-8、gbk有待优化
# # print('core_file_name:',file_name)
# lines_count=0
# flag=True
# with open(file_name,'r',encoding='utf-8') as fp:
# # print('utf-8 file_name:',file_name)
# for i in fp:
# i=i.strip()
# if i=="'''" or i=='"""':
# if flag==True:
# flag=False
# continue
# else:
# flag=True
# continue
# continue
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# # elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# # if flag==True:
# # flag=False
# # continue
# # else:
# # flag=True
# # continue
#
# if flag==True and i!='' and not i.startswith('#'):
# lines_count+=1
# print(i)
# # if i.startswith('#-*-') or i.startswith('#coding') or i.startswith('#encoding'):
# # lines_count+=1
# # print(i)
# return lines_count
#
# print(count_line_core('/Users/yeahmobi/Desktop/work/python/developer/test_api2/api/wenjian.yaml'))
"""
这是一个统计代码行数的函数
"""
# def count_line(filename): #函数名
# with open(filename) as f:
# flag = True
# count = 0
# for i in f.readlines():
# i = i.strip()
# if i == '"""' or i == "'''":
# if flag == True:
# flag = False
# continue
# else:
# flag = True
# continue
#
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# elif i !='' and flag == True and i[0:1] != "#":
# count+=1
# print(i)
# return count
#
# print(count_line('/Users/yeahmobi/Desktop/work/python/developer/test_api2/api/test_wen.py'))
# class Solution:
# def reverse(self, x: int) -> int:
# s = int(str(abs(x))[::-1])
# if s.bit_length() > 31:
# return 0
# else:
# if x >=0:
# return s
# else:
# return -s
# a = Solution().reverse(120)
# print(a)
# class Solution:
# def reverse(self, x: int) -> int:
# if x >=0:
# a = int(str(x)[::-1])
# else:
# a =0- int(str(x)[:0:-1])
#
#
# if (-2**31) <a < (2**31)-1:
# return a
# else:
# return 0
# class Solution:
# def solve(self , str ):
# # write code her
# str = list(str)
# print(str)
# l,r = 0,len(str)-1
# while l <=r:
# str[l],str[r] = str[r],str[l]
# l +=1
# r -=1
# return ''.join(str)
#
# a=Solution().solve('ancd')
# print(a)
# class Solution:
# def maxLength(self, arr):
# # write code here
# l, r = 0, 0
# stark = []
# n = 0
# while r < len(arr):
#
# if arr[r] in stark:
# l += 1
# r = l
# stark.clear()
#
# else:
# stark.append(arr[l])
# r += 1
# n = max(n, len(stark))
#
# return n
# class Solution:
# def maxLength(self , arr ):
# # write code here
# res=[]
# length=0
# for i in arr:
# if i not in res:
# res+=[i]
# else:
# res = res[res.index(i)+1:] + [i]
# if length<len(res): length= len(res)
# return length
# class Solution:
# def maxLength(self , arr ):
# # write code here
# l,stark =0,[] #定义一个l存储每次遇到重复的最大值,stark存储不重复的值
# for i in arr:
# if i in stark: #如果在stark中,就开始遍历
# l = max(l,len(stark))
# st = stark.index(i) #获取当前i(重复元素)在stark中的下标
# stark = stark[st+1:] #取当前stark中重复元素后的数
# stark.append(i)
# return max(l,len(stark))
#
# arr = [2,2,3,4,3]
# a = Solution().maxLength(arr)
# print(a)
# stark = [1,2,3,4]
# # st = stark.index(4)
# stark = stark[1:]
# print(stark)
# "1AB2345CD","12345EF"
class Solution:
    def LCS(self, str1, str2):
        """Return the longest common substring of str1 and str2.

        Replaces the original heuristic (which mutated str2 while iterating
        over its initial length and crashed with IndexError when the strings
        share no substring) with the standard O(len1*len2) dynamic program.
        Returns "" when there is no common substring.
        """
        best_len = 0
        best_end = 0  # exclusive end index of the best match within str2
        # prev[i] = length of the common suffix of str1[:i] and str2[:j-1]
        prev = [0] * (len(str1) + 1)
        for j in range(1, len(str2) + 1):
            cur = [0] * (len(str1) + 1)
            for i in range(1, len(str1) + 1):
                if str1[i - 1] == str2[j - 1]:
                    cur[i] = prev[i - 1] + 1
                    if cur[i] > best_len:
                        best_len = cur[i]
                        best_end = j
            prev = cur
        return str2[best_end - best_len:best_end]
# Quick manual check of Solution.LCS (prints the common substring "2345").
a = Solution().LCS("1AB2345CD", "12345EF")
print(a)
# a = ['ancd']
# print(len(a[0]))
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
62db140e70dbb695de2e9e68f5009a80d07fb3df | c09e2f4aab27493d299c683e3e480fc6ed08ba06 | /sliding-block/sliding-block.py | e62c9b9a56d40db2d4a18d943655df8b88fcead2 | [] | no_license | rachd/game-ai | 22d630e0391396e078076456fd9f2c24685a306c | 3b864dbc185caeee02b476a10abbaee1ed9162d4 | refs/heads/master | 2020-03-17T01:23:43.487508 | 2018-05-12T14:56:14 | 2018-05-12T14:56:14 | 133,151,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | from random import shuffle
def printBoard(positions):
    """Render the nine board cells as a 3x3 grid on stdout."""
    template = """
    | %s | %s | %s |
    | %s | %s | %s |
    | %s | %s | %s |"""
    print(template % tuple(positions))
def setInitialPosition():
    """Return a randomly shuffled board as (cells, blank_index)."""
    cells = [1, 2, 3, 4, 5, 6, 7, 8, "_"]
    shuffle(cells)
    return (cells, getEmptyPosition(cells))
def checkForSolved(position):
    """True when the tiles are in order with the blank in the last cell."""
    solved = [1, 2, 3, 4, 5, 6, 7, 8, "_"]
    return position == solved
def getEmptyPosition(position):
    """Index of the blank ("_") cell on the board."""
    return position.index("_")
def _swap(state, offset):
    """Return a new state with the blank swapped against the tile at
    blank_index + offset; the input state is left untouched."""
    position = list(state[0])
    emptyPosition = state[1]
    target = emptyPosition + offset
    position[emptyPosition], position[target] = position[target], position[emptyPosition]
    return (position, target)
def moveUp(state):
    """Slide the tile above the blank down (blank moves up one row)."""
    return _swap(state, -3)
def moveDown(state):
    """Slide the tile below the blank up (blank moves down one row)."""
    return _swap(state, 3)
def moveLeft(state):
    """Slide the tile left of the blank right (blank moves one column left)."""
    return _swap(state, -1)
def moveRight(state):
    """Slide the tile right of the blank left (blank moves one column right)."""
    return _swap(state, 1)
def getMoves(state):
    """List every state reachable in a single move.

    Replaces the nine-branch position table with row/column legality
    checks; the resulting order (left, right, up, down, filtered) matches
    the original table for all nine blank positions.
    """
    row, col = divmod(state[1], 3)
    moves = []
    if col > 0:
        moves.append(moveLeft(state))
    if col < 2:
        moves.append(moveRight(state))
    if row > 0:
        moves.append(moveUp(state))
    if row < 2:
        moves.append(moveDown(state))
    return moves
# Demo: shuffle a board, display it, and show every state one move away.
state = setInitialPosition()
printBoard(state[0])
print(getMoves(state))

# Board cell indices:
# 0 1 2
# 3 4 5
# 6 7 8
"rachel.m.dorn@gmail.com"
] | rachel.m.dorn@gmail.com |
b742faff5cfd681a80942299795596c9318e2739 | ec64534d7d7a6fa36c6b2ca20f48d3e3120645cb | /Lesson-3 Draw Circle.py | a21d1aa88fae3ca64bf1f72eb72e39343abd0c19 | [] | no_license | liuxiang0/turtle_tutorial | e542faf5ca8311a580713b0254df7f80262c9bb8 | dc81af2c841524ebcd379f2dbfc6481641957001 | refs/heads/master | 2020-09-15T18:41:34.653035 | 2020-03-28T12:44:45 | 2020-03-28T12:44:45 | 223,530,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # -*- coding: utf-8 -*-
"""
Lesson 3 : Draw a circle 画圆、半圆、弧、正多边形
"""
import turtle
from turtle_conf import Tpen
def Circles(pen, radius, num):
    """Draw num concentric circles with radii radius, 2*radius, ..., num*radius.

    turtle.circle call forms, for reference:
        circle(radius)                  # full circle; a negative radius flips the side
        circle(radius, extent)          # an arc; extent=180 draws a half circle
        circle(radius, extent, steps)   # arc approximated by `steps` segments
        circle(radius, steps=6)         # a regular hexagon
    """
    for ring in range(1, num + 1):
        pen.circle(radius * ring)
win = turtle.Screen() # Create a graphics window
t = Tpen(shape="turtle", drawcolor='red', size=2)
mycolor = ["red","green","blue"]
Circles(t, 30,2)
for i in range(3):
    t.color(mycolor[i])
    t.circle(-50*(i+1),steps=(i+1)*3) # regular n-gon (3, 6, 9 sides)
t.reset() # clear the screen and start over
for i in range(3):
    t.color(mycolor[i])
    t.circle(50*(i+1),180) # half circle
t.setposition(0,0) # setpos(x,y): move back to the origin (0,0)
for i in range(3):
    t.color(mycolor[i])
    t.circle(-50*(i+1),180,(i+1)*4) # half circle drawn as an n-sided polyline
win.exitonclick()
"liuxiangxyd@163.com"
] | liuxiangxyd@163.com |
0ca3fd575744c26a79e0e06ac41f3947591794c8 | bd3446304bc1dc80653e55391e809c4502467a2f | /sort/urls.py | 6a86ac200984c94037de8b9bae203466d2298f10 | [] | no_license | marceloandriolli/sortbook | 1b36e69f20c322e82046772f183c3fff4d90191c | d3a4da050bec439e47c6c5ff3f8c4edc6ec11684 | refs/heads/master | 2021-01-22T18:43:29.704344 | 2017-03-15T21:58:53 | 2017-03-15T21:58:53 | 85,110,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.conf.urls import include, url
from sort import views
# Route table for the sort app; only the sort-books API endpoint is active.
urlpatterns = [
    # url(r'^admin/', admin.site.urls),
    # url(r'^$', include('sort.urls')),
    url(r'^api/v1/sort/books/$', views.SortBooks.as_view(), name='sort-books'),
]
| [
"marcelo@sustentaretecnologia.com"
] | marcelo@sustentaretecnologia.com |
da8a71665ff2e37323bcce4a1ca25bfb0c536ec6 | eadbbe70324801c5cbe1cd7befdc53c324cc537d | /migrations/versions/8bc3df9edeac_create_products.py | c9ea6acf0f25af40e189b0fe7717e29aa0a2a6b8 | [] | no_license | lariau/sqlalchemy | b91506c3611141253712a67c3a822dad9b371b1b | 5e8e8bf90eabc577e4074fa43bfe87a99923a196 | refs/heads/master | 2020-04-08T19:48:27.858933 | 2018-11-30T10:41:27 | 2018-11-30T10:41:27 | 159,671,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | """create products
Revision ID: 8bc3df9edeac
Revises:
Create Date: 2018-11-29 11:09:57.799293
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8bc3df9edeac'
down_revision = None  # first migration in this chain
branch_labels = None
depends_on = None


def upgrade():
    """Create the products table (integer id primary key, optional name)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('products',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    """Drop the products table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('products')
    # ### end Alembic commands ###
| [
"lariau@yahoo.fr"
] | lariau@yahoo.fr |
bf2fc497a0307d4032d16962a23b0f9096438ee0 | f468d3b1f938895c81a2c3438a93c3a05acfd9da | /download-invoices.py | 8838c4265d09411cd44999af5652dd5a5fc99b05 | [] | no_license | slyfox42/invoices-download | 10df35f64d9173b9d9dce3d955a27e5fbe417acc | 5a79870ec2a3f92d8f920885916a318b0c9dbf1a | refs/heads/master | 2022-05-01T08:41:41.038040 | 2020-12-11T15:46:22 | 2020-12-11T15:46:22 | 199,009,664 | 0 | 0 | null | 2022-03-29T22:01:52 | 2019-07-26T12:04:48 | Python | UTF-8 | Python | false | false | 4,178 | py | import email
import imaplib
import os
import shutil
from datetime import date, timedelta
from save_invoices import get_file_path, SSH_Manager
import constants as c
class Email_Manager:
    """Thin wrapper around imaplib for fetching invoice emails and saving
    their PDF attachments into a local temp folder.

    Configuration (server, credentials, mailbox, folders) comes from the
    `constants` module imported as `c`.  The IMAP connection is cached and
    is created lazily by get_connection().
    """

    # Cached IMAP4_SSL connection; None until the first get_connection().
    connection = None
    default_mailbox = c.INVOICES_MAILBOX

    def get_connection(self):
        """Return the cached IMAP connection, logging in and selecting the
        invoices mailbox on first use."""
        if not self.connection:
            self.connection = imaplib.IMAP4_SSL(c.SMTP_SERVER)
            self.connection.login(c.EMAIL_ADDRESS,
                                  c.EMAIL_PASSWORD)
            self.connection.select(
                self.default_mailbox, readonly=False
            )
        return self.connection

    def close_connection(self):
        """Close and forget the cached connection, if any."""
        if self.connection:
            self.connection.close()
            self.connection = None

    def save_attachments(self, messages):
        """Write every PDF attachment found in *messages* into c.TMP_FOLDER
        and return the list of local file paths.

        NOTE(review): the connection opened/closed here is never used —
        the already-fetched message objects are walked locally; confirm
        before removing.
        """
        print("Saving message attachments...")
        self.get_connection()
        if not os.path.exists(c.TMP_FOLDER):
            os.makedirs(c.TMP_FOLDER)
        attachments_paths = []
        for msg in messages:
            for part in msg.walk():
                # Skip container parts and parts without a disposition header.
                if part.get_content_maintype() == "multipart":
                    continue
                if part.get("Content-Disposition") is None:
                    continue
                filename = part.get_filename()
                # Only PDF attachments are of interest.
                if not filename or not filename.endswith('.pdf'):
                    continue
                att_path = os.path.join(c.TMP_FOLDER, filename)
                attachments_paths.append(att_path)
                # Don't rewrite a file that was already saved under this name.
                if not os.path.isfile(att_path):
                    fp = open(att_path, "wb")
                    fp.write(part.get_payload(decode=True))
                    fp.close()
        self.close_connection()
        print(
            f"{'Attachments' if len(attachments_paths) > 1 else 'Attachment'} saved correctly."
        )
        return attachments_paths

    def fetch_emails(
        self
    ):
        """Search the mailbox for mail from c.FROM_ADDRESS within the last
        21 days and return the messages that carry an attachment part.

        Raises Exception when the IMAP search result is not "OK".
        """
        print("> Fetching emails...")
        # IMAP SINCE expects dates formatted like 01-Jan-2020.
        since = (date.today() - timedelta(days=21)).strftime("%d-%b-%Y")
        self.get_connection()
        (result, messages) = self.connection.search(
            None, f'(FROM "{c.FROM_ADDRESS}" SINCE "{since}")'
        )
        if result == "OK":
            id_list = messages[0].split()
            messages = []
            for el in id_list:
                (result, data) = self.connection.fetch(el, "(RFC822)")
                msg = email.message_from_bytes(data[0][1])
                # NOTE(review): the message is appended once per qualifying
                # part, so a mail with several attachments appears several
                # times in the result — confirm whether that is intended.
                for part in msg.walk():
                    if part.get_content_maintype() == "multipart":
                        continue
                    if part.get("Content-Disposition") is None:
                        continue
                    messages.append(msg)
            self.close_connection()
            if not len(messages):
                print("> No messages with attachment(s) found.")
                return messages
            print(f'Fetched {len(id_list)} messages.')
            return messages
        else:
            self.close_connection()
            raise Exception(
                f"Connection Error searching for emails. Result: {result}"
            )
def save_invoices():
    """Fetch recent invoice emails, save their PDF attachments locally,
    and copy each saved file to the remote host over SCP.

    Errors are printed (best-effort behavior preserved); the SSH
    connection is always closed in the finally block.
    """
    email_manager = Email_Manager()
    ssh_manager = SSH_Manager()
    saved = []
    try:
        messages = email_manager.fetch_emails()
        if len(messages):
            attachments = email_manager.save_attachments(messages)
            attachments = list(set(attachments))  # de-duplicate repeated paths
            file_paths = [get_file_path(attachment) for attachment in attachments]
            for invoice_path, new_file_path in zip(attachments, file_paths):
                invoice = ssh_manager.scp_copy(invoice_path, new_file_path)
                saved.append(invoice)
        # Drop falsy results (failed copies) before reporting.
        saved = [x for x in saved if x]
        invoices_list = '\n'.join(saved)
        print(f'Imported invoices:\n{invoices_list}' if len(saved) else 'No invoices imported.')
        # Fix: rmtree raised FileNotFoundError when no attachments were
        # ever saved (the temp folder is only created on demand).
        if os.path.isdir(c.TMP_FOLDER):
            shutil.rmtree(c.TMP_FOLDER)
    except Exception as e:
        print(e)
    finally:
        # Fix: the duplicate close_connection() inside the try block was
        # removed; this finally clause already guarantees the close.
        ssh_manager.close_connection()


if __name__ == "__main__":
    save_invoices()
| [
"federico.obialero@gmail.com"
] | federico.obialero@gmail.com |
a29401ba0ee34428fc57b67d36d853d772f7add2 | efc7433020b404f19bd46372000032797d361a0c | /worker/constants.py | 1369ce497ba4f755a4e87e0dc7d8eb5c8ed57edf | [] | no_license | dimitrisamp/joinflyline | 2819571c867434ec7b7e859a21cd278bb2bde501 | dff0ab38244e6e2876443995fe99ad9959fc3db4 | refs/heads/develop | 2022-12-21T13:11:19.364128 | 2020-02-14T13:37:30 | 2020-02-14T13:37:30 | 242,969,405 | 0 | 0 | null | 2022-12-12T03:15:37 | 2020-02-25T10:12:46 | Vue | UTF-8 | Python | false | false | 327 | py | DEALS_DAYS = 30
# Retry/timing knobs for the flight-search worker.
CHECK_FLIGHT_TRY_COUNT = 20  # presumably max polls of check_flights — TODO confirm in worker code
SEARCH_MAX_TRIES = 10  # max attempts for a search request
SEARCH_DELAY = 1  # delay between search retries (units not stated here — likely seconds; confirm)
CHECKER_SLEEP_INTERVAL = 1
MAX_CHECK_FLIGHT_DELAY = 8
# Kiwi (Apigee) API endpoints.
SEARCH_API_URL = "https://kiwicom-prod.apigee.net/v2/search"
CHECK_FLIGHTS_API_URL = "https://kiwicom-prod.apigee.net/v2/booking/check_flights"
# Inclusive bounds on the trip length at the destination.
MIN_NIGHTS_IN_DEST = 1
MAX_NIGHTS_IN_DEST = 7
| [
"bladeofdima@gmail.com"
] | bladeofdima@gmail.com |
889561373222e776f285c46bed462a03db1dce83 | d5f8ca3c13f681d147b7614f1902df7ba34e06f9 | /CelebA/main.py | 1a920e6f5ac9b064598be6c2ab89096536d2adde | [] | no_license | hhjung1202/OwnAdaptation | 29a6c0a603ab9233baf293096fb9e7e956647a10 | 50805730254419f090f4854387be79648a01fbb4 | refs/heads/master | 2021-06-25T22:31:15.437642 | 2020-11-26T18:19:55 | 2020-11-26T18:19:55 | 176,670,379 | 1 | 0 | null | 2020-06-11T07:35:55 | 2019-03-20T06:36:19 | Python | UTF-8 | Python | false | false | 6,344 | py | import argparse
import torch
from torch.autograd import Variable
from torchvision.utils import save_image
import numpy as np
from model import *
import os
import torch.backends.cudnn as cudnn
import time
import utils
import dataset
import math
# Command-line interface: training hyper-parameters and environment options.
parser = argparse.ArgumentParser(description='PyTorch Cycle Domain Adaptation Training')
parser.add_argument('--sd', default='CelebA', type=str, help='source dataset')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--epoch', default=164, type=int, metavar='N', help='number of total epoch to run')
parser.add_argument('--decay-epoch', default=30, type=int, metavar='N', help='epoch from which to start lr decay')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=1e-2, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--img-size', type=int, default=32, help='input image width, height size')
parser.add_argument('--dir', default='./', type=str, help='default save directory')
parser.add_argument('--gpu', default='0', type=str, help='Multi GPU ids to use.')

# Global training state shared by main/train/test below.
best_prec_result = torch.tensor(0, dtype=torch.float32)  # best test accuracy seen so far

args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(args.seed)

# Tensor aliases pick the CUDA variants when a GPU is visible.
cuda = True if torch.cuda.is_available() else False
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor

criterion = torch.nn.CrossEntropyLoss()
def main():
    """Train the classifier: build loaders and model state, optionally
    resume from the latest checkpoint, then run the epoch loop with a
    step learning-rate schedule and per-epoch checkpointing."""
    global args, best_prec_result
    start_epoch = 0
    utils.default_model_dir = args.dir
    start_time = time.time()

    train_loader, test_loader = dataset_selector(args.sd)
    state_info = utils.model_optim_state_info()
    state_info.model_init(args=args, num_class=4000)
    state_info.model_cuda_init()
    state_info.weight_init()
    state_info.optimizer_init(args)

    if cuda:
        print("USE", torch.cuda.device_count(), "GPUs!")
        cudnn.benchmark = True

    checkpoint = utils.load_checkpoint(utils.default_model_dir, is_last=True)
    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        best_prec_result = checkpoint['Best_Prec']
        state_info.load_state_dict(checkpoint)

    # Fix: resume from start_epoch; the original always restarted at 0,
    # so the checkpoint's epoch counter was loaded but never used.
    for epoch in range(start_epoch, args.epoch):
        # Step schedule: lr, lr/10, lr/100 at epochs 80 and 122.
        if epoch < 80:
            lr = args.lr
        elif epoch < 122:
            lr = args.lr * 0.1
        else:
            lr = args.lr * 0.01
        for param_group in state_info.optimizer.param_groups:
            param_group['lr'] = lr

        train(state_info, train_loader, epoch)
        prec_result = test(state_info, test_loader, epoch)

        if prec_result > best_prec_result:
            best_prec_result = prec_result
            filename = 'checkpoint_best.pth.tar'
            utils.save_state_checkpoint(state_info, best_prec_result, filename, utils.default_model_dir, epoch)
            utils.print_log('Best Prec : {:.4f}'.format(best_prec_result.item()))

        filename = 'latest.pth.tar'
        utils.save_state_checkpoint(state_info, best_prec_result, filename, utils.default_model_dir, epoch)

    now = time.gmtime(time.time() - start_time)
    utils.print_log('Best Prec : {:.4f}'.format(best_prec_result.item()))
    utils.print_log('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
    print('done')
def train(state_info, train_loader, epoch):
    """Run one training epoch: forward pass, cross-entropy loss, SGD step.

    Logs the running loss and accuracy every 10 batches through
    utils.print_log and stdout.
    """
    utils.print_log('Type, Epoch, Batch, loss, total_loss, Percent')
    state_info.set_train_mode()
    correct = torch.tensor(0, dtype=torch.float32)
    total = torch.tensor(0, dtype=torch.float32)
    train_loss = 0
    for it, [x, y] in enumerate(train_loader):

        x, y = to_var(x, FloatTensor), to_var(y, LongTensor)
        output = state_info.forward(x)

        # Train: standard zero-grad / backward / step sequence.
        state_info.optimizer.zero_grad()
        loss = criterion(output, y)
        loss.backward()
        state_info.optimizer.step()

        # Log Print: accumulate loss and accuracy counters.
        train_loss += loss.data.item()
        total += float(y.size(0))
        _, predicted = torch.max(output.data, 1)
        correct += float(predicted.eq(y.data).cpu().sum())
        if it % 10 == 0:
            utils.print_log('Train, {}, {}, {:.6f}, {:.4f}, {:.2f}'
                  .format(epoch, it, loss.item(), train_loss, 100.*correct / total))
            print('Train, {}, {}, {:.6f}, {:.4f}, {:.2f}'
                  .format(epoch, it, loss.item(), train_loss, 100.*correct / total))

    utils.print_log('')
def test(state_info, test_loader, epoch):
    """Evaluate on the test set and return the accuracy in percent.

    NOTE(review): runs without torch.no_grad(), so activations keep
    autograd buffers during evaluation — confirm whether intended.
    """
    utils.print_log('Type, Epoch, Acc')
    state_info.set_test_mode()
    correct = torch.tensor(0, dtype=torch.float32)
    total = torch.tensor(0, dtype=torch.float32)
    for it, [x, y] in enumerate(test_loader):

        x, y = to_var(x, FloatTensor), to_var(y, LongTensor)
        output = state_info.forward(x)

        # Accumulate accuracy statistics over the whole test set.
        total += float(y.size(0))
        _, predicted = torch.max(output.data, 1)
        correct += float(predicted.eq(y.data).cpu().sum())

    utils.print_log('Test, {}, {:.2f}'.format(epoch, 100.*correct / total))
    print('Test, {}, {:.2f}'.format(epoch, 100.*correct / total))
    utils.print_log('')

    return 100.*correct / total
def dataset_selector(data):
    """Return (train_loader, test_loader) for the named dataset.

    Fix: an unknown name now raises ValueError instead of silently
    returning None (which deferred the failure to an opaque unpacking
    error at the call site).
    """
    if data == 'mnist':
        return dataset.MNIST_loader(img_size=args.img_size)
    elif data == 'svhn':
        # NOTE(review): ignores args.img_size, unlike the other loaders — confirm intended.
        return dataset.SVHN_loader(img_size=32)
    elif data == "usps":
        return dataset.usps_loader(img_size=args.img_size)
    elif data == "mnistm":
        return dataset.MNIST_M_loader(img_size=args.img_size)
    elif data == "cifar10":
        return dataset.cifar10_loader(args)
    elif data == "CelebA":
        return dataset.CelebA_loader(image_size=args.img_size, batch_size=args.batch_size)
    raise ValueError("Unknown dataset: {}".format(data))
def to_var(x, dtype):
    """Cast *x* to *dtype* and wrap it in an autograd Variable."""
    converted = x.type(dtype)
    return Variable(converted)

if __name__=='__main__':
    main()
"hhjung1202@naver.com"
] | hhjung1202@naver.com |
a11db09c5b2341a7b1bc6b2d0fc94831775e45c2 | 4395d0a9cca389a6544aecb93466242941d3ba3f | /process/split_data.py | ded216064433a532046c992685b17a8de63a9790 | [
"MIT"
] | permissive | keshav1990/HeadSwap | da884b575d061bf4765c48778bba86886015164b | a60a696f208d352bcd8746e5e58ab70e90afe334 | refs/heads/main | 2023-08-22T20:02:33.930866 | 2022-12-28T16:37:35 | 2022-12-28T16:37:35 | 591,616,056 | 16 | 8 | MIT | 2023-01-21T09:45:03 | 2023-01-21T09:45:02 | null | UTF-8 | Python | false | false | 721 | py | import numpy as np
import os
import random
def split_data(id_path, train_save_path, val_save_path, val_count=100):
    """Randomly split an id->info dict into validation/train sets and save
    both as pickled .npy files.

    :param id_path: path to a dict saved with numpy (loaded with allow_pickle)
    :param train_save_path: destination for the training split
    :param val_save_path: destination for the validation split
    :param val_count: number of ids held out for validation (default 100,
        matching the previously hard-coded split size)
    """
    idinfo = np.load(id_path, allow_pickle=True).item()
    keys = list(idinfo.keys())
    random.shuffle(keys)
    val_id_info = {k: idinfo[k] for k in keys[:val_count]}
    train_id_info = {k: idinfo[k] for k in keys[val_count:]}
    np.save(train_save_path, train_id_info)
    np.save(val_save_path, val_id_info)
if __name__ == "__main__":
    id_path = '/HeadSwap/wav2lip-headswap/info/all_id.npy'
    train_path = '/HeadSwap/wav2lip-headswap/info/train_id.npy'
    # Fix: the leading slash was missing, silently making this one path
    # relative while its two siblings are absolute.
    val_path = '/HeadSwap/wav2lip-headswap/info/val_id.npy'
    split_data(id_path, train_path, val_path)
"jiahuili@jh-mac.local"
] | jiahuili@jh-mac.local |
c5000324a37133b8e3e2bad62736b29664f711fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03659/s495769033.py | 54ee8453e730f35341ffac0335267d937fc39396 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import numpy as np
# AtCoder-style task: split the array into two non-empty halves so the
# absolute difference of their sums is minimal.
n = int(input())
a = np.array(list(map(int, input().split())))
# Prefix sums; the final entry equals the total of a.
cumsum_a = a.cumsum()
sum_a = cumsum_a[-1]
# Splitting after index i gives halves summing to cumsum_a[i] and
# sum_a - cumsum_a[i]; their difference is |sum_a - 2*cumsum_a[i]|.
ans = 2 * 10**9
for i in range(n-1):
    ans = min(ans, abs(sum_a - 2*cumsum_a[i]))
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d0dd8c0f79d16b37610b0f645641720c3a87dc5b | 347c70d4851b568e03e83387f77ae81071ab739e | /fn_splunk_integration/tests/test_function_utils.py | 0c9e09a2c07c9c3e0f49d16aed5e0ed0666a3c55 | [
"MIT"
] | permissive | neetinkandhare/resilient-community-apps | 59d276b5fb7a92872143ce2b94edd680738693ce | 3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f | refs/heads/master | 2021-12-27T09:05:36.563404 | 2021-09-29T13:04:56 | 2021-09-29T13:04:56 | 159,804,866 | 1 | 0 | MIT | 2021-08-03T19:45:45 | 2018-11-30T10:07:32 | Python | UTF-8 | Python | false | false | 1,628 | py |
#
# Unit tests for fn_splunk_integration/components/function_utils.py
#
# 100% code coverage
#
#
import unittest
import sys
sys.path.append("../fn_splunk_integration/util")
sys.path.append("fn_splunk_integration/util")
from function_utils import make_query_string
from function_utils import make_item_dict
from function_utils import ItemDataError
def test_query_string():
    """%paramN% placeholders must be substituted in order to build the query."""
    print("Testing query string substitution....")
    template = "index = %param1% source=%param2% AND %param3%=%param4%"
    values = ["_internal", "*splunkd*", "clientip", "127.0.0.1"]
    result = make_query_string(template, values)
    assert result == "index = _internal source=*splunkd* AND clientip=127.0.0.1"
def test_make_item_dict():
    """Exercise make_item_dict: normal pairs, bad arity, null key, null value.

    Fix: the final case used a bare ``except:`` which would also swallow
    SystemExit/KeyboardInterrupt; narrowed to ItemDataError for consistency
    with the null-key case above.
    """
    print("Testing make_item_dict")
    params = ["field1", "value1",
              "field2", "value2",
              "field3", "value3"]
    item_dict = make_item_dict(params)
    assert item_dict["field1"] == "value1" and item_dict["field2"] == "value2" and item_dict["field3"] == "value3"
    # Test wrong number of params: an odd-length list must raise ItemDataError
    try:
        make_item_dict(["p1", "p2", "p3"])
        assert False
    except ItemDataError:
        assert True
    # Test null key: the None-keyed pair is silently dropped
    try:
        item_dict = make_item_dict(["p1", "p2",
                                    None, "p4",
                                    "p5", "p6"])
        assert item_dict["p1"] == "p2" and item_dict["p5"] == "p6"
        assert "p4" not in item_dict
    except ItemDataError:
        assert False
    # Test null value: a None value is stored and must be falsy
    try:
        item_dict = make_item_dict(["p1", None])
        assert not item_dict["p1"]
    except ItemDataError:
        assert False
| [
"hpyle@us.ibm.com"
] | hpyle@us.ibm.com |
c92531920c42a17859e35c08af588e644af7a251 | 5990d7eebdabde9635d94ac3e402e0197454fc48 | /prismEphem.py | 9e3243c09bd618a6d0e6a65388d53de8ef2531fe | [] | no_license | moxuse/prismEphem.py | ec182b3a91ffda485e21423044e41d52cccb6403 | df23b9ad9ecf2136f288cfa797b899789e3258be | refs/heads/master | 2016-09-16T03:50:27.259382 | 2011-10-24T03:18:53 | 2011-10-24T03:18:53 | 2,574,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import datetime
import pytz
import urllib2
import chardet
import re
import ephem
def readFile():
    """Read the cached TLE file and return its raw lines (newlines kept).

    Improvements over the original: the file handle is managed by a
    ``with`` block (closed even on error), and the no-op loop that
    reassigned each line to itself has been removed.
    """
    with open('tleData.txt', 'r') as tleFile:
        return tleFile.readlines()
def six2ten(six):
    """Convert a 'deg:min:sec' sexagesimal string into a decimal string.

    NOTE(review): for a negative degree field the original formula
    -(t0 - t1 - t2) yields a *positive* magnitude (the sign is dropped);
    that behavior is reproduced here unchanged.
    """
    parts = six.split(':')
    whole = float(parts[0])
    mins = float(parts[1]) * 0.0166666666667      # 1/60
    secs = float(parts[2]) * 0.000277777777778    # 1/3600
    if whole >= 0:
        return str(whole + mins + secs)
    if whole < 0:
        return str(-(whole - mins - secs))
# Module-level driver (Python 2): fetch the latest TLE set for the PRISM
# satellite from CelesTrak, cache it to tleData.txt, then compute and print
# the satellite's current sub-longitude, sub-latitude and elevation.
longtitude = "nil"
latitude = "nil"
elevation = "nil"
line1 = ""
line2 = ""
line3 = ""
#fetch NORAD site
url = 'http://celestrak.com/NORAD/elements/amateur.txt'
htmlOpener = urllib2.urlopen(url)
# Bail out of the whole script on any non-200 response.
if(htmlOpener.code != 200): exit(0)
src = htmlOpener.read().splitlines()
if len(src) > 0:
	# Scan the catalogue for the PRISM entry; TLE sets are 3 consecutive
	# lines (name, line 1, line 2), so the two lines after the name line
	# are written to the cache file.
	count = 0
	tleFile = open('tleData.txt', 'w')
	# NOTE(review): the file is opened for writing unconditionally, so if
	# no 'PRISM' line is found the cache is truncated to empty and never
	# closed here; also close() runs inside the match branch, so a second
	# match would attempt to write to a closed file — confirm intent.
	for line in src:
		if line.find('PRISM') != -1:
			line1 = 'PRISM'
			tleFile.write(line1 + "\n")
			line2 = src[count+1]
			tleFile.write(line2 + "\n")
			line3 = src[count+2]
			tleFile.write(line3)
	# print line1
	# print line2
	# print line3
			tleFile.close()
		count+=1
else:
	# Empty download: fall back to the previously cached TLE file.
	ocp = readFile()
	line1 = ocp[0]
	line2 = ocp[1]
	line3 = ocp[2]
#timezone pref use UTC now
utcTime = datetime.datetime.utcnow()
sat = ephem.readtle(line1, line2, line3)
timeNow = utcTime.strftime("%Y/%m/%d %H:%M") #we don't concern in seconds
#print timeNow
sat.compute( timeNow )
# six2ten converts ephem's 'deg:min:sec' string output to decimal strings.
longtitude = six2ten(str(sat.sublong))
latitude = six2ten(str(sat.sublat))
elevation = sat.elevation
print longtitude
print latitude
print elevation
"moxuse@gmail.com"
] | moxuse@gmail.com |
478be1a4393f7a842192c13ca3891affe3bfd213 | ac7b02f4e7aa751ee24ed8933276990c801e1e64 | /src/model/ndb_data.py | 3531079bcbbd60baa4bf9918d747393d594b8c6f | [] | no_license | emilia-sokol/gae_interpreter | 563ff02008229679933412ef7defe4f10f1bf02f | f1ba7feb46e6e072844859622b30e30a1cf0694a | refs/heads/master | 2022-12-18T16:49:49.339302 | 2020-09-16T17:55:17 | 2020-09-16T17:55:17 | 260,986,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from google.appengine.ext import ndb
# define data model
class Data(ndb.Expando):
    """Datastore entity for one incoming reading (key/type/value triple).

    Expando model: additional properties may be attached dynamically.
    """
    # we store all data as key, type and value. key should be unique (but not necessarily, maybe key will be id of
    # user that sends data or/and id of a device that sends data). by type we distinguish what kind of data is this
    # e.g. temperature, text (this will be used possibly by mapper and reducer functions)
    # identifier of the sender (user and/or device) — not enforced unique
    id = ndb.StringProperty(default="")
    # user_id = ndb.StringProperty(default="")
    # device_id = ndb.StringProperty(default="")
    # category of the reading, e.g. 'temperature' or 'text'
    type = ndb.StringProperty(default="")
    # we do most data processing on values, cannot be null
    value = ndb.StringProperty(default="")
    # time when data was added to the system
    time = ndb.DateTimeProperty(auto_now_add=True)
| [
"emilia.sokol94@gmail.com"
] | emilia.sokol94@gmail.com |
f48a3d35ae9058c2debe98e42131250fe2204c6d | 8af54cf9b2f7edce0b5aa4fea5117ff4dc0ae4bf | /src/urls.py | e3e531e077ad7e7dd5b2bee85a323d7d3568877d | [] | no_license | chemalle/fopag | d9a3434ce83ebf215d54be825a531763f954b5e7 | d6d89aed7bed06bb0b88cac5efd037db4ed8f6b4 | refs/heads/master | 2020-03-10T08:15:19.225631 | 2018-04-12T16:30:34 | 2018-04-12T16:30:34 | 129,281,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | """fopag URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.auth import views
# URL routing: admin site, the 'folha' app at the root, and the built-in
# auth login/logout views; media files are served via static() in dev.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'', include('folha.urls')),
    url(r'^accounts/login/$', views.login, name='login'),
    # next_page sends the user back to the home page after logging out
    url(r'^accounts/logout/$', views.logout, name='logout', kwargs={'next_page': '/'}),
] + static (settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"chemalle@econobilidade.com"
] | chemalle@econobilidade.com |
61b3fc6cd55a2e79b4b00649321e1ced94c0e15e | e880fa35999c72c2a318679b35a6227906b32246 | /tests/test_lock.py | ff3177d9963915c7abf00e785e9855d7a923fd88 | [] | no_license | julianhille/play | 3f47837804dd35f4a0ef4698d3331cbabd15fd0f | dd35392b82e32899f5e53cc7745ca97fe64af45c | refs/heads/develop | 2021-01-18T18:43:51.056095 | 2016-03-09T23:22:41 | 2016-03-09T23:22:41 | 43,735,739 | 1 | 1 | null | 2016-03-09T23:22:42 | 2015-10-06T07:21:11 | Python | UTF-8 | Python | false | false | 874 | py | from play import lock
from pytest import raises
from bson import ObjectId
def test_lock_exception(mongodb):
    """Taking a lock that is already held must raise LockException naming the id."""
    doc_id = 'ddff19b92e21e1560a7dd000'
    collection = mongodb.directories
    with lock.lock(collection, doc_id):
        with raises(lock.LockException) as excinfo:
            with lock.lock(collection, doc_id):
                pass
    assert 'ddff19b92e21e1560a7dd000' in str(excinfo.value)
def test_lock(mongodb):
    """A _lock marker must exist exactly for the duration of the context."""
    directory_id = ObjectId('ddff19b92e21e1560a7dd000')
    collection = mongodb.directories

    def fetch():
        return collection.find_one({'_id': directory_id})

    assert '_lock' not in fetch(), \
        'There should be no lock'
    with lock.lock(collection, directory_id):
        assert '_lock' in fetch(), \
            'Lock should be set'
    assert '_lock' not in fetch(), \
        'Lock should be released'
| [
"j.hille484@gmail.com"
] | j.hille484@gmail.com |
b4a462eb1065b774dfb97d19519129fefdbe76c0 | 46841318b6701c314123e9ec5088a98c210e784f | /rest-tjlug/sphinx-test/conf.py | cba371c9d9d7c641c24e633de221acf59ef17767 | [] | no_license | ivivisoft/moonranger.github.com | c54330c954bb1b0ffc2b6542a2a6e104ccdcec64 | f988f2f86d507e641a43be2c13b8a2c6ed172667 | refs/heads/master | 2020-12-25T23:57:57.684523 | 2013-01-14T05:36:07 | 2013-01-14T05:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,138 | py | # -*- coding: utf-8 -*-
#
# Closure And Python Scoping Rule documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 15 14:53:29 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Closure And Python Scoping Rule'
copyright = u'2011, Jerry Peng'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ClosureAndPythonScopingRuledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',

    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '11pt',

    # Additional stuff for the LaTeX preamble.
    # Raw string: the preamble is full of backslash LaTeX commands such as
    # \usepackage; in a non-raw literal '\u' is an invalid unicode escape
    # on Python 3 (SyntaxError). The string content is byte-identical.
    'preamble': r'''
\usepackage{xeCJK}
\setCJKmainfont[BoldFont=SimHei, ItalicFont=KaiTi_GB2312]{SimSun}
\setCJKmonofont[Scale=0.9]{Droid Sans Mono}
\setCJKfamilyfont{song}[BoldFont=SimSun]{SimSun}
\setCJKfamilyfont{sf}[BoldFont=SimSun]{SimSun}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ClosurePythonScopingRule.tex', u'Closure And Python Scoping Rule',
u'Jerry Peng', 'howto'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'closureandpythonscopingrule', u'Closure And Python Scoping Rule Documentation',
[u'Jerry Peng'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ClosureAndPythonScopingRule', u'Closure And Python Scoping Rule Documentation',
u'Jerry Peng', 'ClosureAndPythonScopingRule', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"pr2jerry@gmail.com"
] | pr2jerry@gmail.com |
97840c304aa4d2f2fe74d0ff7a6a068cdac7ae6f | 71e1ba58e9389744a75f3c3907e18a38a8a03f8a | /inpuy_whileloop/ex7.4.py | a10b8011affee3fb4a4e4edf9d7e8049ba48adc3 | [] | no_license | Rifat951/PythonExercises | e4c306edec418aa83aca928de1bc292cf49b0dd5 | 7cc07c1989d1b01e4e04edd29bdbfe8f1d1b3296 | refs/heads/main | 2023-09-01T09:28:10.530203 | 2023-08-30T02:52:25 | 2023-08-30T02:52:25 | 360,920,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # 7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of
# pizza toppings until they enter a 'quit' value As they enter each topping,
# print a message saying you’ll add that topping to their pizza
loop_flag = True
pizza = []
while loop_flag:
pizza_topping = input("Enter pizza topping : \n")
if pizza_topping == "quit":
loop_flag = False
print("Code has stopped working....")
else:
print("Continue adding toppings... \n")
pizza.append(pizza_topping)
print("\n")
for ingredients in pizza:
print(ingredients,end=" ")
| [
"noreply@github.com"
] | noreply@github.com |
4747167c7f1782dac3e0d0dab835efb9ff76fdf7 | eb841ad2854cbcb60aa75b1080573da6ae8e2a1c | /Evolife/Other/PathFinder/Walker.py | fa60ba6bb0377e643b1c56ffbf12f5f049dd5c17 | [] | no_license | tomMoral/pjld | a5aef7201a1ed5e666c9b71b9edaa77e00e8ddd2 | 436b027f1ae55a168ec96db98580ebcf3c9bcf34 | refs/heads/master | 2020-06-04T07:53:50.789582 | 2014-03-13T14:32:30 | 2014-03-13T14:32:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,518 | py | ##############################################################################
# EVOLIFE www.dessalles.fr/Evolife Jean-Louis Dessalles #
# Telecom ParisTech 2013 www.dessalles.fr #
##############################################################################
##############################################################################
# Walker #
##############################################################################
""" Path finding:
Individuals walk, and paths emerge
"""
import sys
from time import sleep
import random
import cmath
sys.path.append('..')
sys.path.append('../../..')
import Evolife.Scenarii.Parameters as EPar
import Evolife.Ecology.Observer as EO
import Evolife.Ecology.Individual as EI
import Evolife.Ecology.Group as EG
import Evolife.Ecology.Population as EP
import Evolife.QtGraphics.Evolife_Window as EW
import Evolife.Tools.Tools as ET
import Landscapes
print ET.boost() # significWalkerly accelerates python on some platforms
# two functions to convert from complex numbers into (x,y) coordinates
c2t = lambda c: (int(round(c.real)),int(round(c.imag))) # converts a complex into a couple
t2c = lambda (x,y): complex(x,y) # converts a couple into a complex
#################################################
# Aspect of Walkers and pheromone on display
#################################################
# WalkerAspect = ('black', 6)
# PheromonAspect = (17, 2)
WalkerAspect = ('white', 1)
PheromonAspect = ('white', 4)
class Walker_Observer(EO.Observer):
    """ Stores global variables for observation.

    Collects per-step display changes (agent/pheromone positions) and feeds
    them to the Evolife GUI on request.
    """
    def __init__(self, Scenario):
        EO.Observer.__init__(self, Scenario)
        self.CurrentChanges = []    # stores temporary changes
        self.StepId = 0
        self.recordInfo('CurveNames', [(3, 'Year (each Walker moves once a year on average)')])

    def record(self, Info):
        # stores current changes
        # Info is a couple (InfoName, Position) and Position == (x,y)
        self.CurrentChanges.append(Info)

    def get_info(self, Slot):
        " this is called when display is required "
        # 'PlotOrders' drives the year curve; everything else is delegated.
        if Slot == 'PlotOrders': return [(3, (self.StepId, self.StepId))]   # curve
        #elif Slot == 'PlotOrders': return [(3, (self.StepId, int(self.Statistics['Food']['average'][0])))]
        # curve
        else: return EO.Observer.get_info(self, Slot)

    def get_data(self, Slot):
        # 'Positions' hands over the accumulated changes and resets the buffer.
        if Slot == 'Positions':
            CC = self.CurrentChanges
            # print CC
            self.CurrentChanges = []
            return tuple(CC)
        else: return EO.Observer.get_data(self, Slot)
class LandCell(Landscapes.LandCell_3D):
    """ Defines what's in one location on the ground.

    The cell's Content (inherited accessor) represents the pheromone level.
    """
    # Cell content is defined as Pheromone
    def __init__(self, InitContent=0):
        self.VoidCell = 0
        self.setContent(InitContent, Record=False)

    def p(self, addendum=0):
        # Read (and optionally add to) the pheromone level, capped at
        # the 'Saturation' parameter.
        if addendum: self.setContent(min(self.Content() + addendum, Gbl.Parameter('Saturation')))
        return self.Content()

    def evaporate(self):
        # Pheromone evaporation should be programmed about here
        # Returns True when the cell has dropped to (near) empty and was cleaned.
        Pher = self.p()
        if Pher > 0:
            # exponential decay: lose Evaporation/1000 of the level per step
            self.p(-Pher * Gbl.Parameter('Evaporation')/1000.0) # Attractive Pheromone
            if self.p() <= 1:
                self.clean()
                return True
        return False
class Landscape(Landscapes.Landscape_3D):
""" A 2-D grid with cells that contains food or pheromone
"""
def Modify(self, (x,y), Modification):
self.Ground[x][y] += Modification # uses addition as redefined in LandCell
return self.Ground[x][y]
def pheromone(self, Pos, delta=0):
if delta: self.ActiveCells.append(Pos)
return self.Cell(Pos).p(delta) # adds attractive pheromone
def Altitude(self, Pos):
return self.Cell(Pos).Altitude
def evaporation(self):
for Pos in self.ActiveCells.list():
if self.Cell(Pos).evaporate(): # no pheromone left
# call 'erase' for updating display when there is no pheromone left
self.erase(Pos) # for ongoing display
self.ActiveCells.remove(Pos)
def erase(self, Pos):
" says to Observer that there is no pheromone left at that location "
Observer.record(('P%d_%d' % Pos, Pos + (-3,))) # negative colour means erase from display
def update_(self):
# scans ground for food and pheromone - May be used for statistics
Food = []
Pher = []
for (Position, Cell) in self.travel():
if Cell.Pheromone: Pher.append((Pos, Cell.p()))
return Pher
class Walker(EI.Individual):
    """ Defines individual agents.

    A Walker shuttles between a start point and a target, sniffing for
    pheromone and laying it down; shorter paths get reinforced more.
    """
    def __init__(self, Scenario, IdNb, Start=(0,0), Target=(100,100)):
        EI.Individual.__init__(self, Scenario, ID=IdNb)
        self.Start = Start  # Starting position
        self.Target = Target
        NoPoints = Gbl.Parameter('NoStartpoints')
        if NoPoints > 1:
            # spread start/target points vertically according to the agent's number
            Y = int(IdNb[1:]) % NoPoints
            self.Start = (Start[0], 5 + int(Y * float(Land.Height - 10)/(NoPoints-1)))
            self.Target = (Target[0], Land.Height -5 - int(Y * float(Land.Height - 10)/(NoPoints-1)))
        # print self.Start, self.Target
        self.location = self.Start
        self.Path = []  # list of (position, climb, low-pheromone-flag) steps
        self.PreviousPathLength = 0
        self.Deposit = Gbl.Parameter('Deposit') # quantity of pheromone laid down by the agent
        self.Action = 'Move'    # agents moves toward target, or back home when value is 'BackHome'
        self.moves()

    def Sniff(self):
        " Looks for the next place to go "
        # Score every neighbouring cell by (noisy) direction to target,
        # pheromone level and slope; return the best-scoring one.
        DirectionToTarget = cmath.phase(t2c(self.Target) - t2c(self.location)) # argument
        DirectionToTarget = ET.noise_add(DirectionToTarget, 0.02*cmath.pi * Gbl.Parameter('Noise'))
        # Distance0 = abs(DirectionToTarget)
        Neighbourhood = Land.neighbours(self.location, Gbl.Parameter('SniffingDistance'))
        random.shuffle(Neighbourhood) # to avoid anisotropy
        acceptable = None
        best = -Gbl.Parameter('Saturation') # best == pheromone balance found so far
        for NewPos in Neighbourhood:
            if NewPos == self.location: continue
            # Target attractiveness
            Direction = cmath.phase(t2c(NewPos) - t2c(self.location))
            Value = Gbl.Parameter('TargetAttractiveness') * abs(cmath.pi - abs(DirectionToTarget - Direction))
            # looking for position with pheromone
            # attractiveness of pheromone
            Value += Gbl.Parameter('PheromoneAttractiveness') * float(Land.pheromone(NewPos)) / Gbl.Parameter('Saturation')
            # Value += Gbl.Parameter('PheromoneAttractiveness') * (Land.pheromone(NewPos))
            # aversion to climbing
            Value -= Gbl.Parameter('SlopeAversion') * abs(Land.Altitude(NewPos) - Land.Altitude(self.location))
            if Value > best:
                acceptable = NewPos
                best = Value
        return acceptable

    def pathPurge(self):
        " Eliminates loops from Path "
        # If a position reoccurs, everything from its first occurrence on
        # is discarded (the loop), then the step is re-appended.
        NewPath = []
        Positions = []
        for step in self.Path:
            if step[0] in Positions:
                del NewPath[Positions.index(step[0]):]
                del Positions[Positions.index(step[0]):]
            NewPath.append(step)
            Positions.append(step[0])
        self.Path = NewPath

    def checkTarget(self):
        " Path are reinforced when target reached "
        if abs(t2c(self.Target) - t2c(self.location)) <= Gbl.Parameter('SniffingDistance'):
            # Target has been reached
            # Computing the quantity of pheromone that will be laid down on the way back home
            self.pathPurge()
            # 'Apparent length' penalizes climbs and low-pheromone stretches
            ApparentLength = len(self.Path)
            ApparentLength += Gbl.Parameter('SlopeAversion') * (sum(map(lambda x: x[1], self.Path)))
            ApparentLength += Gbl.Parameter('PheromoneAttractiveness') * (sum(map(lambda x: x[2], self.Path)))
            if self.PreviousPathLength == 0: self.PreviousPathLength = ApparentLength
            # Deposit more pheromone when the path improved, less otherwise
            if ApparentLength > self.PreviousPathLength:
                self.Deposit = Gbl.Parameter('Deposit') * (1 - Gbl.Parameter('DepositVariation')/100.0)
            elif ApparentLength < self.PreviousPathLength:
                self.Deposit = Gbl.Parameter('Deposit') * (1 + Gbl.Parameter('DepositVariation')/100.0)
            return True
        return False

    def moves(self):
        """ Basic behavior: move by looking for neighbouring unvisited cells.
            If food is in sight, return straight back home.
            Lay down negative pheromone on visited cells.
            Lay down positive pheromone on returning home.
        """
        if self.Path:
            if self.Action == 'Move' and self.checkTarget():
                self.Action = 'BackHome'    # agent will move back home
        else:
            # empty path: restart a forward trip from the start point
            self.location = self.Start
            self.Action = 'Move'    # agents moves toward target
        if self.Action == 'BackHome':
            # retrace the stored path backwards, reinforcing it with pheromone
            self.location = self.Path.pop()[0]
            Observer.record((self.ID, self.location + WalkerAspect))    # for ongoing display of Walkers
            # marking current positon as interesting with pheromone
            Land.pheromone(self.location, self.Deposit)
            # ongoing display of positive pheromone
            Observer.record(('P%d_%d' % self.location, self.location + PheromonAspect))
        else:
            NextPos = self.Sniff()
            if NextPos is None or random.randint(0,100) < Gbl.Parameter('Exploration'):
                # either all neighbouring cells have been visited or in the mood for exploration
                E = Gbl.Parameter('SniffingDistance')
                NextPos = c2t(t2c(self.location) + complex(random.randint(-E,E),random.randint(-E,E)))
                NextPos = Land.ToricConversion(NextPos)
            # remember position, climb, and whether pheromone there is low
            self.Path.append((NextPos, max(0, Land.Altitude(NextPos) - Land.Altitude(self.location)),
                              1*(Land.pheromone(NextPos) < Gbl.Parameter('ForwardThreshold'))))
            self.location = NextPos
            Observer.record((self.ID, self.location + WalkerAspect))    # for ongoing display of Walkers
            # marking current positon as visited with pheromone
            Land.pheromone(self.location, Gbl.Parameter('ForwardDeposit'))
            Observer.record(('P%d_%d' % self.location, self.location + PheromonAspect))
class Group(EG.Group):
    # The group is a container for individuals.
    # Individuals are stored in self.members
    def __init__(self, Scenario, Start=(0,0), Target=(100,100), ID=1, Size=100):
        # Start/Target are forwarded to each Walker created by this group
        self.Start = Start
        self.Target = Target
        EP.Group.__init__(self, Scenario, ID=ID, Size=Size)

    def createIndividual(self, ID=None, Newborn=True):
        # calling local class 'Individual'
        return Walker(self.Scenario, self.free_ID(Prefix='A'), Start=self.Start, Target=self.Target) # call to local class 'Walker'
class Population(EP.Population):
    " defines the population of agents "
    def __init__(self, Scenario, Observer, Start=(0,0), Target=(100,100)):
        self.Start = Start
        self.Target = Target
        EP.Population.__init__(self, Scenario, Observer)
        " creates a population of Walker agents "
        # NOTE(review): the line above is a stray string statement (no-op),
        # not a docstring — kept as-is.
        self.Moves = 0  # counts the number of times agents have moved

    def createGroup(self, ID=0, Size=0):
        return Group(self.Scenario, Start=self.Start, Target=self.Target, ID=ID, Size=Size) # Call to local class 'Group'

    def One_Decision(self):
        """ This function is repeatedly called by the simulation thread.
            One Walker is randomly chosen and decides what it does
        """
        Walker = self.selectIndividual()
        Walker.moves()
        self.Moves += 1
        Newyear = self.Moves // self.popSize    # One step = all Walkers have moved once on average
        if Newyear > self.year:
            # a simulated year has elapsed: update statistics and evaporate
            EP.Population.one_year(self)    # performs statistics
            Land.evaporation()
            self.year = Newyear
        return True # This value is forwared to "ReturnFromThread"
# Entry point (Python 2): build the global parameter set, observer,
# landscape and population, then hand control to the Evolife GUI loop.
if __name__ == "__main__":
	print __doc__

	#############################
	# Global objects            #
	#############################
	Gbl = EPar.Parameters('_Params.evo')	# Loading global parameter values
	Observer = Walker_Observer(Gbl)   # Observer contains statistics
	Observer.recordInfo('FieldWallpaper', Gbl.Parameter('Ground'))
	# Observer.recordInfo('FieldWallpaper', 'white')
	Observer.recordInfo('DefaultViews', ['Field'])
	Land = Landscape(AltitudeFile=Gbl.Parameter('Altitudes'), CellType=LandCell)
	# Start/End points are given as percentages 'XxY' of the land size
	Startpoint = Gbl.Parameter('Startpoint').split('x')
	Startpoint = ((int(Startpoint[0]) * Land.Width) // 100, (int(Startpoint[1]) * Land.Height) // 100)
	Endpoint = Gbl.Parameter('Endpoint').split('x')
	Endpoint = ((int(Endpoint[0]) * Land.Width) // 100, (int(Endpoint[1]) * Land.Height) // 100)
	Pop = Population(Gbl, Observer, Start=Startpoint, Target=Endpoint)	 # Walker colony
	print Land.Width, Land.Height
	Observer.record(('Dummy',(Land.Width, Land.Height, 0, 1)))	# to resize the field
	# Observer.record(('Dummy',(Gbl.Parameter('LandSize'), Gbl.Parameter('LandSize'), 0, 1)))	# to resize the field
	EW.Start(Pop.One_Decision, Observer, Capabilities='RPC')
	print "Bye......."
	sleep(1.0)
	## raw_input("\n[Return]")

__author__ = 'Dessalles'
"thierry.deo@polytechnique.edu"
] | thierry.deo@polytechnique.edu |
de545d59d6cef1c31510ec5ae3cc05ddf7c17e52 | 6d37e22d0c7d5cb3a82468df166f680c645f72f8 | /palindromo.py | a92f659ae521f5a28a6f51450a16e11dad204f3c | [] | no_license | aldojaimejuarez/codigo_python | d15c5e64bbfc930a2ed99a39d91208e97e6cd11f | d422eeb35343a932547de5c66521760112542257 | refs/heads/master | 2023-01-02T03:32:01.462992 | 2020-10-09T19:38:29 | 2020-10-09T19:38:29 | 302,126,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | def palindromo(palabra):
palabra = palabra.replace(" ", "")
palabra = palabra.lower()
palabra_invertida = palabra[::-1]
if palabra == palabra_invertida:
return True
else:
return False
def run():
    """Prompt for a word and report whether it is a palindrome."""
    entrada = input("Escribe una palabra: ")
    if palindromo(entrada):
        print("Es Palíndromo")
    else:
        print("No es Palíndromo")
# Entry point. The commented-out duplicate implementation that used to
# follow has been deleted (dead code).
if __name__ == "__main__":
    run()
"aldojaimejuarez@gmail.com"
] | aldojaimejuarez@gmail.com |
4152db7cdda80a51d3094365275463da46565dfd | a3b14bb98d12ac70c57ef5e8b43db52a6472b98f | /reply.py | 3bc22704e99d8a56ec3f17a417da5855a31e21c4 | [] | no_license | zhouqiw/itchatdemo | 4176241fcf875dcc212e6b69e0dc66aa902aa417 | de6093845e36c15f4e4f17456da7d9b8a442686d | refs/heads/master | 2021-04-30T16:03:49.595203 | 2019-01-14T06:06:25 | 2019-01-14T06:06:25 | 121,252,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # encoding: utf-8
"""
@author: zhouqi
@software: PyCharm
@file: reply.py
@time: 2017/5/26 上午10:24
"""
import requests
import itchat
KEY = '8edce3ce905a4c1dbb965e6b35c3834d'
def get_response(msg):
    """Query the Tuling chatbot API with *msg* and return its text reply.

    Returns None on any request/parse failure so the caller can fall back
    to a default reply.
    """
    apiUrl = 'http://www.tuling123.com/openapi/api'
    data = {
        'key' : KEY,
        'info' : msg,
        'userid' : 'wechat-robot',
    }
    try:
        # timeout keeps the WeChat callback from hanging on a slow API
        r = requests.post(apiUrl, data=data, timeout=10).json()
        return r.get('text')
    except Exception:
        # was a bare 'except:' — that also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the None fallback
        return None
@itchat.msg_register(itchat.content.TEXT)
def tuling_reply(msg):
defaultReply = 'I received: ' + msg['Text']
reply = get_response(msg['Text'])
return reply or defaultReply
itchat.auto_login(hotReload=True)
itchat.run() | [
"931384927@qq.com"
] | 931384927@qq.com |
9a516cde0db5717278caa1da9352d130580082fa | ae9e91c2f5d79facf6938eaf0d99d2916afa1b42 | /preprocess/04_calculate_weights.py | 2a2e6b4b15fd5eefb38e34621904c519c76d3845 | [] | no_license | nofreewill42/bms | a0857b351fde362ea7653f703ef111c157c1eae0 | 286c55cb0bf76c6fa99c294315beed8646f88c21 | refs/heads/master | 2023-05-03T16:36:02.270070 | 2021-05-27T19:15:49 | 2021-05-27T19:15:49 | 351,814,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | import numpy as np
import pandas as pd
from pathlib import Path
if __name__ == '__main__':
ds_path_str = Path('data_path.txt').read_text()
ds_path = Path(ds_path_str)
processed_labels_path = ds_path / 'train_labels_processed.csv' # input
label_weights_path = ds_path / 'train_labels_weights.csv' # output
# Do the job
print(f'Reading processed labels - {processed_labels_path}')
train_df = pd.read_csv(processed_labels_path, low_memory=False)
train_df.fillna('', inplace=True)
print('Complexity')
complexity_df = train_df.iloc[:, 14].apply(lambda x: x.count('-') + x.count('-') * x.count('(') + x.count('('))
print('Atom counts')
atom_counts_df = train_df.iloc[:, 1:13].sum(axis=1)
print('Atom (non)rarity')
atom_present = (train_df.iloc[:, 1:13] > 0)
atom_rarity_df = atom_present/(1+atom_present.sum(axis=0))
atom_not_present = (~atom_present)
atom_non_rarity_df = atom_not_present/(1+atom_not_present.sum(axis=0))
atom_rarity_df = (atom_rarity_df + atom_non_rarity_df).sum(axis=1)
print('Layer (non)rarity')
layer_present = (train_df.iloc[:, 14:] != '') # not # ih,ib,it,im,is are not considered
layer_rarity_df = layer_present/(1+layer_present.sum(axis=0))
layer_not_present = (~layer_present)
layer_non_rarity_df = layer_not_present/(1+layer_not_present.sum(axis=0))
layer_rarity_df = (layer_rarity_df + layer_non_rarity_df).sum(axis=1)
print(f'Saving to {label_weights_path}')
weights_df = pd.concat([train_df['image_id'], complexity_df, atom_counts_df, atom_rarity_df, layer_rarity_df], axis=1)
weights_df.columns = ['image_id', 'complexity', 'atom_count', 'atom_rarity', 'layer_rarity']
weights_df.to_csv(label_weights_path, index=False) | [
"jonavan01@gmail.com"
] | jonavan01@gmail.com |
578ce09d09ffd5291d367e600f74d286a4369d07 | d9f5f5f95c1aefa3674c116ba6b66d520914e832 | /Shared/Build/Windows/Win32/bin/rsvg | 7c0c71495b1fe63d5cdbbfa640a1d1af9ca88ebb | [] | no_license | zsx/ossbuild | b7e686b8c8f82a83d945a1857157822e330eaec4 | a510b4fb1b15922c14dd6311a24f139b8a790699 | refs/heads/master | 2016-09-05T17:53:52.556306 | 2010-03-05T01:45:42 | 2010-03-05T01:45:42 | 548,912 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | #!/usr/bin/env python
#
# This python script subsumes the old 'rsvg' C-based command-line program.
# It should be considered deprecated in favor of 'rsvg-convert'.
#
import getopt, sys, os
def usage():
print >> sys.stdout, """Usage: rsvg [-v?] [-d|--dpi-x <float>] [-p|--dpi-y <float>]
[-x|--x-zoom <float>] [-y|--y-zoom <float>] [-w|--width <int>]
[-h|--height <int>] [-q|--quality <int>] [-f|--format [png, jpeg]]
[-v|--version] [-?|--help] [--usage] [OPTIONS...] file.svg file.png"""
def help():
print >> sys.stdout, """Usage: rsvg [OPTIONS...] file.svg file.png
-d, --dpi-x=<float> pixels per inch
-p, --dpi-y=<float> pixels per inch
-x, --x-zoom=<float> x zoom factor
-y, --y-zoom=<float> y zoom factor
-w, --width=<int> width
-h, --height=<int> height
-q, --quality=<int> JPEG quality
-f, --format=[png, jpeg] save format
-v, --version show version information
Help options:
-?, --help Show this help message
--usage Display brief usage message
""",
def shellEscape(param):
"""Escape a string parameter for the shell."""
return "'" + param.replace("'", "'\\''") + "'"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "d:p:x:y:w:h:q:f:v?", ["dpi-x=", "dpi-y=", "x-zoom=", "y-zoom=", "width=", "height=", "quality=", "format=", "version", "usage"])
except getopt.GetoptError:
help()
sys.exit(1)
command_str = ""
for o, a in opts:
if o in ("-v", "--version"):
print "rsvg version %s" % ("2.26.0")
sys.exit(0)
elif o in ("--usage"):
usage()
sys.exit(0)
elif o in ("-?", "--help"):
help()
sys.exit(0)
elif (o in ("-f", "--format")):
if a in ("jpg", "jpeg"):
print >> sys.stderr, "The JPEG output format is no longer supported"
sys.exit(1)
elif (o in ("-q", "--quality")):
print "The --quality option is no longer supported"
sys.exit(1)
else:
command_str += " " + shellEscape(o) + " " + shellEscape(a)
if len(args) != 2:
help()
sys.exit(1)
return os.system("%s %s -o %s %s" % (shellEscape(os.path.join("/D/OSSBuild/Build/Windows/Win32/Release", "bin", "rsvg-convert")), command_str, shellEscape(args[1]), shellEscape(args[0])))
if __name__ == "__main__":
main()
| [
"david.g.hoyt@72a73a14-1cf0-11de-a027-6f5b8e30e268"
] | david.g.hoyt@72a73a14-1cf0-11de-a027-6f5b8e30e268 | |
d00dbd97b58fc1d1199f2fc36746e9223ddfeea0 | 39b0d9c6df77671f540c619aff170441f953202a | /default program/descriptor_method1.py | 18d5b5dc9831d6210a3cfa6fd591f3a965cd7de1 | [] | no_license | yeboahd24/Python201 | e7d65333f343d9978efff6bf86ce0447d3a40d70 | 484e66a52d4e706b8478473347732e23998c93c5 | refs/heads/main | 2023-02-06T10:24:25.429718 | 2020-12-26T01:08:04 | 2020-12-26T01:08:04 | 306,487,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #!usr/bin/env/python3
class DescriptorClass(object):
"""All know that descriptor attributes should be in the class not the __init__
instance--> this is the instance of your class, so in this case test and test1 becomes our instance
owner--> this the name of class of the instance, ClientClass is now our owner here
"""
def __get__(self, instance, owner):
if instance is None: # don't forget to add this
return f"{self.__class__.__name__}.{owner.__name__}"
return f"value for {instance}"
class ClientClass(object):
descriptor = DescriptorClass()
test = ClientClass.descriptor # calling ClientClass directly
test1 = ClientClass().descriptor
print(test)
print(test1)
| [
"noreply@github.com"
] | noreply@github.com |
7e3abe5ff2836f61260cff4e091e0e15a6e5aa06 | 0966fc5e479f7dd86683fd2d961e44bb4f71a614 | /splatify/views.py | 8b506aeb27b9322d1943be7e2675565ce5510105 | [] | no_license | micnem/splatify2 | 112972616f6216598791df6b025c2de7be020281 | a90328fbf79667ebe10a028a66c49334c840ae57 | refs/heads/main | 2023-02-10T11:20:48.570326 | 2021-01-06T14:14:08 | 2021-01-06T14:14:08 | 327,318,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | from django.shortcuts import render
from .spopulate import get_top_artists, create_playlist, match, main
from .models import *
def check_profile(profile):
if not profile.populated:
get_top_artists(profile)
def homepage(request):
return render(request, 'homepage.html')
def room(request):
check_profile(request.user.profile)
users = User.objects.all()
return render(request, 'room.html', {'users': users})
def show_top_artists(request):
return render(request,'top_artists.html')
def splat(request, user_id):
user2 = User.objects.get(id=user_id)
master_list = match([request.user, user2])
playlist_id = main(master_list, request.user.profile, user2)
return render(request, 'result.html', {'playlist_id':playlist_id})
def play(request, playlist_id):
return render(request, 'play.html', {'playlist_id':playlist_id}) | [
"michael.nemni@gmail.com"
] | michael.nemni@gmail.com |
1b8c7a053a6d9a3d7531f29790c6c674faeec052 | c17f1109589f94c1d5d00a0cc435d5f374008e4b | /cs181-s20-homeworks/hw3/T3_P2.py | d02ef9dfbe8bf93ec427d1393a555fda7e9dd102 | [] | no_license | mgbvox/sulaiman_a | e7b2a36ee9740e747c5ffc5a60d280d3bf6a988b | 85bfecbc1f0e2fe69b84677ffa47662519109fa1 | refs/heads/master | 2021-01-16T10:07:31.164600 | 2020-02-25T18:46:54 | 2020-02-25T18:46:54 | 243,075,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | import numpy as np
# might need to install
import torch
# parameters
N = 2000
M = 100
H = 75
# generate data
np.random.seed(181)
W1 = np.random.random((H, M))
b1 = np.random.random(H)
W2 = np.random.random(H)
b2 = np.random.random(1)
X = np.random.random((N, M))
y = np.random.randint(0,2,size=N).astype('float')
# torch copies of data
tW1 = torch.tensor(W1, requires_grad=True)
tb1 = torch.tensor(b1, requires_grad=True)
tW2 = torch.tensor(W2, requires_grad=True)
tb2 = torch.tensor(b2, requires_grad=True)
tX = torch.tensor(X)
ty = torch.tensor(y)
# CAREFUL: if you run the code below w/o running the code above,
# the gradients will accumulate in the grad variables. Rerun the code above
# to reset
# torch implementation
def tforward(X):
z1 = (torch.mm(tX, tW1.T) + tb1).sigmoid()
X = (torch.mv(z1, tW2) + tb2).sigmoid()
return X
tyhat = tforward(tX)
L = (ty * tyhat.log()) + (1-ty) * (1 - tyhat).log()
# the magic of autograd!
L.sum().backward()
# the gradients will be stored in the following variables
grads_truth = [tW1.grad.numpy(), tb1.grad.numpy(), tW2.grad.numpy(), tb2.grad.numpy()]
# Utils
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# use this to check your implementation
# you can pass in grads_truth as truth and the output of get_grads as our_impl
def compare_grads(truth, our_impl):
for elt1, elt2 in zip(truth, our_impl):
if not np.allclose(elt1, elt2, atol=0.001, rtol=0.001):
return False
return True
# Implement the forward pass of the data. Perhaps you can return some variables that
# will be useful for calculating the gradients.
def forward(X):
return X
# Code the gradients you found in part 2.
# Can pass in additional arguments
def get_grads(y, yhat, X):
dLdb2 = None
dLdW2 = None
dLdb1 = None
dLdW1 = None
# make sure this order is kept for the compare function
return [dLdW1, dLdb1, dLdW2, dLdb2]
| [
"mgbvox@gmail.com"
] | mgbvox@gmail.com |
ae455af0b625d88efc701edf5ecb73630079c44e | 80b05048a9cedbd49b6d42cc0449432e1dd0a950 | /ics_tool/models.py | f4eb94900946a8f99ef3827eef6232fedaf8cd13 | [] | no_license | SamanthaNaraharisetti/edm_present | c4ede571980ed6e17027fec9425b9717f9331342 | 1f6369d820236f02f80c4992800dc247167ff95f | refs/heads/master | 2021-08-19T17:47:33.370862 | 2017-11-27T04:14:16 | 2017-11-27T04:14:16 | 112,136,068 | 0 | 0 | null | 2017-11-27T02:13:57 | 2017-11-27T02:13:57 | null | UTF-8 | Python | false | false | 4,413 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# Database Tables should be defined as class object
class Customer(models.Model):
CustomerId = models.IntegerField(unique=True)
CustomerName = models.CharField(max_length=200)
class Donors(models.Model):
OrganizationName = models.CharField(max_length=100)
Salutation = models.CharField(max_length=100)
FirstName = models.CharField(max_length=100)
LastName = models.CharField(max_length=100)
Email = models.EmailField(max_length=100)
PhoneNumber = models.CharField(max_length=15)
Comments = models.CharField(max_length=100)
StreetAddress = models.CharField(max_length=100)
City = models.CharField(max_length=100)
State = models.CharField(max_length=100)
Zip = models.CharField(max_length=100)
ICS = models.CharField(max_length=2)
class SearchDonor(models.Model):
SearchQuery = models.CharField(max_length=255)
class DonationsLog(models.Model):
logID = models.IntegerField(primary_key=True)
lastModifidDate = models.DateField()
lastChangedBy = models.CharField(max_length=100)
class Donations(models.Model):
donationID =models.IntegerField(primary_key=True)
donation_date=models.DateField()
additional_comments=models.CharField(max_length=100)
dontype=models.CharField(max_length=20)
class EBDGFOOD(models.Model):
EDBGEvent_foodID= models.IntegerField(primary_key=True)
ServingsPerGallon=models.IntegerField()
EBDGtype=models.CharField(max_length=20)
RestaurantName=models.CharField(max_length=100)
class EMPTYBOWLFOOD(models.Model):
EmptyBowlEvent_foodID=models.IntegerField(primary_key=True)
food_description=models.CharField(max_length=100)
quantity=models.IntegerField()
totalServings=models.IntegerField()
class FOOD(models.Model):
foodEntryID=models.IntegerField(primary_key=True)
foodtype=models.CharField(max_length=100)
avgCost=models.IntegerField()
totalPounds=models.IntegerField()
totalValue=models.IntegerField()
NoPounds=models.IntegerField()
ServingsperPound=models.IntegerField()
class FOODCATEGORY(models.Model):
donationID=models.ForeignKey(Donations, on_delete=models.CASCADE)
categoryID=models.IntegerField()
class FOODENTRY(models.Model):
CategoryID=models.ForeignKey(FOODCATEGORY, on_delete=models.CASCADE)
foodEntryID=models.IntegerField(primary_key=True)
class FOODCATEGORYDESC(models.Model):
categoryID=models.ForeignKey(FOODCATEGORY, on_delete=models.CASCADE)
description=models.CharField(max_length=100)
categoryName=models.CharField(max_length=100)
class FUNDRAISINGEVENTS(models.Model):
eventID=models.IntegerField(primary_key=True)
estimatedValue=models.IntegerField()
receivedDate=models.DateField()
location=models.CharField(max_length=100)
class GOLF(models.Model):
eventID=models.ForeignKey(FUNDRAISINGEVENTS, on_delete=models.CASCADE)
dtype=models.CharField(max_length=100)
class ITEMS(models.Model):
donationID=models.ForeignKey(Donations, on_delete=models.CASCADE)
description=models.CharField(max_length=100)
isack_sent=models.BooleanField(default=False)
approxValue=models.IntegerField()
class MONETARY (models.Model):
donationID=models.ForeignKey(Donations, on_delete=models.CASCADE)
amount=models.IntegerField()
modeOfPayment=models.CharField(max_length=100)
class Reports(models.Model):
reportID=models.IntegerField(primary_key=True)
filename=models.CharField(max_length=100)
reportgen_date=models.DateField()
class ServiceEvents(models.Model):
service_detailID=models.IntegerField(primary_key=True)
date_of_service=models.DateField()
serviceID=models.IntegerField()
class EBDGRAFFLE(models.Model):
eventID=models.ForeignKey(FUNDRAISINGEVENTS, on_delete=models.CASCADE)
donation=models.CharField(max_length=100)
class EMPTYBOWLRAFFLEAUCTION(models.Model):
eventID=models.ForeignKey(FUNDRAISINGEVENTS, on_delete=models.CASCADE)
description=models.CharField(max_length=100)
item=models.CharField(max_length=100)
status=models.CharField(max_length=100)
receivedDate=models.DateField()
| [
"vvijayakumar@localhost.localdomain"
] | vvijayakumar@localhost.localdomain |
7d11b2810249e52b844e73ddf8a67f64a89b8962 | 23b6817f82389f4c68c2af537a3ab7a9e043324d | /env/bin/pip3.8 | added22061a2d7cd401f74a5f3594f396f6ca10b | [] | no_license | mirroronthesun/django_project | 1e8516e2d7d94bc23734020502206371fc7e6c4c | 7d817e80817f952c5623ac5c3f3fc109277b5d44 | refs/heads/main | 2023-01-10T09:16:47.557733 | 2020-11-15T14:39:34 | 2020-11-15T14:39:34 | 311,956,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | 8 | #!/Users/mirroronthesun/Desktop/env/hoonstagram/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"dydgns1210@naver.com"
] | dydgns1210@naver.com |
15fe944f077a01f102bbb05813f6781c3866b114 | 77a806373b88919b15359c3a8d2c002e44e51a96 | /fleet_extend/models/fleet_vehicle_cost.py | 597b2a0acf44519ad628947cf6f0b3e30fa33550 | [] | no_license | Odoo-Pakistan/fleet | c7bef3f9a828306191a92fc109183dac5b01799d | 652345d635889be7878df3aa415f597476591cef | refs/heads/master | 2020-05-22T18:30:03.237394 | 2016-02-07T12:08:56 | 2016-02-07T12:08:56 | 84,714,355 | 1 | 0 | null | 2017-03-12T09:28:48 | 2017-03-12T09:28:48 | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
from openerp import models, api
class FleetVehicleCost(models.Model):
_inherit = 'fleet.vehicle.cost'
@api.multi
def unlink(self):
for obj in self:
if obj.odometer_id:
obj.odometer_id.sudo().unlink()
return super(FleetVehicleCost, self).unlink() | [
"nemanja-d@hotmail.com"
] | nemanja-d@hotmail.com |
06cb5bb45d0c1544fb7c7734354aae5ec1efeb1d | cb60c614d5bda6d298669fde88da1c8cb36a1dbd | /Lab2/skrypt.py | e19a5d8ef2a89003a73c817adc61b98d87f8653f | [] | no_license | karosroczyk/DataAnalitics | 2b73c34d073d848f14564973d056c3b275821447 | 4c2b488dc57545cd321d72fc19786baf3aebe744 | refs/heads/master | 2022-11-07T04:16:23.977017 | 2020-06-08T23:46:53 | 2020-06-08T23:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,806 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 27 16:35:05 2019
@author: Lenovo
"""
import psycopg2 as pg
import pandas.io.sql as psql
import pandas as pd
connection = pg.connect(host='localhost', port=5432, dbname='postgres', user='postgres', password='*********')
# 1) Ile kategorii filmów mamy w wypożyczalni?
df = pd.read_sql_query("select count(category_id) from category",con=connection)
print(df)
# 2) Wyświetl listę kategorii w kolejności alfabetycznej.
df = pd.read_sql_query("select name from category order by name",con=connection)
print(df)
# 3) Znajdź najstarszy i najmłodszy film do wypożyczenia.
df = pd.read_sql_query("select title, release_year from film where release_year = (select min(release_year) from film)",con=connection)
print(df)
df = pd.read_sql_query("select title, release_year from film where release_year = (select max(release_year) from film)",con=connection)
print(df)
#4) Ile wypożyczeń odbyło się między 2005-07-01 a 2005-08-01
df = pd.read_sql_query("select count(rental_date) from rental where rental_date between'2005-07-01' and '2005-08-01'",con=connection)
print(df)
# 5) Ile wypożyczeń odbyło się między 2010-01-01 a 2011-02-01
df = pd.read_sql_query("select count(rental_date) from rental where rental_date between'2010-01-01' and '2011-02-01'",con=connection)
print(df)
#6) Znajdź największą płatność wypożyczenia
df = pd.read_sql_query("select max(amount) from payment",con=connection)
print(df)
# 7) Znajdź wszystkich klientów z Polski, Nigerii lub Bangladeszu.
df = pd.read_sql_query("select first_name, last_name from customer where address_id in(select address_id from address where city_id in(select city_id from city where country_id in (select country_id from country where country in('Poland', 'Bangladesh', 'Nigeria'))))",con=connection)
print(df)
# 8) Gdzie mieszkają członkowie personelu?
df = pd.read_sql_query("select country from country where country_id in(select country_id from city where city_id in(select city_id from address where address_id in(select address_id from staff)))",con=connection)
print(df)
# 9) Ilu pracowników mieszka w Argentynie lub Hiszpanii?
df = pd.read_sql_query("select count(staff_id) from staff where address_id in(select address_id from address where city_id in(select city_id from city where country_id in (select country_id from country where country in ('Spain', 'Argentina'))))",con=connection)
print(df)
# 10) Jakie kategorie filmów zostały wypożyczone przez klientów?
df = pd.read_sql_query("select name from category where category_id in(select category_id from film_category where film_id in(select film_id from inventory where inventory_id in(select inventory_id from rental where customer_id in(select customer_id from customer))))",con=connection)
print(df)
# 11) Znajdź wszystkie kategorie filmów wypożyczonych w Ameryce.
df = pd.read_sql_query("select name from category where category_id in(select category_id from film_category where film_id in(select film_id from inventory where inventory_id in(select inventory_id from rental where customer_id in(select customer_id from customer where address_id in(select address_id from address where city_id in(select city_id from city where country_id in(select country_id from country where country = 'United States')))))))",con=connection)
print(df)
# 12) Znajdź wszystkie tytuły filmów, w których grał: Olympia Pfeiffer lub Julia Zellweger lub Ellen Presley
df = pd.read_sql_query("select title from film where film_id in(select film_id from film_actor where actor_id in(select actor_id from actor where first_name = 'Olympia' and last_name = 'Pfeiffer' or first_name = 'Julia' and last_name = 'Zellweger' or first_name = 'Ellen' and last_name = 'Presley' ))",con=connection)
print(df)
| [
"50304244+karosroczyk@users.noreply.github.com"
] | 50304244+karosroczyk@users.noreply.github.com |
ea01bc41999069dc12407183ad60c1d5114970d9 | ab66ad0c4148982fea5d9ccb1473f0c6b4786812 | /opencv-project/volume_control.py | 4f3ca78f2f0ea7385a3d17504ad74a70c30b834b | [] | no_license | shivamsatyam/opencv_ai | e097384f506f9957ca93edda4c8f28d03269ddaf | 38bd2ac981c9555b2bd9a07c12df90dde956bec6 | refs/heads/main | 2023-07-25T12:38:39.705819 | 2021-09-04T18:04:44 | 2021-09-04T18:04:44 | 403,116,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | import cv2
import mediapipe as mp
import time
import hand_tracking_module as htm
import math
from ctypes import cast,POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities,IAudioEndpointVolume
from numpy import interp
wCam,hCam = 800,600
capture = cv2.VideoCapture('https://26.151.102.175:8080/video')
# capture.set(3,wCam)
# capture.set(4,hCam)
pTime = 0
detector = htm.handDetector(detectionCon=0.7)
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_,CLSCTX_ALL,None)
volume = cast(interface,POINTER(IAudioEndpointVolume))
#volume.GetMute()
#volume.GetMasterVolumeLevel()
#print(volume.GetVolumeRange()) #(-65.25, 0.0, 0.03125)
volRange = volume.GetVolumeRange()
#volume.SetMasterVolumeLevel(0,None)
minVol = volRange[0]
maxVol = volRange[1]
vol = 0
volBar = 400
volPer = 0
while True:
success,img = capture.read()
img = cv2.resize(img,(800,600),interpolation=cv2.INTER_AREA)
img = detector.findHands(img)
lmList = detector.findPosition(img,draw=False)
if len(lmList)!=0:
x1,y1 = lmList[4][1],lmList[4][2]
x2,y2 = lmList[8][1],lmList[8][2]
cx,cy = (x1+x2)//2 , (y1+y2)//2
cv2.circle(img,(x1,y1),15,(255,0,255),cv2.FILLED)
cv2.circle(img,(x2,y2),15,(255,0,255),cv2.FILLED)
cv2.line(img,(x1,y1),(x2,y2),(255,0,255),3)
cv2.circle(img,(cx,cy),15,(255,0,255),cv2.FILLED)
length = math.hypot(x2-x1,y2-y1)
# print(length)
# Hand Range 50-300
# volume Range -65 - 0
vol = interp(length,[50,250],[minVol,maxVol])
volBar = interp(length,[50,250],[400,150])
volPer = interp(length,[50,250],[0,100])
volume.SetMasterVolumeLevel(vol,None)
if length<50:
print('circle')
cv2.circle(img,(cx,cy),15,(0,255,0),cv2.FILLED)
cv2.rectangle(img,(50,150),(85,400),(0,0,255))
cv2.rectangle(img,(50,int(volBar)),(85,400),(0,0,255),cv2.FILLED)
cv2.putText(img,f"{int(volPer)}%",(40,450),cv2.FONT_HERSHEY_PLAIN,1,(0,0,255),3)
cTime = time.time()
fps = 1/(cTime-pTime)
pTime = cTime
cv2.putText(img,f"{int(fps)}",(20,70),cv2.FONT_HERSHEY_PLAIN,3,(0,255,0),3)
cv2.imshow('image',img)
cv2.waitKey(1)
| [
"noreply@github.com"
] | noreply@github.com |
970b268424584e832ec5d02c64aaaf2236a84cc0 | 591aabcbf7dc6d4aefbc78a43e4c5c26e5443d6d | /oj/25298.py | ba0432ca1ae01d2b56a34379a490b7ec3068c752 | [] | no_license | liujiboy/Python_Course | adbafbc7401cabd9903c673fd65bfce1f03dacdf | e336c36ccd5e1123af9195146037e9736e6a84d6 | refs/heads/master | 2020-03-31T02:19:44.196840 | 2019-04-08T04:34:27 | 2019-04-08T04:34:27 | 151,818,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | while True:
s=input()
l=[]
for n in range(len(s)//2):
l.append(s[2*n+1])
l.append(s[2*n])
if len(s)%2==1:
l.append(s[-1])
print("".join(l)) | [
"liujiboy@163.com"
] | liujiboy@163.com |
ce23396e1af092c87dabfa94de0f5dfb48046ca7 | 50689f98f2af73064e555dd7ffb38aaf0c252c9a | /futiontable/rtt/rajsthan_nodes.py | 72cd0efdb7d5ac9f672e5f85b90a2648e9101f5f | [] | no_license | arvindmahla/ruralnet | daad10810014867df6de6471857071d2c84c00f6 | 46f06c06afbca178267f67031f5e4f81822d76c0 | refs/heads/master | 2016-09-06T19:05:58.519536 | 2014-09-08T13:47:35 | 2014-09-08T13:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,466 | py | import sqlite3
import sys
import numpy
import csv
import thread
locaname={
'rnm13':'Ukwa, Paraswada, Balaghat, Madhya Pradesh',
'rnm3':'New Delhi3, South Delhi, Delhi, NCT',
'rnm0':'Dindori, Dindori, Dindori, Madhya Pradesh',\
'rnm7':'Samnapur, Dindori, Dindori, Madhya Pradesh',
'rnm11':'Amarpur, Dindori, Dindori, Madhya Pradesh',
'rnm2':'New Delhi2, South Delhi, Delhi, NCT',
'rnm18':'Lamta, Balaghat, Balaghat, Madhya Pradesh',
'rnm14':'Paraswada, Paraswada, Balaghat, Madhya Pradesh',
'rnm':'Hanumanpura, Kuchaman, Nagaur , Rajasthan',
'rnm101':'Jaipur, Jaipur, Jaipur, Rajasthan',
'rnm31':'Sikar, Sikar, Sikar, Rajasthan'}
loc_isp1={
'rnm':['airtel','idea','mtnl'],
'rnm31':['airtel','mtnl','idea'],
'rnm101':['airtel','mtnl','idea']
}
loc_isp={
'rnm31':['airtel','mtnl','idea'],
'rnm101':['airtel','mtnl','idea']
}
log_path='tr/'
def fun(loc,isp,tid):
print "In Thread "+tid
ffile=open('rttavg/'+loc+'_'+isp,'w')
con = sqlite3.connect(":memory:")
con.text_factory = str
cur = con.cursor()
cur.execute("CREATE TABLE t (location TEXT ,TS TEXT,isp TEXT,hopid INTEGER,landmark TEXT,ipaddress TEXT,ASN TEXT,latency REAL);")
fin=open(log_path+loc+'_'+isp+'.csv','r')
lines=fin.readlines()
data=[]
for line in lines:
data.append(tuple(line.strip().split(',')))
cur.executemany("INSERT INTO t (location,TS,isp,hopid,landmark,ipaddress,ASN,latency) VALUES (?,?,?,?,?,?,?,?);", data)
con.commit()
rows=cur.execute("select max(hopid),TS,landmark from t where ASN = \'"+isp+"\' group by TS,landmark order by TS,landmark")
for row in rows:
for lat in cur.execute("select latency from t where TS=\'"+row[1]+"\' and landmark=\'"+row[2]+"\' and hopid="+str(row[0])):
ffile.write(str(lat[0])+'\n')
ffile.close()
print tid+': rttavg/'+loc+'_'+isp+' done'
def fun1(loc,isp,tid):
print "In Thread "+tid
ffile=open('raj/'+loc+'_'+isp,'w')
con = sqlite3.connect(":memory:")
con.text_factory = str
cur = con.cursor()
cur.execute("CREATE TABLE t (location TEXT ,TS TEXT,isp TEXT,hopid INTEGER,landmark TEXT,ipaddress TEXT,ASN TEXT,latency REAL);")
fin=open(log_path+loc+'_'+isp+'.csv','r')
lines=fin.readlines()
data=[]
for line in lines:
data.append(tuple(line.strip().split(',')))
cur.executemany("INSERT INTO t (location,TS,isp,hopid,landmark,ipaddress,ASN,latency) VALUES (?,?,?,?,?,?,?,?);", data)
con.commit()
rows=cur.execute("select max(hopid) ,TS from t group by TS,landmark having landmark=\'106.187.35.87\'")
for row in rows:
for lat in cur.execute("select latency from t where TS="+row[1]+" and hopid="+str(row[0])+" and landmark=\'106.187.35.87\'"):
ffile.write(str(lat[0])+'\n')
ffile.close()
print tid+': raj/'+loc+'_'+isp+' done'
def main():
i=0
for loc in loc_isp:
for isp in loc_isp[loc]:
try:
i+=1
thread.start_new_thread(fun1,(loc,isp,str(i)))
except Exception as e:
# print e
print "Error: unable to start thread"
while 1:
pass
def proc():
for loc in loc_isp1:
for isp in loc_isp1[loc]:
num=[]
fin=open('raj/'+loc+'_'+isp,'r')
lines=fin.readlines()
for line in lines:
num.append(float(line.strip()))
try:
print locaname[loc]+'\t'+isp+'\t'+str(numpy.mean(num))+'\t'+str(numpy.min(num))+'\t'+str(numpy.max(num))+'\t'+str(numpy.std(num))+'\t'+str(numpy.median(num))
except ValueError:
# print 'travg/'+loc+'_'+isp
pass
if __name__ == '__main__':
# main()
proc() | [
"sameer@sameer.(none)"
] | sameer@sameer.(none) |
a648f8566586ffd796253fa7c5d7a3c148be0c9b | 9da67d80be888addfee7b98846c7ec05e8bb1106 | /users/serializers.py | c8b5ff4b78bd23bd040d446f70a092e8254489cd | [] | no_license | thisisfaisalhere/Init-django-react-project | 76c7267e44df09a851a140b6013442b26939071b | 0b0ec455037b6f46f79798f2712e0f36a84b06da | refs/heads/master | 2023-06-05T05:16:44.277656 | 2021-06-28T02:45:47 | 2021-06-28T02:45:47 | 368,810,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | from django.contrib.auth.models import User
from rest_framework_simplejwt.tokens import RefreshToken, TokenError
from rest_framework.exceptions import AuthenticationFailed
from rest_framework import serializers
from django.contrib import auth
from .models import *
# User serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
# RegisterUser Serializer
class RegisterUserSerializer(serializers.ModelSerializer):
email = serializers.EmailField(required=True)
name = serializers.CharField(required=True)
password = serializers.CharField(min_length=8, write_only=True)
class Meta:
model = User
fields = ('email', 'name', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
class EmailVerificationSerializer(serializers.ModelSerializer):
token = serializers.CharField(max_length=555)
class Meta:
model = User
fields = ['token']
class LoginSerializer(serializers.ModelSerializer):
email = serializers.EmailField(max_length=255, min_length=3)
password = serializers.CharField(
max_length=68, min_length=6, write_only=True)
tokens = serializers.SerializerMethodField()
name = serializers.CharField(required=False, read_only=True)
msg = serializers.CharField(required=False, read_only=True)
def get_tokens(self, obj):
user = User.objects.get(email=obj['email'])
if obj['msg'] != "Your Teacher Account hasn't been Approved yet!":
return {
'refresh': user.tokens()['refresh'],
'access': user.tokens()['access']
}
class Meta:
model = User
fields = ['tokens', 'name', 'msg', 'password', 'email']
def validate(self, attrs):
email = attrs.get('email', '')
password = attrs.get('password', '')
user = auth.authenticate(email=email, password=password)
if not user:
raise AuthenticationFailed('Invalid credentials, try again')
elif not user.is_active:
raise AuthenticationFailed('Account disabled, contact admin')
elif user:
return {
'email': user.email,
'name': user.name,
'tokens': user.tokens,
'msg': 'Logged In Successfully',
}
return super().validate(attrs)
class LogoutSerializer(serializers.Serializer):
refresh = serializers.CharField()
default_error_message = {
'bad_token': ('Token is expired or invalid')
}
def validate(self, attrs):
self.token = attrs['refresh']
return attrs
def save(self, **kwargs):
try:
RefreshToken(self.token).blacklist()
except TokenError:
self.fail('bad_token')
class ResetPasswordEmailRequestSerializer(serializers.Serializer):
email = serializers.EmailField(min_length=2)
redirect_url = serializers.CharField(max_length=500, required=False)
class Meta:
fields = ['email']
class SetNewPasswordSerializer(serializers.Serializer):
password = serializers.CharField(
min_length=6, max_length=68, write_only=True)
class Meta:
fields = ['password']
class ChangePasswordSerializer(serializers.Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
class Meta:
fields = ['old_password', 'new_password'] | [
"thisisfaisalhere@gmail.com"
] | thisisfaisalhere@gmail.com |
457d4be0fd6afb8849118ce0b85bb64cdc5018be | 7710d3d1a6e7f6361188deff86801aba6856b9ac | /src/onedrivesdk/model/audio.py | 8e76140ae62352276af62698c60125d0d5365612 | [
"MIT"
] | permissive | Anadorr/onedrive-sdk-python | 8d2ef53a908f9be57bb7cdde107093df548c1a1b | 283fc99c63a5439ee023d6f5be4e69f4bb30474c | refs/heads/master | 2021-01-18T09:05:16.880959 | 2016-02-05T15:24:33 | 2016-02-05T15:24:33 | 50,132,468 | 0 | 0 | null | 2016-01-21T19:49:35 | 2016-01-21T19:49:35 | null | UTF-8 | Python | false | false | 7,490 | py | # -*- coding: utf-8 -*-
'''
# Copyright (c) 2015 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..one_drive_object_base import OneDriveObjectBase
def _proxied_property(json_key, type_name):
    """Build a property that proxies *json_key* in ``self._prop_dict``.

    The getter returns ``None`` when the key is absent and the setter
    writes straight through, exactly matching the generated
    getter/setter pairs this factory replaces.
    """
    def getter(self):
        return self._prop_dict.get(json_key)

    def setter(self, val):
        self._prop_dict[json_key] = val

    getter.__doc__ = (
        "Gets and sets the {0}\n\n"
        "Returns:\n"
        "    {1}:\n"
        "        The {0}\n".format(json_key, type_name)
    )
    return property(getter, setter)


class Audio(OneDriveObjectBase):
    """Audio facet of a OneDrive item.

    Wraps the raw property dictionary returned by the OneDrive API and
    exposes each camelCase JSON key as a snake_case Python property.
    """

    def __init__(self, prop_dict=None):
        # The generated signature used a mutable default (``prop_dict={}``),
        # so every instance constructed without an argument shared one dict
        # and mutations leaked between instances. A fresh dict per instance
        # fixes that while staying backward compatible for callers that
        # pass their own mapping.
        self._prop_dict = {} if prop_dict is None else prop_dict

    album = _proxied_property("album", "str")
    album_artist = _proxied_property("albumArtist", "str")
    artist = _proxied_property("artist", "str")
    bitrate = _proxied_property("bitrate", "int")
    composers = _proxied_property("composers", "str")
    copyright = _proxied_property("copyright", "str")
    disc = _proxied_property("disc", "int")
    disc_count = _proxied_property("discCount", "int")
    duration = _proxied_property("duration", "int")
    genre = _proxied_property("genre", "str")
    has_drm = _proxied_property("hasDrm", "bool")
    is_variable_bitrate = _proxied_property("isVariableBitrate", "bool")
    title = _proxied_property("title", "str")
    track = _proxied_property("track", "int")
    track_count = _proxied_property("trackCount", "int")
    year = _proxied_property("year", "int")
| [
"daniel.nadeau01@gmail.com"
] | daniel.nadeau01@gmail.com |
a1bf73864ef965fced3341d97ef68ae64f794bf1 | 69c0231db6f9ab13714850bbfe3eb9a4f9d1c600 | /venv/bin/pylint | a187188dc8addd97f16d75ccd0305f3977357c49 | [] | no_license | SumanKhdka/django-polls | aa2ff0c0a327f1c34dda0cacc886ca2b507c8223 | bbd6a332b8fd8bbee236a282c74d89313a8d40d9 | refs/heads/master | 2022-01-28T16:53:08.602571 | 2019-08-23T12:45:39 | 2019-08-23T12:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/suman/PycharmProjects/mysite1/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
    # Strip a setuptools-style "-script.py"/".exe" wrapper suffix from
    # argv[0] so pylint reports a clean program name, then delegate.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pylint())
| [
"suman@ncit.edu.np"
] | suman@ncit.edu.np | |
95de76dbf85e358fc7d4c5589e293bd48b8d7d27 | b148cda05d07895b97f5dbc29d06999ffb4d1b33 | /sonic-pcied/tests/test_DaemonPcied.py | 2c3c953e7e483aaec37fc1251ee3d54cd23d1fbc | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | keboliu/sonic-platform-daemons | 0fb6ce76902ec0b6942cd3c1356c7586dacb8d58 | c7cbbb8db5b74d3eddcedd733000d5232006911e | refs/heads/master | 2023-08-31T00:05:25.760285 | 2022-08-09T20:13:59 | 2022-08-09T20:13:59 | 139,558,345 | 0 | 1 | NOASSERTION | 2020-03-26T10:19:47 | 2018-07-03T09:15:09 | Python | UTF-8 | Python | false | false | 8,620 | py | import datetime
import os
import sys
from imp import load_source # Replace with importlib once we no longer need to support Python 2
import pytest
# TODO: Clean this up once we no longer need to support Python 2
if sys.version_info.major == 3:
from unittest import mock
else:
import mock
from .mock_platform import MockPcieUtil
SYSLOG_IDENTIFIER = 'pcied_test'
NOT_AVAILABLE = 'N/A'
tests_path = os.path.dirname(os.path.abspath(__file__))
# Add mocked_libs path so that the file under test can load mocked modules from there
mocked_libs_path = os.path.join(tests_path, "mocked_libs")
sys.path.insert(0, mocked_libs_path)
from sonic_py_common import daemon_base
daemon_base.db_connect = mock.MagicMock()
# Add path to the file under test so that we can load it
modules_path = os.path.dirname(tests_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
load_source('pcied', os.path.join(scripts_path, 'pcied'))
import pcied
pcie_no_aer_stats = \
"""
{'correctable': {}, 'fatal': {}, 'non_fatal': {}}
"""
pcie_aer_stats_no_err = {'correctable': {'field1': '0', 'field2': '0'},
'fatal': {'field3': '0', 'field4': '0'},
'non_fatal': {'field5': '0', 'field6': '0'}}
pcie_aer_stats_err = \
"""
{'correctable': {'field1': '1', 'field2': '0'},
'fatal': {'field3': '0', 'field4': '1'},
'non_fatal': {'field5': '0', 'field6': '1'}}
"""
pcie_device_list = \
"""
[{'bus': '00', 'dev': '01', 'fn': '0', 'id': '1f10', 'name': 'PCI A'},
{'bus': '00', 'dev': '02', 'fn': '0', 'id': '1f11', 'name': 'PCI B'},
{'bus': '00', 'dev': '03', 'fn': '0', 'id': '1f13', 'name': 'PCI C'}]
"""
pcie_check_result_no = []
pcie_check_result_pass = \
"""
[{'bus': '00', 'dev': '01', 'fn': '0', 'id': '1f10', 'name': 'PCI A', 'result': 'Passed'},
{'bus': '00', 'dev': '02', 'fn': '0', 'id': '1f11', 'name': 'PCI B', 'result': 'Passed'},
{'bus': '00', 'dev': '03', 'fn': '0', 'id': '1f12', 'name': 'PCI C', 'result': 'Passed'}]
"""
pcie_check_result_fail = \
"""
[{'bus': '00', 'dev': '01', 'fn': '0', 'id': '1f10', 'name': 'PCI A', 'result': 'Passed'},
{'bus': '00', 'dev': '02', 'fn': '0', 'id': '1f11', 'name': 'PCI B', 'result': 'Passed'},
{'bus': '00', 'dev': '03', 'fn': '0', 'id': '1f12', 'name': 'PCI C', 'result': 'Failed'}]
"""
class TestDaemonPcied(object):
    """
    Test cases to cover functionality in DaemonPcied class
    """
    @mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
    def test_signal_handler(self):
        """signal_handler: SIGHUP is ignored, SIGINT/SIGTERM set the stop
        event and an exit code of 128 + signal, other signals are only
        logged as warnings."""
        daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
        daemon_pcied.stop_event.set = mock.MagicMock()
        daemon_pcied.log_info = mock.MagicMock()
        daemon_pcied.log_warning = mock.MagicMock()
        # Test SIGHUP
        daemon_pcied.signal_handler(pcied.signal.SIGHUP, None)
        assert daemon_pcied.log_info.call_count == 1
        daemon_pcied.log_info.assert_called_with("Caught signal 'SIGHUP' - ignoring...")
        assert daemon_pcied.log_warning.call_count == 0
        assert daemon_pcied.stop_event.set.call_count == 0
        assert pcied.exit_code == 0
        # Reset
        daemon_pcied.log_info.reset_mock()
        daemon_pcied.log_warning.reset_mock()
        daemon_pcied.stop_event.set.reset_mock()
        # Test SIGINT
        test_signal = pcied.signal.SIGINT
        daemon_pcied.signal_handler(test_signal, None)
        assert daemon_pcied.log_info.call_count == 1
        daemon_pcied.log_info.assert_called_with("Caught signal 'SIGINT' - exiting...")
        assert daemon_pcied.log_warning.call_count == 0
        assert daemon_pcied.stop_event.set.call_count == 1
        # Exit code follows the shell convention: 128 + signal number.
        assert pcied.exit_code == (128 + test_signal)
        # Reset
        daemon_pcied.log_info.reset_mock()
        daemon_pcied.log_warning.reset_mock()
        daemon_pcied.stop_event.set.reset_mock()
        # Test SIGTERM
        test_signal = pcied.signal.SIGTERM
        daemon_pcied.signal_handler(test_signal, None)
        assert daemon_pcied.log_info.call_count == 1
        daemon_pcied.log_info.assert_called_with("Caught signal 'SIGTERM' - exiting...")
        assert daemon_pcied.log_warning.call_count == 0
        assert daemon_pcied.stop_event.set.call_count == 1
        assert pcied.exit_code == (128 + test_signal)
        # Reset
        daemon_pcied.log_info.reset_mock()
        daemon_pcied.log_warning.reset_mock()
        daemon_pcied.stop_event.set.reset_mock()
        pcied.exit_code = 0
        # Test an unhandled signal
        daemon_pcied.signal_handler(pcied.signal.SIGUSR1, None)
        assert daemon_pcied.log_warning.call_count == 1
        daemon_pcied.log_warning.assert_called_with("Caught unhandled signal 'SIGUSR1' - ignoring...")
        assert daemon_pcied.log_info.call_count == 0
        assert daemon_pcied.stop_event.set.call_count == 0
        assert pcied.exit_code == 0
    @mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
    def test_run(self):
        """run: one daemon iteration performs exactly one device check."""
        daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
        daemon_pcied.check_pcie_devices = mock.MagicMock()
        daemon_pcied.run()
        assert daemon_pcied.check_pcie_devices.call_count == 1
    @mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
    def test_check_pcie_devices(self):
        """check_pcie_devices: always updates the status DB; AER stats are
        not collected when the platform check returns no devices."""
        daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
        daemon_pcied.update_pcie_devices_status_db = mock.MagicMock()
        daemon_pcied.check_n_update_pcie_aer_stats = mock.MagicMock()
        pcied.platform_pcieutil.get_pcie_check = mock.MagicMock()
        daemon_pcied.check_pcie_devices()
        assert daemon_pcied.update_pcie_devices_status_db.call_count == 1
        assert daemon_pcied.check_n_update_pcie_aer_stats.call_count == 0
    @mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
    def test_update_pcie_devices_status_db(self):
        """update_pcie_devices_status_db: writes the status table in both
        cases; logs info when 0 devices failed, an error otherwise."""
        daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
        daemon_pcied.status_table = mock.MagicMock()
        daemon_pcied.log_info = mock.MagicMock()
        daemon_pcied.log_error = mock.MagicMock()
        # test for pass resultInfo
        daemon_pcied.update_pcie_devices_status_db(0)
        assert daemon_pcied.status_table.set.call_count == 1
        assert daemon_pcied.log_info.call_count == 1
        assert daemon_pcied.log_error.call_count == 0
        daemon_pcied.status_table.set.reset_mock()
        daemon_pcied.log_info.reset_mock()
        # test for resultInfo with 1 device failed to detect
        daemon_pcied.update_pcie_devices_status_db(1)
        assert daemon_pcied.status_table.set.call_count == 1
        assert daemon_pcied.log_info.call_count == 0
        assert daemon_pcied.log_error.call_count == 1
    @mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
    @mock.patch('pcied.read_id_file')
    def test_check_n_update_pcie_aer_stats(self, mock_read):
        """check_n_update_pcie_aer_stats: does nothing when the device id
        file is missing (read_id_file -> None); otherwise collects AER
        stats and pushes them to the state DB."""
        daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
        daemon_pcied.device_table = mock.MagicMock()
        daemon_pcied.update_aer_to_statedb = mock.MagicMock()
        pcied.platform_pcieutil.get_pcie_aer_stats = mock.MagicMock()
        mock_read.return_value = None
        daemon_pcied.check_n_update_pcie_aer_stats(0,1,0)
        assert daemon_pcied.update_aer_to_statedb.call_count == 0
        assert daemon_pcied.device_table.set.call_count == 0
        assert pcied.platform_pcieutil.get_pcie_aer_stats.call_count == 0
        mock_read.return_value = '1714'
        daemon_pcied.check_n_update_pcie_aer_stats(0,1,0)
        assert daemon_pcied.update_aer_to_statedb.call_count == 1
        assert daemon_pcied.device_table.set.call_count == 1
        assert pcied.platform_pcieutil.get_pcie_aer_stats.call_count == 1
    @mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
    def test_update_aer_to_statedb(self):
        """update_aer_to_statedb: flattens the AER stats dict into one
        device-table row; no debug logging on the happy path."""
        daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
        daemon_pcied.log_debug = mock.MagicMock()
        daemon_pcied.device_table = mock.MagicMock()
        daemon_pcied.device_name = mock.MagicMock()
        daemon_pcied.aer_stats = pcie_aer_stats_no_err
        """
        mocked_expected_fvp = pcied.swsscommon.FieldValuePairs(
            [("correctable|field1", '0'),
             ("correctable|field2", '0'),
             ("fatal|field3", '0'),
             ("fatal|field4", '0'),
             ("non_fatal|field5", '0'),
             ("non_fatal|field6", '0'),
            ])
        """
        daemon_pcied.update_aer_to_statedb()
        assert daemon_pcied.log_debug.call_count == 0
        assert daemon_pcied.device_table.set.call_count == 1
        daemon_pcied.device_table.set.reset_mock()
| [
"noreply@github.com"
] | noreply@github.com |
fa2c6cea2321b48a974d9ded6332e8ee1a3f2ca8 | 4e5193e3f46dad38e62f4f09cd484152b66b85b2 | /colorNames.py | d365cbc81fed3eb7c52ffb6905e76275895c6b87 | [] | no_license | JacobChunn/skribbl-bot | f47acb0ede7c1574764eed32fbdb2aaddf118d5a | 3b362c3f0b698218fcc6c3beb1d4035587efbbcc | refs/heads/master | 2022-07-02T23:52:24.823736 | 2018-10-15T02:11:50 | 2018-10-15T02:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | class ColorNames:
ImageMagickColorMap = {}
ImageMagickColorMap["1"] = "#FFFFFF"
ImageMagickColorMap["2"] = "#C1C1C1"
ImageMagickColorMap["3"] = "#EF130B"
ImageMagickColorMap["4"] = "#FF7100"
ImageMagickColorMap["5"] = "#FFE400"
ImageMagickColorMap["6"] = "#00CC00"
ImageMagickColorMap["7"] = "#00B2FF"
ImageMagickColorMap["8"] = "#231FD3"
ImageMagickColorMap["9"] = "#A300BA"
ImageMagickColorMap["10"] = "#D37CAA"
ImageMagickColorMap["11"] = "#A0522D"
ImageMagickColorMap["12"] = "#000000"
ImageMagickColorMap["13"] = "#4C4C4C"
ImageMagickColorMap["14"] = "#740B07"
ImageMagickColorMap["15"] = "#C23800"
ImageMagickColorMap["16"] = "#E8A200"
ImageMagickColorMap["17"] = "#006400"
ImageMagickColorMap["18"] = "#00569E"
ImageMagickColorMap["19"] = "#0E0865"
ImageMagickColorMap["20"] = "#550069"
ImageMagickColorMap["21"] = "#A75574"
ImageMagickColorMap["22"] = "#63300D"
@staticmethod
def rgbFromStr(s):
# s starts with a #.
r, g, b = int(s[1:3],16), int(s[3:5], 16),int(s[5:7], 16)
return r, g, b
@staticmethod
def findNearestImageMagickColorName(RGB_tuple):
return ColorNames.findNearestColorName(RGB_tuple, ColorNames.ImageMagickColorMap)
@staticmethod
def findNearestColorName(RGB_tuple, Map):
R = RGB_tuple[0]
G = RGB_tuple[1]
B = RGB_tuple[2]
mindiff = None
for d in Map:
r, g, b = ColorNames.rgbFromStr(Map[d])
diff = abs(R -r)*256 + abs(G-g)* 256 + abs(B- b)* 256
if mindiff is None or diff < mindiff:
mindiff = diff
mincolorname = d
return mincolorname | [
"chunnjake@gmail.com"
] | chunnjake@gmail.com |
67a8229c60a57a6a11059aa679175a1f4283f7c7 | 8294d8e51cf5eac7879a3461264c74e057f67ab6 | /tilemaps.py | 8e0287f1e0a8a36c9eedf407a78afe557dc85fbc | [] | no_license | ZackyGameDev/Tetris-in-Python | 708081e7dbfa387f26806d61fca39a781a4047ca | dd52412b5d808e8e8842aec5cca6f7eecf8849e2 | refs/heads/master | 2023-01-10T19:43:52.731357 | 2020-10-27T09:03:27 | 2020-10-27T09:03:27 | 305,693,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | import pygame
tilemap = []
for i in range(20):
tilemap.append([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Tetriminos Shapes
z_shape = [
[[0, 1, 0, 0],
[1, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
[[1, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
]
s_shape = [
[[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]],
[[0, 1, 1, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
]
i_shape = [
[[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]],
[[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]]
]
l_shape = [
[[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[1, 1, 1, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
[[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 1, 0],
[1, 1, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
]
j_shape = [
[[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]],
[[1, 0, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 1, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]
]
o_shape = [
[[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
]
t_shape = [
[[0, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]],
[[0, 1, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]]
]
# Piece letter -> list of that tetrimino's rotation states, each a 4x4
# 0/1 occupancy grid defined above.
tetrimino_shapes = {
    'S': s_shape,
    'Z': z_shape,
    'I': i_shape,
    'L': l_shape,
    'J': j_shape,
    'O': o_shape,
    'T': t_shape
}
if __name__ == '__main__':
    # Launch the game even when this data module is run directly.
    __import__('tetris')
| [
"zaacky2456@gmail.com"
] | zaacky2456@gmail.com |
c8e99972a246a077b466f45e66c23b688c79d040 | ea373d1b4296d16eaa1355972cccd28eaa336871 | /login-signup-Django/signup/views.py | 1ea7905bc7574d9d41102a129e6dab3e08283977 | [] | no_license | nazaninsbr/Web-Development | f1a03e3d26d79dda8a6f9978d443a62cc5b88b42 | 7821ec2596d1dff7c4f390e01ae7d90e3fdbf029 | refs/heads/master | 2021-05-02T16:05:09.508344 | 2018-04-27T18:20:01 | 2018-04-27T18:20:01 | 120,666,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from django.contrib.auth import login, authenticate
# from django.http import HttpResponse, JsonResponse
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from signup.serializers import SignupSerializer
import json
from rest_framework.parsers import JSONParser
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from django.contrib.auth.hashers import make_password
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.urls import reverse
# @login_required
# def home(request):
# return render(request, 'signup/home.html')
import logging
logger = logging.getLogger(__name__)
@csrf_exempt
def signup(request):
    """Create a new user account, log them in, and return an auth token.

    GET  -> JSON error payload (the endpoint only accepts POSTs).
    POST -> expects ``{"username": ..., "password": ...}``; on success
            creates the user, issues a DRF token, stores it in the
            session under "SoCkey" and returns ``{"key": <token>}``.
            Validation failures return the serializer errors as JSON.
    """
    if request.method == 'GET':
        response_data = {}
        response_data['result'] = 'error'
        response_data['message'] = 'You need to post something'
        return HttpResponse(json.dumps(response_data), content_type="application/json")
    if request.method == 'POST':
        signupdata = JSONParser().parse(request)
        serializer = SignupSerializer(data=signupdata)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors)
        jsonfile = serializer.data
        username = jsonfile["username"]
        password = jsonfile["password"]
        # SECURITY: the original called make_password(password, '1'),
        # hashing every account with the same fixed salt, and also logged
        # the plaintext username/password via logger.info.  Omitting the
        # salt argument lets Django generate a random per-password salt,
        # and the plaintext logging is removed.
        password = make_password(password)
        user = User(username=username, password=password)
        user.save()
        new_token = Token.objects.create(user=user)
        new_token.save()
        request.session["SoCkey"] = new_token.key
        # Keep the session (and cached token) alive for ~347 days.
        request.session.set_expiry(30000000)
        login(request, user)
        return JsonResponse({"key": new_token.key})
| [
"nazanin.sabrii@gmail.com"
] | nazanin.sabrii@gmail.com |
edd4fc48a71d97c39564e5942970515261ac1b48 | ec8136f231f4e4e6fa369f4246f5c6a678743cc9 | /moviesite/polls/urls.py | 2d2e0c8fa9de2cbc7dfa2f16637d8f6cb3148819 | [] | no_license | alankar63/movie-app | 42bdb79e66c93c334638a4947cc4d1b376ab078d | ce01432c97d140693faebcd753ec598d0de93f32 | refs/heads/master | 2021-01-20T20:21:42.560188 | 2016-06-09T11:10:03 | 2016-06-09T11:10:03 | 60,701,474 | 0 | 0 | null | 2016-06-09T11:10:22 | 2016-06-08T13:50:28 | null | UTF-8 | Python | false | false | 214 | py | from django.conf.urls import url
from . import views
# Movie app routes: a catch-all title slug renders the info page; the bare
# root renders the "wrong link" page.
urlpatterns = [
    url(r'^([0-9a-zA-Z ~@#$^*()_+=[\]{}|\\,.?:-]+)/$',
        views.info, name="info"),
    url(r'^$', views.wrong_link, name='wronglink'),
]
| [
"alankar63@gmail.com"
] | alankar63@gmail.com |
3fcffdc830700a1a127d0369a3f7fa692b40d6c8 | 06e91ab41fbcc0286dcfcc5cc6ddb258dbcf01b0 | /NIM Refactored.py | 1ae5bc8d23d71302b423e1634b410d7f80fdf652 | [] | no_license | tatianarudskaya/ZJP | 154dc9c669f4089364950aab3d9b11bb83368cda | 014e96e65e78a6114b228aef3b2b33418ea0d8fc | refs/heads/master | 2020-04-16T14:38:44.562827 | 2019-01-14T14:26:57 | 2019-01-14T14:26:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,377 | py | import random
# Decide whether the latest move ended the game and announce the outcome.
def winCondition (summ, lastAction, goal, endGame):
    """Return True when the game is over after the latest move.

    Announces a win when the running total hits *goal* exactly and a loss
    when it overshoots; below the goal the incoming flag is returned
    unchanged.
    """
    if summ < goal:
        return endGame
    outcome = "wins!" if summ == goal else "looses"
    print (lastAction, outcome)
    return True
def playerInputRequest (pick, step):
    """Prompt the player for a number from 1 to *step* and return it.

    Returns the parsed int, or the incoming *pick* unchanged when the
    typed input is not a valid integer (the caller loops until the value
    is in range).
    """
    try:
        print ("Pick number from 1 to", step)
        pick = int(input())
    except ValueError:
        print ("Hey! Pick the number please!")
    return pick
# Player's turn: keep asking until a legal pick, apply it, check for a win.
def inputFromPlayer (step, summ, goal, pick, endGame):
    """Run one player turn; return the updated (summ, endGame) pair."""
    print ("\n___________________\nPlayers turn:")
    lastAction = "Player"
    # Loop until the player supplies a number in [1, step].
    while pick not in range(1, step+1):
        pick = playerInputRequest (pick, step)
    summ = printResult (lastAction, pick, summ)
    endGame = winCondition (summ, lastAction, goal, endGame)
    return (summ, endGame)
# Announce a move and fold it into the running total.
def printResult (lastAction, pick, summ):
    """Print who picked what, add *pick* to *summ*, and return the new total."""
    new_total = summ + pick
    print (lastAction, "picked number", pick)
    print ("The sum of the numbers is", new_total)
    return new_total
# Computer move selection for a non-terminal position.
def computerPick (step, summ, goal, lastAction):
    """Choose the computer's number and return the updated (step, summ).

    The winning strategy is to leave the running total congruent to
    ``goal - 1`` modulo ``step``; when the total is already in that
    state no winning move exists and the pick is random.
    """
    if (goal - 1 - summ) % step == 0:
        # No winning move from here: pick randomly.
        # NOTE(review): randrange(1, step) never returns *step* itself
        # (the stop bound is exclusive) — looks like an off-by-one;
        # confirm whether picking exactly *step* was intended here.
        pick = random.randrange(1, step)
        summ = printResult (lastAction, pick, summ)
    else:
        # Move onto the winning track: total becomes goal-1 (mod step).
        pick = (goal - 1 - summ) % step
        summ = printResult (lastAction, pick, summ)
    return (step, summ)
# Computer's full turn: finish the game if possible, otherwise play strategy.
def computerAction (step, summ, goal, firstTurn, endGame):
    """Run one computer turn; return (summ, endGame, firstTurn)."""
    print ("\n___________________\nComputers turn:")
    lastAction = "Computer"
    if firstTurn == 0:
        # Opening move by the computer: mark the opening as consumed (2)
        # and play a strategic pick.
        firstTurn = 2
        step, summ = computerPick (step, summ, goal, lastAction)
    else:
        if goal - summ <= step:
            # The goal is reachable in a single pick: take it and win.
            pick = goal - summ
            endGame = True
            summ = printResult (lastAction, pick, summ)
        else:
            step, summ = computerPick (step, summ, goal, lastAction)
    endGame = winCondition (summ, lastAction, goal, endGame)
    return (summ, endGame, firstTurn)
def pickGoal (step, goal):
    """Ask for the target total until it is at least twice *step*."""
    while goal / step < 2:
        try:
            goal = int(input("Enter a threshold number (at least two times more than a step)\n"))
        except ValueError:
            print ("Hey! Pick the number please!")
    return goal
def pickStep (step):
    """Print the game banner and ask for the maximum step (at least 2)."""
    print ("_________NIM_________")
    while step < 2:
        try:
            step = int(input("Enter the maximum step (at least 2)\n"))
        except ValueError:
            print ("Hey! Pick the number please!")
    return step
def pickFirstOrSecondTurn (firstTurn):
    """Ask whether the player moves first; returns 1 (yes) or 0 (no)."""
    # The loop also forces re-entry for the initial sentinel value 2.
    while (1 < firstTurn or firstTurn < 0):
        try:
            firstTurn = int(input("Do you want to make the first move?\n0 - if not, 1 - if yes.\n"))
        except ValueError:
            print ("Choose 0 or 1, please!")
    return firstTurn
# Game state: running total, max step, target total, last player pick,
# turn marker (1 = player first, 0 = computer first, 2 = opening consumed /
# sentinel) and the end-of-game flag.
resultSumm, step, goal, pick, firstTurn, endGame = 0, 0, 0, 0, 2, False
# Setup: the player chooses the maximum step and the target total.
step = pickStep (step)
goal = pickGoal (step, goal)
# The player chooses whether to move first.
firstTurn = pickFirstOrSecondTurn (firstTurn)
# Main loop: alternate player and computer turns until someone ends the game.
while endGame == False:
    if firstTurn == 1:
        resultSumm, endGame = inputFromPlayer (step, resultSumm, goal, pick, endGame)
        if endGame == True:
            break
        else:
            resultSumm, endGame, firstTurn = computerAction (step, resultSumm, goal, firstTurn, endGame)
    else:
        resultSumm, endGame, firstTurn = computerAction (step, resultSumm, goal, firstTurn, endGame)
        if endGame == True:
            break
        else:
            resultSumm, endGame = inputFromPlayer (step, resultSumm, goal, pick, endGame)
| [
"noreply@github.com"
] | noreply@github.com |
e1ef965f355c464b6b2b9a4fae2a4f47aab4b841 | d097ee329e5759f7e901c7216d9f057dd0892ead | /sdk/python/tests/test_historical_retrieval.py | 3a708c7503a879981b68921b340cae6aa2e7a548 | [
"Apache-2.0"
] | permissive | DolevAdas/feast | 81ac088eb1242f72593fd4180882818262c6eea0 | 2e0113e1561696edbbdd031767b428eda3aa98a8 | refs/heads/master | 2023-06-28T21:53:52.598087 | 2021-07-08T04:18:56 | 2021-07-08T04:18:56 | 373,108,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,563 | py | import os
import random
import string
import time
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
import assertpy
import numpy as np
import pandas as pd
import pytest
from google.cloud import bigquery
from pandas.testing import assert_frame_equal
from pytz import utc
import feast.driver_test_data as driver_data
from feast import RepoConfig, errors, utils
from feast.data_source import BigQuerySource, FileSource
from feast.entity import Entity
from feast.errors import FeatureNameCollisionError
from feast.feature import Feature
from feast.feature_store import FeatureStore, _validate_feature_refs
from feast.feature_view import FeatureView
from feast.infra.offline_stores.bigquery import BigQueryOfflineStoreConfig
from feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig
from feast.infra.provider import DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL
from feast.value_type import ValueType
np.random.seed(0)
PROJECT_NAME = "default"
def generate_entities(date, infer_event_timestamp_col, order_count: int = 1000):
    """Create fixed customer/driver entity ids and a synthetic orders frame.

    Orders span one year on either side of *date*; the returned
    start_date/end_date bound the week preceding *date* and are used to
    generate matching feature data.  Returns
    (customer_entities, driver_entities, end_date, orders_df, start_date).
    """
    end_date = date
    before_start_date = end_date - timedelta(days=365)
    start_date = end_date - timedelta(days=7)
    after_end_date = end_date + timedelta(days=365)
    customer_entities = list(range(1001, 1110))
    driver_entities = list(range(5001, 5110))
    orders_df = driver_data.create_orders_df(
        customers=customer_entities,
        drivers=driver_entities,
        start_date=before_start_date,
        end_date=after_end_date,
        order_count=order_count,
        infer_event_timestamp_col=infer_event_timestamp_col,
    )
    return customer_entities, driver_entities, end_date, orders_df, start_date
def stage_driver_hourly_stats_parquet_source(directory, df):
    """Write *df* to a parquet file under *directory* and wrap it in a FileSource."""
    # Write to disk
    driver_stats_path = os.path.join(directory, "driver_stats.parquet")
    df.to_parquet(path=driver_stats_path, allow_truncated_timestamps=True)
    return FileSource(
        path=driver_stats_path,
        event_timestamp_column="datetime",
        created_timestamp_column="",
    )
def stage_driver_hourly_stats_bigquery_source(df, table_id):
    """Upload *df* into BigQuery table *table_id* and block until the load finishes."""
    client = bigquery.Client()
    job_config = bigquery.LoadJobConfig()
    df.reset_index(drop=True, inplace=True)
    job = client.load_table_from_dataframe(df, table_id, job_config=job_config)
    job.result()
def create_driver_hourly_stats_feature_view(source):
    """Build the driver_stats FeatureView (driver entity, 2-hour TTL) over *source*."""
    driver_stats_feature_view = FeatureView(
        name="driver_stats",
        entities=["driver"],
        features=[
            Feature(name="conv_rate", dtype=ValueType.FLOAT),
            Feature(name="acc_rate", dtype=ValueType.FLOAT),
            Feature(name="avg_daily_trips", dtype=ValueType.INT32),
        ],
        input=source,
        ttl=timedelta(hours=2),
    )
    return driver_stats_feature_view
def stage_customer_daily_profile_parquet_source(directory, df):
    """Write *df* to a parquet file under *directory* and wrap it in a FileSource."""
    customer_profile_path = os.path.join(directory, "customer_profile.parquet")
    df.to_parquet(path=customer_profile_path, allow_truncated_timestamps=True)
    return FileSource(
        path=customer_profile_path,
        event_timestamp_column="datetime",
        created_timestamp_column="created",
    )
def stage_customer_daily_profile_bigquery_source(df, table_id):
    """Upload *df* into BigQuery table *table_id* and block until the load finishes."""
    client = bigquery.Client()
    job_config = bigquery.LoadJobConfig()
    df.reset_index(drop=True, inplace=True)
    job = client.load_table_from_dataframe(df, table_id, job_config=job_config)
    job.result()
def create_customer_daily_profile_feature_view(source):
    """Build the customer_profile FeatureView (customer_id entity, 2-day TTL)."""
    customer_profile_feature_view = FeatureView(
        name="customer_profile",
        entities=["customer_id"],
        features=[
            Feature(name="current_balance", dtype=ValueType.FLOAT),
            Feature(name="avg_passenger_count", dtype=ValueType.FLOAT),
            Feature(name="lifetime_trip_count", dtype=ValueType.INT32),
            Feature(name="avg_daily_trips", dtype=ValueType.INT32),
        ],
        input=source,
        ttl=timedelta(days=2),
    )
    return customer_profile_feature_view
# Converts the given column of the pandas records to UTC timestamps
def convert_timestamp_records_to_utc(records, column):
    """Make records[*][column] timezone-aware and convert them to UTC, in place."""
    for record in records:
        record[column] = utils.make_tzaware(record[column]).astimezone(utc)
    return records
# As-of lookup over a list of record dicts.
def find_asof_record(records, ts_key, ts_start, ts_end, filter_key, filter_value):
    """Return the latest matching record in the inclusive time window.

    Scans *records* for entries whose ``filter_key`` equals
    ``filter_value`` and whose ``ts_key`` timestamp lies within
    [ts_start, ts_end], and returns the one with the greatest
    timestamp.  Returns an empty dict when nothing matches.
    """
    candidates = [
        record
        for record in records
        if record[filter_key] == filter_value
        and ts_start <= record[ts_key] <= ts_end
    ]
    if not candidates:
        return {}
    return max(candidates, key=lambda record: record[ts_key])
def get_expected_training_df(
    customer_df: pd.DataFrame,
    customer_fv: FeatureView,
    driver_df: pd.DataFrame,
    driver_fv: FeatureView,
    orders_df: pd.DataFrame,
    event_timestamp: str,
    full_feature_names: bool = False,
):
    """Reference implementation of the expected point-in-time join.

    For every order, picks the latest driver/customer feature row whose
    timestamp falls within the feature view's TTL window ending at the
    order's event timestamp, mimicking what the offline store is
    expected to produce.  With *full_feature_names* the joined columns
    are prefixed "<feature_view>__".
    """
    # Convert all pandas dataframes into records with UTC timestamps
    order_records = convert_timestamp_records_to_utc(
        orders_df.to_dict("records"), event_timestamp
    )
    driver_records = convert_timestamp_records_to_utc(
        driver_df.to_dict("records"), driver_fv.input.event_timestamp_column
    )
    customer_records = convert_timestamp_records_to_utc(
        customer_df.to_dict("records"), customer_fv.input.event_timestamp_column
    )
    # Manually do point-in-time join of orders to drivers and customers records
    for order_record in order_records:
        driver_record = find_asof_record(
            driver_records,
            ts_key=driver_fv.input.event_timestamp_column,
            ts_start=order_record[event_timestamp] - driver_fv.ttl,
            ts_end=order_record[event_timestamp],
            filter_key="driver_id",
            filter_value=order_record["driver_id"],
        )
        customer_record = find_asof_record(
            customer_records,
            ts_key=customer_fv.input.event_timestamp_column,
            ts_start=order_record[event_timestamp] - customer_fv.ttl,
            ts_end=order_record[event_timestamp],
            filter_key="customer_id",
            filter_value=order_record["customer_id"],
        )
        # Missing joins fall back to None, matching offline-store NULLs.
        order_record.update(
            {
                (f"driver_stats__{k}" if full_feature_names else k): driver_record.get(
                    k, None
                )
                for k in ("conv_rate", "avg_daily_trips")
            }
        )
        order_record.update(
            {
                (
                    f"customer_profile__{k}" if full_feature_names else k
                ): customer_record.get(k, None)
                for k in (
                    "current_balance",
                    "avg_passenger_count",
                    "lifetime_trip_count",
                )
            }
        )
    # Convert records back to pandas dataframe
    expected_df = pd.DataFrame(order_records)
    # Move "datetime" column to front
    current_cols = expected_df.columns.tolist()
    current_cols.remove(event_timestamp)
    expected_df = expected_df[[event_timestamp] + current_cols]
    # Cast some columns to expected types, since we lose information when converting pandas DFs into Python objects.
    if full_feature_names:
        expected_column_types = {
            "order_is_success": "int32",
            "driver_stats__conv_rate": "float32",
            "customer_profile__current_balance": "float32",
            "customer_profile__avg_passenger_count": "float32",
        }
    else:
        expected_column_types = {
            "order_is_success": "int32",
            "conv_rate": "float32",
            "current_balance": "float32",
            "avg_passenger_count": "float32",
        }
    for col, typ in expected_column_types.items():
        expected_df[col] = expected_df[col].astype(typ)
    return expected_df
def stage_orders_bigquery(df, table_id):
    """Upload the orders frame *df* into BigQuery table *table_id* and wait."""
    client = bigquery.Client()
    job_config = bigquery.LoadJobConfig()
    df.reset_index(drop=True, inplace=True)
    job = client.load_table_from_dataframe(df, table_id, job_config=job_config)
    job.result()
class BigQueryDataSet:
    """Context manager owning a temporary BigQuery dataset for a test run.

    The dataset is created (idempotently) on __enter__ and deleted with all
    contents on __exit__, even when the wrapped block raises.
    """
    def __init__(self, dataset_name):
        # Bare dataset name; the client's project id is prepended on use.
        self.name = dataset_name
    def __enter__(self):
        client = bigquery.Client()
        dataset = bigquery.Dataset(f"{client.project}.{self.name}")
        dataset.location = "US"
        dataset = client.create_dataset(dataset, exists_ok=True)
        return dataset
    def __exit__(self, exc_type, exc_value, exc_traceback):
        print("Tearing down BigQuery dataset")
        client = bigquery.Client()
        dataset_id = f"{client.project}.{self.name}"
        client.delete_dataset(dataset_id, delete_contents=True, not_found_ok=True)
        print(f"Deleted dataset '{dataset_id}'")
        # Exceptions are logged but not suppressed (implicit None return).
        if exc_type:
            print(
                "***Logging exception {}***".format(
                    (exc_type, exc_value, exc_traceback)
                )
            )
@pytest.mark.parametrize(
    "infer_event_timestamp_col", [False, True],
)
@pytest.mark.parametrize(
    "full_feature_names", [False, True],
)
def test_historical_features_from_parquet_sources(
    infer_event_timestamp_col, full_feature_names
):
    """Historical retrieval from local parquet sources must match the
    reference dataframe produced by get_expected_training_df()."""
    start_date = datetime.now().replace(microsecond=0, second=0, minute=0)
    (
        customer_entities,
        driver_entities,
        end_date,
        orders_df,
        start_date,
    ) = generate_entities(start_date, infer_event_timestamp_col)
    with TemporaryDirectory() as temp_dir:
        # Stage driver hourly stats to parquet and wrap them in a feature view.
        driver_df = driver_data.create_driver_hourly_stats_df(
            driver_entities, start_date, end_date
        )
        driver_source = stage_driver_hourly_stats_parquet_source(temp_dir, driver_df)
        driver_fv = create_driver_hourly_stats_feature_view(driver_source)
        # Stage customer daily profiles the same way.
        customer_df = driver_data.create_customer_daily_profile_df(
            customer_entities, start_date, end_date
        )
        customer_source = stage_customer_daily_profile_parquet_source(
            temp_dir, customer_df
        )
        customer_fv = create_customer_daily_profile_feature_view(customer_source)
        driver = Entity(name="driver", join_key="driver_id", value_type=ValueType.INT64)
        customer = Entity(name="customer_id", value_type=ValueType.INT64)
        # Local feature store backed by a SQLite online store inside temp_dir.
        store = FeatureStore(
            config=RepoConfig(
                registry=os.path.join(temp_dir, "registry.db"),
                project="default",
                provider="local",
                online_store=SqliteOnlineStoreConfig(
                    path=os.path.join(temp_dir, "online_store.db")
                ),
            )
        )
        store.apply([driver, customer, driver_fv, customer_fv])
        job = store.get_historical_features(
            entity_df=orders_df,
            feature_refs=[
                "driver_stats:conv_rate",
                "driver_stats:avg_daily_trips",
                "customer_profile:current_balance",
                "customer_profile:avg_passenger_count",
                "customer_profile:lifetime_trip_count",
            ],
            full_feature_names=full_feature_names,
        )
        actual_df = job.to_df()
        # The entity df's timestamp column is either the default name or "e_ts"
        # when timestamp-column inference is being exercised.
        event_timestamp = (
            DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL
            if DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL in orders_df.columns
            else "e_ts"
        )
        expected_df = get_expected_training_df(
            customer_df,
            customer_fv,
            driver_df,
            driver_fv,
            orders_df,
            event_timestamp,
            full_feature_names=full_feature_names,
        )
        # Compare row-for-row after sorting both frames identically.
        assert_frame_equal(
            expected_df.sort_values(
                by=[event_timestamp, "order_id", "driver_id", "customer_id"]
            ).reset_index(drop=True),
            actual_df.sort_values(
                by=[event_timestamp, "order_id", "driver_id", "customer_id"]
            ).reset_index(drop=True),
        )
@pytest.mark.integration
@pytest.mark.parametrize(
    "provider_type", ["local", "gcp", "gcp_custom_offline_config"],
)
@pytest.mark.parametrize(
    "infer_event_timestamp_col", [False, True],
)
@pytest.mark.parametrize(
    "full_feature_names", [False, True],
)
def test_historical_features_from_bigquery_sources(
    provider_type, infer_event_timestamp_col, capsys, full_feature_names
):
    """Integration test: historical retrieval from BigQuery offline sources.

    Stages orders/driver/customer data into a temporary BigQuery dataset,
    retrieves training data via both a SQL entity df and a pandas entity df,
    and compares each against the locally computed reference dataframe.
    Also verifies that a renamed join key raises
    FeastEntityDFMissingColumnsError and that the configured offline dataset
    name is used in the generated query.
    """
    start_date = datetime.now().replace(microsecond=0, second=0, minute=0)
    (
        customer_entities,
        driver_entities,
        end_date,
        orders_df,
        start_date,
    ) = generate_entities(start_date, infer_event_timestamp_col)
    # Unique, disposable dataset name so concurrent test runs don't collide.
    bigquery_dataset = (
        f"test_hist_retrieval_{int(time.time_ns())}_{random.randint(1000, 9999)}"
    )
    with BigQueryDataSet(bigquery_dataset), TemporaryDirectory() as temp_dir:
        gcp_project = bigquery.Client().project
        # Orders Query
        table_id = f"{bigquery_dataset}.orders"
        stage_orders_bigquery(orders_df, table_id)
        entity_df_query = f"SELECT * FROM {gcp_project}.{table_id}"
        # Driver Feature View
        driver_df = driver_data.create_driver_hourly_stats_df(
            driver_entities, start_date, end_date
        )
        driver_table_id = f"{gcp_project}.{bigquery_dataset}.driver_hourly"
        stage_driver_hourly_stats_bigquery_source(driver_df, driver_table_id)
        driver_source = BigQuerySource(
            table_ref=driver_table_id,
            event_timestamp_column="datetime",
            created_timestamp_column="created",
        )
        driver_fv = create_driver_hourly_stats_feature_view(driver_source)
        # Customer Feature View
        customer_df = driver_data.create_customer_daily_profile_df(
            customer_entities, start_date, end_date
        )
        customer_table_id = f"{gcp_project}.{bigquery_dataset}.customer_profile"
        stage_customer_daily_profile_bigquery_source(customer_df, customer_table_id)
        customer_source = BigQuerySource(
            table_ref=customer_table_id,
            event_timestamp_column="datetime",
            created_timestamp_column="",
        )
        customer_fv = create_customer_daily_profile_feature_view(customer_source)
        driver = Entity(name="driver", join_key="driver_id", value_type=ValueType.INT64)
        customer = Entity(name="customer_id", value_type=ValueType.INT64)
        # Build the store for the provider configuration under test.
        if provider_type == "local":
            store = FeatureStore(
                config=RepoConfig(
                    registry=os.path.join(temp_dir, "registry.db"),
                    project="default",
                    provider="local",
                    online_store=SqliteOnlineStoreConfig(
                        path=os.path.join(temp_dir, "online_store.db"),
                    ),
                    offline_store=BigQueryOfflineStoreConfig(
                        type="bigquery", dataset=bigquery_dataset
                    ),
                )
            )
        elif provider_type == "gcp":
            store = FeatureStore(
                config=RepoConfig(
                    registry=os.path.join(temp_dir, "registry.db"),
                    # Random project name to avoid registry collisions.
                    project="".join(
                        random.choices(string.ascii_uppercase + string.digits, k=10)
                    ),
                    provider="gcp",
                    offline_store=BigQueryOfflineStoreConfig(
                        type="bigquery", dataset=bigquery_dataset
                    ),
                )
            )
        elif provider_type == "gcp_custom_offline_config":
            # Custom offline dataset name "foo"; checked against the query below.
            store = FeatureStore(
                config=RepoConfig(
                    registry=os.path.join(temp_dir, "registry.db"),
                    project="".join(
                        random.choices(string.ascii_uppercase + string.digits, k=10)
                    ),
                    provider="gcp",
                    offline_store=BigQueryOfflineStoreConfig(
                        type="bigquery", dataset="foo"
                    ),
                )
            )
        else:
            raise Exception("Invalid provider used as part of test configuration")
        store.apply([driver, customer, driver_fv, customer_fv])
        # "e_ts" is used when the default timestamp column name is absent
        # (i.e. when timestamp-column inference is being exercised).
        event_timestamp = (
            DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL
            if DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL in orders_df.columns
            else "e_ts"
        )
        expected_df = get_expected_training_df(
            customer_df,
            customer_fv,
            driver_df,
            driver_fv,
            orders_df,
            event_timestamp,
            full_feature_names,
        )
        # --- Retrieval with a SQL string as the entity dataframe ---
        job_from_sql = store.get_historical_features(
            entity_df=entity_df_query,
            feature_refs=[
                "driver_stats:conv_rate",
                "driver_stats:avg_daily_trips",
                "customer_profile:current_balance",
                "customer_profile:avg_passenger_count",
                "customer_profile:lifetime_trip_count",
            ],
            full_feature_names=full_feature_names,
        )
        start_time = datetime.utcnow()
        actual_df_from_sql_entities = job_from_sql.to_df()
        end_time = datetime.utcnow()
        with capsys.disabled():
            print(
                str(
                    f"\nTime to execute job_from_sql.to_df() = '{(end_time - start_time)}'"
                )
            )
        assert sorted(expected_df.columns) == sorted(
            actual_df_from_sql_entities.columns
        )
        assert_frame_equal(
            expected_df.sort_values(
                by=[event_timestamp, "order_id", "driver_id", "customer_id"]
            ).reset_index(drop=True),
            actual_df_from_sql_entities[expected_df.columns]
            .sort_values(by=[event_timestamp, "order_id", "driver_id", "customer_id"])
            .reset_index(drop=True),
            check_dtype=False,
        )
        # to_arrow() must agree with to_df() for the same job.
        table_from_sql_entities = job_from_sql.to_arrow()
        assert_frame_equal(
            actual_df_from_sql_entities, table_from_sql_entities.to_pandas()
        )
        timestamp_column = (
            "e_ts"
            if infer_event_timestamp_col
            else DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL
        )
        entity_df_query_with_invalid_join_key = (
            f"select order_id, driver_id, customer_id as customer, "
            f"order_is_success, {timestamp_column}, FROM {gcp_project}.{table_id}"
        )
        # Rename the join key; this should now raise an error.
        assertpy.assert_that(store.get_historical_features).raises(
            errors.FeastEntityDFMissingColumnsError
        ).when_called_with(
            entity_df=entity_df_query_with_invalid_join_key,
            feature_refs=[
                "driver_stats:conv_rate",
                "driver_stats:avg_daily_trips",
                "customer_profile:current_balance",
                "customer_profile:avg_passenger_count",
                "customer_profile:lifetime_trip_count",
            ],
        )
        # --- Retrieval with a pandas dataframe as the entity dataframe ---
        job_from_df = store.get_historical_features(
            entity_df=orders_df,
            feature_refs=[
                "driver_stats:conv_rate",
                "driver_stats:avg_daily_trips",
                "customer_profile:current_balance",
                "customer_profile:avg_passenger_count",
                "customer_profile:lifetime_trip_count",
            ],
            full_feature_names=full_feature_names,
        )
        # Rename the join key; this should now raise an error.
        orders_df_with_invalid_join_key = orders_df.rename(
            {"customer_id": "customer"}, axis="columns"
        )
        assertpy.assert_that(store.get_historical_features).raises(
            errors.FeastEntityDFMissingColumnsError
        ).when_called_with(
            entity_df=orders_df_with_invalid_join_key,
            feature_refs=[
                "driver_stats:conv_rate",
                "driver_stats:avg_daily_trips",
                "customer_profile:current_balance",
                "customer_profile:avg_passenger_count",
                "customer_profile:lifetime_trip_count",
            ],
        )
        # Make sure that custom dataset name is being used from the offline_store config
        if provider_type == "gcp_custom_offline_config":
            assertpy.assert_that(job_from_df.query).contains("foo.entity_df")
        else:
            assertpy.assert_that(job_from_df.query).contains(
                f"{bigquery_dataset}.entity_df"
            )
        start_time = datetime.utcnow()
        actual_df_from_df_entities = job_from_df.to_df()
        end_time = datetime.utcnow()
        with capsys.disabled():
            print(
                str(
                    f"Time to execute job_from_df.to_df() = '{(end_time - start_time)}'\n"
                )
            )
        assert sorted(expected_df.columns) == sorted(actual_df_from_df_entities.columns)
        assert_frame_equal(
            expected_df.sort_values(
                by=[event_timestamp, "order_id", "driver_id", "customer_id"]
            ).reset_index(drop=True),
            actual_df_from_df_entities[expected_df.columns]
            .sort_values(by=[event_timestamp, "order_id", "driver_id", "customer_id"])
            .reset_index(drop=True),
            check_dtype=False,
        )
        table_from_df_entities = job_from_df.to_arrow()
        assert_frame_equal(
            actual_df_from_df_entities, table_from_df_entities.to_pandas()
        )
def test_feature_name_collision_on_historical_retrieval():
    """_validate_feature_refs must reject feature refs with colliding names.

    Covers both modes: with full_feature_names=False two features sharing a
    bare name collide; with full_feature_names=True only a repeated fully
    qualified ref collides.
    """
    # Short-name mode: "avg_daily_trips" appears under two feature views.
    colliding_short_refs = [
        "driver_stats:conv_rate",
        "driver_stats:avg_daily_trips",
        "customer_profile:current_balance",
        "customer_profile:avg_passenger_count",
        "customer_profile:lifetime_trip_count",
        "customer_profile:avg_daily_trips",
    ]
    with pytest.raises(FeatureNameCollisionError) as error:
        _validate_feature_refs(
            feature_refs=colliding_short_refs, full_feature_names=False
        )
    expected_error_message = (
        "Duplicate features named avg_daily_trips found.\n"
        "To resolve this collision, either use the full feature name by setting "
        "'full_feature_names=True', or ensure that the features in question have different names."
    )
    assert str(error.value) == expected_error_message

    # Full-name mode: the same fully qualified ref is listed twice.
    colliding_full_refs = [
        "driver_stats:conv_rate",
        "driver_stats:avg_daily_trips",
        "driver_stats:avg_daily_trips",
        "customer_profile:current_balance",
        "customer_profile:avg_passenger_count",
        "customer_profile:lifetime_trip_count",
        "customer_profile:avg_daily_trips",
    ]
    with pytest.raises(FeatureNameCollisionError) as error:
        _validate_feature_refs(
            feature_refs=colliding_full_refs, full_feature_names=True
        )
    expected_error_message = (
        "Duplicate features named driver_stats__avg_daily_trips found.\n"
        "To resolve this collision, please ensure that the features in question "
        "have different names."
    )
    assert str(error.value) == expected_error_message
| [
"noreply@github.com"
] | noreply@github.com |
1feb1c0856d185b636ee2d1e91a96827ac7c9853 | 06b8025b10f318d4e4bb724d371a1bc25d9abfdd | /geneyenta/settings.py | 875541dc648bcf40b02a70f9262db8b867eaf246 | [] | no_license | wassermanlab/GeneYenta | bf6d79b9c7628ef403eb1f6a6d0cd7da7f594495 | e4bf1946dd012aedd38f42451caee317dc3d8977 | refs/heads/master | 2021-01-01T17:22:43.364766 | 2015-02-05T22:32:45 | 2015-02-05T22:32:45 | 11,798,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,368 | py | # Django settings for geneyenta project.
######### For static stylesheet references #######
# import os
# ROOT_PATH = os.path.dirname(__file__)

# NOTE(review): DEBUG is hard-coded True; this must be False in production.
# Consider reading it from the environment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Absolute path to the matching script invoked by the application.
MATCH_BINARY = '/apps/GeneYenta/match.py'

######### For chron jobs i.e. matching ###
import djcelery
djcelery.setup_loader()
BROKER_URL = 'django://'

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# NOTE(review): database credentials are committed to source control here —
# move them to environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'GeneYenta', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'gyadmin',
        'PASSWORD': 'gnytdmpw',
        #'USER': 'root',
        #'PASSWORD': 'root',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Vancouver'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-ca'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = "/apps/GeneYenta/media"

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = "/media/"

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = "/apps/GeneYenta/static"

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # os.path.join(ROOT_PATH, "static"),
    # '/Users/etadministrator/Sites/django_sites/geneyenta/geneyenta/static/'
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is committed here — rotate it and load it from
# the environment instead.
SECRET_KEY = 'lk*l+byjtj4npo8w3#o_(m*jdf1sb3zxhobxkjwrziuc)utr+&'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'geneyenta.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geneyenta.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'registration',
    'django_extensions',
    'cases',
    'djcelery',
    # temp. app for testing celery
    #'celerytest',
    'kombu.transport.django',
    'matches',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Email settings
# Ordering of these settings matters
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
EMAIL_HOST_USER = 'root@geneyenta.cmmt.ubc.ca'
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False

ALLOWED_INCLUDE_ROOTS = (
    '/apps/GeneYenta/static/cases/dynatree/dev/',
    '/apps/GeneYenta/matches/templates/matches/',
)
| [
"jack.wuweimin@gmail.com"
] | jack.wuweimin@gmail.com |
87cb6e6e0b682d52f6eaaa096b0a13f7c53bb789 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/udw.py | 95de41ebfb832ef789c650110a4ac9b22d6e3fbf | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'uDW':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
38739ea4cae572570555cd1043b6acf10436f45e | 3eb4d64a8bb0bc240a2ef189724f4d51b5275eac | /heltour/tournament/migrations/0099_alternate_priority_date_override.py | 863952e8d18f8da8a170a0aae4967d562e598879 | [
"MIT"
] | permissive | brucemubayiwa/heltour | c01cc88be7f86dce8246f619d7aa2da37e0e0ac2 | fa4e9b06343acaf6a8a99337860e1ad433e68f6b | refs/heads/master | 2021-01-23T19:59:04.099215 | 2017-09-06T03:34:31 | 2017-09-06T03:34:31 | 102,840,526 | 1 | 0 | null | 2017-09-08T08:53:30 | 2017-09-08T08:53:30 | null | UTF-8 | Python | false | false | 482 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 01:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``priority_date_override`` datetime to ``alternate``."""

    dependencies = [
        ('tournament', '0098_auto_20160916_1934'),
    ]

    operations = [
        migrations.AddField(
            model_name='alternate',
            name='priority_date_override',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"ben.cyanfish@gmail.com"
] | ben.cyanfish@gmail.com |
5a303ec868f550ec82de6b9a8080ee1ba8c9ac5d | d98fa32291d8a82f116b3785a732653d9e3bb5ff | /plot_exp1a.py | ac9b270fe882e473b6e9181725e9c27243a5964c | [] | no_license | karopastal/interferometry | 16b03a1bd586c9192707a6dd45c7683f2ff615be | 75943ca5a43f9e1fe6af438af3b26c01a5239b46 | refs/heads/master | 2020-05-27T12:13:49.133167 | 2019-05-25T21:28:45 | 2019-05-25T21:28:45 | 188,613,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | from lmfit import Model
import matplotlib.pyplot as plt
import numpy as np
from numpy import loadtxt
# Global plot font configuration.
font = {'family' : 'normal', 'size' : 14}
plt.rc('font', **font)

# Measured data for experiment 1a; columns are unpacked below.
data = loadtxt('analysis/exp1a.dat')
mili = np.power(10, -3.0)  # millimetre -> metre conversion factor
red_lambda = 650*np.power(10, -9.0)  # red laser wavelength, 650 nm in metres

# Column 0: fringe count m; column 1: its uncertainty;
# columns 2-3: displacement x [mm] and its uncertainty (converted to metres).
m = data[:, 0]
delta_m = data[:, 1]
x = data[:, 2]*mili
delta_x = data[:, 3]*mili
# Fit weights: inverse of the displacement uncertainty.
x_weights = 1/delta_x
def linear(x, a, b):
    """Straight-line model y = a*x + b used by the lmfit Model fit."""
    return a * x + b
linear_model = Model(linear)
# Weighted least-squares fit of displacement x against fringe count m.
result = linear_model.fit(x, weights=x_weights , x=m, a=1, b=1)
print(result.fit_report())
print(result.chisqr)

a = result.params['a'].value
a_err = result.params['a'].stderr
b = result.params['b'].value
# Derived quantity k = 2*slope/lambda, with uncertainty propagated from the
# slope's standard error.
k = (2*a)/red_lambda
delta_k = 2*a_err/red_lambda
print(k, delta_k)

# Plot the data points with error bars and overlay the fitted line.
fig, ax = plt.subplots()
ax.set_xlabel(r'delta m', fontsize=18)
ax.set_ylabel(r'delta x [meter]', fontsize=18)
plt.title('delta x vs delta m', fontsize=20)
plt.plot(m, x, '.C3', label='data points')
ax.errorbar(m, x, yerr=delta_x, xerr=delta_m, fmt='.k', capthick=2, label='uncertainties')
plt.plot(m, linear(m, a, b), 'C0--', label='linear fit: y=a*x+b')
plt.legend()
plt.show()
"tpaskaro@gmail.com"
] | tpaskaro@gmail.com |
48f477ee576243c587ae8f4313ca9343d1ffbac3 | 2611e42451513dd655db40677c37146481c060e1 | /tests/test_browser_worker.py | 897c94ede18cfd17472898db079d3f341912bb6f | [] | no_license | TrendingTechnology/crawler-cluster | cc06a4fc5e84f59fe29f4f8e4c96958a8e0795b3 | 7b27ce8e04f99005ae78893063fef2e0e142c4b0 | refs/heads/master | 2023-04-14T00:30:58.754117 | 2021-04-20T02:29:49 | 2021-04-20T02:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,191 | py | from crawler_cluster.browser_worker import BrowserWorker
from lxml.html import HtmlElement
import pytest
import pytest_asyncio
from pathlib import Path
import asyncio
import json
# Local fixture pages so the tests run without network access.
static_dir = Path(__file__).parent.absolute().joinpath('static')
antibot_url = f"file:///{static_dir.joinpath('Antibot.html')}"
interactions_url = f"file:///{static_dir.joinpath('interactions.html')}"
blocked_url = f"file:///{static_dir.joinpath('blocked.html')}"
# Run every coroutine test in this module under pytest-asyncio.
pytestmark = pytest.mark.asyncio
@pytest.fixture
async def worker():
    """Launch a fresh BrowserWorker for each test and shut it down afterwards."""
    worker = BrowserWorker(disable_images=True, garbage_collect_at=500, launch_options={
        'ignoreDefaultArgs': '--disable-extensions'})
    #worker = BrowserWorker()
    ok = await worker.launch()
    assert ok
    yield worker
    await worker.shutdown()
async def test_fingerprint(worker):
    # Load the local anti-bot fixture page and capture a full-page screenshot
    # for manual inspection of the browser fingerprint.
    # await worker.try_get('https://bot.sannysoft.com/')
    await worker.try_get(antibot_url)
    await worker.page.screenshot(path='browser_fingerprint.png', fullPage=True)
async def test_page_content(worker):
    # After a successful fetch (status 0), the worker exposes the page as
    # raw html, plain text, and a parsed lxml root element.
    resp = await worker.try_get(antibot_url)
    assert resp.status == 0
    assert isinstance(worker.html, str)
    assert isinstance(worker.text, str)
    assert isinstance(worker.root, HtmlElement)
async def test_cookies(worker):
    # Setting a cookie and then clearing all cookies leaves the page with none.
    cookie = {'name': 'cookie', 'value': 'chocolate chip',
              'domain': 'cookie.com', 'secure': False, 'sameSite': 'Lax'}
    ok = await worker.set_cookie(cookie)
    assert ok
    ok = await worker.clear_cookies()
    assert ok
    cookies = await worker.page.cookies()
    assert len(cookies) == 0
async def test_cdp(worker):
    # Navigation timeout and ad-blocking toggles report success.
    ok = worker.set_default_nav_timeout(60_000)
    assert ok
    ok = await worker.set_ad_block(True)
    assert ok
async def test_blocked_urls(worker):
    # With 'saved_resource' blocked, no loaded resource entry may contain it.
    ok = await worker.set_blocked_urls(['saved_resource'])
    await worker.try_get(antibot_url)
    content = await worker.evaluate(
        '() => JSON.stringify(performance.getEntries(), null, " ")')
    names = [d.get('name') for d in json.loads(content)]
    assert not any('saved_resource' in name for name in names)
async def test_request_abort_types(worker):
    # Aborting whole resource types (images, fonts, css, scripts) succeeds.
    no_load = ['image', 'font', 'stylesheet', 'script', 'img']
    ok = await worker.set_request_abort_types(no_load)
    assert ok
async def test_disable_images(worker):
    # The fixture's disable_images=True means no image resources are loaded.
    await worker.try_get(antibot_url)
    content = await worker.evaluate('() => JSON.stringify(performance.getEntries(), null, " ")')
    images = [d for d in json.loads(
        content) if d.get('initiatorType') in ('image', 'img')]
    assert len(images) == 0
async def test_evaluate_on_new_doc(worker):
    # Registering a script to run on every new document reports success.
    ok = await worker.evaluate_on_new_doc("() => document.getElementById('title').setAttribute('id', 'awesome');")
    assert ok
async def test_redirect_block(worker):
    # With redirect blocking on, navigating the redirecting fixture page must
    # not land on the redirect target.
    ok = await worker.set_redirect_blocking_enabled(True)
    assert ok
    await worker.try_get(f"file:///{static_dir.joinpath('redirect_from.html')}")
    assert 'redirect_to' not in worker.page.url
async def test_scroll(worker):
    # Scrolling the loaded page reports success.
    await worker.try_get(antibot_url)
    ok = await worker.scroll()
    assert ok
async def test_hover(worker):
    # Hovering an element located by XPath (one with an onmouseover handler).
    await worker.try_get(interactions_url)
    ok = await worker.hover('//th[@onmouseover]')
    assert ok
async def test_click_js_element(worker):
    # Clicking an element located by XPath (one with an onclick handler).
    await worker.try_get(interactions_url)
    ok = await worker.click_js_element('//table[@onclick]')
    assert ok
async def test_error_status(worker):
    # record more than 4 consecutive browser errors.
    # Five consecutive recorded errors flip the worker's ok flag to False.
    await worker._record_error_status(True)
    await worker._record_error_status(True)
    await worker._record_error_status(True)
    await worker._record_error_status(True)
    await worker._record_error_status(True)
    assert not worker.ok
async def test_function_wait(worker):
    # A coroutine that outlives wait_for_timeout is reported as a failure
    # (ok=False, result=None) and counted as one consecutive error.
    async def sleep_long(sleep_time):
        await asyncio.sleep(sleep_time)
        return 0
    ok, result = await worker._wait_for(sleep_long, wait_for_timeout=3, sleep_time=4)
    assert not ok
    assert result is None
    assert worker._consecutive_errors == 1
async def test_block_detection(worker):
    # The "blocked" fixture page drives probability_blocked to 3.
    await worker.try_get(blocked_url)
    assert worker.probability_blocked == 3
| [
"kelleherjdan@gmail.com"
] | kelleherjdan@gmail.com |
e77bc697e7aa91fc628d80f873e98e07eb2f0a92 | b64f3c17868e4ace696719d32c32a02f6253619b | /amc/models.py | 682126db5fba977d29cd0229b03d024dd0db0929 | [] | no_license | bipinks/axiscrm-django | 9c664b4f177a402dca097d255f844f907bea3b18 | 9cc779e4de33718530437b97bef41b5b38364724 | refs/heads/main | 2023-07-19T13:31:55.162159 | 2021-09-18T10:27:57 | 2021-09-18T10:27:57 | 401,621,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | from crum import get_current_user
from django.conf import settings
from django.db import models
# Create your models here.
from django.utils.datetime_safe import datetime
from clients.models import ClientProject
from projects.models import Project
class AMCRenewal(models.Model):
    """One AMC renewal record tied to a client project."""

    class Meta:
        db_table = 'amc_renewals'
        verbose_name_plural = "Manage AMC Renewals"
        ordering = ['-id']

    # Auto-generated as "AMC/<project code>/<client code>/<n>" in save()
    # when left blank.
    reference = models.CharField(max_length=255, null=True, blank=True)
    client_project = models.ForeignKey(ClientProject, on_delete=models.CASCADE)
    start_date = models.DateField()
    end_date = models.DateField()
    renewed_date = models.DateField()
    description = models.TextField()
    amount = models.DecimalField(decimal_places=2, max_digits=10)
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        null=True
    )
    created_at = models.DateTimeField(default=datetime.now, blank=True)

    def save(self, *args, **kwargs):
        """Stamp the current user and generate a reference before saving."""
        # NOTE(review): created_by is overwritten with the current request user
        # on *every* save, not only on creation — confirm this is intended.
        self.created_by = get_current_user()
        if not self.reference:
            # Sequence number = count of existing renewals for this project + 1.
            renewal_cnt = AMCRenewal.objects.filter(client_project=self.client_project).count()
            project_code = self.client_project.project.code
            client_code = self.client_project.client.code
            self.reference = "AMC/" + project_code + "/" + client_code + "/" + str(renewal_cnt + 1)
        super(AMCRenewal, self).save(*args, **kwargs)

    def __str__(self):
        return self.reference
class AMCDocuments(models.Model):
    """A file attachment uploaded against an AMC renewal."""

    class Meta:
        db_table = 'amc_files'
        verbose_name_plural = "AMC Documents"

    amc = models.ForeignKey(AMCRenewal, on_delete=models.CASCADE)
    uploaded_at = models.DateTimeField(default=datetime.now, blank=True)
    file = models.FileField(null=True, blank=True, upload_to='AMCRenewalDocs/')
"bipin@directaxistech.com"
] | bipin@directaxistech.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.