blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
29ea7bad2c0d0f0b10aa45a6fb658eb5f08b09eb | 739373ca3a5fe5bc9b495b040a96ec5653a1ba45 | /Introdução a Ciencia da Computação com Python - PII/w6_recursao/fatorial.py | a4fdc2243e96f154f57089fe7597a277189de199 | [] | no_license | emanuelgustavo/pythonscripts | e7e9999ad91b4c97d13ade81d2907f596479863f | 4ac7c1522407602406ce0b875493daefdcacc8b9 | refs/heads/master | 2020-06-25T11:54:05.638587 | 2020-03-05T10:18:04 | 2020-03-05T10:18:04 | 199,301,121 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | import pytest
def fatorial(n):
    """Return the factorial of n.

    Any value below 1 (including 0 and negatives) yields 1, matching the
    recursive base case of the original implementation.
    """
    produto = 1
    while n > 1:
        produto *= n
        n -= 1
    return produto
@pytest.mark.parametrize('entrada, esperado', [
(0, 1),
(1, 1),
(2, 2),
(3, 6),
(4, 24),
(5, 120)
])
def test_fatorial(entrada, esperado):
assert fatorial(entrada) == esperado | [
"noreply@github.com"
] | emanuelgustavo.noreply@github.com |
b8f5573ff344929c69dceabf3640aea61ec7232f | bd97064b5ed9f17b11bcd3ac9a1f2c8ea9ffaf82 | /restapi/routers/Utils.py | 8284d48e5f99c5cebcbd87f7d2ecb895771a1912 | [] | no_license | IndominusByte/bhaktirahayu-backend | a33eff6d0a74894934a6643ef3b81af283542ecf | 628d5d5cdbe145696835e32c47f77ca03dc72708 | refs/heads/main | 2023-08-13T16:25:21.241086 | 2021-09-18T18:04:25 | 2021-09-18T18:04:25 | 389,309,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from fastapi import APIRouter, Depends
from fastapi_jwt_auth import AuthJWT
from schemas.utils.UtilSchema import UtilEncodingImageBase64
from libs.MagicImage import MagicImage
router = APIRouter()
@router.post('/encoding-image-base64', response_model=bytes)
async def encoding_image_base64(util_data: UtilEncodingImageBase64, authorize: AuthJWT = Depends()):
    """Return the base64 encoding of the image at ``util_data.path_file``.

    Requires a valid JWT (``jwt_required`` raises otherwise), so only
    authenticated callers reach the file conversion step.
    """
    # Reject unauthenticated requests before touching the filesystem.
    authorize.jwt_required()
    return MagicImage.convert_image_as_base64(util_data.path_file)
| [
"nyomanpradipta120@gmail.com"
] | nyomanpradipta120@gmail.com |
d2563ff00aa638ceb1ed6a4ea968d43973b51742 | a0128c94ae8ad9fa4ab3996a9448605acee929b0 | /array/missing_number.py | 3ac731a88e097005f3db078877aec9c124a11c37 | [] | no_license | AbhinavJain13/Leetcode-Solutions | bc901188c260698f4d705c6934c87f9f44a00531 | ae2607093beb4257c96beb6eb8b03720ab29784d | refs/heads/master | 2021-01-12T14:48:19.082161 | 2016-10-17T21:24:53 | 2016-10-17T21:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n=len(nums)
return (n*(n+1))/2-sum(nums)
| [
"noreply@github.com"
] | AbhinavJain13.noreply@github.com |
22d4fbf7a6fdf7f62c977d62dddb5f36d9ca10dc | 3bcfe9a0e176947b76ff35341e3ab7938c6e680a | /Fundamentos/tipos_datos.py | 16bfa93699b20b3cf76a52acb55078fb66746b20 | [] | no_license | algonca00/UniversidadPython | 5dd057eac0705ee922b9fa4b2ddc1705e692af91 | 6984b630348701cb3b2e0ab61c913e7f6cfa76ef | refs/heads/master | 2022-11-26T00:46:44.060404 | 2020-07-25T01:12:51 | 2020-07-25T01:12:51 | 280,888,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Declaramos la variable x como int o entero
# Declare the variable x as int (integer)
x = 5
# Declare the variable y as float (floating point)
y = 10.1
# Declare the variable z as bool (boolean)
z = False
# Declare the variable a as str (string)
a = "Saludos"
print(x)
print(y)
print(z)
print(a)
# To find out the type of a variable, call the built-in type() function from
# the Python console: type(variable_name), e.g. type(x), type(y), type(z), type(a)
| [
"algonca00@gmail.com"
] | algonca00@gmail.com |
068b4097287bd91889c5dd9c73b0d6816f42d864 | 0651c62a9bfb5484a696975b29a1bcae864c13bd | /commonsense-qa/utils/preprocess_obqa.py | 795d53f10f444c991371619919c5d4504a6e3336 | [] | no_license | min942773/path_generator | cfb288240704cfdc65770f7df27a2dfed3cfc22e | 7cf5fd0810d60acd5d02a3671c2531e85de9382f | refs/heads/main | 2023-04-06T02:46:14.115838 | 2021-04-15T12:15:31 | 2021-04-15T12:15:31 | 350,245,011 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,422 | py | import os
import pickle
import torch
import json
from collections import defaultdict, OrderedDict
import random
from tqdm import tqdm, trange
from transformers import *
class PreprocessData_Ground(object):
    """Tokenize grounded (question-concept, answer-concept) pairs for GPT-2.

    On first use, builds fixed-length token contexts for the train/dev/test
    splits and caches them as a pickle at ``self.ground_path``; later runs
    reuse the cache (the constructor skips rebuilding when the file exists).
    """

    def __init__(self, data_name, gpt_tokenizer_type, context_len):
        super(PreprocessData_Ground, self).__init__()
        self.tokenizer = GPT2Tokenizer.from_pretrained(gpt_tokenizer_type, cache_dir='../cache/')
        data_dir = os.path.join('./data', data_name)
        self.ground_path = os.path.join(data_dir, 'ground_token_context{}_{}.pkl'.format(context_len, gpt_tokenizer_type))
        self.context_len = context_len
        # Special tokens appended to the GPT-2 vocabulary; their ids are used
        # for padding and for separating answer/question concepts below.
        self.tokenizer.add_tokens(['<PAD>'])
        self.tokenizer.add_tokens(['<SEP>'])
        self.tokenizer.add_tokens(['<END>'])
        self.PAD = self.tokenizer.convert_tokens_to_ids('<PAD>')
        self.SEP = self.tokenizer.convert_tokens_to_ids('<SEP>')
        self.END = self.tokenizer.convert_tokens_to_ids('<END>')
        # Only rebuild the tokenized cache when it does not already exist.
        if not os.path.exists(self.ground_path):
            train_context_path = os.path.join(data_dir, 'grounded', 'train.grounded.jsonl')
            train_contexts = self.load_context(train_context_path)
            dev_context_path = os.path.join(data_dir, 'grounded', 'dev.grounded.jsonl')
            dev_contexts = self.load_context(dev_context_path)
            test_context_path = os.path.join(data_dir, 'grounded', 'test.grounded.jsonl')
            test_contexts = self.load_context(test_context_path)
            token_dataset = {}
            token_dataset['train'] = train_contexts
            token_dataset['dev'] = dev_contexts
            token_dataset['test'] = test_contexts
            with open(self.ground_path, 'wb') as handle:
                pickle.dump(token_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def load_context(self, data_path):
        """Tokenize one grounded .jsonl split into a long tensor.

        Each line holds question concepts ('qc') and answer concepts ('ac');
        up to 6 of each are sampled and paired as "ac<SEP>qc" contexts,
        truncated/padded to ``self.context_len`` tokens.  Every question
        contributes 36 context slots (missing pairs are all-<PAD> rows), and
        lines are grouped 4 at a time — NOTE(review): this assumes exactly 4
        answer choices per question; confirm for the target dataset.
        """
        data_context = []
        question_context = []
        with open(data_path, 'r') as fr:
            for _id, line in enumerate(tqdm(fr)):
                obj = json.loads(line)
                qc_list = obj['qc']
                ac_list = obj['ac']
                choice_context = []
                # Sample at most 6 concepts of each kind (random, unseeded).
                sample_qc_num = min(len(qc_list), 6)
                sample_ac_num = min(len(ac_list), 6)
                sample_qc_list = random.sample(qc_list, sample_qc_num)
                sample_ac_list = random.sample(ac_list, sample_ac_num)
                for qc in sample_qc_list:
                    qc = qc.replace('_', ' ')
                    for ac in sample_ac_list:
                        ac = ac.replace('_', ' ')
                        context = ac + '<SEP>' + qc
                        context = self.tokenizer.encode(context, add_special_tokens=False)[:self.context_len]
                        context += [self.PAD] * (self.context_len - len(context))
                        choice_context.append(context)
                # Pad to 36 = 6 x 6 contexts per answer choice.
                num_context = len(choice_context)
                for _ in range(36 - num_context):
                    _input = [self.PAD] * self.context_len
                    choice_context.append(_input)
                question_context.append(choice_context)
                if (_id + 1) % 4 == 0:
                    data_context.append(question_context)
                    question_context = []
        data_context = torch.tensor(data_context, dtype=torch.long)
        return data_context
| [
"wanpifeng4ever@gmail.com"
] | wanpifeng4ever@gmail.com |
96f9cd408c789bb3d86cc01acb1306bd78afc4ad | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SSD/.dev_scripts/batch_test.py | e5d863211eaf2182497d8ddfbc0f40ce48f93e99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,393 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
some instructions
1. Fill the models that needs to be checked in the modelzoo_dict
2. Arange the structure of the directory as follows, the script will find the
corresponding config itself:
model_dir/model_family/checkpoints
e.g.: models/faster_rcnn/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
models/faster_rcnn/faster_rcnn_r101_fpn_1x_coco_20200130-047c8118.pth
3. Excute the batch_test.sh
"""
import argparse
import json
import os
import subprocess
import mmcv
import torch
from mmcv import Config, get_logger
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
# Reference metric values (COCO bbox/segm mAP and proposal AR@1000) for each
# config to be checked; main() compares fresh batch-inference results against
# these with an absolute tolerance of 0.003.
modelzoo_dict = {
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py': {
        'bbox': 0.374
    },
    'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py': {
        'bbox': 0.382,
        'segm': 0.347
    },
    'configs/rpn/rpn_r50_fpn_1x_coco.py': {
        'AR@1000': 0.582
    }
}
def parse_args():
    """Collect command-line options for the batch-inference check.

    Positional arguments give the model directory and the output JSON path;
    ``--launcher`` selects the distributed launcher and ``--local_rank`` is
    mirrored into the LOCAL_RANK environment variable when unset.
    """
    arg_parser = argparse.ArgumentParser(
        description='The script used for checking the correctness \
        of batch inference')
    arg_parser.add_argument('model_dir', help='directory of models')
    arg_parser.add_argument(
        'json_out', help='the output json records test information like mAP')
    arg_parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    arg_parser.add_argument('--local_rank', type=int, default=0)
    parsed = arg_parser.parse_args()
    # Only set LOCAL_RANK when the launcher has not already provided one.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def check_finish(all_model_dict, result_file):
    """Return the first config with no record in ``result_file``, else mark done.

    ``result_file`` holds one JSON object per line, each with a ``cfg`` key.
    Configs are visited in sorted order; when every config in
    ``all_model_dict`` already has a record, a ``finished`` marker line is
    appended and None is returned.

    Fixes vs. the original: the always-True ``is_finish`` flag (dead code)
    is removed, and membership tests use a set instead of an O(n) list.
    """
    tested_cfgs = set()
    with open(result_file, 'r+') as f:
        for line in f:
            record = json.loads(line)
            tested_cfgs.add(record['cfg'])
    for cfg in sorted(all_model_dict.keys()):
        if cfg not in tested_cfgs:
            return cfg
    # Every config has been tested: append the completion marker.
    with open(result_file, 'a+') as f:
        f.write('finished\n')
def dump_dict(record_dict, json_out):
    """Append one result record to ``json_out`` as a single JSON line."""
    with open(json_out, 'a+') as out_file:
        mmcv.dump(record_dict, out_file, file_format='json')
        out_file.write('\n')
def main():
    """Evaluate the next unchecked model and append the outcome to the JSON log.

    Builds a cfg->checkpoint map from the model directory layout
    (model_dir/model_family/checkpoint.pth), picks the first config without a
    record via check_finish(), runs (distributed) inference and compares each
    metric against modelzoo_dict with a 0.003 tolerance.  Failures are
    recorded with ``terminate`` and the process is crashed on purpose to
    avoid distributed hangs.
    """
    args = parse_args()

    # touch the output json if not exist
    with open(args.json_out, 'a+'):
        pass

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, backend='nccl')
    rank, world_size = get_dist_info()
    logger = get_logger('root')

    # read info of checkpoints and config
    result_dict = dict()
    for model_family_dir in os.listdir(args.model_dir):
        for model in os.listdir(
                os.path.join(args.model_dir, model_family_dir)):
            # cpt: rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth
            # cfg: rpn_r50_fpn_1x_coco.py
            # The 18-char suffix strip assumes the "_YYYYMMDD-hash" naming.
            cfg = model.split('.')[0][:-18] + '.py'
            cfg_path = os.path.join('configs', model_family_dir, cfg)
            assert os.path.isfile(
                cfg_path), f'{cfg_path} is not valid config path'
            cpt_path = os.path.join(args.model_dir, model_family_dir, model)
            result_dict[cfg_path] = cpt_path
            assert cfg_path in modelzoo_dict, f'please fill the ' \
                f'performance of cfg: {cfg_path}'
    # NOTE(review): check_finish returns None when everything is tested, in
    # which case the lookup below raises KeyError — confirm intended.
    cfg = check_finish(result_dict, args.json_out)
    cpt = result_dict[cfg]
    try:
        cfg_name = cfg
        logger.info(f'evaluate {cfg}')
        record = dict(cfg=cfg, cpt=cpt)
        cfg = Config.fromfile(cfg)
        # cfg.data.test.ann_file = 'data/val_0_10.json'
        # set cudnn_benchmark
        if cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True
        cfg.model.pretrained = None
        # Drop pretrained weights from any RFP backbone in the neck(s): the
        # checkpoint already contains the final weights.
        if cfg.model.get('neck'):
            if isinstance(cfg.model.neck, list):
                for neck_cfg in cfg.model.neck:
                    if neck_cfg.get('rfp_backbone'):
                        if neck_cfg.rfp_backbone.get('pretrained'):
                            neck_cfg.rfp_backbone.pretrained = None
            elif cfg.model.neck.get('rfp_backbone'):
                if cfg.model.neck.rfp_backbone.get('pretrained'):
                    cfg.model.neck.rfp_backbone.pretrained = None

        # in case the test dataset is concatenated
        if isinstance(cfg.data.test, dict):
            cfg.data.test.test_mode = True
        elif isinstance(cfg.data.test, list):
            for ds_cfg in cfg.data.test:
                ds_cfg.test_mode = True

        # build the dataloader
        samples_per_gpu = 2  # hack test with 2 image per gpu
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            samples_per_gpu=samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)

        # build the model and load checkpoint
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(model, cpt, map_location='cpu')
        # old versions did not save class info in checkpoints,
        # this walkaround is for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES
        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, 'tmp')
        # Only rank 0 evaluates and records results.
        if rank == 0:
            ref_mAP_dict = modelzoo_dict[cfg_name]
            metrics = list(ref_mAP_dict.keys())
            metrics = [
                m if m != 'AR@1000' else 'proposal_fast' for m in metrics
            ]
            eval_results = dataset.evaluate(outputs, metrics)
            print(eval_results)
            for metric in metrics:
                if metric == 'proposal_fast':
                    ref_metric = modelzoo_dict[cfg_name]['AR@1000']
                    eval_metric = eval_results['AR@1000']
                else:
                    ref_metric = modelzoo_dict[cfg_name][metric]
                    eval_metric = eval_results[f'{metric}_mAP']
                # Flag the record when a metric drifts beyond tolerance.
                if abs(ref_metric - eval_metric) > 0.003:
                    record['is_normal'] = False
            dump_dict(record, args.json_out)
            check_finish(result_dict, args.json_out)
    except Exception as e:
        logger.error(f'rank: {rank} test fail with error: {e}')
        record['terminate'] = True
        dump_dict(record, args.json_out)
        check_finish(result_dict, args.json_out)
        # hack there to throw some error to prevent hang out
        subprocess.call('xxx')
if __name__ == '__main__':
main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
0daa079dba8849106e62b0e5b1dd126ae4b154a4 | c09423f3643a41af943121d8637ff9bc93cd2166 | /autodoc_cache.py | 480c8e319399e232d5bdd54e3cb7895bff12ec7c | [] | no_license | brenthuisman/sphinx-autodoc-cache | 602c0ba6c98082ce352bafe6592bf32666c6cd92 | e8b79fa7a6ddbe259691923f348bccb2e1db1794 | refs/heads/main | 2023-04-01T16:09:53.768665 | 2021-04-09T18:59:27 | 2021-04-09T18:59:27 | 356,372,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | import os, sys
# Path to Python Binding (_arbor)
try:
autodoc_output_file = this_path=os.path.join(os.path.split(os.path.abspath(__file__))[0],'reference.rst')
if os.path.exists(autodoc_output_file):
os.remove(autodoc_output_file)
# Add the local build directory to where Python searches for Arbor.
print("--- generating autodoc cache ---")
# Generate title such that the page shows up in Sphinx.
with open(autodoc_output_file, "w") as file_object:
file_object.write('Python API reference\n')
file_object.write('====================\n')
# Override add_line and intercept intermediate rst output. Replace arbor._arbor while we're at it
import sphinx.ext.autodoc
def add_line(self, line, source, *lineno):
"""Append one line of generated reST to the output."""
line = line.replace('arbor._arbor','arbor')
with open(autodoc_output_file, "a") as file_object:
file_object.write(self.indent + line + '\n')
self.directive.result.append(self.indent + line, source, *lineno)
sphinx.ext.autodoc.Documenter.add_line = add_line
except ImportError:
# If not package here, hope autodoc_output_file is already checked in.
# Setup mock imports to stop autodoc from complaining about a missing package.
autodoc_mock_imports = ['arbor._arbor'] | [
"brent@huisman.pl"
] | brent@huisman.pl |
47e3124b6380abdf4b2d239094235a5d05760df7 | 45a5da5f73f197a18bbe8e106666d83db46466f8 | /iris/script.py | 5a5beb63a5c938cddd768291aaba3dc358b1b650 | [
"MIT"
] | permissive | jmoiron/iris | f77a38617757c861457a13005787e8f2992f6d6f | a7f61ec2161544e765789150f235fda8a6a40d69 | refs/heads/master | 2023-08-13T19:34:15.938697 | 2011-02-27T04:57:25 | 2011-02-27T04:57:25 | 1,027,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,240 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Support for the iris script."""
import os
from cmdparse import Command, CommandParser
from iris import backend
def insert_photos(paths):
    """Load each file in *paths* and bulk-insert it as a Photo.

    Files whose type cannot be identified are skipped silently.  Meant to be
    run in a parallelized scenario (see AddCommand), so it only touches the
    shared collection through BulkInserter.
    """
    from iris.loaders.file import UnknownImageTypeException
    collection = backend.Photo.objects.collection
    # Batch writes in groups of 50 to limit round-trips to the database.
    inserter = backend.BulkInserter(collection, threshold=50)
    for path in paths:
        photo = backend.Photo()
        try:
            photo.load_file(path)
        except UnknownImageTypeException:
            # Not an image we understand; skip it.
            continue
        inserter.insert(photo)
    # Flush any remaining photos below the batching threshold.
    inserter.flush()
class AddCommand(Command):
    """Add a photo or directory of photos."""

    def __init__(self):
        Command.__init__(self, "add", summary="add files or directories.")
        self.add_option('-r', '--recursive', action='store_true', default=False)
        self.add_option('', '--parallelize', action='store_true', default=False, help='run on more than one CPU')

    def run(self, options, args):
        """Insert the files/directories named in *args* into iris.

        With -r, directories are expanded recursively; with --parallelize,
        the insert work is fanned out across CPUs (and None is returned
        instead of the insert result).
        """
        from iris import utils
        paths = utils.recursive_walk(*args) if options.recursive else args
        if options.parallelize:
            utils.auto_parallelize(insert_photos, paths)
            return None
        return insert_photos(paths)
class TagCommand(Command):
    """Tag one or more photos.

    You can tag photos based on filename:

        iris tag photos/italy/*.JPG

    Or via a query on iris' database:

        iris tag -q ...
    """

    def __init__(self):
        Command.__init__(self, "tag", summary="tag photos by filename, query, etc.")
        self.add_option('-r', '--recursive', action='store_true', default=False)
        self.add_option('-q', '--query', action='store_true', default=False, help="query instead of paths")

    def run(self, options, args):
        # NOTE(review): tagging is not implemented yet — this only echoes
        # the parsed options/arguments.
        print "tag: ", options, args
class HelpCommand(Command):
    """Provides extended help for other commands."""

    def __init__(self):
        Command.__init__(self, "help", summary="extended help for other commands.")

    def run(self, options, args):
        """Print a command's docstring and option help, or the global help."""
        if not args:
            # No command named: fall back to the top-level usage message.
            self.parser.print_help()
            return
        name = args[0]
        cmd = self.parser.find_command(name)
        # The command's class docstring doubles as its extended help text.
        print cmd.__doc__
        cmd.print_help()
class ListCommand(Command):
    """List the photos stored in iris, with optional verbosity or a count."""

    def __init__(self):
        Command.__init__(self, "list", summary="list photos in iris")
        self.add_option('-v', '--verbose', action='count', help='increase verbosity')
        self.add_option('-c', '--count', action='store_true', help='count files matching spec')

    def run(self, options, args):
        """Print photo paths (-v adds details, -vv dumps raw documents)."""
        from iris import utils
        if options.count:
            # -c: just report the total and stop.
            print '%d photos' % backend.Photo.objects.find().count()
            return
        # Page through results 100 at a time, ordered by path.
        photos = backend.Photo.objects.find(sort=[('path', backend.pymongo.ASCENDING)], paged=100)
        if options.verbose > 1:
            import pprint
            # -vv: dump the raw document attributes.
            pprint.pprint([p.__dict__ for p in photos])
        elif options.verbose == 1:
            for photo in photos:
                # Flag photos whose file has moved since the last sync.
                moved_tag = '[%s]' % utils.bold('e', utils.red) if getattr(photo, 'moved', False) else ''
                print '-- %s %s' % (utils.bold(photo.path), moved_tag)
                tagstr = ' tags: %s' % ', '.join(photo.tags) if photo.tags else ''
                print '  %dx%d, %s%s' % (photo.x, photo.y, utils.humansize(photo.size), tagstr)
        else:
            for photo in photos:
                print photo.path
        print ''
        print '%d photos' % backend.Photo.objects.find().count()
class SyncCommand(Command):
    """Reconcile the database with the filesystem, flagging missing files."""

    def __init__(self):
        Command.__init__(self, 'sync', summary='sync all images currently in iris')
        self.add_option('-v', '--verbose', action='count', help='increase verbosity')

    def run(self, options, args):
        """Mark photos whose file no longer exists as moved."""
        from iris import utils
        db = backend.get_database()
        photos = [backend.Photo(p) for p in db.photos.find()]

        def log(string):
            # Only chatter when -v was given.
            if options.verbose:
                print string

        for photo in photos:
            if not os.path.exists(photo.path):
                photo.moved = True
                photo.save()
                log('%s [%s]' % (photo.path, utils.bold('e', utils.red)))
                continue
            if photo.moved:
                # NOTE(review): the cleared flag is never save()d here, so the
                # reset does not persist — confirm whether that is intended.
                photo.moved = None
            #photo.sync()
            log('%s' % photo.path)
class FlushCommand(Command):
    """Destroy the iris database after (optionally) confirming with the user."""

    def __init__(self):
        Command.__init__(self, 'flush', summary='flush iris\' database; this cannot be reversed!')
        self.add_option('-y', '--yes', action='store_true', help='do not prompt')

    def run(self, options, args):
        """Flush the database, prompting for confirmation unless -y."""
        from iris import utils
        if options.yes:
            backend.flush()
            return
        while True:
            prompt = 'Flush database? (this cannot be reversed!) [%s]|%s: '
            prompt = prompt % (utils.bold('n'), utils.bold('y', color=utils.red))
            answer = raw_input(prompt)
            # NOTE(review): the `in` checks treat the empty string as a match
            # ('' in 'yY' is True), so pressing Enter flushes the database
            # despite [n] being shown as the default; an explicit 'n' falls
            # through and re-prompts forever. Both look like bugs — confirm.
            if answer not in 'yYnN':
                print 'Invalid; please answer y or n.'
                continue
            if answer in 'yY':
                backend.flush()
                return
def run_with_profile(command, options, args):
    """Run *command* under cProfile and print the top 25 cumulative entries.

    Always returns 0 (the command's own return value is discarded).
    """
    import cProfile as Profile
    import pstats, tempfile
    # Profile data goes to a RAM-backed temp file; NOTE(review): /dev/shm is
    # Linux-specific, so this fails on other platforms.
    outfile = tempfile.NamedTemporaryFile(dir='/dev/shm/')
    Profile.runctx('command.run(options, args)', globals(), locals(), outfile.name)
    stats = pstats.Stats(outfile.name)
    stats.sort_stats('cumulative').print_stats(25)
    outfile.close() # deletes the temp file
    return 0
def run_with_timer(command, options, args):
    """Run *command*, print its wall-clock duration, and return its result."""
    from iris import utils
    import time
    t0 = time.time()
    ret = command.run(options, args)
    td = time.time() - t0
    print "timer results: %ss" % (utils.bold("%0.3f" % td))
    return ret
def main():
    """Entry point for the iris CLI: parse the command line and dispatch.

    Returns the command's result, 0 after printing help, or -1 on Ctrl-C.
    A mongodb connection failure is reported with the configured host/port.
    """
    import utils
    import pymongo
    parser = CommandParser()
    parser.add_option('', '--profile', action='store_true', help='profile the running command')
    parser.add_option('', '--timer', action='store_true', help='record the time it takes to run the command')
    parser.add_command(HelpCommand())
    parser.add_command(AddCommand())
    parser.add_command(TagCommand())
    parser.add_command(ListCommand())
    parser.add_command(SyncCommand())
    parser.add_command(FlushCommand())
    command, options, args = parser.parse_args()
    if command is None:
        # No recognized subcommand: show usage.
        parser.print_help()
        return 0
    try:
        # --profile and --timer wrap the command; --profile wins if both given.
        if options.profile:
            return run_with_profile(command, options, args)
        if options.timer:
            return run_with_timer(command, options, args)
        return command.run(options, args)
    except KeyboardInterrupt:
        return -1
    except pymongo.errors.AutoReconnect:
        # mongodb unreachable: report where we tried to connect.
        from iris import config
        cfg = config.IrisConfig()
        host, port = cfg.host, cfg.port
        host = host if host else 'localhost'
        port = port if port else 27017
        utils.error("could not connect to mongodb (%s:%s); is it running?" % (host, port))
| [
"jmoiron@jmoiron.net"
] | jmoiron@jmoiron.net |
157a1726a40ed6f8b768bdbc7a038fc8ccbd0799 | a1ddafe2130e8c7176467d4536634a4690fdeec4 | /DataGenerator.py | 784a169d6ca4c37ccfc437b2a77f92a09e3f5975 | [] | no_license | BrainNetwork/BrainNet | 364f9ca0259e5a1b86650a1465264a67fda71563 | a5a0a2a36ac012cc75e5cc5f8d9b7d39be933660 | refs/heads/master | 2022-11-13T09:55:08.789948 | 2020-06-25T15:36:56 | 2020-06-25T15:36:56 | 271,578,560 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | import numpy as np
import torch.nn.functional as F
import torch
from network import BrainNet
# inputs are random data with each entry taken from normal distribution
# n points in 'dim' dimensions which are labelled by by halfspace
def random_halfspace_data(dim, n, b=0):
    """Sample n points uniformly from [-1, 1]^dim and label them by a random halfspace.

    A random normal vector is drawn first; a point's label is True exactly
    when normal . point + b is positive.  Returns (points, boolean labels).
    """
    normal = 2 * (np.random.rand(dim) - 0.5)
    points = 2 * (np.random.rand(n, dim) - 0.5)
    side = np.sign(points @ normal + b)
    return points, side == 1
# Same as random_halfspace_data. Flipped label with prob. p.
def random_halfspace_error_data(dim, n, p):
    """Halfspace-labelled random data where each label is flipped with probability p."""
    points, labels = random_halfspace_data(dim, n)
    # One uniform draw per label, in order, so the random stream is unchanged.
    for idx in range(len(labels)):
        if np.random.uniform(low=0, high=1) < p:
            labels[idx] = not labels[idx]
    return points, labels
# 1st layer: k relu with random weights.
# 2nd layer: sum of outputs of first layer
def layer_relu_data(dim, n, k):
    """Label random points with a tiny fixed random 2-layer network.

    Layer 1: k ReLU units with random weights; layer 2: softmax over two
    random linear outputs.  Returns (points, argmax class per point) as
    numpy arrays.
    """
    samples = 2 * (torch.rand(n, dim) - 0.5)
    hidden_w = 2 * (torch.rand(dim, k) - 0.5)
    hidden = F.relu(samples @ hidden_w)
    out_w = 2 * (torch.rand(k, 2) - 0.5)
    scores = F.softmax(hidden @ out_w)
    return np.array(samples), np.array(np.argmax(scores, axis=1))
def brainnet_data(n, dim, labels, num_v = 20, p = .15, cap = 5, rounds = 1):
    """Label n random points with a freshly initialised (untrained) BrainNet.

    Points are uniform in [-1, 1]^dim as double tensors; the network's argmax
    output supplies the label.  num_v/p/cap/rounds are forwarded to BrainNet
    unchanged — see network.BrainNet for their meaning.
    """
    pts = 2 * (torch.rand(n, dim) - 0.5)
    pts = pts.double()
    net = BrainNet(dim, labels, num_v = num_v, p = p, cap = cap, rounds = rounds, full_gd = True, outlayer_connected = True)
    # Inference only: no gradients needed for data generation.
    with torch.no_grad():
        out = net(pts)
    return np.array(pts), np.array(np.argmax(out, axis=1))
| [
"noreply@github.com"
] | BrainNetwork.noreply@github.com |
b8613843b9f02e0da6d0a0295a7dc12ceaa1dc83 | ffc5936db35e0e6a7d38d41a5e40c4b4bed0bc42 | /archiver/fetcher.py | 32430c3f429e97645565685d22394b60ec751ee5 | [
"MIT"
] | permissive | shurain/archiver | a4a2278e68bedd19cc787f053e1bc0c4eb86001b | 06d3c5489d9c87f693aa7d220906ae03fd51d9cd | refs/heads/master | 2021-01-22T07:27:10.184198 | 2013-11-13T06:31:18 | 2013-11-13T06:31:18 | 9,668,319 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | # -*- coding: utf-8 -*-
"""
A module for fetching resource indicated by a URL.
"""
import requests
import logging
class URLFetcher(object):
    """Fetch a URL and classify/retrieve its content.

    The request is issued eagerly in __init__ (streaming, following
    redirects).  NOTE(review): verify=False disables TLS certificate
    checking — confirm this is acceptable for the archiver's threat model.
    """

    SIZELIMIT = 100 * 2**10 * 2**10  # 100M

    def __init__(self, url):
        self.url = url
        # XXX Could allowing redirects become a problem?
        # XXX maybe stream might timeout?
        self.response = requests.get(url, allow_redirects=True, stream=True, verify=False)

    @property
    def content_type(self):
        # MIME type from the response header, without charset parameters;
        # None when the server sent no content-type at all.
        if 'content-type' in self.response.headers:
            return self.response.headers['content-type'].split(';')[0].strip()
        else:
            return None

    def is_image(self):
        # Trust the header first, then fall back to magic-number sniffing.
        if self.content_type in ['image/gif', 'image/png', 'image/jpeg']:
            #FIXME supporting more image types?
            return True
        elif self.response.content[:8] == '\x89PNG\r\n\x1a\n':
            #png magic number
            # map(hex, map(ord, self.response.content[:8])) == ['0x89', '0x50', '0x4e', '0x47', '0xd', '0xa', '0x1a', '0xa']
            return True
        elif self.response.content[:2] == '\xff\xd8':
            #jpeg magic number
            return True
        elif self.response.content[:6] in ("GIF89a", "GIF87a"):
            return True
        else:
            return False

    def image_content_type(self):
        """Returns the content type of the image.

        This method assumes that you have already confirmed that the resource is an image.
        Returns None when no content type matches.
        """
        if self.content_type in ['image/gif', 'image/png', 'image/jpeg']:
            return self.content_type
        elif self.response.content[:8] == '\x89PNG\r\n\x1a\n':
            return 'image/png'
        elif self.response.content[:2] == '\xff\xd8':
            return 'image/jpeg'
        elif self.response.content[:6] in ("GIF89a", "GIF87a"):
            return 'image/gif'
        # Falls through to an implicit None when nothing matched.

    def is_PDF(self):
        """Check if the resource is a PDF document.

        It will try to check the content-type of the response header,
        and peep the content for magic number indicating the content type.
        """
        if self.content_type == 'application/pdf':
            return True
        if self.response.content[:4] == '%PDF':
            return True
        else:
            return False

    def is_HTML(self):
        """Check if the resource is a HTML document.

        Just checks the content-type of the response header.
        """
        if self.content_type == 'text/html':
            return True
        else:
            return False

    def is_text(self):
        """Check if the resource is a plain text.

        Just checks the content-type of the response header.
        """
        if self.content_type == 'text/plain':
            return True
        else:
            return False

    def fetch(self):
        """Fetch the resource content.

        Has a guard to check if the content exceeds the size limit.
        Size limit can be overrided by settings the SIZELIMIT variable.
        """
        if 'content-length' not in self.response.headers:
            logging.info("No content-length header, proceeding anyway.")
        elif int(self.response.headers['content-length']) > self.SIZELIMIT:
            #FIXME create a specific exception
            raise Exception("File too large")
        return self.response.content
| [
"shurain@gmail.com"
] | shurain@gmail.com |
9166dc2e456f9adbf39f8f327bc6c3f432090aa9 | 976d399110f839ba98dc30e51004297385c56479 | /phone.py | cd062200df0418c8ebf51a5f6d08aaded568f901 | [] | no_license | EileenLL/Phone-App-Practice | 4f9bb0eda10e505c833b79d15e21b5e3525399f6 | 3b83fd7547a4248752f89255f530e19710b91033 | refs/heads/master | 2020-12-05T02:01:28.760728 | 2017-03-02T05:15:49 | 2017-03-02T05:15:49 | 83,637,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,924 | py |
class Phone(object):
    """A simple Phone class to keep track of contacts.

    Contacts are stored in a dict keyed by the lowercased "first last" name
    (see _get_contact_key).  Python 2 code.
    """

    def __init__(self, number, name, contacts=None):
        self.number = number
        self.name = name
        # Avoid a shared mutable default: each phone gets its own dict.
        if contacts:
            self.contacts = contacts
        else:
            self.contacts = {}

    # The __repr__ method gives the class a print format that is meaningful to
    # humans, in this case we chose the phone's name
    def __repr__(self):
        return self.name

    def add_contact(self, first_name, last_name, number):
        """Creates new Contact instance and adds the instance to contacts"""
        entry = Contact(first_name, last_name, number)
        self.contacts[self._get_contact_key(first_name, last_name)] = entry
        # NOTE(review): debug print of the whole contact dict — remove for release?
        print self.contacts
        # See the types of each parameter from the function call in contact_ui.py
        pass

    def call(self, first_name, last_name):
        """Call a contact.

        NOTE(review): raises KeyError for an unknown contact, and prints the
        lowercased dict key rather than the contact's display name.
        """
        call_name = self._get_contact_key(first_name, last_name)
        contact = self.contacts[self._get_contact_key(first_name, last_name)]
        contact_number = contact.phone_number
        # look up number in dictionary through name key
        print "You are calling " + str(call_name) + " at " + str(contact_number)
        pass

    def text(self, first_name, message):
        """Send a contact a message. (Not implemented yet.)"""
        pass

    def del_contact(self, first_name, last_name):
        """Remove a contact from phone"""
        del self.contacts[self._get_contact_key(first_name, last_name)]
        pass

    def _get_contact_key(self, first_name, last_name):
        """This is a private method. It's meant to be used only from within
        this class. We notate private attributes and methods by prepending with
        an underscore.
        """
        return first_name.lower() + " " + last_name.lower()
# class definition for a Contact
class Contact(object):
    """Details of a single person in the phone's address book."""

    def __init__(self, first_name, last_name, phone_number,
                 email="", twitter_handle=""):
        self.first_name = first_name
        self.last_name = last_name
        self.phone_number = phone_number
        self.email = email
        self.twitter_handle = twitter_handle

    def __repr__(self):
        # Printing a contact shows "First Last", same as full_name().
        return "{} {}".format(self.first_name, self.last_name)

    def full_name(self):
        """Return the contact's display name: first and last, space-separated."""
        return " ".join((self.first_name, self.last_name))
# some examples of how to use these two classes
# Make a Phone instace
# tommys_phone = Phone(5555678, "Tommy Tutone's Phone")
# Use the Phone class to add new contacts!
# tommys_phone.add_contact("Jenny", "From That Song", 8675309)
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
cc831def9e82980ee13075b9666095f9b36861a9 | c668cba1d3a1c2de1ad160a575e1ac556fc0f064 | /Project/KNN.py | 5a4db5537c3258571c25d112879de1eb08b7e606 | [] | no_license | AliMuhammad229/AI_106394 | 644974396f39e1d910a4890e1e9b708b69c8b49d | b4ff235594f5418115bda1238af72db310d62b56 | refs/heads/main | 2023-06-22T13:15:51.057187 | 2021-07-19T20:08:58 | 2021-07-19T20:08:58 | 329,240,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,407 | py | import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import math
#function to perform convolution
def convolve2D(image, filter):
    """Valid-mode 2-D convolution of a square image with a square filter.

    Slides *filter* over *image*, writing the integer-divided weighted sum
    into the output; each output dimension shrinks by (filter width - 1).

    Fixes / generalization vs. the original:
      * the normalizer was hard-coded to ``25`` (a 5x5 filter) even though
        this helper is also called with 7x7 and 9x9 filters; it now divides
        by ``filter.size``.
      * the input size was hard-coded to 28; it is now read from
        ``image.shape``, so any square image works (28x28 callers see the
        exact same results for 5x5 filters).
    """
    fX, fY = filter.shape      # Get filter dimensions
    fNby2 = (fX // 2)          # border width lost on each side
    n = image.shape[0]         # was hard-coded 28
    nn = n - (fNby2 * 2)       # new dimension of the reduced image
    newImage = np.zeros((nn, nn))  # empty new 2D image
    for i in range(0, nn):
        for j in range(0, nn):
            newImage[i][j] = np.sum(image[i:i + fX, j:j + fY] * filter) // filter.size
    return newImage
#Read Data from CSV
train = pd.read_csv("Kaggle Data/train.csv")
X = train.drop('label',axis=1)
Y = train['label']
#Create Filter for convolution 5 x 5
# Same Dimension
filter = np.array([
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1]
])
# Different Dimensions
filter = np.array([
[1,1,1,1,1],
[1, 2, 2, 2, 1],
[1, 2, 3, 2, 1],
[1, 2, 2, 2, 1],
[1,1,1,1,1]
])
# Apply only for 5 x 5 filters i.e. two different sizes
#convert from dataframe to numpy array
X = X.to_numpy()
print(f'Number of Rows & Columns: {X.shape}')
#new array with reduced number of features to store the small size images
sX = np.empty((0,576), int)
ss = 42000 #subset size for dry runs change to 42000 to run on whole data
#Perform convolve on all images
for img in X[0:ss,:]:
img2D = np.reshape(img, (28,28))
nImg = convolve2D(img2D,filter)
nImg1D = np.reshape(nImg, (-1,576))
sX = np.append(sX, nImg1D, axis=0)
Y = Y.to_numpy()
sY = Y[0:ss]
print(sY.shape)
print(sX.shape)
# train and test model
sXTrain, sXTest, yTrain, yTest = train_test_split(sX,sY,test_size=0.2,random_state=0)
print(sXTest.shape,", ",yTest.shape)
print(sXTrain.shape,", ",yTrain.shape)
print('\n')
# # Total Length
print('Length: ',len(yTest))
print('K: ',math.sqrt(len(yTest)))
print('\n')
# K = 91 We used odd value for K because error ratio is decreasing
# If p = 2 Euclidean Distance etc. is used for Arbitrary data
# If p = 1 Manhatten Distance
classifier = KNeighborsClassifier(n_neighbors=91,p=2,metric='euclidean')
classifier.fit(sXTrain, yTrain)
Y_pred = classifier.predict(sXTest)
print(f'Score: {classifier.score(sXTest, yTest)}')
# To predict our model on test.csv
predictedClasses = classifier.predict(sXTest)
print(sX.shape)
# It creates a dataframes on Image Id col and Labels col which has the rows 28000
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictedClasses)+1)), "Label": predictedClasses})
# To create this submission it will turn it into comma separated values CSV
submissions.to_csv("submission.csv", index = False, header = True)
# Now we download this submission file
from google.colab import files
files.download('submission.csv')
#Create Filter for convolution 7 x 7
# Same Dimension
filter = np.array([
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]
])
# # Different Dimension
filter = np.array([
[1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 1, 1, 1],
[1, 2, 3, 2, 1, 1, 1],
[1, 2, 2, 2, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]
])
# # Apply only for 7 x 7 filters i.e. two different sizes
# #convert from dataframe to numpy array
X = X.to_numpy()
print(f'Number of Rows & Columns: {X.shape}')
#new array with reduced number of features to store the small size images
sX = np.empty((0,484), int)
# img = X[6]
ss = 42000 #subset size for dry runs change to 42000 to run on whole data
#Perform convolve on all images
for img in X[0:ss,:]:
img2D = np.reshape(img, (28,28))
# print(img2D.shape)
# print(img2D)
nImg = convolve2D(img2D,filter)
# print(nImg.shape)
# print(nImg)
nImg1D = np.reshape(nImg, (-1,484))
# print(nImg.shape)
sX = np.append(sX, nImg1D, axis=0)
Y = Y.to_numpy()
sY = Y[0:ss]
# print(sY)
print(sY.shape)
print(sX.shape)
# train and test model
sXTrain, sXTest, yTrain, yTest = train_test_split(sX,sY,test_size=0.2,random_state=0)
print(sXTest.shape,", ",yTest.shape)
print(sXTrain.shape,", ",yTrain.shape)
print('\n')
# # Total Length
print('Length: ',len(yTest))
print('K: ',math.sqrt(len(yTest)))
print('\n')
# K = 91 We used odd value for K because error ratio is decreasing
# If p = 2 Euclidean Distance etc. is used for Arbitrary data
# If p = 1 Manhatten Distance
classifier = KNeighborsClassifier(n_neighbors=91,p=2,metric='euclidean')
classifier.fit(sXTrain, yTrain)
Y_pred = classifier.predict(sXTest)
print(f'Score: {classifier.score(sXTest, yTest)}')
# To predict our model on test.csv
predictedClasses = classifier.predict(sXTest)
print(sX.shape)
# #Create Filter for convolution 9 x 9
# # Same Dimension
filter = np.array([
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1]
])
# # Different Dimension
filter = np.array([
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 1, 1, 1, 1, 1],
[1, 2, 3, 2, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1]
])
# # Apply only for 9 x 9 filters i.e. two different sizes
# #convert from dataframe to numpy array
X = X.to_numpy()
print(f'Number of Rows & Columns: {X.shape}')
#new array with reduced number of features to store the small size images
sX = np.empty((0,400), int)
ss = 42000 #subset size for dry runs change to 42000 to run on whole data
#Perform convolve on all images
for img in X[0:ss,:]:
img2D = np.reshape(img, (28,28))
nImg = convolve2D(img2D,filter)
nImg1D = np.reshape(nImg, (-1,400))
sX = np.append(sX, nImg1D, axis=0)
Y = Y.to_numpy()
sY = Y[0:ss]
# print(sY)
print(sY.shape)
print(sX.shape)
# train and test model
sXTrain, sXTest, yTrain, yTest = train_test_split(sX,sY,test_size=0.2,random_state=0)
print(sXTest.shape,", ",yTest.shape)
print(sXTrain.shape,", ",yTrain.shape)
print('\n')
# # Total Length
print('Length: ',len(yTest))
print('K: ',math.sqrt(len(yTest)))
print('\n')
# K = 91 We used odd value for K because error ratio is decreasing
# If p = 2 Euclidean Distance etc. is used for Arbitrary data
# If p = 1 Manhatten Distance
classifier = KNeighborsClassifier(n_neighbors=91,p=2,metric='euclidean')
classifier.fit(sXTrain, yTrain)
Y_pred = classifier.predict(sXTest)
print(f'Score: {classifier.score(sXTest, yTest)}')
# To predict our model on test.csv
predictedClasses = classifier.predict(sXTest)
print(sX.shape)
| [
"noreply@github.com"
] | AliMuhammad229.noreply@github.com |
4d6bb68dfd8b9172ae8c84898dd1da1e196c4571 | 12031b04d77627c6f4b7d15f73c3d9cad8d1d5fb | /adaSepConv/config.py | 74228b47464b6c014c347f4b1777df943c1d2925 | [] | no_license | priyanshagarwal18/Image-Interpolation-via-adaptive-separable-convolution | 4e41d2b8017daf4f02c6299e4a2a1b4b44939d0a | 50706ced3e8fd27a3918e70954d9eb79bf1ef8b0 | refs/heads/master | 2022-11-11T16:39:43.144168 | 2020-07-02T10:57:58 | 2020-07-02T10:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # The size of the input images to be fed to the network during training.
# Fraction of each patch used as network input during training (128 of 150 px).
CROP_SIZE_fraction: float = 128/150
# The size of the patches to be extracted from the datasets.
PATCH_SIZE = (150, 150)
# Number of epochs used for training.
EPOCHS: int = 10
# Kernel size of the custom Separable Convolution layer.
OUTPUT_1D_KERNEL_SIZE: int = 51
# The batch size used for mini batch gradient descent.
BATCH_SIZE: int = 1
# Path to the TFRecord dataset directory.
TFRECORD_DATASET_DIR = './dataset'
# Network input shape: two stacked 128x128 RGB frames (3 + 3 channels).
INPUT_SHAPE = (128,128,6)
# Dataset directory.
DATASET_DIR = './dataset'
# Prediction height (pixels).
PREDICTION_H: int = 128
# Prediction width (pixels).  (Original comment said "weight" - typo.)
PREDICTION_W: int = 128
# Batch size used at prediction time.
PREDICTION_BATCH: int = 1
"gsingh2@cs.iitr.ac.in"
] | gsingh2@cs.iitr.ac.in |
66c6f4405ca42ccd0367fea8c24a5f124c4726ad | 18a7cabee0609a4ceb76697ce322bd6a04c662f3 | /v2.1/src/main/python/remote/RemoteClient.py | bfd3890e7945f13b14518357405a063c20d08865 | [] | no_license | blakeolsen/design2 | 061469c7a8c0352f6b74c0c913e4796db257bf6f | 660ce0b7ad5ec5ae88720effefc08c9eb07a3b14 | refs/heads/master | 2021-01-17T08:05:22.301030 | 2017-05-08T12:36:43 | 2017-05-08T12:36:43 | 83,846,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | import bluetooth
# Scan for nearby Bluetooth devices and print each address and name.
# Fixes vs. the original:
#   * the PyBluez API is bluetooth.discover_devices — the original called a
#     non-existent bluetooth.discovered_devices
#   * the loop variable was `add` but the print used `addr` -> NameError
discovered_devices = bluetooth.discover_devices(lookup_names=True)
print("found %d devices" % len(discovered_devices))
for addr, name in discovered_devices:
    print(" %s - %s" % (addr, name))
"blakeolsen@Blakes-MBP.wv.cc.cmu.edu"
] | blakeolsen@Blakes-MBP.wv.cc.cmu.edu |
911dc637c6b1edb5e176cad2461ee313780171ac | 9b645c8c1702e8d0e9d1229f2cb98ca15692a494 | /weather.py | 40e98e1211b402a5391f21df6a78968ac80329d3 | [] | no_license | Poporad/flask_app | edc82e7c7df0d905d22fc78480fafa55da02e89c | e051c6dc795f0f1077c913fff7cc2687faf3985b | refs/heads/master | 2020-06-11T12:20:01.727247 | 2016-12-06T01:46:57 | 2016-12-06T01:46:57 | 75,669,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py |
import forecastio
from geopy.geocoders import Nominatim
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
address = "Philadelphia, PA"
def get_weather(address):
    """Return a one-line current-weather summary for a street address.

    Geocodes *address* with Nominatim, queries the forecast.io API (key
    taken from the FORECASTIO_API_KEY environment variable) and formats
    the current summary and temperature.
    """
    api_key = os.environ['FORECASTIO_API_KEY']
    location = Nominatim().geocode(address)
    current = forecastio.load_forecast(api_key,
                                       location.latitude,
                                       location.longitude).currently()
    return "{} and {}° at {}".format(current.summary, current.temperature, address)
#print(get_weather(address, api_key)) | [
"MichaelPoporad@Michaels-MacBook-Pro-2.local"
] | MichaelPoporad@Michaels-MacBook-Pro-2.local |
43ce02be5dc4ee0bf7d78d1c0ceda9d1ad69fc72 | fbcede1fe60f99aad37855b812bd9bd798079e41 | /stepik_some_problems_selenium_autotest/final/pages/locators.py | 9febe8923d2b7e326ef53c9f8f26c9acc671b295 | [] | no_license | haykeminyan/DataNerds-AI | d6af46812ec176c5ba178bed4d111f95b31aefbc | 0c56938a1069215f4300f57c3d8168715b89178d | refs/heads/master | 2021-07-06T11:41:32.213873 | 2020-08-16T22:21:30 | 2020-08-16T22:21:30 | 232,107,833 | 1 | 0 | null | 2021-04-20T19:02:35 | 2020-01-06T13:33:57 | Jupyter Notebook | UTF-8 | Python | false | false | 1,202 | py | from selenium.webdriver.common.by import By
class BasePageLocators(object):
    """Locators shared by every page of the shop."""
    LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")    # header link to the login page
    CART_LINK = (By.CSS_SELECTOR, ".basket-mini a")  # link to the shopping cart
    USER_ICON = (By.CSS_SELECTOR, ".icon-user")      # shown when a user is logged in
class LoginPageLocators(object):
    """Locators for the combined login / registration page."""
    LOGIN_FORM = (By.ID, "login_form")
    REGISTER_FORM = (By.ID, "register_form")
    REGISTRATION_EMAIL = (By.ID, "id_registration-email")
    REGISTRATION_PASSWORD = (By.ID, "id_registration-password1")
    REGISTRATION_PASSWORD_AGAIN = (By.ID, "id_registration-password2")
    REGISTRATION_BUTTON = (By.CSS_SELECTOR, "[name='registration_submit']")
class CartPageLocators(object):
    """Locators for the shopping-cart page."""
    EMPTY_CART_MESSAGE = (By.CSS_SELECTOR, "#content_inner > p")  # "cart is empty" text
    CART_ITEMS = (By.CSS_SELECTOR, ".basket-items")               # list of items in the cart
class ProductPageLocators(object):
    """Locators for a single product page and its add-to-cart alerts."""
    PRODUCT_NAME = (By.CSS_SELECTOR, ".product_main h1")
    PRODUCT_PRICE = (By.CSS_SELECTOR, ".price_color")
    ADD_TO_CART_BUTTON = (By.CSS_SELECTOR, ".btn-add-to-basket")
    SUCCESS_MESSAGE = (By.CSS_SELECTOR, ".alert-success:nth-child(1)")
    ALERT_PRODUCT_NAME = (By.CSS_SELECTOR, ".alert:nth-child(1) strong")   # name echoed in the alert
    ALERT_PRODUCT_PRICE = (By.CSS_SELECTOR, ".alert:nth-child(3) strong")  # cart total in the alert
"ibhayk@gmail.com"
] | ibhayk@gmail.com |
4048bc44274c151ff46d315bd4632988cfc2f2fc | 63c4a439209bf4b86db9fabc77169122b8f34ff2 | /migrations/versions/2bf098c4b83c_item_table.py | 7c5a58b134258ea814b0008e9c70c796c5a704a4 | [
"MIT"
] | permissive | denikond/LP19_avitko | 9c7b2a9cefc21c433df91f38868843fc09c1edcb | 84bed264d5ed9e1da5b3166fa717d4ffe4ee9d6b | refs/heads/main | 2023-08-18T11:22:37.217226 | 2021-10-10T14:53:37 | 2021-10-10T14:53:37 | 322,697,273 | 0 | 0 | null | 2021-01-24T17:24:37 | 2020-12-18T20:25:53 | Python | UTF-8 | Python | false | false | 1,324 | py | """Item table
Revision ID: 2bf098c4b83c
Revises:
Create Date: 2021-01-04 13:54:32.900023
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2bf098c4b83c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``item`` table plus indexes on description and num_of_ad."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('item',
    sa.Column('key', sa.Integer(), nullable=False),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.Column('num_of_ad', sa.String(length=32), nullable=True),
    sa.Column('creation_date', sa.Date(), nullable=True),
    sa.Column('address', sa.String(length=255), nullable=True),
    sa.Column('price', sa.Integer(), nullable=True),
    sa.Column('extended_text', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('key')
    )
    op.create_index(op.f('ix_item_description'), 'item', ['description'], unique=False)
    # num_of_ad is the ad's external identifier, hence unique
    op.create_index(op.f('ix_item_num_of_ad'), 'item', ['num_of_ad'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Drop the indexes and the ``item`` table (exact reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_item_num_of_ad'), table_name='item')
    op.drop_index(op.f('ix_item_description'), table_name='item')
    op.drop_table('item')
    # ### end Alembic commands ###
| [
"denikond@gmail.com"
] | denikond@gmail.com |
a850529d075a3b5c5354d23ed691648543625593 | da2c159f1971cf71cf2b733dc0dbdd3684761180 | /Hackerrank/Sets/Check subset.py | adc1740e8ccc00cee9daa9d1681fdf8ae372ac68 | [] | no_license | shravan0409/LTI-preparation | a11bacb485893e294bde89b65ae30912b1cb466f | 72c692d147c0da74b9cca9efd7acd6988dc47c87 | refs/heads/main | 2023-03-24T13:16:33.728761 | 2021-03-21T17:55:14 | 2021-03-21T17:55:14 | 348,372,387 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # Enter your code here. Read input from STDIN. Print output to STDOUTT = int(input())
T = int(input())
for _ in range(T):
a = input()
A = set(input().split())
b = int(input())
B = set(input().split())
print(A.issubset(B)) #this is a very useful function which is used for checking if the set a is a subset of set b
#OUTPUT:
#A = [1,2,3,4,5,6,7]
#B = [1,2,3,4]
# OUTPUT:
# TRUE
| [
"noreply@github.com"
] | shravan0409.noreply@github.com |
db9c194c9611682fa335e114a692b5fe6ecfd658 | a6218288efd1b6165a1801cfb46cbb4310e0bace | /project_tests/data_generation_scripts/data_gen_utils.py | 4b677cb42487bed0a93f3ce12bdb052f2cc57fca | [
"MIT"
] | permissive | kevin5naug/column_store | 9a85f29e556923465a3efe204a73f7bbfaef2634 | a82c3bce33b7421cd0def340e00685e5fcd8f6ec | refs/heads/master | 2022-12-06T03:04:58.975438 | 2020-08-17T23:43:58 | 2020-08-17T23:43:58 | 288,309,161 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | #!/usr/bin/python
import sys, string
from random import choice
import random
from string import ascii_lowercase
from scipy.stats import beta, uniform
import numpy as np
import struct
import pandas as pd
def openFileHandles(testNum, TEST_DIR=""):
    """Open (for writing) the generated ``.dsl`` and expected-output ``.exp``
    files for test number *testNum*.

    File names are ``test<NN>gen.dsl`` / ``test<NN>gen.exp`` with the test
    number zero-padded to two digits.  Returns (output_file, exp_output_file);
    the caller owns both handles (see closeFileHandles).
    """
    # if a directory base specified, we want to add the trailing separator `/`
    if TEST_DIR != "":
        TEST_DIR += "/"
    # {:02d} zero-pads to two digits (test01 ... test99, test100 unchanged),
    # replacing the original duplicated < 10 / >= 10 branches.
    base = "{}test{:02d}gen".format(TEST_DIR, testNum)
    output_file = open(base + ".dsl", "w")
    exp_output_file = open(base + ".exp", "w")
    return output_file, exp_output_file
def closeFileHandles(output_file, exp_output_file):
    """Flush and close both generated-test file handles."""
    for handle in (output_file, exp_output_file):
        handle.flush()
        handle.close()
def generateHeaderLine(dbName, tableName, numColumns):
    """Return the list ['<db>.<table>.col1', ..., '<db>.<table>.col<numColumns>']."""
    return ['{}.{}.col{}'.format(dbName, tableName, i)
            for i in range(1, numColumns + 1)]
def outputPrint(pandasArray):
    """Render a DataFrame without header/index; empty string when it has no rows."""
    if pandasArray.shape[0]:
        return pandasArray.to_string(header=False, index=False)
    return ''
| [
"kevinguan_37@outlook.com"
] | kevinguan_37@outlook.com |
f751956740a746fddfd1314675b51149bca9f311 | 4b4ff2c0d135d3615caaeb80735c2ad6ee987914 | /venv/bin/gunicorn | fd714863ba0e3663ea60c7f331e3d7f1e5b2cd40 | [] | no_license | Nicolas-Turck/Tuto-deployement-heroku | 23060837b47f195d9af2eb280a85836d1a8f8efd | 54d104054c06070420ae36b6bbb45089492da286 | refs/heads/master | 2023-08-01T07:18:13.563988 | 2021-05-20T16:07:56 | 2021-05-20T16:07:56 | 257,563,781 | 0 | 0 | null | 2021-09-22T18:54:33 | 2020-04-21T10:46:23 | Python | UTF-8 | Python | false | false | 256 | #!/home/nicos/PycharmProjects/Tuto-heroku/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"nicolas.turck@gmail.com"
] | nicolas.turck@gmail.com | |
870f4ef3cedddc663fb7b8f310d6b86d04b6de4f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03588/s377192087.py | 33be853cadb78a71d1fcb119905a836c2c06e43c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | n = int(input())
max_a = 0
st_b = 0
for i in range(n):
a,b = map(int,input().split())
if max_a < a:
max_a = a
st_b = b
print(max_a+st_b) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b161bebc0c95852fe0b52a202ab484c6b1740aa5 | 1a6773c3a17445d26b7c0f1eb124520bd572e8b9 | /venv/Scripts/easy_install-3.7-script.py | a9fb4887a64a89b107dc6d8be18cf4e2405427d3 | [] | no_license | JohnsonLC/TwoGram | 4a78202d465d53aa6bb2d4dabaa556369a511fbd | 3880cee5740bdf0c5e24a26e7e667b57a075438f | refs/heads/master | 2021-10-25T04:24:30.957225 | 2019-03-31T23:49:57 | 2019-03-31T23:49:57 | 158,175,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #!E:\workplace\Python\twoGram_improve\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"17319255967@163.com"
] | 17319255967@163.com |
b7bbd84e06ca31ba392526763b0e4e6a5b8174a3 | 7a17a8fa5131d034c7e980d5982294da07b67f2b | /Si_Streifensensor/Auswertung/Vermessung Laser/Pitch.py | a5d4566b387ec1eed63c282f4b7c084bc670ca5e | [] | no_license | Ninilini/FP_Teilchen | 982f35f3794ea0fe008bba3c7b7046028d01c174 | f55acd0765a5883dac85b0fbb711d3dc7e897748 | refs/heads/master | 2020-05-15T01:55:40.846114 | 2019-07-28T14:49:40 | 2019-07-28T14:49:40 | 182,033,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,892 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Zeile:Position, Spalte:Kanalnummer
daten_df = pd.read_csv('Laserscan.txt', encoding='utf-8', comment='#', sep='\t')
daten = daten_df.values
Intervall = len(daten)
Kanalanzahl = len(daten[2])
Position = np.linspace(0, Intervall*10, Intervall)
Maxima = [0.0]*21
Kanalnummer = [0]*21
Maximaposition = [0]*21
Maxpos = [0]*21
#Pitch
#Maxima jedes Kanals finden
k=61
i=0
while k <82 :
Kanalnummer[i]=k
Maxima[i]= np.max(daten[:,k])
k=k+1
i=i+1
#print(Maxima)
#Position zu Maxima finden -> Position entspricht Zeilenindex
k=61
i=0
while k < 82:
ctr = 0
while ctr < Intervall:
if daten[ctr,k]==Maxima[i]:
Maximaposition[i]=ctr*100
Maxpos[i]=ctr
ctr = Intervall
else:
ctr = ctr +1
i=i+1
k=k+1
#print(Maximaposition)
#print(Kanalnummer)
plt.plot(Kanalnummer,Maximaposition, linestyle = '', marker='x' )
plt.xlabel(r'$Kanalnummer$')
plt.ylabel(r'$Position\;[\mathrm{\mu m}]$')
plt.savefig('Maxima.pdf')
plt.show()
plt.clf()
Abstand = [0.0]*20
i=0
while i < 20:
Abstand[i]=abs(Maximaposition[i+1]-Maximaposition[i])
i=i+1
print(Maximaposition)
Pitch = np.mean(Abstand)
Pitch_error = np.std(Abstand, ddof=1)/np.sqrt(len(Abstand))
print('Pitch')
print(Pitch)
print(Pitch_error)
#15.5 Mikrometer
#Laserausdehnung
#Ansteigende Flanke
steigend=[0]*21
k=61
i=0
while k < 82:
ctr = 0
while daten[ctr,k]<1:
ctr = ctr +1
steigend[i]=ctr*100
k=k+1
i=i+1
#Abfallende Flanke
fallend=[0]*21
k=61
i=0
while k < 82:
ctr = Maxpos[i]
while daten[ctr,k]>1:
if ctr == Intervall-1:
ctr = 0
break
ctr = ctr +1
fallend[i]=ctr*100
k=k+1
i=i+1
pos = np.arange(0, 3.4, 0.1)
plt.plot(pos, daten[:,65:66], color ='darkblue', label=r'Signal')
plt.vlines(2.4, -3, 59, color = 'forestgreen', linestyle='--', label=r'Start/Ende' )
plt.vlines(2.7, -3, 59, color = 'maroon', linestyle='--', label=r'Maximum' )
plt.vlines(3.1, -3, 59, color = 'forestgreen', linestyle='--')
plt.xlim(2.0,3.5)
plt.xlabel(r'Position$\;$[mm]')
plt.ylabel(r'ADCC')
plt.legend()
plt.savefig('Flanken.pdf')
plt.show()
diff_steig=[0]*0
diff_fall=[0]*0
i=0
while i < 21:
if steigend[i]>0:
diff_steig.append(abs(steigend[i]-Maximaposition[i]))
if fallend[i]>0:
diff_fall.append(abs(fallend[i]-Maximaposition[i]))
i=i+1
print(diff_steig)
print(diff_fall)
ausdehnung = (np.mean(diff_steig)+np.mean(diff_fall))/2
ausdehnung_error = ((np.std(diff_steig, ddof=1)/np.sqrt(len(diff_steig))) + (np.std(diff_fall, ddof=1)/np.sqrt(len(diff_fall))))/2
print('Ausdehnung')
print(ausdehnung)
print(ausdehnung_error)
#292.5 Mikrometer | [
"miriam.schwarze@tu-dortmund.de"
] | miriam.schwarze@tu-dortmund.de |
e6722233e9c3812429f0849cb2e97f51dd1144b5 | 67feeb09d72e82eab00aefd27416ac8d575a5080 | /visualizekinases.py | d2c848f8d614a62ec533b890ebfdef508e0d3957 | [] | no_license | jugerrnaut/synopsis_2019 | 3c9e48c82adab607d5e34f5942e9c65e29d14e0f | 14560239fa96fee726ebf4e1775f1545f1d0b4b2 | refs/heads/master | 2020-11-27T21:51:40.931321 | 2020-03-04T19:50:55 | 2020-03-04T19:50:55 | 229,615,291 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,764 | py | import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import bs4
from bs4 import BeautifulSoup as bs
import pandas as pd #for csv files
import random
import sklearn #some of the non nueral net machine learning stuff
import requests #making requests to the html
import json #lovely json
import pubchempy as pcp
from mpl_toolkits import mplot3d
#add in interactions between atoms
CID = [4133,2244]
columns = ["num atoms","coors", "bond coors","formal charge"]
test_df = pd.DataFrame(columns = columns)
bondslist = []
pointslookup = []
bondcoors = []
formal_charge = 0
elements = []
all_elements = []
def normalize(x):
    """Linearly map a formal charge from [-2, 2] onto [0, 1]."""
    return 0.25 * (x + 2)
def createbondinglist(response):
    """Collect bond triples from a PubChem ``bonds`` record.

    *response* carries parallel lists under "aid1", "aid2" and "order";
    each [atom1, atom2, bond_order] triple is appended to the module-level
    ``bondslist`` (which is also returned).
    """
    global bondslist
    leftbonds = response["aid1"]
    rightbonds = response["aid2"]
    bondtype = response["order"]
    for i in range(0,len(leftbonds)):
        bondslist.append([leftbonds[i],rightbonds[i],bondtype[i]])
    return bondslist
def create_data(CID):
global test_df
global bondslist
global pointslookup
global bondcoors
global formal_charge
global all_elements
ax = plt.axes(projection='3d')
for mol in CID:
sample = pcp.Compound.from_cid('{}'.format(mol),record_type='3d')
normal_sample = pcp.Compound.from_cid('{}'.format(mol))
a = sample.record
c = normal_sample.record
atomic_info = sample.to_dict(properties=['atoms', 'bonds', 'inchi'])
base = atomic_info["atoms"]
for i in range(0,len(base)):
elements.append(base[i]["element"])
formal_charge = normal_sample.charge
mapped_formal_charge = normalize(formal_charge)
x_base = a["coords"][0]["conformers"][0]["x"]
y_base = a["coords"][0]["conformers"][0]["y"]
z_base = a["coords"][0]["conformers"][0]["z"]
bonds = a["bonds"]
for i in range(0,len(x_base)):
markerstr = ""
eoi = elements[i]
if eoi =='C':
markerstr = "x"
elif eoi == "O":
markerstr = "o"
elif eoi == "N":
markerstr = "v"
elif eoi == "S":
markerstr = "s"
else:
markerstr = "*"
ax.plot([x_base[i]],[y_base[i]],[z_base[i]],marker=markerstr, markersize=10, color='black',alpha = mapped_formal_charge)
pointslookup.append([[x_base[i]],[y_base[i]],[z_base[i]],eoi])
createbondinglist(bonds)
for n in range(0,len(pointslookup)):
slpoint = bondslist[n][0]
srpoint = bondslist[n][1]
color = bondslist[n][2]
colorstr = ""
if color == 1:
colorstr = "red"
elif color == 2:
colorstr = "blue"
else:
colorstr = "green"
sl_point_x = pointslookup[slpoint-1][0][0]
sl_point_y = pointslookup[slpoint-1][1][0]
sl_point_z = pointslookup[slpoint-1][2][0]
sr_point_x = pointslookup[srpoint-1][0][0]
sr_point_y = pointslookup[srpoint-1][1][0]
sr_point_z = pointslookup[srpoint-1][2][0]
bondcoors.append([sl_point_x,sl_point_y,sl_point_z,sr_point_x,sr_point_y,sr_point_z])
ax.plot((sl_point_x,sr_point_x),(sl_point_y,sr_point_y),(sl_point_z,sr_point_z),color = (colorstr))
data = [[len(pointslookup),pointslookup,bondcoors,formal_charge]]
df2 = pd.DataFrame(data,columns = columns)
test_df = test_df.append(df2,ignore_index = True)
print(test_df.head())
data = []
bondslist = []
pointslookup = []
bondcoors = []
formal_charge = 0
create_data(CID)
plt.show()
| [
"23Athreyad@students.harker.org"
] | 23Athreyad@students.harker.org |
277f296b84f1fc951d6d8bf2819ad45a0b1e03fa | ba6345d365f994fbdcea9ea65c521bdf8e4271bd | /ambiguities_of_atomic_sv/vcf_interpreters.py | 1d18f3c7187b1f2cffcc9906faa3ca90ffceb89b | [
"MIT"
] | permissive | ITBE-Lab/MSV-EVAL | 286d0976923afd37a4ae3281ea9627b83a6372ef | 5e41573e96899b634b6f50d91c1c1ee1889d7bab | refs/heads/master | 2023-07-22T16:01:12.518544 | 2023-07-18T06:48:47 | 2023-07-18T06:48:47 | 329,263,731 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,327 | py | from MS import *
from MA import *
from MSV import *
import math
import traceback
import os
logged_errors = set()
def log_error(call, error_file, interpreter_name, e=None, do_exit=True, force_log=False):
    """Report a VCF record that an interpreter could not translate.

    Each (interpreter, SV-type) combination is reported only once — tracked
    in the module-level ``logged_errors`` set — unless *force_log* is set.
    The record, the exception and its traceback are appended to
    *error_file*; with *do_exit* (the default) the whole process exits.
    """
    # don't log an error twice: key on interpreter name + SV type
    # (SVTYPE from INFO when present, the ALT string otherwise)
    key = interpreter_name + call["ALT"]
    if "SVTYPE" in call["INFO"]:
        key = interpreter_name + call["INFO"]["SVTYPE"]
    if key in logged_errors and not force_log:
        return
    logged_errors.add(key)
    print("unrecognized sv:", call, "by", interpreter_name)
    error_file.write("============== unrecognized sv ==============\n")
    error_file.write("in interpreter: " + interpreter_name + "\n")
    error_file.write(str(call))
    if not e is None:
        error_file.write("\n")
        error_file.write(str(e))
        error_file.write(traceback.format_exc())
        # NOTE(review): source indentation was lost; this trailing separator
        # may originally have been written unconditionally — confirm.
        error_file.write("\n\n\n")
    if do_exit:
        exit()
def sniffles_interpreter(call, pack, error_file):
    """Translate one Sniffles VCF record into a call tuple.

    Returns [(start, end, numeric_id, description)] on success; on any
    parsing problem the record is reported through log_error instead.
    Positions are made genome-global via pack.start_of_sequence.
    """
    def confidence(rec):
        # filtered-out records get confidence 0, otherwise the supporting
        # read-evidence count (INFO/RE)
        return 0 if rec["FILTER"] != "PASS" else int(float(rec["INFO"]["RE"]))

    try:
        info = call["INFO"]
        start = int(call["POS"]) + pack.start_of_sequence(call["CHROM"])
        end = int(info["END"]) + pack.start_of_sequence(info["CHR2"])
        if "PRECISE" in info:
            dev_start = dev_end = 0
        elif "IMPRECISE" in info:
            dev_start = math.ceil(float(info["STD_quant_start"]))
            dev_end = math.ceil(float(info["STD_quant_stop"]))
            # underflow protection: never shift a position below zero
            if start < dev_start // 2:
                dev_start = start // 2
            if end < dev_end // 2:
                dev_end = end // 2
            start -= dev_start // 2
            end -= dev_end // 2
        else:
            raise Exception("found neither precise nor imprecise in INFO")
        desc = call["ALT"] + "-conf:" + str(confidence(call))
        return [(start, end, int(call["ID"]), desc)]
    except Exception as e:
        log_error(call, error_file, "sniffles", e)
def delly_interpreter(call, pack, error_file):
    """Translate one Delly VCF record into a call tuple.

    Returns [(start, end, numeric_id, description)] on success; on any
    parsing problem the record is reported through log_error instead.
    Delly positions are 1-based, hence the -1 adjustments.
    """
    def evidence(rec):
        # filtered-out records get 0; otherwise paired-end + split-read support
        if rec["FILTER"] != "PASS":
            return 0
        total = 0
        if "PE" in rec["INFO"]:
            total += int(float(rec["INFO"]["PE"]))
        if "SR" in rec["INFO"]:
            total += int(float(rec["INFO"]["SR"]))
        return total

    def ci_width(field):
        # confidence-interval field "lo,hi" -> ceil(hi - lo)
        lo, hi = field.split(",")
        return math.ceil(float(hi) - float(lo))

    try:
        info = call["INFO"]
        start = int(call["POS"]) + pack.start_of_sequence(call["CHROM"]) - 1
        end_chrom = info["CHR2"] if "CHR2" in info else call["CHROM"]
        end = int(info["END"]) + pack.start_of_sequence(end_chrom) - 1
        if "PRECISE" in info:
            ci_start, ci_end = 0, 0
        elif "IMPRECISE" in info:
            ci_start = ci_width(info["CIPOS"])
            ci_end = ci_width(info["CIEND"])
            # underflow protection: never shift a position below zero
            if start < ci_start // 2:
                ci_start = start // 2
            if end < ci_end // 2:
                ci_end = end // 2
            start -= int(ci_start / 2)
            end -= int(ci_end / 2)
        else:
            raise Exception("found neither precise nor imprecise in INFO")
        name = call["ALT"] + " " + info["SVTYPE"]
        if "CT" in info:
            name += " " + info["CT"]
        # Delly IDs look like "DEL00000042": strip the 3-letter type + first digit
        return [(start, end, int(call["ID"][4:]), name + "-conf:" + str(evidence(call)))]
    except Exception as e:
        log_error(call, error_file, "delly", e)
bnd_mate_dict_manta = {}
manta_id = 0
def manta_interpreter(call, pack, error_file):
    """Translate one Manta VCF record into zero or more call tuples.

    Stateful: uses the module-level counter ``manta_id`` for call ids and
    ``bnd_mate_dict_manta`` to pair BND records with their mates (the first
    mate of a pair is cached; the list is emitted when the second arrives).
    On any parsing problem the record is reported through log_error.
    """
    global manta_id
    def find_confidence(call):
        # filtered-out records get 0, otherwise the record's QUAL score
        if call["FILTER"] != "PASS":
            return 0
        return int(float(call["QUAL"]))
    def find_std_from_std_to(call):
        # confidence-interval widths from CIPOS / CIEND ("lo,hi" strings)
        std_from = call["INFO"]["CIPOS"].split(",")
        if "CIEND" in call["INFO"]:
            std_to = call["INFO"]["CIEND"].split(",")
        else:
            std_to = (0, 0)
        return math.ceil(float(std_from[1]) - float(std_from[0])), math.ceil(float(std_to[1]) - float(std_to[0]))
    def find_from_and_to_pos(call):
        # genome-global coordinates; END may be absent (e.g. insertions)
        from_pos = int(call["POS"]) + pack.start_of_sequence(call["CHROM"])
        if "END" in call["INFO"]:
            to_pos = int(call["INFO"]["END"]) + pack.start_of_sequence(call["CHROM"])
        else:
            to_pos = 0
        return from_pos, to_pos
    def find_bnd_name(call):
        # e.g. ALT "]chr1:123]N" -> "BND-]-N" (bracket + last char encode orientation)
        return "BND-" + call["ALT"][1] + "-" + call["ALT"][-1]
    try:
        from_pos, to_pos = find_from_and_to_pos(call)
        if "IMPRECISE" in call["INFO"]:
            std_from, std_to = find_std_from_std_to(call)
            #underflow protection
            if from_pos < std_from//2:
                std_from = from_pos//2
            #underflow protection
            if to_pos < std_to//2:
                std_to = to_pos//2
            from_pos -= int(std_from/2)
            to_pos -= int(std_to/2)
        else:
            std_from, std_to = (0, 0)
        to_insert = []
        if call["ALT"] == "<DUP:TANDEM>":
            to_insert.append((from_pos, to_pos, str(manta_id), call["ALT"] + "-conf:" + str(find_confidence(call))))
        elif call["ALT"] == "<DUP>":
            to_insert.append((from_pos, to_pos, str(manta_id), call["ALT"] + "-conf:" + str(find_confidence(call))))
        elif call["INFO"]["SVTYPE"] == "DEL":
            # deletions: start one base earlier than POS
            to_insert.append((from_pos-1, to_pos, str(manta_id), call["ALT"] + "-conf:" + str(find_confidence(call))))
        elif call["INFO"]["SVTYPE"] == "INS":
            to_insert.append((from_pos, to_pos, str(manta_id), call["ALT"] + "-conf:" + str(find_confidence(call))))
        elif call["INFO"]["SVTYPE"] == "BND":
            if call["INFO"]["MATEID"] in bnd_mate_dict_manta:
                # second mate seen: emit both directions of the adjacency
                mate = bnd_mate_dict_manta[call["INFO"]["MATEID"]]
                from_pos = int(mate["POS"]) + pack.start_of_sequence(mate["CHROM"])
                to_pos = int(call["POS"]) + pack.start_of_sequence(call["CHROM"])
                to_insert.append((from_pos, to_pos, str(manta_id) + "_1", find_bnd_name(call) + "-conf:" + str(find_confidence(call))))
                to_insert.append((from_pos, to_pos, str(manta_id) + "_2", find_bnd_name(call) + "-conf:" + str(find_confidence(call))))
                del bnd_mate_dict_manta[call["INFO"]["MATEID"]]
            else:
                # first mate: cache until its partner record arrives
                bnd_mate_dict_manta[call["ID"]] = call
        else:
            raise Exception("could not classify call")
        manta_id += 1
        return to_insert
    except Exception as e:
        log_error(call, error_file, "manta", e)
bnd_mate_dict_gridss = {}
def gridss_interpreter(call, pack, error_file):
    """Translate one GRIDSS VCF record into zero or more call tuples.

    GRIDSS reports everything as BND records.  Mated breakends are paired
    via the module-level ``bnd_mate_dict_gridss`` cache (first mate stored,
    both tuples emitted when the second arrives); single breakends without
    MATEID are emitted as point "BND-INS" calls.  Any other record is
    reported through log_error.
    """
    def find_confidence(call):
        # filtered-out records get 0, otherwise the record's QUAL score
        if call["FILTER"] != "PASS":
            return 0
        return int(float(call["QUAL"]))
    def find_bnd_name(call):
        # bracket position/direction in ALT encodes the breakend orientation
        if call["ALT"][1] == "[":
            return "BND-fwd-rht"
        if call["ALT"][1] == "]":
            return "BND-rev-lft"
        elif call["ALT"][0] == "[":
            return "BND-rev-rht"
        elif call["ALT"][0] == "]":
            return "BND-fwd-lft"
        else:
            raise Exception("could not classify call")
    try:
        to_insert = []
        if call["INFO"]["SVTYPE"] == "BND":
            if "MATEID" in call["INFO"]:
                if call["INFO"]["MATEID"] in bnd_mate_dict_gridss:
                    # second mate seen: emit both directions of the adjacency
                    mate = bnd_mate_dict_gridss[call["INFO"]["MATEID"]]
                    from_pos = int(mate["POS"]) + pack.start_of_sequence(mate["CHROM"])
                    to_pos = int(call["POS"]) + pack.start_of_sequence(call["CHROM"])
                    # IDs look like "gridssNN_..."; [6:] strips the "gridss" prefix
                    to_insert.append((from_pos, to_pos, call["ID"][6:] + "_1", find_bnd_name(call) + "-conf:" + str(find_confidence(call))))
                    to_insert.append((from_pos, to_pos, call["ID"][6:] + "_2", find_bnd_name(call) + "-conf:" + str(find_confidence(call))))
                    del bnd_mate_dict_gridss[call["INFO"]["MATEID"]]
                else:
                    # first mate: cache until its partner record arrives
                    bnd_mate_dict_gridss[call["ID"]] = call
            else:
                # unpaired breakend: treat as a point insertion-like event
                from_pos = int(call["POS"]) + pack.start_of_sequence(call["CHROM"])
                to_insert.append((from_pos, from_pos, call["ID"][6:], "BND-INS-conf:" + str(find_confidence(call))))
        else:
            raise Exception("could not classify call")
        return to_insert
    except Exception as e:
        log_error(call, error_file, "gridss", e)
def vcf_parser(file_name):
    """Lazily parse a VCF file, yielding one record per data line.

    Each yielded record is a ``VCFFile`` mapping column name -> field text,
    except the INFO column, which is itself parsed into a nested ``VCFFile``
    (``key=value`` entries become strings, bare flags become ``True``; only
    ``key=value`` keys are recorded in ``names``, matching the original
    behaviour).
    """

    class VCFFile:
        """Dict-like view over one VCF record (or over its INFO column)."""

        def __init__(self, d, names, layer, info):
            self.data = d
            self.names = names
            self.layer = layer
            self.info = info

        def __getitem__(self, name):
            # Missing keys yield an empty list rather than raising.
            return self.data.get(name, [])

        def __contains__(self, name):
            return name in self.data

        def __str__(self):
            entry_indent = "\t" * (self.layer + 1)
            pieces = ["{"]
            for key, val in self.data.items():
                pieces.append(entry_indent + str(key) + ": " + str(val))
            return "\n".join(pieces) + "\n" + "\t" * self.layer + "}"

        def from_format(self, key, value_list_idx=-1):
            # Look up *key*'s slot in FORMAT and pull it from a sample column.
            slot = self["FORMAT"].split(":").index(key)
            return self[self.names[value_list_idx]].split(":")[slot]

        def has_format(self, key):
            return key in self["FORMAT"].split(":")

    def parse_info(field):
        entries = {}
        ordered_keys = []
        for token in field.split(";"):
            if "=" in token:
                key, value = token.split("=")
                ordered_keys.append(key)
                entries[key] = value
            else:
                # Bare flag (e.g. IMPRECISE); intentionally not added to names.
                entries[token] = True
        return VCFFile(entries, ordered_keys, 1, [])

    with open(file_name, "r") as handle:
        column_names = []
        header_lines = []
        for raw in handle:
            line = raw[:-1] if raw.endswith("\n") else raw
            if line.startswith("##"):
                header_lines.append(line)
            elif line.startswith("#"):
                column_names = line[1:].split("\t")
            else:
                record = {}
                for column, field in zip(column_names, line.split("\t")):
                    record[column] = parse_info(field) if column == "INFO" else field
                yield VCFFile(record, column_names, 0, header_lines)
"markus.rainer.schmidt@gmail.com"
] | markus.rainer.schmidt@gmail.com |
b2f818edd882f67e46d972d7a4d35799f360ed1c | 895dcde2f74d2b522d36dc4b32606d7b92f85d03 | /myproject/myproject/settings.py | 9dfc686038f07c447886b9d31ca3a1fe8035b541 | [] | no_license | prajjwalhacker/django_todo_app | 34e7029a4380feec07766aa717139c6fcfcab58b | 17d470e2aff6e106639ec1d527a3cc43c30dc0b4 | refs/heads/master | 2022-12-21T23:23:30.635019 | 2020-10-03T05:22:15 | 2020-10-03T05:22:15 | 300,204,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2z56eq720*=x&+$%c@^3f=9#l-6m%6$=*0i!b2a(h(!v0e7w-f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'Todo',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"prajjwalsoni123@gmail.com"
] | prajjwalsoni123@gmail.com |
8a597972c820bb5328d31309b1dbeb799a63f055 | 86e5f574ec2d503c82c1da7599cd2f44bfc098f3 | /process_image.py | f1d11c5a53857c159681efab11c55003e8c32083 | [] | no_license | keepitsimple/ocrtest | 54b34e88a789d43ca474fcdeac97ca5ac477b7e3 | cbd7bbd2ccd146a51df95b410754e0155fcda826 | refs/heads/master | 2016-09-10T10:39:03.421732 | 2013-11-05T20:53:18 | 2013-11-05T20:53:18 | 11,807,330 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | from scipy import ndimage
from skimage import feature
from skimage.color import rgb2gray
from skimage.io import imread, imsave
import numpy as np
from skimage.transform import resize
from sliding_window import sliding_window
class Image(object):
    """A grayscale microscopy frame plus a sliding-window HOG feature extractor.

    The image is cropped of black borders, zero-padded so its shape is
    divisible by the window shift, cut into overlapping windows, and each
    window is converted into a HOG feature vector.  Windows overlapping
    ``tagPosition`` become positive examples; the rest become negatives.
    """

    def __init__(self, imagePath, windowSize=(64, 64), shiftSize=(32, 32), tagPosition=None):
        # Path of the source image; actually loaded in prepare().
        self.imagePath = imagePath
        # (height, width) of each sliding window, in pixels.
        self.windowSize = windowSize
        # (row, column) step between consecutive windows.
        self.shiftSize = shiftSize
        # version for tagPosition creation with height and width instead of low-right corner coordinates
        # t = tagPosition
        # self.tagPosition = (t[0], t[1], t[0]+t[2], t[1]+t[2])
        # NOTE(review): tagPosition appears to be (row0, col0, row1, col1) in
        # source-image coordinates -- confirm against callers.
        self.tagPosition = tagPosition
        # Every window is resampled to this resolution before HOG extraction.
        self.finalWindowResolution = (32, 32)

    def prepare(self):
        """Load the image, crop black borders, and pad it to a multiple of the shift."""
        self.sourceImage = rgb2gray(imread(self.imagePath))
        # remove black borders from image
        iim = self.sourceImage > 0
        self.bounds = ndimage.find_objects(iim)[0]
        self.image = self.sourceImage[self.bounds[0], self.bounds[1]]
        # get new tag position after cutting the image
        if self.tagPosition:
            t, b = self.tagPosition, self.bounds
            self.tagPosition = (t[0] - b[0].start, t[1] - b[1].start, t[2] - b[0].start, t[3] - b[1].start)
        # extend image to be divisible by window shift
        imsh = self.image.shape
        self.missingRows = 0
        if imsh[0] % self.shiftSize[0] != 0:
            # Zero rows are stacked on top; the tag moves down accordingly.
            missingRows = self.shiftSize[0] - (imsh[0] % self.shiftSize[0])
            self.image = np.vstack([np.reshape(np.zeros(missingRows * imsh[1]), (missingRows, imsh[1])), self.image])
            self.missingRows = missingRows
            if self.tagPosition:
                t = self.tagPosition
                self.tagPosition = (t[0] + missingRows, t[1], t[2] + missingRows, t[3])
        imsh = self.image.shape
        self.missingColumns = 0
        if imsh[1] % self.shiftSize[1] != 0:
            # Same zero padding on the left for columns.
            missingColumns = self.shiftSize[1] - (imsh[1] % self.shiftSize[1])
            self.image = np.hstack([np.reshape(np.zeros(missingColumns * imsh[0]), (imsh[0], missingColumns)), self.image])
            self.missingColumns = missingColumns
            if self.tagPosition:
                t = self.tagPosition
                self.tagPosition = (t[0], t[1] + missingColumns, t[2], t[3] + missingColumns)

    def extractFeatures(self, positiveImageTemplate=None):
        """Slide a window over the image and collect HOG feature vectors.

        Fills ``self.positiveExamples`` / ``self.negativeExamples``.  A window
        counts as positive when at least a third of it overlaps the tag in
        both dimensions.  If ``positiveImageTemplate`` (a %-format string) is
        given, each positive window is also saved as an image file.
        """
        windowSize, shiftSize, tagPosition = self.windowSize, self.shiftSize, self.tagPosition
        # if positiveImageTemplate is not None:
        #     imsave(positiveImageTemplate % (-1,), self.image)
        # count rows/columns amount
        s = ((np.array(self.image.shape) - np.array(windowSize)) // np.array(shiftSize)) + 1
        self.windowsAmountInfo = s
        windows = sliding_window(self.image, windowSize, shiftSize)
        self.positiveExamples = []
        self.negativeExamples = []
        j = 0
        for i, w in enumerate(windows):
            # NOTE(review): on Python 3 `i / s[1]` is float division; the code
            # presumably targets Python 2 integer division -- verify.
            x, y = (i / s[1])*shiftSize[0], (i % s[1])*shiftSize[1]
            wSized = resize(w, self.finalWindowResolution)
            features = feature.hog(wSized)
            if self.tagPosition \
                and (x+windowSize[0] - tagPosition[0]) >= (windowSize[0] / 3) \
                and (y+windowSize[1] - tagPosition[1]) >= (windowSize[1] / 3) \
                and (tagPosition[2] - x) >= (windowSize[0] / 3) \
                and (tagPosition[3] - y) >= (windowSize[1] / 3):
                if positiveImageTemplate is not None:
                    imsave(positiveImageTemplate % (j,), w)
                    j += 1
                self.positiveExamples.append(features)
            else:
                self.negativeExamples.append(features)

    def process(self, positiveImageTemplate=None):
        """Run prepare() then extractFeatures(); return (positives, negatives)."""
        self.prepare()
        self.extractFeatures(positiveImageTemplate)
        return self.positiveExamples, self.negativeExamples
def process_single_image(filename, tagPosition, positiveImageTemplate=None):
    """Run the full feature pipeline on one image file.

    Returns the (positiveExamples, negativeExamples) pair from Image.process().
    """
    return Image(filename, tagPosition=tagPosition).process(
        positiveImageTemplate=positiveImageTemplate)
if __name__ == "__main__":
    # Demo run: extract features from the bundled sample frame.
    sample_path = '5_07000.jpg'
    sample_image = Image(sample_path, tagPosition=(437, 488, 453, 581))
    sample_image.process()
    # imsave('5_07000_ngr.jpg', sample_image.image)
"tasman.main@gmail.com"
] | tasman.main@gmail.com |
977f96cbafdb166e91ae9ec70b8bb92fa69656d8 | 5b115ee1a961af6987616ef4d83d45fd3c8917c7 | /blog/migrations/0010_auto_20210204_2126.py | 336256c8807e281a0f96e5bf04bbeec8cdd22811 | [] | no_license | yunsik0115/piro14dogotogether | 42b24403a3894b678738ad88ac01767642c99dea | 799cc6e35751cd419f66ec9699fc62c400c6e48a | refs/heads/master | 2023-03-28T06:34:16.863626 | 2021-03-26T13:05:18 | 2021-03-26T13:05:18 | 335,192,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # Generated by Django 2.2.1 on 2021-02-04 21:26
from django.db import migrations, models
import piroproject.utils
class Migration(migrations.Migration):
    """Point ``Post.image`` uploads at the ``uuid_upload_to`` path builder."""

    dependencies = [
        ('blog', '0009_post_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.ImageField(upload_to=piroproject.utils.uuid_upload_to),
        ),
    ]
| [
"jts159753@snu.ac.kr"
] | jts159753@snu.ac.kr |
6b9e622167f094ae64afbc50ef25bd1956b2e164 | 0fd230dcc317e641787a8d0924b39f2974daba94 | /ansible-modules/netscaler_server.py | 4d9c998e0f9694378a98b04310d99de53600e78d | [] | no_license | giorgos-nikolopoulos/netscaler-ansible-modules | 6140805adbd5e6cc56d95baf113d6a0e04f09634 | 8df6e54a1761ea40f7f83c940d749b0cc3947bf6 | refs/heads/master | 2021-01-20T03:54:42.285642 | 2017-04-27T15:19:19 | 2017-04-27T15:19:19 | 89,609,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,563 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# TODO review status and supported_by when migrating to github
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'commiter',
'version': '1.0'}
# TODO: Add appropriate documentation
DOCUMENTATION = '''
---
module: netscaler_server
short_description: Manage server configuration
description:
- Manage server configuration
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance
version_added: 2.2.3
options:
name:
description:
- Name for the server.
- "Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters."
- Can be changed after the name is created.
- Minimum length = 1
ipaddress:
description:
- IPv4 or IPv6 address of the server. If you create an IP address based server, you can specify the name of the server, instead of its IP address, when creating a service. Note. If you do not create a server entry, the server IP address that you enter when you create a service becomes the name of the server.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
# TODO: Add appropriate examples
EXAMPLES = '''
- name: Connect to netscaler appliance
local_action:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
ssl_cert_validation: no
module: netscaler_server
operation: present
name: vserver1
ipaddress: 192.168.1.1
'''
# TODO: Update as module progresses
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
'''
from ansible.module_utils.basic import AnsibleModule
import StringIO
def main():
    """Ensure a Netscaler ``server`` resource matches the desired state.

    Reads the desired configuration from the Ansible module parameters,
    compares it with the appliance state through the NITRO API, and creates,
    updates or deletes the server object accordingly (honouring check mode).
    Exits via ``module.exit_json``/``module.fail_json``.
    """
    from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines
    try:
        from nssrc.com.citrix.netscaler.nitro.resource.config.basic.server import server
        from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
        python_sdk_imported = True
    except ImportError as e:
        python_sdk_imported = False

    module_specific_arguments = dict(
        name=dict(type='str'),
        ipaddress=dict(type='str'),
    )

    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )

    # Fail the module if imports failed
    if not python_sdk_imported:
        module.fail_json(msg='Could not load nitro python sdk')

    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    client.login()

    # Instantiate Server Config object
    readwrite_attrs = ['name', 'ip', 'ipaddress']
    readonly_attrs = []
    # NOTE(review): mapping is defined but never passed to ConfigProxy below;
    # confirm whether ConfigProxy should receive it before deleting.
    equivalent_attributes = {
        'ip': ['ipaddress',]
    }

    server_proxy = ConfigProxy(
        actual=server(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
    )

    def server_exists():
        # True when a server with the requested name exists on the appliance.
        if server.count_filtered(client, 'name:%s' % module.params['name']) > 0:
            return True
        else:
            return False

    def server_identical():
        # True when the configured server's attributes match the module params.
        if server.count_filtered(client, 'name:%s' % module.params['name']) == 0:
            return False
        server_list = server.get_filtered(client, 'name:%s' % module.params['name'])
        if server_proxy.has_equal_attributes(server_list[0]):
            return True
        else:
            return False

    def diff_list():
        # Bugfix: a trailing comma previously wrapped the diff dict in a
        # one-element tuple, which broke the ``diff=`` failure output below.
        return server_proxy.diff_object(server.get_filtered(client, 'name:%s' % module.params['name'])[0])

    try:
        # Apply appropriate operation
        if module.params['operation'] == 'present':
            if not server_exists():
                if not module.check_mode:
                    server_proxy.add()
                    server_proxy.update()
                    client.save_config()
                module_result['changed'] = True
            elif not server_identical():
                if not module.check_mode:
                    server_proxy.update()
                    client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False

            # Sanity check for result
            if not module.check_mode:
                if not server_exists():
                    module.fail_json(msg='Server does not seem to exist', **module_result)
                if not server_identical():
                    module.fail_json(
                        msg='Server is not configured according to parameters given',
                        diff=diff_list(),
                        **module_result
                    )
        elif module.params['operation'] == 'absent':
            if server_exists():
                if not module.check_mode:
                    server_proxy.delete()
                    client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False

            # Sanity check for result
            if not module.check_mode:
                if server_exists():
                    module.fail_json(msg='Server seems to be present', **module_result)

        module_result['actual_attributes'] = server_proxy.get_actual_rw_attributes()
    except nitro_exception as e:
        msg = "nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message
        module.fail_json(msg=msg, **module_result)

    client.logout()
    module.exit_json(**module_result)


if __name__ == "__main__":
    main()
| [
"giorgos.nikolopoulos@citrix.com"
] | giorgos.nikolopoulos@citrix.com |
8b183bf27487b5db210287a08477ad86698afa14 | 7d328fa9c4b336f28fa357306aad5483afa2d429 | /BinTreeFromSortedArray.py | 2d3addba12667610b79141ff6049c7dda7f413fa | [] | no_license | ktyagi12/LeetCode | 30be050f1e2fcd16f73aa38143727857cc943536 | 64e68f854b327ea70dd1834de25e756d64957514 | refs/heads/master | 2021-07-01T21:24:26.765487 | 2021-05-09T11:42:50 | 2021-05-09T11:42:50 | 230,497,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | #Problem available at: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/submissions/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        """Build a height-balanced BST from an ascending array (LeetCode 108).

        Uses index recursion instead of list slicing; the midpoint choice
        matches ``len(sub) // 2`` of the slicing formulation, so the produced
        tree is identical.
        """
        def build(lo: int, hi: int) -> TreeNode:
            # Empty range -> no subtree.
            if lo > hi:
                return None
            # Same midpoint that len(nums[lo:hi + 1]) // 2 would select.
            mid = lo + (hi - lo + 1) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(nums) - 1)
| [
"karishmatyagi12@gmail.com"
] | karishmatyagi12@gmail.com |
a9585d2fb2a2cf450cb6557969f377511254d318 | a4bc6525f3c73957d6fd7678f5a6420d2b48e1c0 | /userauth/forms.py | 51fb2848bdd6fe2b1b22f7f3316c9aeb761b5a65 | [] | no_license | Srikrishnayaji/Expenditure-web-app | d2d1175fbd1500a90732912f4ab30be634689bbd | f6fb01936393c6207634dedaab52d19568c386cd | refs/heads/master | 2020-05-18T12:32:24.657699 | 2019-05-07T23:19:35 | 2019-05-07T23:19:35 | 184,411,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from django import forms
from django.contrib.auth.models import User
class User_register_form(forms.ModelForm):
    """Registration form for Django's built-in ``User`` with password confirmation.

    Raises a form-level ``ValidationError`` when the two password fields do
    not match (previously a mismatch fell through and ``clean`` implicitly
    returned ``None``, silently accepting the form).
    """
    password = forms.CharField(widget=forms.PasswordInput)
    password_repeat = forms.CharField(widget=forms.PasswordInput)
    username = forms.CharField()
    email = forms.CharField(widget=forms.EmailInput)

    class Meta:
        model = User
        fields = ['username', 'email', 'password']

    def clean(self):
        """Cross-field validation: both password entries must agree."""
        cleaned_data = super(User_register_form, self).clean()
        # .get() avoids a KeyError when a field already failed its own validation.
        password = cleaned_data.get('password')
        conf_password = cleaned_data.get('password_repeat')
        if password != conf_password:
            raise forms.ValidationError('Passwords do not match.')
        return cleaned_data
| [
"noreply@github.com"
] | Srikrishnayaji.noreply@github.com |
15225b8ed699b8710acd02ca79f4d765e1fdcdbf | 150af06564fbd615479d67385e39b491d55a2ac2 | /examples/aio.py | c590d077ca02df6b350e0b14348466c5b12f2d8d | [
"MIT"
] | permissive | colanconnon/graphql-ws | 3d340abe167a7202cca858fe86d829dd700dc99a | 3df53014dc60762007e2669d45135fb0f574e759 | refs/heads/master | 2021-05-07T17:36:38.551202 | 2017-10-25T21:16:27 | 2017-10-25T21:16:27 | 108,750,259 | 0 | 0 | null | 2017-10-29T16:21:07 | 2017-10-29T16:21:07 | null | UTF-8 | Python | false | false | 1,645 | py | from aiohttp import web, WSMsgType
from template import render_graphiql
from schema import schema
from graphql import format_error
import json
from graphql_ws import WebSocketSubscriptionServer
async def graphql_view(request):
    """Execute a GraphQL query posted as JSON and return the JSON result."""
    body = await request.json()
    result = await schema.execute(body.get('query', ''), return_promise=True)
    payload = {}
    if result.errors:
        payload['errors'] = [format_error(err) for err in result.errors]
    if result.data:
        payload['data'] = result.data
    return web.Response(
        text=json.dumps(payload),
        headers={'Content-Type': 'application/json'},
    )
async def graphiql_view(request):
    """Serve the GraphiQL in-browser IDE page."""
    return web.Response(
        text=render_graphiql(),
        headers={'Content-Type': 'text/html'},
    )
# Single shared subscription server bound to the GraphQL schema.
subscription_server = WebSocketSubscriptionServer(schema)


async def subscriptions(request):
    """Upgrade the request to a graphql-ws WebSocket and hand it to the
    subscription server until the client disconnects."""
    socket = web.WebSocketResponse(protocols=('graphql-ws',))
    await socket.prepare(request)
    await subscription_server.handle(socket)
    return socket
# Route table: GraphQL over HTTP (GET/POST), GraphiQL IDE, and the
# subscriptions WebSocket endpoint; then start the development server.
app = web.Application()
app.router.add_get('/subscriptions', subscriptions)
app.router.add_get('/graphiql', graphiql_view)
app.router.add_get('/graphql', graphql_view)
app.router.add_post('/graphql', graphql_view)
web.run_app(app, port=8000)
| [
"me@syrusakbary.com"
] | me@syrusakbary.com |
6086d4474d2f806d1f2455f22c382cf6bc687c6d | 2eb2460c76e5e6f268c1b757a0c5793f96b86c02 | /band3_script.py | 3f1b74fd40aa334d7225e321f61450e0aa91b864 | [] | no_license | ecell/microscope | 02a9df0bde46f5a01fc75d80a01e100008f4feeb | 15a0adda18e6835d230e5230fa132f9ef0fc6f0c | refs/heads/master | 2020-07-05T07:19:53.925014 | 2013-07-19T06:41:21 | 2013-07-19T06:41:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | """
band3_script.py:
User script to create the image from the simulated Epifluoroscence Microscopy (EPIFM)
"""
import sys
import os
#from epifm_handler import EPIFMConfigs, EPIFMVisualizer
from kinesin_handler import KinesinConfigs, KinesinVisualizer
def test_b3c(t0, t1) :
    """Render simulated EPIFM frames of the band3-cluster dataset.

    :param t0: start time of the rendered interval (seconds)
    :param t1: end time of the rendered interval (seconds)

    Configures the optical train (laser, beam expander, fluorophore model,
    objective, lenses, EMCCD detector), points it at the lattice CSV data,
    and writes one frame per second into ``./images_b3c``.
    """
    # create EPIF Microscopy
    epifm = KinesinConfigs()
    epifm.set_LightSource(source_type='LASER', wave_mode='TEM00', M2_factor=1.00, wave_length=473, power=10e-3, radius=0.32e-3)
    epifm.set_BeamExpander(expander_type='Keplerian', focal_length1=300e-3, focal_length2=20e-3, pinhole_radius=23e-6)
    #epifm.set_Fluorophore(fluorophore_type='Tetramethylrhodamine(TRITC)')
    epifm.set_Fluorophore(fluorophore_type='Gaussian', wave_length=578, width=(10.0, 20.0))
    #epifm.set_Fluorophore(fluorophore_type='Point-like', wave_length=578)
    epifm.set_Objective(NA=1.49, Nm=1.37, focal_length=1.9e-3, efficiency=0.90)
    #epifm.set_DichroicMirror('FF562-Di03-25x36')
    #epifm.set_EmissionFilter('FF01-593_40-25')
    epifm.set_TubeLens1(focal_length=160e-3)
    epifm.set_ScanLens(focal_length=50e-3)
    epifm.set_TubeLens2(focal_length=200e-3)
    epifm.set_Detector(detector='EMCCD', zoom=1, emgain=100, pixel_length=0.16e-6, focal_point=(0.0,0.5,0.5), \
			start_time=t0, end_time=t1, fps=1, exposure_time=1)
    epifm.set_Movie(image_file_dir='./images_b3c', movie_filename='./movies/band3_cluster.mp4')
    epifm.set_DataFile(['./data/lattice/band3_cluster.csv'])

    # create image and movie
    create = KinesinVisualizer(configs=epifm)
    #create.get_plots(plot_filename='./plots/epifm_plots.pdf')
    # Frame generation only; movie encoding is currently disabled.
    create.output_frames(num_div=16)
    #create.output_movie(num_div=16)
if __name__ == "__main__":
t0 = float(sys.argv[1])
t1 = float(sys.argv[2])
test_b3c(t0, t1)
| [
"onoue@likr-lab.com"
] | onoue@likr-lab.com |
51b9d85a67e999addd2899a420954e72eea8ab63 | 978248bf0f275ae688f194593aa32c267832b2b6 | /xlsxwriter/test/comparison/test_table14.py | f0690c66bde3644bc9256ba5ff345a5604768e7d | [
"BSD-2-Clause-Views"
] | permissive | satish1337/XlsxWriter | b0c216b91be1b74d6cac017a152023aa1d581de2 | 0ab9bdded4f750246c41a439f6a6cecaf9179030 | refs/heads/master | 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """Compare a workbook produced by XlsxWriter against one saved by Excel."""

    def setUp(self):
        self.maxDiff = None

        base_name = 'table14.xlsx'
        comparison_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = comparison_dir + '_test_' + base_name
        self.exp_filename = comparison_dir + 'xlsx_files/' + base_name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with tables."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Column formats (creation order fixes the dxf indices in the file).
        number_formats = [
            workbook.add_format({'num_format': '0.00;[Red]0.00', 'dxf_index': 2}),
            workbook.add_format({'num_format': '0.00_ ;\-0.00\ ', 'dxf_index': 1}),
            workbook.add_format({'num_format': '0.00_ ;[Red]\-0.00\ ', 'dxf_index': 0}),
        ]

        rows = [
            ['Foo', 1234, 2000, 4321],
            ['Bar', 1256, 4000, 4320],
            ['Baz', 2234, 3000, 4332],
            ['Bop', 1324, 1000, 4333],
        ]

        worksheet.set_column('C:F', 10.288)
        worksheet.add_table('C2:F6', {
            'data': rows,
            # First column unformatted, the rest take the formats above.
            'columns': [{}] + [{'format': fmt} for fmt in number_formats],
        })

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
ca757f83faaeeff0ea90f0ec3e5f20363038a469 | 7366af7fce80919f2620f6a55265dbb66ccf49e9 | /openstack_heat/pp.py | 9911ab53d9517ffadfce8058586f16bb5ea42671 | [] | no_license | gitkwalsh/pocshare | 7455572573903b96bd2e8523f62e1fdf88144fdb | 436bcc02f9999915a21f49f15ec50b61ea3da7b8 | refs/heads/master | 2020-06-11T15:26:11.142117 | 2017-01-27T04:52:44 | 2017-01-27T04:52:44 | 75,640,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import os, sys
buf ="use csadata;\ndelete from picklist where ptype='%s';\n" % sys.argv[1]
f= open(sys.argv[2],'r')
for l in f:
ar = l.split(',')
buf = buf + 'insert into picklist (pvalue,pdisplay,pdesc,ptype,ptype1) values ("%s","%s","%s","%s","");\n' % (ar[1].replace("\n",""),ar[0],ar[0].replace("\n",""),sys.argv[1])
f.close()
print buf
| [
"kwalshde@optonline.net"
] | kwalshde@optonline.net |
29364cf4b5cd65da0cd388cc764c62c489ae59e4 | 026a7252f1f27741b37217fa756b5dbbd1b94ff8 | /support/utilities/obj_geometa/obj_geometa.py | 201336e8a96ae437ebeff9d8fa2a9ac32922ed12 | [
"Apache-2.0"
] | permissive | Kitware/Danesfield-App | c987ca6bdec9682342dce4785043f0d368bb2f6f | c9557039af7027278fa15f2c76427a8d7261750b | refs/heads/master | 2023-08-31T20:30:37.116095 | 2022-11-18T23:12:47 | 2022-11-18T23:12:47 | 159,849,781 | 35 | 7 | Apache-2.0 | 2023-04-29T23:32:30 | 2018-11-30T16:26:34 | Python | UTF-8 | Python | false | false | 6,978 | py | #!/usr/bin/env python
"""
Set geospatial metadata on a Girder item for an OBJ file.
Requires information from 3 files:
- The OBJ file.
- A text file containing 3 lines with floating point values that indicate a
global (x, y, z), offset.
- A reference GeoTIFF image in the AOI from which to get the source coordinate
reference system.
Requires Python 3.
Tip to install gdal Python bindings on Ubuntu using pip:
Install the following packages:
- libgdal-dev
- python3-dev
Run:
pip install --global-option=build_ext --global-option="-I/usr/include/gdal" GDAL==$(gdal-config --version)
"""
import argparse
import gdal
import girder_client
import json
import logging
import os
import osr
import pyproj
import shapely
import sys
import tempfile
from shapely.geometry import MultiPoint
from pathlib import Path
def readOffsetFile(name):
    """
    Read offset file with three floating point numbers representing x, y, and z
    offsets on separate lines.  Lines starting with '#' are ignored; exactly
    three values are required, otherwise a RuntimeError is raised.
    """
    values = []
    with open(name, 'r') as handle:
        for raw in handle:
            if not raw.startswith('#'):
                values.append(float(raw))
    if len(values) != 3:
        raise RuntimeError('Offset file must contain 3 floating point values')
    return values
def readObjFileVertices(name):
"""
Read the vertices from an OBJ file. Returns a generator that yields each (x,y,z) vertex.
"""
with open(name, 'r') as f:
for line in f:
if line.startswith('#'):
continue
if line.startswith('v '):
line = line.strip()
coords = line[2:].split(' ')
if len(coords) != 3:
raise RuntimeError('Vertex definition must contain 3 floating point values')
coords = [float(coord) for coord in coords]
yield coords
continue
def getProjection(name):
"""
Get the projection from a geospatial image file. Returns a PROJ.4 string.
"""
image = gdal.Open(name, gdal.GA_ReadOnly)
if image is None:
raise RuntimeError('Unable to open image')
projection = image.GetProjection()
srs = osr.SpatialReference(wkt=projection)
return pyproj.Proj(srs.ExportToProj4())
# From https://github.com/OpenGeoscience/girder_geospatial/blob/9c928d5/geometa/__init__.py#L12
def clamp(number, lowerBound, upperBound):
    """Clamp number into the bounds; equivalent to max(lowerBound, min(number, upperBound))."""
    capped = number if number <= upperBound else upperBound
    return capped if capped >= lowerBound else lowerBound
# Based on https://github.com/OpenGeoscience/girder_geospatial/blob/9c928d5/geometa/__init__.py#L16
def boundsToGeoJson(bounds, sourceProj, destProj):
    """Reproject (xmin, ymin, xmax, ymax) bounds from sourceProj to destProj
    and return them as a GeoJSON polygon mapping, clamped to valid WGS84
    longitude/latitude ranges.
    """
    LONGITUDE_RANGE = (-180.0, 180.0)
    LATITUDE_RANGE = (-90.0, 90.0)
    try:
        # Transform the lower-left and upper-right corners independently.
        xmin, ymin = pyproj.transform(sourceProj, destProj, *bounds[:2])
        xmax, ymax = pyproj.transform(sourceProj, destProj, *bounds[2:])
        wgs84_bounds = shapely.geometry.Polygon.from_bounds(
            clamp(xmin, *LONGITUDE_RANGE),
            clamp(ymin, *LATITUDE_RANGE),
            clamp(xmax, *LONGITUDE_RANGE),
            clamp(ymax, *LATITUDE_RANGE))
        return shapely.geometry.mapping(wgs84_bounds)
    except RuntimeError:
        # NOTE(review): failure path returns a str while success returns a
        # dict -- callers must handle both; confirm this is intentional.
        return ''
def getGeospatialMetadata(sourceProj, destProj, offsetFileName, objFileName):
    """
    Get geospatial metadata object compatible with the girder_geospatial plugin schema.

    :param sourceProj: Source projection
    :type sourceProj: pyproj.Proj
    :param destProj: Destination projection
    :type destProj: pyproj.Proj
    :param offsetFileName: Name of offset file
    :type offsetFileName: str
    :param objFileName: Name of OBJ file
    :type objFileName: str
    """
    # Read vertices in OBJ file
    points = list(readObjFileVertices(objFileName))

    # Read offset from text file
    offset = readOffsetFile(offsetFileName)

    # Compute bounds.  Shapely bounds are 2D (minx, miny, maxx, maxy); the
    # vertices' z components do not appear in the result.
    multiPoint = MultiPoint(points)
    bounds = multiPoint.bounds

    # Apply offset to bounds (only the x and y offsets are relevant here)
    offsetBounds = [
        bounds[0] + offset[0],
        bounds[1] + offset[1],
        bounds[2] + offset[0],
        bounds[3] + offset[1]
    ]

    # Compute GeoJSON bounds in destination projection
    geoJsonBounds = boundsToGeoJson(offsetBounds, sourceProj, destProj)

    return {
        'crs': sourceProj.srs,
        'nativeBounds': {
            'left': offsetBounds[0],
            'bottom': offsetBounds[1],
            'right': offsetBounds[2],
            'top': offsetBounds[3]
        },
        'bounds': geoJsonBounds,
        'type_': 'vector',
        'driver': 'OBJ'
    }
def main(args):
    """Download the OBJ/offset/GeoTIFF trio from Girder, compute geospatial
    metadata, and write it back to the OBJ file's parent item.

    :param args: command-line argument list (without the program name)
    """
    # Configure argument parser
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--api-url',
        type=str,
        required=True,
        help='Girder API URL')
    parser.add_argument(
        '--obj-file-id',
        type=str,
        required=True,
        help='OBJ file ID')
    parser.add_argument(
        '--offset-file-id',
        type=str,
        required=True,
        help='Offset text file ID')
    parser.add_argument(
        '--tiff-file-id',
        type=str,
        required=True,
        help='GeoTIFF file ID of image in AOI')

    # Parse arguments
    args = parser.parse_args(args)

    # Get Girder API key from environment
    apiKey = os.environ.get('GIRDER_API_KEY')
    if apiKey is None:
        raise RuntimeError('GIRDER_API_KEY environment variable must be set')

    # Create and authenticate Girder client
    client = girder_client.GirderClient(apiUrl=args.api_url)
    client.authenticate(apiKey=apiKey)

    # Download files to temporary directory (cleaned up automatically)
    with tempfile.TemporaryDirectory(prefix='obj-geospatial-metadata-') as tempDir:
        tempDirPath = Path(tempDir)
        objFileName = (tempDirPath / 'model.obj').as_posix()
        offsetFileName = (tempDirPath / 'offset.txt').as_posix()
        tiffFileName = (tempDirPath / 'image.tiff').as_posix()
        client.downloadFile(args.obj_file_id, path=objFileName)
        client.downloadFile(args.offset_file_id, path=offsetFileName)
        client.downloadFile(args.tiff_file_id, path=tiffFileName)

        # Get source projection from image
        sourceProj = getProjection(tiffFileName)

        # Destination projection (WGS84 lon/lat)
        destProj = pyproj.Proj(init='epsg:4326')

        logging.info('sourceProj: {}'.format(sourceProj.srs))
        logging.info('destProj: {}'.format(destProj.srs))

        # Get geospatial metadata
        metadata = getGeospatialMetadata(sourceProj, destProj, offsetFileName, objFileName)
        logging.info('geometa:\n{}'.format(json.dumps(metadata, indent=4)))

        # Update item's geospatial metadata via the girder_geospatial endpoint
        objFile = client.getFile(args.obj_file_id)
        client.put('/item/{}/geometa'.format(objFile['itemId']), parameters={
            'geometa': json.dumps(metadata)
        })
})
if __name__ == '__main__':
loglevel = os.environ.get('LOGLEVEL', 'WARNING').upper()
logging.basicConfig(level=loglevel)
main(sys.argv[1:])
| [
"matthew.ma@kitware.com"
] | matthew.ma@kitware.com |
e5c4b6d4c1599915e9426a9c04b64e22883ba6cc | efac669c3351e2b4055d575638205199b9296680 | /pytorch_lightning/tuner/tuning.py | b1a38bd27688ca53c2f8926ab1afb36155bcdff2 | [
"Apache-2.0"
] | permissive | peteriz/pytorch-lightning | 5c90456f57b9cbe4688d71999c8a8240f799a7c6 | 49a4a36ad45b937dd0124ecfb08eb7400dbf3950 | refs/heads/master | 2022-03-15T19:08:00.991416 | 2022-03-08T18:10:18 | 2022-03-08T18:10:18 | 235,549,600 | 0 | 0 | Apache-2.0 | 2020-02-13T10:03:24 | 2020-01-22T10:28:16 | Python | UTF-8 | Python | false | false | 9,167 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Union
import pytorch_lightning as pl
from pytorch_lightning.trainer.states import TrainerStatus
from pytorch_lightning.tuner.batch_size_scaling import scale_batch_size
from pytorch_lightning.tuner.lr_finder import _LRFinder, lr_find
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
class Tuner:
"""Tuner class to tune your model."""
def __init__(self, trainer: "pl.Trainer") -> None:
self.trainer = trainer
def on_trainer_init(self, auto_lr_find: Union[str, bool], auto_scale_batch_size: Union[str, bool]) -> None:
self.trainer.auto_lr_find = auto_lr_find
self.trainer.auto_scale_batch_size = auto_scale_batch_size
def _tune(
self,
model: "pl.LightningModule",
scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,
lr_find_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Optional[Union[int, _LRFinder]]]:
scale_batch_size_kwargs = scale_batch_size_kwargs or {}
lr_find_kwargs = lr_find_kwargs or {}
# return a dict instead of a tuple so BC is not broken if a new tuning procedure is added
result = {}
self.trainer.strategy.connect(model)
is_tuning = self.trainer.auto_scale_batch_size or self.trainer.auto_lr_find
if self.trainer._accelerator_connector.is_distributed and is_tuning:
raise MisconfigurationException(
"`trainer.tune()` is currently not supported with"
f" `Trainer(strategy={self.trainer.strategy.strategy_name!r})`."
)
# Run auto batch size scaling
if self.trainer.auto_scale_batch_size:
if isinstance(self.trainer.auto_scale_batch_size, str):
scale_batch_size_kwargs.setdefault("mode", self.trainer.auto_scale_batch_size)
result["scale_batch_size"] = scale_batch_size(self.trainer, model, **scale_batch_size_kwargs)
# Run learning rate finder:
if self.trainer.auto_lr_find:
lr_find_kwargs.setdefault("update_attr", True)
result["lr_find"] = lr_find(self.trainer, model, **lr_find_kwargs)
self.trainer.state.status = TrainerStatus.FINISHED
return result
def _run(self, *args: Any, **kwargs: Any) -> None:
"""`_run` wrapper to set the proper state during tuning, as this can be called multiple times."""
self.trainer.state.status = TrainerStatus.RUNNING # last `_run` call might have set it to `FINISHED`
self.trainer.training = True
self.trainer._run(*args, **kwargs)
self.trainer.tuning = True
def scale_batch_size(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, "pl.LightningDataModule"]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional["pl.LightningDataModule"] = None,
mode: str = "power",
steps_per_trial: int = 3,
init_val: int = 2,
max_trials: int = 25,
batch_arg_name: str = "batch_size",
) -> Optional[int]:
"""Iteratively try to find the largest batch size for a given model that does not give an out of memory
(OOM) error.
Args:
model: Model to tune.
train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a
:class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.
In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.
val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
mode: Search strategy to update the batch size:
- ``'power'`` (default): Keep multiplying the batch size by 2, until we get an OOM error.
- ``'binsearch'``: Initially keep multiplying by 2 and after encountering an OOM error
do a binary search between the last successful batch size and the batch size that failed.
steps_per_trial: number of steps to run with a given batch size.
Ideally 1 should be enough to test if a OOM error occurs,
however in practise a few are needed
init_val: initial batch size to start the search with
max_trials: max number of increase in batch size done before
algorithm is terminated
batch_arg_name: name of the attribute that stores the batch size.
It is expected that the user has provided a model or datamodule that has a hyperparameter
with that name. We will look for this attribute name in the following places
- ``model``
- ``model.hparams``
- ``trainer.datamodule`` (the datamodule passed to the tune method)
"""
self.trainer.auto_scale_batch_size = True
result = self.trainer.tune(
model,
train_dataloaders=train_dataloaders,
val_dataloaders=val_dataloaders,
datamodule=datamodule,
scale_batch_size_kwargs={
"mode": mode,
"steps_per_trial": steps_per_trial,
"init_val": init_val,
"max_trials": max_trials,
"batch_arg_name": batch_arg_name,
},
)
self.trainer.auto_scale_batch_size = False
return result["scale_batch_size"]
def lr_find(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, "pl.LightningDataModule"]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional["pl.LightningDataModule"] = None,
min_lr: float = 1e-8,
max_lr: float = 1,
num_training: int = 100,
mode: str = "exponential",
early_stop_threshold: float = 4.0,
update_attr: bool = False,
) -> Optional[_LRFinder]:
"""Enables the user to do a range test of good initial learning rates, to reduce the amount of guesswork in
picking a good starting learning rate.
Args:
model: Model to tune.
train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a
:class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.
In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.
val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
min_lr: minimum learning rate to investigate
max_lr: maximum learning rate to investigate
num_training: number of learning rates to test
mode: Search strategy to update learning rate after each batch:
- ``'exponential'`` (default): Will increase the learning rate exponentially.
- ``'linear'``: Will increase the learning rate linearly.
early_stop_threshold: threshold for stopping the search. If the
loss at any point is larger than early_stop_threshold*best_loss
then the search is stopped. To disable, set to None.
update_attr: Whether to update the learning rate attribute or not.
Raises:
MisconfigurationException:
If learning rate/lr in ``model`` or ``model.hparams`` isn't overridden when ``auto_lr_find=True``,
or if you are using more than one optimizer.
"""
self.trainer.auto_lr_find = True
result = self.trainer.tune(
model,
train_dataloaders=train_dataloaders,
val_dataloaders=val_dataloaders,
datamodule=datamodule,
lr_find_kwargs={
"min_lr": min_lr,
"max_lr": max_lr,
"num_training": num_training,
"mode": mode,
"early_stop_threshold": early_stop_threshold,
"update_attr": update_attr,
},
)
self.trainer.auto_lr_find = False
return result["lr_find"]
| [
"noreply@github.com"
] | peteriz.noreply@github.com |
51f2e463a518c7a1031c4bed1fb2e6a8fde9357b | eb798836ab2cb014022c15dbdba1f0b9d2ccab1f | /vuuvv/core.py | 5b26a9f26b1b7a7d082a152365e8df46ea32a7c4 | [] | no_license | vuuvv/vuuvv-test | 16ce5cbe0f3942d8d702b93dac2ffcae9b02393f | d138554ae323d5421b8964ff3210950956bf9733 | refs/heads/master | 2021-04-09T17:22:40.158651 | 2012-02-16T09:29:04 | 2012-02-16T09:29:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | import re
import sys
from flask import Flask
from flask import current_app as app, g
from sqlalchemy.engine.url import URL
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData, Table
from sqlalchemy.orm import scoped_session, sessionmaker, mapper
def camel_convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class Application(Flask):
def __init__(self, import_name, **kw):
Flask.__init__(self, import_name, **kw)
self.load_config()
self.register_blueprints()
self.init_database()
def load_config(self):
from vuuvv import default_config
self.config.from_object(default_config)
import config
self.config.from_object(config)
def register_blueprints(self):
blue_prints = self.config['BLUEPRINTS']
default = None
if self.config['DEFAULT_BLUEPRINT'] is None and blue_prints:
default = self.config['DEFAULT_BLUEPRINT'] = blue_prints[0]
for b in blue_prints:
module = __import__(b)
url_prefix = None if b == default else b
self.register_blueprint(module.blueprint, url_prefix=url_prefix)
def connect_database(self, reflect_all=False):
config = self.config
args = [config[name] for name in ('DRIVERNAME', 'USERNAME',
'PASSWORD', 'HOST', 'PORT', 'DATABASE')]
url = URL(*args)
self.db_engine = create_engine(str(url), echo = config['DEBUG'])
self.db_meta = MetaData()
if reflect_all:
self.db_meta.reflect(bind=self.db_engine)
self.db_session_cls = scoped_session(sessionmaker(
autocommit=False, autoflush=False, bind=self.db_engine))
def init_database(self):
self.connect_database()
self.find_models()
@self.before_request
def func():
g.db_session = app.db_session_cls()
@self.teardown_request
def func(exc):
g.db_session.commit()
def find_models(self):
blue_prints = self.config['BLUEPRINTS']
meta = self.db_meta
engine = self.db_engine
for b in blue_prints:
name = "%s.models" % b
__import__(name)
m = sys.modules[name]
for modelname in m.__all__:
model = getattr(m, modelname)
tablename = camel_convert(modelname)
table = Table(tablename, meta, autoload=True, autoload_with=engine)
mapper(model, table)
| [
"vuuvv@qq.com"
] | vuuvv@qq.com |
f077dfaabd3e27708dc50b30c0da69f23e239938 | 7d7462ca9b94934c71886a1fcf09ea7f16ca94b8 | /python/python_assignment4/python_4/pro9.py | 5ee8f42f4393f711ca3b6d81464c8137e7f232f9 | [] | no_license | karthik1017/mca | 16de1c0b87e5d5617de61dc08a5d61d6c7cf8ec7 | b2182c2bf62151150304aad23d476ee956dbf605 | refs/heads/master | 2020-04-22T20:10:53.308901 | 2019-08-14T07:22:30 | 2019-08-14T07:22:30 | 170,632,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # 9. Recursively convert a decimal number to binary number. Handle the exceptions.
try:
dec = int(input('enter a decimal number to be converted: '))
except ValueError as err:
print('not a valid input:')
def convertToBinary(n):
if n > 1:
convertToBinary(n//2)
print(n % 2,end = '')
convertToBinary(dec)
| [
"karthikdevaraj1017@gmail.com"
] | karthikdevaraj1017@gmail.com |
6360d7934685b7b4440fe3735b5347041e34ca39 | bd52c0dfc1b599632aecd0991764f86df2a31666 | /X5gR.py | c939ec6e587f53f50c747bbf0988864ed324d86a | [] | no_license | techgymjp/techgym_python_en | 10ba970a325210a880d3c1a99cab3d09e3bb5bbb | 772ee4d7b1e789fa3918aaa4b523b1add7f67f33 | refs/heads/master | 2023-01-28T14:29:04.682604 | 2023-01-13T13:47:28 | 2023-01-13T13:47:28 | 206,546,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | friends = ['Tom', 'Jane', 'Brian', 'Carol', 'Jack']
i = 1
for friend in friends:
print(f"{i}: {friend}")
i += 1
| [
"tanaka@rexvirt.com"
] | tanaka@rexvirt.com |
c8731d5db8772877d57210962433f592482e044c | fe158f3c17bf65f7ee01c37487b421e321121d6e | /scripts/synthmix.py | c68d667c3bf4911361f73f701e6ba05d75fbf966 | [
"MIT"
] | permissive | schneidereits/EO_hyperspec_hub | 23c11b030ab6a489c0dbfef751b4c0646fc2f786 | 40bc35d55776990e84d8faf1a40fc9d563ec446e | refs/heads/main | 2023-09-05T01:36:17.993647 | 2021-11-15T12:50:52 | 2021-11-15T12:50:52 | 377,424,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,367 | py | # =====================================================================================================================
# Linear spectral mixing to create synthetic training data
# =====================================================================================================================
import numpy as np
import pandas as pd
# linear-spectral-mixing analysis function
def lsma(df, col_response=None, response_id=None, n=1000, within_class_mixture=True, response_mixture=False,
includeEndmember=True, targetRange=(0, 1), mix_complexity=None, p_mix_complexity=None):
"""
Linear Spectral Mixture Analysis function to create synthetic training data from endmembers.
:param df: Dataframe containing input features to be mixed as well as the response variable.
:param col_response: (string) Column name of response variable.
:param response_id: (int) Numeric value corresponding to target class of "col_response".
:param n: (int) Number of synthetic features to create.
:param within_class_mixture: (bool) Allow mixtures within classes apart from target class.
:param response_mixture: (bool) Allow mixtures within the target class.
:param includeEndmember: (bool) Include input endmembers in output.
:param targetRange: (int, tuple) Tuple of boundary values of the desired target range.
:param mix_complexity: (int, list) List of integers referring to number of classes to be mixed. E.g. [2, 3] means
that there will be mixtures of 2 and 3 classes
:param p_mix_complexity: (float, list) List of floats referring to the probabilities associated with the
mix_complexity, hence the expected frequency of certain mixtures.
:return: Dataframe with synthetic mixtures of predictor and response variable.
"""
if mix_complexity is None:
mix_complexity = [2, 3, 4]
if p_mix_complexity is None:
p_mix_complexity = [0.7, 0.2, 0.1]
response = np.asarray(df[col_response])
unique_response = np.unique(response)
classes = len(unique_response)
features = np.asarray(df.drop([col_response], axis=1)).T # bands in rows features in columns
classLikelihoods = {i + 1: len(np.where(response == i + 1)[0]) / len(response) for i in range(classes)}
# cache label indices and setup 0%/100% fractions from class labels
indices = dict()
zeroOneFractions = np.zeros((classes, features.shape[1]), dtype=np.float32)
for label in range(1, classes + 1):
indices[label] = np.where(response == label)[0]
zeroOneFractions[label - 1, indices[label]] = 1.
# create mixtures
mixtures = list()
fractions = list()
classLikelihoods2 = {k: v / (1 - classLikelihoods[response_id]) for k, v in classLikelihoods.items() if k != response_id}
for i in range(n):
# get mixing complexity
complexity = np.random.choice(mix_complexity, p=p_mix_complexity)
# define current target class
l_response = [response_id]
# ...
if within_class_mixture:
if response_mixture:
l_response.extend(np.random.choice(list(classLikelihoods.keys()), size=complexity - 1, replace=True,
p=list(classLikelihoods.values())))
else:
l_response.extend(np.random.choice(list(classLikelihoods2.keys()), size=complexity - 1, replace=True,
p=list(classLikelihoods2.values())))
else:
l_response.extend(np.random.choice(list(classLikelihoods2.keys()), size=complexity - 1, replace=False,
p=list(classLikelihoods2.values())))
drawnIndices = [np.random.choice(indices[label]) for label in l_response]
drawnFeatures = features[:, drawnIndices]
drawnFractions = zeroOneFractions[:, drawnIndices]
randomWeights = list()
for i in range(complexity - 1):
if i == 0:
weight = np.random.random() * (targetRange[1] - targetRange[0]) + targetRange[0]
else:
weight = np.random.random() * (1. - sum(randomWeights))
randomWeights.append(weight)
randomWeights.append(1. - sum(randomWeights))
assert sum(randomWeights) == 1.
mixtures.append(np.sum(drawnFeatures * randomWeights, axis=1))
fractions.append(np.sum(drawnFractions * randomWeights, axis=1)[response_id - 1])
if includeEndmember:
mixtures.extend(features.T)
fractions.extend(np.float32(response == response_id)) # 1. for target class, 0. for the rest
# convert to df
df_final = pd.DataFrame(np.column_stack([np.repeat(response_id, len(mixtures)), mixtures, fractions]),
columns=list(df.columns)+['fraction'])
return df_final
# input
input_csv = "/Users/shawn/Documents/humbolt/semester_02/EO_hyperspec/spectral_library/spectral_library_hyperspec_extended"
output_csv = "/Users/shawn/Documents/humbolt/semester_02/EO_hyperspec/spectral_library/spectral_library_hyperspec_extended.csv"
# string with .csv ending; file does not need to exist
df = pd.read_csv(input_csv) # .csv table
'''
- columns: one column holding class_id as integer (e.g., 1, 2, ..., n), the remaining columns are bands
- each row represents a single pure endmember point
- cleaned of nodata values, only valid observations (otherwise they might be mixed in)
'''
target_attr = 'class_ID' # name of column which holds the class_id
n_samples = 2500 # number of synthetically mixed training points to be generated
# run
unique_classes = np.unique(df[target_attr]) # retrieved the unique classes to mix n_samples for each target class
df_fraction = pd.DataFrame()
for i in unique_classes:
df_fraction = df_fraction.append(lsma(df, col_response=target_attr,
response_id=i, n=n_samples, mix_complexity=[2, 3, 4],
p_mix_complexity=[0.75, 0.20, 0.05], targetRange=(0, 1),
within_class_mixture=True, response_mixture=True, includeEndmember=True))
df_fraction[target_attr] = df_fraction[target_attr].astype('int')
df_fraction.to_csv(output_csv, index=False)
# EOF
| [
"s1637673@ed.ac.uk"
] | s1637673@ed.ac.uk |
38e529b495e60f0de653ce05fa87c4851b1a26a2 | db19c44480696092801aeff2b487c128976d23e4 | /util/tz.py | 6856688d6c7e84781e2171f6f3400da6533545ff | [] | no_license | herb/aacoevents | dee7eea711e49613fdfcd2e7527e59a588d6b597 | 4d83033bebf5daf0287d12a85f1092fb62514071 | refs/heads/master | 2021-01-01T19:02:01.894347 | 2017-06-04T22:20:52 | 2017-06-04T22:20:52 | 32,565,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | import datetime
class US_Pacific(datetime.tzinfo):
"""Implementation of the Pacific timezone. Stolen from App Engine
documentation."""
def utcoffset(self, dt):
return datetime.timedelta(hours=-8) + self.dst(dt)
def _FirstSunday(self, dt):
"""First Sunday on or after dt."""
return dt + datetime.timedelta(days=(6-dt.weekday()))
def dst(self, dt):
# 2 am on the second Sunday in March
dst_start = self._FirstSunday(datetime.datetime(dt.year, 3, 8, 2))
# 1 am on the first Sunday in November
dst_end = self._FirstSunday(datetime.datetime(dt.year, 11, 1, 1))
if dst_start <= dt.replace(tzinfo=None) < dst_end:
return datetime.timedelta(hours=1)
else:
return datetime.timedelta(hours=0)
def tzname(self, dt):
if self.dst(dt) == datetime.timedelta(hours=0):
return "PST"
else:
return "PDT"
class UTC(datetime.tzinfo):
def utcoffset(self , dt):
return datetime.timedelta(hours=0)
def dst(self, dt):
return datetime.timedelta(hours=0)
def tzname(self, dt):
return "UTC"
utc = UTC()
us_pacific = US_Pacific()
| [
"hho@hho-macbookpro.(none)"
] | hho@hho-macbookpro.(none) |
f5f45b8a6765db769354eaeb1c09969bd52c3e8e | 8d3d66fef6935242e238a6bb5700ffde3a29d851 | /algorithms/adstar.py | c449779df2eff6113faf3a384f19b0fad3d9b0ca | [] | no_license | vrmnyg/pathfinding-simulator | dfcc7ebe41158d60668e32f6839a8ba05f1a44c5 | 2a31c76fbce2f4f872c6368c1577d3a6601f9c1b | refs/heads/main | 2023-01-08T07:05:32.150800 | 2020-11-12T11:07:26 | 2020-11-12T11:07:26 | 312,243,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,541 | py | """
AD* algorithm
https://www.cs.cmu.edu/~ggordon/likhachev-etal.anytime-dstar.pdf
"""
import math
from algorithms.abstract_algorithm import AbstractAlgorithm
from algorithms.motions8_algorithm import Motions8Algorithm
class ADStar(AbstractAlgorithm, Motions8Algorithm):
def __init__(self, s_start, s_goal, eps, heuristic_type, env, y, x, m, obs, border, c):
AbstractAlgorithm.__init__(self, s_goal, heuristic_type, y, x, m, obs, border, c)
self.s_start = s_start
#print("init")
self.Env = env # class Env
self.eps = eps
self.eps_delta = 0.5
self.orig_eps = eps
# dictionaries
self.g, self.rhs, self.OPEN = {}, {}, {}
# set all rhs and g values to infinity
for i in range(0, self.y):
for j in range(0, self.x):
self.rhs[(i, j)] = math.inf
self.g[(i, j)] = math.inf
# start searching from goal by setting goal rhs to zero
self.rhs[self.s_goal] = 0.0
self.OPEN[self.s_goal] = self.Key(self.s_goal)
self.CLOSED, self.INCONS = set(), dict()
# visited nodes (node gets added when OPEN is popped)
self.visited = set()
# update g-table
def fix(self, s):
if self.g[s] > self.rhs[s]:
self.g[s] = self.rhs[s]
for sn in self.get_neighbor(s):
self.UpdateState(sn)
else:
self.g[s] = math.inf
for sn in self.get_neighbor(s):
self.UpdateState(sn)
self.UpdateState(s)
def init(self):
# dictionaries
self.g, self.rhs, self.OPEN = {}, {}, {}
# set all rhs and g values to infinity
for i in range(0, self.y):
for j in range(0, self.x):
self.rhs[(i, j)] = math.inf
self.g[(i, j)] = math.inf
# start searching from goal by setting goal rhs to zero
self.rhs[self.s_goal] = 0.0
self.OPEN[self.s_goal] = self.Key(self.s_goal)
self.CLOSED, self.INCONS = set(), dict()
# visited nodes (node gets added when OPEN is popped)
self.visited = set()
# check route, fix path if needed
"""for r in route:
for s in r:
#if self.rhs[s] != self.g[s]:
if self.g[s] > self.rhs[s]:
self.g[s] = self.rhs[s]
for sn in self.get_neighbor(s):
self.UpdateState(sn)
else:
self.g[s] = math.inf
for sn in self.get_neighbor(s):
self.UpdateState(sn)
self.UpdateState(s)"""
#return list
def Key(self, s):
if self.g[s] > self.rhs[s]:
return [self.rhs[s] + self.eps * self.h(self.s_start, s), self.rhs[s]]
#return [self.rhs[s] + self.h(self.s_start, s), self.rhs[s]]
else:
return [self.g[s] + self.h(self.s_start, s), self.g[s]]
def observe(self, s):
return self.Env.get_obstacles(s)
# keys are used to rank cells in OPEN list
def TopKey(self):
s = min(self.OPEN, key=self.OPEN.get)
return s, self.OPEN[s]
def testLooping(self):
path = [self.s_start]
s = self.s_start
pathCosts = []
while True:
g_list = {}
for x in self.get_neighbor(s):
if not self.is_collision(s, x):
g_list[x] = self.g[x]
s_parent = s
s = min(g_list, key=g_list.get)
cost = self.cost_no_collision(s_parent, s)
pathCosts.append(cost)
path.append(s)
# test looping
if len(path) > 3 and path[-1] == path[-3]:
looping_s = path[-1]
#print("looping_s: ")
#print(looping_s)
#print(path)
del path[-1]
#del path[-1]
#del path[-1]
#print(path)
return True, looping_s
if s == self.s_goal:
break
return False, None
# find the best path, calculate cost of path
def extract_path(self):
path = [self.s_start]
s = self.s_start
pathCosts = []
while True:
g_list = {}
for x in self.get_neighbor(s):
if not self.is_collision(s, x):
g_list[x] = self.g[x]
s_parent = s
s = min(g_list, key=g_list.get)
cost = self.cost_no_collision(s_parent, s)
pathCosts.append(cost)
path.append(s)
#print(s)
if s == self.s_goal:
break
return list(path), pathCosts
def UpdateState(self, s):
# for all nodes except goal node
if s != self.s_goal:
# set rhs to infinity // find the best route to s from neighbors
self.rhs[s] = math.inf
for x in self.get_neighbor(s):
self.rhs[s] = min(self.rhs[s], self.g[x] + self.computeCost(x, s))
if s in self.OPEN:
self.OPEN.pop(s)
if self.g[s] != self.rhs[s]:
if s not in self.CLOSED:
self.OPEN[s] = self.Key(s)
else:
self.INCONS[s] = 0
def ComputeOrImprovePath(self):
#if self.rhs[self.s_start] == self.g[self.s_start]:
#self.g[self.s_start] = math.inf
while True:
s, v = self.TopKey()
if v >= self.Key(self.s_start) and self.rhs[self.s_start] == self.g[self.s_start]:
break
self.OPEN.pop(s)
self.visited.add(s)
print(s)
if self.g[s] > self.rhs[s]:
self.g[s] = self.rhs[s]
self.CLOSED.add(s)
for sn in self.get_neighbor(s):
self.UpdateState(sn)
else:
self.g[s] = math.inf
for sn in self.get_neighbor(s):
self.UpdateState(sn)
self.UpdateState(s)
# update g-table when obstacles are found
def ChangeEdgeCosts(self, s):
self.eps = self.orig_eps
for i in s:
if i not in self.obs:
self.obs.add(i)
self.g[i] = math.inf
self.rhs[i] = math.inf
#else:
# self.obs.remove(i)
# self.UpdateState(i)
if i not in self.border:
for sn in self.get_neighbor(i):
self.UpdateState(sn)
def run(self, s_start):
self.s_start = s_start
while True:
if self.eps < 1.0:
break
print("running with e: " + str(self.eps))
#print(self.OPEN)
self.ComputeOrImprovePath()
#self.visited = set()
loop, sl = self.testLooping()
if loop:
self.init()
self.ComputeOrImprovePath()
# not functioning yet
"""loop, sl = self.testLooping()
while loop:
self.fix(sl)
self.ComputeOrImprovePath()
loop, sl = self.testLooping()
print(sl)
wait = input("ss")"""
path, pathCosts = self.extract_path()
# if changes in edge costs are detected
# for all directed edges (u, v) with changed edge costs
# Update the edge cost c(u, v);
# UpdateState(u);
# if significant edge cost changes were observed
# increase e or replan from scratch;
#self.ChangeEdgeCosts()
self.eps -= self.eps_delta
self.OPEN.update(self.INCONS)
for s in self.OPEN:
self.OPEN[s] = self.Key(s)
self.CLOSED = set()
yield path, list(self.visited), pathCosts
#wait = input("PRESS ENTER TO CONTINUE.")
| [
"noreply@github.com"
] | vrmnyg.noreply@github.com |
c71d9f09ac7994a732168a2d135ce1f8cc93bff3 | a60a57a1083fb328a34d60c6b6fba036416989b2 | /FinalExam/main.py | 404ff42f0ecbc1a7d2494e8ac33083857b38499f | [] | no_license | Sethhealy/Design-Patterns-for-Web-Programming | 9aad4d12cfa559595a45bc74abe5a615e3aae6f3 | 8af2302d45db5b4a1e0cd12077a56d4d1e4f5a61 | refs/heads/master | 2020-07-23T23:49:38.786706 | 2014-08-01T02:08:03 | 2014-08-01T02:08:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,940 | py | """
Seth Healy
Final Exam
07/31/14
"""
import json
import webapp2
from urllib2 import urlopen, Request, build_opener
class MainHandler(webapp2.RequestHandler):
# im getting my requests so that i can display the information
def get(self):
if self.request.GET:
p = Page()
musical = self.request.GET['music']
mm = musicModel()
searchdata = mm.songs(self, musical)
mv = musicView()
mv.music = searchdata
# this is where all my viewable data is contained
class musicView(object):
def __init__(self):
self.__music = musicDataObject()
# this is my model where all my data is contained.
class musicModel(object):
def __init__(self, songs):
self.__music = songs
def song(self):
musicrequest = Request('http://rebeccacarroll.com/api/music/music.json')
#build the request
request = Request("http://rebeccacarroll.com/api/music/music.json")
# create an object that fetches pages for us
opener = build_opener()
#tell object what to fetch
music = opener.open(request)
musicresponse = urlopen(musicrequest)
musicObject = json.load(musicresponse)
#calling my dataobject using these.
do = musicObject()
do.musicObject = self.title
do.musicObject = self.artist
do.musicObject = self.length
do.musicObject = self.year
do.musicObject = self.label
do.musicObject = self.cover
do.musicObject = self.file
return do
#creating my dataobject class where i can define all the and pull all the json information.
class musicDataObject(object):
def __init__(self):
self.musicObject = None
self.title = ''
self.artist = ''
self.length = 0
self.year = 0
self.label = ''
self.cover = ''
self.file = ''
# this is where all my html is located.
class Page(object):
_head = """<!DOCTYPE HTML>
<head>
<title> Final exam </title>
</head>
<body>"""
_content = '''
<h1> Top 10 Pop Hits </h1>
<a href="#"><button> Like a Rolling Stone </button></a>
<a href="#"><button> Satisfaction </button></a>
<a href="#"><button> Imagine </button></a>
<a href="#"><button> What's Going On </button></a>
<a href="#"><button> Respect </button></a>
<a href="#"><button> Good Vibrations </button></a>
<a href="#"><button> Hey Jude </button></a>
<a href="#"><button> Smells Like Teen Spirit </button></a>
<a href="#"><button> What'd I Say </button></a>
'''
_close = """
</body>
</html>"""
def print_out(self):
return self._head + self._content + self._close
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
| [
"sethhealy@Seths-MacBook-Pro.local"
] | sethhealy@Seths-MacBook-Pro.local |
ef297538dbdda1ba03ef3bd3400677dee6aa2c18 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/A3COM-HUAWEI-DOT11-ROAM-MIB.py | d7ff4a0c1ceab21807297768f81b42c90780999e | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 15,440 | py | #
# PySNMP MIB module A3COM-HUAWEI-DOT11-ROAM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-DOT11-ROAM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:49:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol imports are resolved at MIB-load time by pysnmp's MibBuilder
# (`mibBuilder` is injected into this module's globals by the loader).
# Each importSymbols call pulls named managed objects / textual conventions
# out of a previously compiled MIB module.
h3cDot11, = mibBuilder.importSymbols("A3COM-HUAWEI-DOT11-REF-MIB", "h3cDot11")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, IpAddress, Integer32, iso, Gauge32, Counter64, NotificationType, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Bits, ObjectIdentity, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "IpAddress", "Integer32", "iso", "Gauge32", "Counter64", "NotificationType", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Bits", "ObjectIdentity", "Counter32", "Unsigned32")
DisplayString, MacAddress, RowStatus, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "MacAddress", "RowStatus", "TruthValue", "TextualConvention")
# MODULE-IDENTITY for the H3C 802.11 roaming MIB, rooted at
# 1.3.6.1.4.1.43.45.1.10.2.75.10 (arc 10 under h3cDot11).
h3cDot11ROAM = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10))
h3cDot11ROAM.setRevisions(('2010-08-04 18:00', '2009-05-07 20:00', '2008-07-23 12:00',))
# loadTexts is only true when the builder was asked to retain descriptive
# text; these setters are skipped otherwise to save memory.
if mibBuilder.loadTexts: h3cDot11ROAM.setLastUpdated('201008041800Z')
if mibBuilder.loadTexts: h3cDot11ROAM.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
class H3cDot11RoamMobileTunnelType(TextualConvention, Integer32):
    """Address family of the inter-AC mobility tunnel.

    Enumerated values: ipv4(1), ipv6(2).
    """
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(
        SingleValueConstraint(1, 2)
    )
    namedValues = NamedValues(
        ("ipv4", 1),
        ("ipv6", 2),
    )
class H3cDot11RoamAuthMode(TextualConvention, Integer32):
    """Authentication mode used between roaming members.

    Enumerated values: none(1), md5(2).
    """
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(
        SingleValueConstraint(1, 2)
    )
    namedValues = NamedValues(
        ("none", 1),
        ("md5", 2),
    )
class H3cDot11RoamIACTPStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("init", 1), ("idle", 2), ("joinRequestWait", 3), ("joinResponseWait", 4), ("joinConfirmWait", 5), ("joinError", 6), ("run", 7))
h3cDot11RoamCfgGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1))
h3cDot11RoamStatusGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2))
h3cDot11RoamStatisGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 3))
h3cDot11RoamStatis2Group = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 4))
h3cDot11MobGrpTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1), )
if mibBuilder.loadTexts: h3cDot11MobGrpTable.setStatus('current')
h3cDot11MobGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1), ).setIndexNames((0, "A3COM-HUAWEI-DOT11-ROAM-MIB", "h3cDot11MobGrpName"))
if mibBuilder.loadTexts: h3cDot11MobGrpEntry.setStatus('current')
h3cDot11MobGrpName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 15)))
if mibBuilder.loadTexts: h3cDot11MobGrpName.setStatus('current')
h3cdot11MobGrpTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 2), H3cDot11RoamMobileTunnelType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cdot11MobGrpTunnelType.setStatus('current')
h3cDot11MobGrpSrcIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 3), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDot11MobGrpSrcIPAddr.setStatus('current')
h3cDot11MobGrpAuthMode = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 4), H3cDot11RoamAuthMode().clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDot11MobGrpAuthMode.setStatus('current')
h3cDot11MobGrpAuthKey = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDot11MobGrpAuthKey.setStatus('current')
h3cDot11MobGrpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDot11MobGrpEnable.setStatus('current')
h3cDot11MobGrpRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDot11MobGrpRowStatus.setStatus('current')
h3cDot11MobGrpMemberTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2), )
if mibBuilder.loadTexts: h3cDot11MobGrpMemberTable.setStatus('current')
h3cDot11MobGrpMemberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-DOT11-ROAM-MIB", "h3cDot11MobGrpName"), (0, "A3COM-HUAWEI-DOT11-ROAM-MIB", "h3cDot11MobGrpMemberIpAddr"))
if mibBuilder.loadTexts: h3cDot11MobGrpMemberEntry.setStatus('current')
h3cDot11MobGrpMemberIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2, 1, 1), InetAddress())
if mibBuilder.loadTexts: h3cDot11MobGrpMemberIpAddr.setStatus('current')
h3cDot11MobGrpMemberStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2, 1, 2), H3cDot11RoamIACTPStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11MobGrpMemberStatus.setStatus('current')
h3cDot11MobGrpMemberIf = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11MobGrpMemberIf.setStatus('current')
h3cDot11MobGrpMemberUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2, 1, 4), Integer32()).setUnits('second').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11MobGrpMemberUpTime.setStatus('current')
h3cDot11MobGrpMemberRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDot11MobGrpMemberRowStatus.setStatus('current')
h3cDot11RoamInInfoTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1), )
if mibBuilder.loadTexts: h3cDot11RoamInInfoTable.setStatus('current')
h3cDot11RoamInInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1, 1), ).setIndexNames((0, "A3COM-HUAWEI-DOT11-ROAM-MIB", "h3cDot11RoamClientMAC"))
if mibBuilder.loadTexts: h3cDot11RoamInInfoEntry.setStatus('current')
h3cDot11RoamClientMAC = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: h3cDot11RoamClientMAC.setStatus('current')
h3cDot11RoamInClientBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamInClientBSSID.setStatus('current')
h3cDot11RoamInClientVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamInClientVlanID.setStatus('current')
h3cDot11RoamInHomeACIPType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1, 1, 4), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamInHomeACIPType.setStatus('current')
h3cDot11RoamInHomeACIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 1, 1, 5), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamInHomeACIPAddr.setStatus('current')
h3cDot11RoamOutInfoTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2), )
if mibBuilder.loadTexts: h3cDot11RoamOutInfoTable.setStatus('current')
h3cDot11RoamOutInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-DOT11-ROAM-MIB", "h3cDot11RoamClientMAC"))
if mibBuilder.loadTexts: h3cDot11RoamOutInfoEntry.setStatus('current')
h3cDot11RoamOutClientBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamOutClientBSSID.setStatus('current')
h3cDot11RoamOutClientVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamOutClientVlanID.setStatus('current')
h3cDot11RoamOutForeignACIPType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2, 1, 3), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamOutForeignACIPType.setStatus('current')
h3cDot11RoamOutForeignACIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2, 1, 4), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamOutForeignACIPAddr.setStatus('current')
h3cDot11RoamOutClientUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 2, 1, 5), Integer32()).setUnits('second').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamOutClientUpTime.setStatus('current')
h3cDot11RoamTrackTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3), )
if mibBuilder.loadTexts: h3cDot11RoamTrackTable.setStatus('current')
h3cDot11RoamTrackEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1), ).setIndexNames((0, "A3COM-HUAWEI-DOT11-ROAM-MIB", "h3cDot11RoamTrackIndex"))
if mibBuilder.loadTexts: h3cDot11RoamTrackEntry.setStatus('current')
h3cDot11RoamTrackIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cDot11RoamTrackIndex.setStatus('current')
h3cDot11RoamTrackClientMAC = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamTrackClientMAC.setStatus('current')
h3cDot11RoamTrackBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamTrackBSSID.setStatus('current')
h3cDot11RoamTrackUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1, 4), Integer32()).setUnits('second').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamTrackUpTime.setStatus('current')
h3cDot11RoamTrackACIPType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1, 5), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamTrackACIPType.setStatus('current')
h3cDot11RoamTrackACIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 2, 3, 1, 6), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11RoamTrackACIPAddr.setStatus('current')
h3cDot11IntraACRoamingSuccCnt = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11IntraACRoamingSuccCnt.setStatus('current')
h3cDot11InterACRoamingSuccCnt = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 3, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11InterACRoamingSuccCnt.setStatus('current')
h3cDot11InterACRoamOutSuccCnt = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 3, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11InterACRoamOutSuccCnt.setStatus('current')
h3cDot11IntraACRoamingSuccCnt2 = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 4, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11IntraACRoamingSuccCnt2.setStatus('current')
h3cDot11InterACRoamingSuccCnt2 = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 4, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11InterACRoamingSuccCnt2.setStatus('current')
h3cDot11InterACRoamOutSuccCnt2 = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 75, 10, 4, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot11InterACRoamOutSuccCnt2.setStatus('current')
mibBuilder.exportSymbols("A3COM-HUAWEI-DOT11-ROAM-MIB", h3cdot11MobGrpTunnelType=h3cdot11MobGrpTunnelType, h3cDot11MobGrpSrcIPAddr=h3cDot11MobGrpSrcIPAddr, h3cDot11RoamOutClientBSSID=h3cDot11RoamOutClientBSSID, h3cDot11RoamTrackEntry=h3cDot11RoamTrackEntry, h3cDot11InterACRoamOutSuccCnt=h3cDot11InterACRoamOutSuccCnt, h3cDot11MobGrpMemberIf=h3cDot11MobGrpMemberIf, h3cDot11RoamTrackClientMAC=h3cDot11RoamTrackClientMAC, h3cDot11MobGrpAuthKey=h3cDot11MobGrpAuthKey, h3cDot11RoamOutInfoTable=h3cDot11RoamOutInfoTable, h3cDot11RoamInInfoEntry=h3cDot11RoamInInfoEntry, h3cDot11InterACRoamingSuccCnt=h3cDot11InterACRoamingSuccCnt, PYSNMP_MODULE_ID=h3cDot11ROAM, h3cDot11RoamInClientVlanID=h3cDot11RoamInClientVlanID, h3cDot11MobGrpMemberEntry=h3cDot11MobGrpMemberEntry, H3cDot11RoamMobileTunnelType=H3cDot11RoamMobileTunnelType, h3cDot11MobGrpTable=h3cDot11MobGrpTable, H3cDot11RoamAuthMode=H3cDot11RoamAuthMode, h3cDot11MobGrpMemberStatus=h3cDot11MobGrpMemberStatus, h3cDot11MobGrpMemberUpTime=h3cDot11MobGrpMemberUpTime, h3cDot11RoamOutForeignACIPAddr=h3cDot11RoamOutForeignACIPAddr, H3cDot11RoamIACTPStatus=H3cDot11RoamIACTPStatus, h3cDot11RoamClientMAC=h3cDot11RoamClientMAC, h3cDot11RoamTrackTable=h3cDot11RoamTrackTable, h3cDot11ROAM=h3cDot11ROAM, h3cDot11IntraACRoamingSuccCnt=h3cDot11IntraACRoamingSuccCnt, h3cDot11IntraACRoamingSuccCnt2=h3cDot11IntraACRoamingSuccCnt2, h3cDot11RoamInHomeACIPAddr=h3cDot11RoamInHomeACIPAddr, h3cDot11InterACRoamOutSuccCnt2=h3cDot11InterACRoamOutSuccCnt2, h3cDot11RoamStatusGroup=h3cDot11RoamStatusGroup, h3cDot11InterACRoamingSuccCnt2=h3cDot11InterACRoamingSuccCnt2, h3cDot11RoamStatis2Group=h3cDot11RoamStatis2Group, h3cDot11RoamInClientBSSID=h3cDot11RoamInClientBSSID, h3cDot11RoamTrackBSSID=h3cDot11RoamTrackBSSID, h3cDot11RoamInInfoTable=h3cDot11RoamInInfoTable, h3cDot11RoamInHomeACIPType=h3cDot11RoamInHomeACIPType, h3cDot11RoamOutInfoEntry=h3cDot11RoamOutInfoEntry, h3cDot11MobGrpName=h3cDot11MobGrpName, h3cDot11RoamTrackIndex=h3cDot11RoamTrackIndex, 
h3cDot11RoamTrackACIPType=h3cDot11RoamTrackACIPType, h3cDot11MobGrpEntry=h3cDot11MobGrpEntry, h3cDot11RoamStatisGroup=h3cDot11RoamStatisGroup, h3cDot11MobGrpMemberTable=h3cDot11MobGrpMemberTable, h3cDot11MobGrpAuthMode=h3cDot11MobGrpAuthMode, h3cDot11MobGrpMemberRowStatus=h3cDot11MobGrpMemberRowStatus, h3cDot11RoamOutForeignACIPType=h3cDot11RoamOutForeignACIPType, h3cDot11RoamTrackUpTime=h3cDot11RoamTrackUpTime, h3cDot11MobGrpRowStatus=h3cDot11MobGrpRowStatus, h3cDot11RoamOutClientVlanID=h3cDot11RoamOutClientVlanID, h3cDot11MobGrpMemberIpAddr=h3cDot11MobGrpMemberIpAddr, h3cDot11RoamCfgGroup=h3cDot11RoamCfgGroup, h3cDot11RoamTrackACIPAddr=h3cDot11RoamTrackACIPAddr, h3cDot11MobGrpEnable=h3cDot11MobGrpEnable, h3cDot11RoamOutClientUpTime=h3cDot11RoamOutClientUpTime)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
e4ffe00c916e89355819e85740bcf8d28dbd4023 | 5fd88b87174555accc0a7996d40482074b15bfe7 | /final_pjt/recommend/forms.py | b93a3fe1d93d9d405c7539717d23ad28fa70b012 | [] | no_license | KangminP/GoDjango | 9111ce9f79e485911d5c8b28fa06a4a155fbdeb3 | c3dab38d47fb9d92ead09fdca02b423bc43ca9b2 | refs/heads/master | 2023-03-09T11:10:28.909786 | 2021-02-18T09:27:13 | 2021-02-18T09:27:13 | 331,010,299 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django import forms
from .models import Photo
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = ['image'] | [
"mygangmini@naver.com"
] | mygangmini@naver.com |
199ad53b72479123f59bce74532e57cadc7eb8dc | 97dc866edcc9047b16f51ea8d366729f1e7988ea | /src/perspective_transform_matrix.py | 8ff2554df5c21e80f9dcae2e0341ca5eac308427 | [] | no_license | tsenying/CarND-Advanced-Lane-Lines | 77025f84760f9d4bbe3d8633ec76c0001cf7517d | 89cb5e70960c64d925033746ba877717bdd4795b | refs/heads/master | 2020-04-06T04:04:30.898252 | 2017-04-29T19:49:54 | 2017-04-29T19:49:54 | 83,051,855 | 0 | 0 | null | 2017-02-24T14:54:06 | 2017-02-24T14:54:05 | null | UTF-8 | Python | false | false | 2,527 | py | # Calculate the perspective transform matrix M and inverse Minv
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.path import Path
import matplotlib.patches as patches
from image_utils import image_warp
# Read in the saved camera matrix and distortion coefficients
# These are the arrays calculated using cv2.calibrateCamera()
dist_pickle = pickle.load( open( "camera_cal/calibration_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Read in an image
img = mpimg.imread('test_images/straight_lines1.jpg')
# Define 4 source points
# top left [622,435]
# top right [662,435]
# bottom right [1040,675]
# bottom left [272,675]
src_points = [[596,450],[685,450],[1100,720],[200,720]]
src = np.float32(src_points)
# Define 4 destination points
dst_points = [
[320, 0],
[960, 0],
[960, 720],
[320, 720]]
dst = np.float32(dst_points)
# Use cv2.getPerspectiveTransform() to get M, the transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# Inverse transform matrix for transforming warped back to perspective view
Minv = cv2.getPerspectiveTransform(dst, src)
# Save the perspective transform result for later use
pXform_pickle = {}
pXform_pickle["M"] = M
pXform_pickle["Minv"] = Minv
pickle.dump( pXform_pickle, open( "./camera_cal/perspective_transform_pickle.p", "wb" ) )
### Test out the perspective transform
# warp image
img_warped = image_warp(img, mtx, dist, M)
# display result
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
fig.tight_layout()
ax1.imshow(img)
# draw the src points boundary
verts = np.float32( src_points + [src_points[0]] )
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
patch = patches.PathPatch(path, edgecolor='red', facecolor='none', lw=2)
ax1.add_patch(patch)
ax1.set_title('Original Image', fontsize=20)
ax2.imshow( img_warped )
# draw the dst points boundary
verts = np.float32( dst_points + [dst_points[0]] )
path = Path(verts, codes)
patch = patches.PathPatch(path, edgecolor='red', facecolor='none', lw=2)
ax2.add_patch(patch)
ax2.set_title('Undistorted and Warped Image', fontsize=20)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
fig.savefig('./output_images/perspective_transform_test.jpg')
#plt.show()
cv2.imwrite('./output_images/straight_lines1.jpg', img)
cv2.imwrite('./output_images/straight_lines1_warped.jpg', img_warped)
| [
"ying_hong@trimble.com"
] | ying_hong@trimble.com |
0b61ccd08991ebb0902f43a83ba3074f2e60a203 | 18305efd1edeb68db69880e03411df37fc83b58b | /pdb_files3000rot/g7/1g7v/tractability_450/pymol_results_file.py | b3ca0aa99f8776269651041e072c2f991de4c442 | [] | no_license | Cradoux/hotspot_pipline | 22e604974c8e38c9ffa979092267a77c6e1dc458 | 88f7fab8611ebf67334474c6e9ea8fc5e52d27da | refs/heads/master | 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,214 | py |
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
from chempy import cpv
radius, gap = float(radius), float(gap)
hlength, hradius = float(hlength), float(hradius)
try:
color1, color2 = color.split()
except:
color1 = color2 = color
color1 = list(cmd.get_color_tuple(color1))
color2 = list(cmd.get_color_tuple(color2))
def get_coord(v):
if not isinstance(v, str):
return v
if v.startswith('['):
return cmd.safe_list_eval(v)
return cmd.get_atom_coords(v)
xyz1 = get_coord(atom1)
xyz2 = get_coord(atom2)
normal = cpv.normalize(cpv.sub(xyz1, xyz2))
if hlength < 0:
hlength = radius * 3.0
if hradius < 0:
hradius = hlength * 0.6
if gap:
diff = cpv.scale(normal, gap)
xyz1 = cpv.sub(xyz1, diff)
xyz2 = cpv.add(xyz2, diff)
xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
for t in threshold_list:
for i in range(len(grids)):
try:
cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
except:
continue
try:
cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
except:
continue
for g in grids:
cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"16.4940004349":[], "16.4940004349_arrows":[]}
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(6.0), float(103.5), float(82.5), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([6.0,103.5,82.5], [3.903,105.552,80.989], color="blue red", name="Arrows_16.4940004349_1")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(108.0), float(80.5), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([9.5,108.0,80.5], [11.728,106.388,80.182], color="blue red", name="Arrows_16.4940004349_2")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(105.0), float(79.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([9.5,105.0,79.0], [11.728,106.388,80.182], color="blue red", name="Arrows_16.4940004349_3")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(105.5), float(77.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([9.5,105.5,77.0], [11.141,102.835,76.967], color="blue red", name="Arrows_16.4940004349_4")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(11.0), float(110.5), float(81.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([11.0,110.5,81.0], [13.419,110.042,82.914], color="blue red", name="Arrows_16.4940004349_5")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(7.42102675834), float(107.749665562), float(78.4210819103), float(1.0)]
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(5.5), float(113.5), float(80.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([5.5,113.5,80.0], [5.021,110.73,80.529], color="red blue", name="Arrows_16.4940004349_6")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(8.5), float(115.0), float(78.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([8.5,115.0,78.0], [6.555,117.389,78.438], color="red blue", name="Arrows_16.4940004349_7")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(10.5), float(109.0), float(79.5), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([10.5,109.0,79.5], [11.883,106.786,77.978], color="red blue", name="Arrows_16.4940004349_8")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(11.5), float(113.0), float(78.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([11.5,113.0,78.0], [13.328,115.357,77.05], color="red blue", name="Arrows_16.4940004349_9")
cmd.load_cgo(cluster_dict["16.4940004349"], "Features_16.4940004349", 1)
cmd.load_cgo(cluster_dict["16.4940004349_arrows"], "Arrows_16.4940004349")
cmd.set("transparency", 0.2,"Features_16.4940004349")
cmd.group("Pharmacophore_16.4940004349", members="Features_16.4940004349")
cmd.group("Pharmacophore_16.4940004349", members="Arrows_16.4940004349")
if dirpath:
f = join(dirpath, "label_threshold_16.4940004349.mol2")
else:
f = "label_threshold_16.4940004349.mol2"
cmd.load(f, 'label_threshold_16.4940004349')
cmd.hide('everything', 'label_threshold_16.4940004349')
cmd.label("label_threshold_16.4940004349", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.4940004349', members= 'label_threshold_16.4940004349')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
| [
"cradoux.cr@gmail.com"
] | cradoux.cr@gmail.com |
4bfd070d68d3a71a825baa6e50737eb2e3242721 | 548db811eb568d4149bb202af97b6d889791ec0c | /meiduo_mall/meiduo_mall/settings/dev.py | 9268db3f06e7abe6d535e36c5fb57891921a8a0a | [] | no_license | endeavor-hxs/meiduo_project | 3ac85bae9860df98a609b059cc7f72a5fdfb4092 | 8e64b335e2fff3e592078dc70fa970e30335e333 | refs/heads/main | 2023-01-14T17:12:31.440220 | 2020-11-24T16:01:54 | 2020-11-24T16:01:54 | 306,054,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,695 | py | """
Django settings for meiduo_mall project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os, sys
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve( ).parent.parent
# 定义apps子模块的路径,通过insert方式
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z6tcau+md8a0ei2g)p9wpdlmkn3k_m$9cc_tl%z4877x55p+)o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users', # 用户模块
'contents', # 首页广告模块
'verifications', # 验证码模块
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo_mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
#配置jinja2的环境
'environment': 'meiduo_mall.utils.jinja2_env.jinja2_environment',
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduo_mall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'default': {
'ENGINE': 'django.db.backends.mysql', # 数据库引擎
'HOST': '119.23.229.55', # 数据库主机
'PORT': 3306, # 数据库端口
'USER': 'test', # 数据库用户名
'PASSWORD': '123456', # 数据库用户密码
'NAME': 'meiduo' # 数据库名字
},
}
}
#配置redis缓存
CACHES = {
"default": { # 默认
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://119.23.229.55:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": { # session
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://119.23.229.55:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"verify_code": { # 图形验证码
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://119.23.229.55:6379/2",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# 配置静态文件加载路径
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
#配置日志文件
LOGGING = {
'version': 1,
'disable_existing_loggers': False, # 是否禁用已经存在的日志器
'formatters': { # 日志信息显示的格式
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
},
},
'filters': { # 对日志进行过滤
'require_debug_true': { # django在debug模式下才输出日志
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': { # 日志处理方法
'console': { # 向终端中输出日志
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': { # 向文件中输出日志
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(os.path.dirname(BASE_DIR), 'logs/meiduo.log'), # 日志文件的位置
'maxBytes': 300 * 1024 * 1024,
'backupCount': 10,
'formatter': 'verbose'
},
},
'loggers': { # 日志器
'django': { # 定义了一个名为django的日志器
'handlers': ['console', 'file'], # 可以同时向终端与文件中输出日志
'propagate': True, # 是否继续传递日志信息
'level': 'INFO', # 日志器接收的最低日志级别
},
}
}
| [
"huangyunlong22@gmail.com"
] | huangyunlong22@gmail.com |
3c1414d17c449561e276f13e399900b1c4bd8035 | 72a9d5019a6cc57849463fc315eeb0f70292eac8 | /Python-Programming/6- Numpy/Numpy_.py | 98ac37a1616122702019f51a69f73e320c98fe2f | [] | no_license | lydiawawa/Machine-Learning | 393ce0713d3fd765c8aa996a1efc9f1290b7ecf1 | 57389cfa03a3fc80dc30a18091629348f0e17a33 | refs/heads/master | 2020-03-24T07:53:53.466875 | 2018-07-22T23:01:42 | 2018-07-22T23:01:42 | 142,578,611 | 1 | 0 | null | 2018-07-27T13:08:47 | 2018-07-27T13:08:47 | null | UTF-8 | Python | false | false | 3,509 | py | # %%%%%%%%%%%%% Python %%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Authors %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Dr. Martin Hagan----->Email: mhagan@okstate.edu
# Dr. Amir Jafari------>Email: amir.h.jafari@okstate.edu
# %%%%%%%%%%%%% Date:
# V1 Jan - 04 - 2018
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Numpy Python %%%%%%%%%%%%%%%%%%%%%%%%%%%%
# =============================================================
import numpy as np
# Tutorial/demo script: each section below prints intermediate results; the
# bare expressions (e.g. `type(x)`) have no effect when run as a script.
# ----------------------------------------------------------------------------------
#---------------------- creating numpy array----------------------------------------
x = np.array([1, 2, 3, 4])
y = np.linspace(-5, 1, 10)
z = np.arange(0, 10)
print(x)
print(y)
print(z)
type(x)
print(x.dtype)
# ----------------------------------------------------------------------------------
#---------------------- Step Size---------------------------------------------------
x1 = np.arange(0, 10, 2)
x2 = np.arange(0, 5, .5)
x3 = np.arange(0, 1, .1)
y1 = np.linspace(1, 5, 2)
List = list(x1)
print(List)
Min = np.amin(x1)
print(Min)
Max = np.amax(y1)
print(Max)
# ----------------------------------------------------------------------------------
#---------------------- Array Operands----------------------------------------------
# Arithmetic on arrays is elementwise; on boolean arrays, + acts as OR and * as AND.
a1 = np.array([1, 1, 1, 1]) + np.array([2, 2, 2, 2])
print(a1)
a2 = np.array([1, 1, 1, 1]) - np.array([2, 2, 2, 2])
print(a2)
a3 = np.array([1, 1, 1, 1]) * np.array([2, 2, 2, 2])
print(a3)
a4 = np.array([1, 1, 1, 1]) / np.array([2, 2, 2, 2])
print(a4)
a5 = np.array([True, True, False]) + np.array([True, False, False])
print(a5)
a6 = np.array([True, True, False]) * np.array([True, False, False])
print(a6)
# ----------------------------------------------------------------------------------
#---------------------- Mathematical Function---------------------------------------
print (abs(-2))
list1 = [-1, -2, -3]
s1 = []
for i in range(len(list1)):
    s1.append(abs(list1[i]))
print(s1)
# np.abs applies elementwise and replaces the manual loop above.
np.abs(-3)
np.abs([-2, -7, 1])
# ----------------------------------------------------------------------------------
#---------------------- Indexing----------------------------------------------------
a7 = np.arange(1, 5, .5)
print(len(a7))
second_element = a7[1]
print(second_element)
first_three_elements = a7[0:3]
print(first_three_elements)
# ----------------------------------------------------------------------------------
# --------------------------Masking-------------------------------------------------
print(a7)
bigger_than_3 = a7 > 3
print(bigger_than_3)
type(bigger_than_3)
len(bigger_than_3)
d2 = [i for i, v in enumerate(a7) if v > 3]
print(d2)
[i for i, v in enumerate(a7) if v > 3]
d3 = [v for i, v in enumerate(a7) if v > 26]
print(d3)
sum(bigger_than_3)
len(d2)
large_nums = a7[bigger_than_3]
len(a7[bigger_than_3])
print(large_nums)
large_nums = a7[a7 > 3]
print(large_nums)
# ----------------------------------------------------------------------------------
# --------------------------More----------------------------------------------------
a8 = np.logical_and(a7 > 1, a7 < 3)
print(a8)
a9 = a7[np.logical_and(a7 > 1, a7 < 3)]
print(a9)
a10 = np.logical_or(a7 < 3, a7 > 4)
print(a10)
a11= a7[np.logical_or(a7 < 22, a7 > 27)]
print(a11)
# ----------------------------------------------------------------------------------
# --------------------------Vectorizing Function-------------------------------------
def f(x):
    return x ** 2 > 2
f_v = np.vectorize(f)
print(f_v([1,2,3]))
| [
"amir.h.jafari@okstate.edu"
] | amir.h.jafari@okstate.edu |
b26864853535553d7f95f2ea51bd5d4f2e6de8cf | 57517396095839e67957d0d1dfbcc0d816254482 | /src/action_space.py | b0ec3a2a624e5b2c516a323c1b2c30425ceeeea7 | [
"MIT"
] | permissive | Mehran-sh/Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces | 10bdc2d44036d9156d12cc4ce4f34f77463cfcc7 | 339b0d4c9a20130e6861ec94c969619f7503c3fe | refs/heads/master | 2021-09-05T02:37:11.528550 | 2018-01-23T17:35:11 | 2018-01-23T17:35:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | import numpy as np
import itertools
import pyflann
import util.my_plotlib as mplt
from util.data_graph import plot_3d_points
"""
This class represents an n-dimensional cube with a specific number of points embedded.
Points are distributed uniformly in the initialization. A search can be made using the
search_point function that returns the k (given) nearest neighbors of the input point.
"""
class Space:
    """Normalized point cloud over a hyper-rectangle with FLANN k-NN search.

    Points are stored internally in the unit cube [0, 1]^d and mapped to/from
    the caller's [low, high] range via import_point/export_point.
    """
    def __init__(self, low, high, points):
        self._low = np.array(low)
        self._high = np.array(high)
        self._range = self._high - self._low
        self._dimensions = len(low)
        # Internal storage is always the unit cube; note the name-mangled
        # attribute (_Space__space).
        self.__space = init_uniform_space([0] * self._dimensions,
                                          [1] * self._dimensions,
                                          points)
        self._flann = pyflann.FLANN()
        self.rebuild_flann()
    def rebuild_flann(self):
        # Must be called again whenever __space is mutated.
        self._index = self._flann.build_index(self.__space, algorithm='kdtree')
    def search_point(self, point, k):
        """Return the k nearest stored points to *point*, in external coordinates."""
        p_in = self.import_point(point)
        search_res, _ = self._flann.nn_index(p_in, k)
        knns = self.__space[search_res]
        p_out = []
        for p in knns:
            p_out.append(self.export_point(p))
        return np.array(p_out)
    def import_point(self, point):
        # External [low, high] coordinates -> internal [0, 1] coordinates.
        return (point - self._low) / self._range
    def export_point(self, point):
        # Internal [0, 1] coordinates -> external [low, high] coordinates.
        return self._low + point * self._range
    def get_space(self):
        return self.__space
    def shape(self):
        return self.__space.shape
    def get_number_of_actions(self):
        return self.shape()[0]
    def plot_space(self, additional_points=None):
        """Scatter-plot the stored points (1-3 dimensions only)."""
        dims = self._dimensions
        if dims > 3:
            print(
                'Cannot plot a {}-dimensional space. Max 3 dimensions'.format(dims))
            return
        space = self.get_space()
        # NOTE(review): this appends the whole *additional_points* array once
        # per element of it — presumably a bug; confirm the intent.
        if additional_points is not None:
            for i in additional_points:
                space = np.append(space, additional_points, axis=0)
        if dims == 1:
            lines = []
            for x in space:
                lines.append(mplt.Line([x], [0], line_color='o'))
            mplt.plot_lines(lines)
        elif dims == 2:
            lines = []
            for x, y in space:
                lines.append(mplt.Line([x], [y], line_color='o'))
            mplt.plot_lines(lines)
        else:
            plot_3d_points(space)
def init_uniform_space(low, high, points):
    """Build a uniform grid of roughly *points* samples over [low, high].

    The requested count is split evenly across dimensions, so the grid
    actually contains round(points**(1/dims))**dims rows, each a point of
    dimension len(low).
    """
    dims = len(low)
    per_axis = round(points ** (1 / dims))
    axes = [list(np.linspace(low[d], high[d], per_axis)) for d in range(dims)]
    grid = [list(combo) for combo in itertools.product(*axes)]
    return np.array(grid)
| [
"kontzedakis_93@hotmail.com"
] | kontzedakis_93@hotmail.com |
94b774182f3456a6ee6cfb24f8b297130c01fb56 | aec2c20ef80ca6a7588c3e1bd877f23ffeb65692 | /Anul III/APD/Tema1/test.py | 576809664a437ecb6b735764210b999d6cdc5a38 | [] | no_license | lavandalia/Teme-Poli-Calculatoare | 15cf707515a6c9618444586d38ef1ddc9e9ecefc | 317849ad19189480f91fa66ff003009d18f73aad | refs/heads/master | 2020-05-16T21:23:23.235094 | 2017-05-21T22:15:40 | 2017-05-21T22:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | #! /usr/bin/python
# Python 2 test harness: runs ./paralel on every input under ./testein,
# records timing/resource stats via /usr/bin/time into outsr.txt, then
# diffs the produced output against the reference files.
import glob
import subprocess
exe = './paralel'
indir = './testein'
outdir = './testeout'
outokdir = indir
ctime = '/usr/bin/time'
# Format string passed to /usr/bin/time -f (exit status, CPU, memory, timing).
fmt = "Exit Status: %x\nCPU %%: %P\nMemory:\n Unshared: %D\t\t\tAvg Total Mem: %K\n Major Page Faults: %F\t\tMinor Page Faults: %R\n No. Swaps Out of Mem: %W\tNo. Invol Context Swiches: %c\tNo. Vol Context Switches: %w\nTime:\n Realtime: %E\t\tSystem Time: %S\t\tUser Time: %U"
diff = '/usr/bin/diff'
output = 'outsr.txt'
def main():
    of = open(output, 'wb')
    for fp in glob.glob('./%s/out*.txt' % indir):
        fname = fp.split('/')[-1]
        fout = fname
        fn = fout.split('.')[0]
        # Skip the per-test detail files ("*_detaliu").
        if fn.split('_')[-1] == 'detaliu':
            continue
        # Expected file-name shape: out<N>_<V>_<T>.txt — TODO confirm.
        n, v, t = fn[3:].split('_')
        print fn, n, v, t
        fin = '%s/in%s_%s.txt' % (indir, n, v)
        fout = '%s/%s' % (outdir, fout)
        of.write(fn+'\n')
        of.flush()
        com = [ctime, '-f', fmt, exe, t, fin, fout]
        #print ' '.join(com)
        r = subprocess.call(com, stdout = of, stderr = of)
        of.flush()
        of.write('\n\n\n')
        print 'Time ', r
        # Compare produced output with the reference copy next to the input.
        fok = '%s/%s' % (outokdir, fname)
        com = [diff, fout, fok]
        r = subprocess.call(com, stderr = subprocess.STDOUT)
        print 'Diff ', r
if __name__ == '__main__':
    main()
| [
"gabriel.ivanica@gmail.com"
] | gabriel.ivanica@gmail.com |
f7c065cb5838b4cd7322cd93403020278b9622fa | 9aba14204989e5bfa913ad4bb679db9ce84d5dec | /classify/models/BertRCNN.py | a2c08c6d1105dabed2146dc9de09f668fc8d8aca | [] | no_license | Whiplashzeb/patent_generate | 871d496d3737dedcd1fbc9d0bf4191d2d05f152a | 654ee4109d7d0faf8b9bf2e9ddcf9db02fb84b22 | refs/heads/master | 2020-10-02T08:40:04.110095 | 2020-03-20T11:31:01 | 2020-03-20T11:31:01 | 227,741,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | from transformers import BertPreTrainedModel, BertModel
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
class Linear(nn.Module):
    """Fully-connected layer with Kaiming-normal weights and a zero bias."""

    def __init__(self, in_features, out_features):
        super(Linear, self).__init__()
        self.linear = nn.Linear(in_features=in_features, out_features=out_features)
        self.init_params()

    def init_params(self):
        # He (Kaiming) initialisation for the weight matrix; bias starts at 0.
        nn.init.kaiming_normal_(self.linear.weight)
        nn.init.constant_(self.linear.bias, 0)

    def forward(self, x):
        return self.linear(x)
class BertRCNN(BertPreTrainedModel):
    """BERT encoder followed by a BiLSTM + max-pooling head (RCNN-style classifier).

    Returns (loss?, logits, *bert_extras): loss is prepended only when
    *labels* is given; MSE is used for regression (num_labels == 1),
    cross-entropy otherwise.
    """
    def __init__(self, config, rnn_hidden_size, layers, dropout):
        super(BertRCNN, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Bidirectional LSTM over BERT's token representations.
        self.rnn = nn.LSTM(config.hidden_size, rnn_hidden_size, layers, bidirectional=True, dropout=dropout, batch_first=True)
        # Projects [token_repr ; BiLSTM output] back to hidden_size.
        self.W = Linear(config.hidden_size + 2 * rnn_hidden_size, config.hidden_size)
        # Classifier consumes [pooled_cls ; max-pooled token features].
        self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels)
        self.init_weights()
    def forward(
            self,
            input_ids=None,
            attention_mask=None,
            token_type_ids=None,
            position_ids=None,
            head_mask=None,
            inputs_embeds=None,
            labels=None,
    ):
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds
        )
        last_hidden_states = outputs[0]
        last_hidden_states = self.dropout(last_hidden_states)
        pooled_output = outputs[1]
        rnn_output, _ = self.rnn(last_hidden_states)
        # Concatenate per-token BiLSTM output with the raw token representation.
        x = torch.cat((rnn_output, last_hidden_states), 2)
        # Max-pool over the sequence dimension after projecting through W.
        y = torch.tanh(self.W(x)).permute(0, 2, 1)
        y = F.max_pool1d(y, y.size()[2]).squeeze(2)
        feature = torch.cat([pooled_output, y], dim=-1)
        logits = self.classifier(feature)
        outputs = (logits,) + outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs
| [
"blgszeb@outlook.com"
] | blgszeb@outlook.com |
7896a492913a39c9123888ffc60e591f7a76533f | 690f02586f414ebf5b537afbd4fb58ca6cad9fbb | /link_3.py | f2bb12717f041e1725b6be6db81ee3cac697ead8 | [] | no_license | AlexBauer46/NetworksPA3 | 650f58931e639b4e14a6d67bf5d243c36811166e | 5b395953a6dd2dcba831da6bd1d8b20ce25b8ddb | refs/heads/main | 2023-01-24T12:06:56.362777 | 2020-11-16T01:51:45 | 2020-11-16T01:51:45 | 313,164,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | '''
Created on Oct 12, 2016
@author: mwittie
'''
import queue
import threading
from rprint import print
## An abstraction of a link between router interfaces
class Link:
    """One-directional link between two node interfaces.

    Packets are pulled from the sender's outgoing interface and pushed into
    the receiver's incoming interface, subject to the link MTU.
    """
    ## creates a link between two objects by looking up and linking node interfaces.
    # @param from_node: node from which data will be transfered
    # @param from_intf_num: number of the interface on that node
    # @param to_node: node to which data will be transfered
    # @param to_intf_num: number of the interface on that node
    # @param mtu: link maximum transmission unit
    def __init__(self, from_node, from_intf_num, to_node, to_intf_num, mtu):
        self.from_node = from_node
        self.from_intf_num = from_intf_num
        self.to_node = to_node
        self.to_intf_num = to_intf_num
        self.in_intf = from_node.out_intf_L[from_intf_num]
        self.out_intf = to_node.in_intf_L[to_intf_num]
        # configure the MTUs of linked interfaces
        self.in_intf.mtu = mtu
        self.out_intf.mtu = mtu

    ## called when printing the object
    def __str__(self):
        return 'Link %s-%d to %s-%d' % (self.from_node, self.from_intf_num, self.to_node, self.to_intf_num)

    ## transmit a packet from the 'from' to the 'to' interface
    def tx_pkt(self):
        pkt_S = self.in_intf.get()
        if pkt_S is None:
            return  # return if no packet to transfer
        if len(pkt_S) > self.in_intf.mtu:
            # Bug fix: report the *from* interface's MTU here (the original
            # printed self.out_intf.mtu while comparing against in_intf.mtu).
            print('%s: packet "%s" length greater than the from interface MTU (%d)' % (self, pkt_S, self.in_intf.mtu))
            return  # return without transmitting if packet too big
        if len(pkt_S) > self.out_intf.mtu:
            print('%s: packet "%s" length greater than the to interface MTU (%d)' % (self, pkt_S, self.out_intf.mtu))
            return  # return without transmitting if packet too big
        # otherwise transmit the packet
        try:
            self.out_intf.put(pkt_S)
            print('%s: transmitting packet "%s"' % (self, pkt_S))
        except queue.Full:
            print('%s: packet lost' % (self))
            pass
## An abstraction of the link layer
class LinkLayer:
    """Holds every link in the network and drives packet transfer across them."""

    def __init__(self):
        ## list of links in the network
        self.link_L = []
        ## flag used by run() for thread termination
        self.stop = False

    ## Return a name of the network layer
    def __str__(self):
        return "Network"

    ## add a Link to the network
    def add_link(self, link):
        self.link_L.append(link)

    ## transfer a packet across all links
    def transfer(self):
        for lnk in self.link_L:
            lnk.tx_pkt()

    ## thread target for the network to keep transmitting data across links
    def run(self):
        print (threading.currentThread().getName() + ': Starting')
        while True:
            # transfer one packet on every link, then check for termination —
            # a set stop flag still allows one final transfer pass
            self.transfer()
            if self.stop:
                print (threading.currentThread().getName() + ': Ending')
                return
| [
"noreply@github.com"
] | AlexBauer46.noreply@github.com |
fcff171d2095a1a02ec1b3033c6527903854024e | a844cba1a0cd54c650b640a7a5cbeabb8c2d15a5 | /modules/debugger/modules.py | 952d7b44e0a87252905c2dcc0c446df72cfd9ab7 | [
"MIT"
] | permissive | romain-tracktik/sublime_debugger | de5950d9f79fcfbe0407af4f89e15e91acb035aa | 6ff71182fee427cfc0254a9d47679d7a6d1424f9 | refs/heads/master | 2020-09-13T12:06:54.544461 | 2019-11-16T09:51:55 | 2019-11-16T09:51:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | from ..typecheck import *
from ..import dap
from ..import core
from ..import ui
class Modules:
    """Tracks the modules reported by a debug-adapter session and notifies
    listeners via the on_updated event."""
    def __init__(self):
        self.modules = [] #type: List[dap.Module]
        self.on_updated = core.Event() #type: core.Event[None]
    def on_module_event(self, event: dap.ModuleEvent) -> None:
        if event.reason == dap.ModuleEvent.new:
            self.modules.append(event.module)
            self.on_updated()
            return
        # NOTE(review): the next two branches repeat the `new` comparison and
        # are unreachable — presumably they were meant to test
        # dap.ModuleEvent.changed / .removed; confirm against the dap module.
        if event.reason == dap.ModuleEvent.new:
            # FIXME: NOT IMPLEMENTED
            return
        if event.reason == dap.ModuleEvent.new:
            # FIXME: NOT IMPLEMENTED
            return
    def clear_session_date(self) -> None:
        # Drops all modules and notifies listeners.
        # NOTE(review): name looks like a typo for clear_session_data; kept
        # unchanged because callers elsewhere may rely on it.
        self.modules.clear()
        self.on_updated()
class ModulesView(ui.Block):
    """UI block that renders the module list as a table of module names."""
    def __init__(self, modules: Modules):
        super().__init__()
        self.modules = modules
    def added(self, layout: ui.Layout):
        # Re-render (dirty) whenever the module list changes.
        self.on_updated_handle = self.modules.on_updated.add(self.dirty)
    def removed(self):
        self.on_updated_handle.dispose()
    def render(self) -> ui.Panel.Children:
        items = []
        for module in self.modules.modules:
            items.append(
                ui.block(
                    ui.Label(module.name)
                )
            )
        return [
            ui.Table(items=items)
        ]
| [
"2889367+daveleroy@users.noreply.github.com"
] | 2889367+daveleroy@users.noreply.github.com |
4c61d56834868c5e80a82df074f0e9fbc4e1815a | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/semicolon.py | ed11103b36352b18bd6e69914773b3ce1e715926 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 63 | py | ii = [('KirbWPW2.py', 1), ('BachARE.py', 1), ('HogaGMM.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
852a6baff7fabe6d78e9e363baf24d5b523df030 | eb283f066e2354ebd65cbf113790b0695b8387f7 | /Compilation/TP3/MiniC/TP03/MiniCTypingVisitor.py | 9ad730ecb3bf53aa1fe6544bba38b62b259d67cf | [] | no_license | saadiboune/Cours_Master | feff1c263a200035cf600d6ee9603eb94650d2ec | 83f9981709cabda14439857c6f32b7c69b7342ac | refs/heads/main | 2023-05-31T02:08:22.451321 | 2021-06-07T08:01:28 | 2021-06-07T08:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,127 | py | from MiniCVisitor import MiniCVisitor
from MiniCParser import MiniCParser
from Errors import MiniCInternalError
from enum import Enum
class MiniCTypeError(Exception):
    """Raised when the typing visitor detects an ill-typed MiniC program."""
    pass
class BaseType(Enum):
    # The four MiniC scalar types; the numeric values are arbitrary tags.
    Float, Integer, Boolean, String = range(4)
# Basic Type Checking for MiniC programs.
class MiniCTypingVisitor(MiniCVisitor):
    """ANTLR visitor that type-checks a MiniC parse tree.

    Expression visitors return a BaseType; statement visitors return None.
    Any type error raises MiniCTypeError with source position information.
    """
    def __init__(self):
        self._memorytypes = dict()  # id-> types
        self._current_function = "main"
    def _raise(self, ctx, for_what, *types):
        # "invalid type" diagnostic at the position of ctx.
        raise MiniCTypeError(
            'In function {}: Line {} col {}: invalid type for {}: {}'.format(
                self._current_function,
                ctx.start.line, ctx.start.column, for_what,
                ' and '.join(t.name.lower() for t in types)))
    def _raiseMismatch(self, ctx, for_what, *types):
        # "type mismatch" diagnostic at the position of ctx.
        raise MiniCTypeError(
            'In function {}: Line {} col {}: type mismatch for {}: {}'.format(
                self._current_function,
                ctx.start.line, ctx.start.column, for_what,
                ' and '.join(t.name.lower() for t in types)))
    def _raiseNonType(self, ctx, message):
        # Free-form diagnostic (undeclared/duplicate variables, ...).
        raise MiniCTypeError(
            'In function {}: Line {} col {}: {}'.format(
                self._current_function,
                ctx.start.line, ctx.start.column, message))
    # type declaration
    def visitVarDecl(self, ctx):
        vars_l = self.visit(ctx.id_l())
        tt = self.visit(ctx.typee())
        for name in vars_l:
            if name in self._memorytypes:
                self._raiseNonType(ctx,
                                   "Variable {0} already declared".
                                   format(name))
            self._memorytypes[name] = tt
        return
    def visitBasicType(self, ctx):
        if ctx.mytype.type == MiniCParser.INTTYPE:
            return BaseType.Integer
        elif ctx.mytype.type == MiniCParser.FLOATTYPE:
            return BaseType.Float
        elif ctx.mytype.type == MiniCParser.BOOLTYPE:
            return BaseType.Boolean
        elif ctx.mytype.type == MiniCParser.STRINGTYPE:
            return BaseType.String
        else:
            raise MiniCInternalError("Type not implemented")
    def visitIdList(self, ctx):
        # Recursive id list: returns the accumulated list of identifier names.
        t = self.visit(ctx.id_l())
        t.append(ctx.ID().getText())
        return t
    def visitIdListBase(self, ctx):
        return [ctx.ID().getText()]
    # typing visitors for expressions, statements !
    # visitors for atoms --> value
    def visitParExpr(self, ctx):
        return self.visit(ctx.expr())
    def visitIntAtom(self, ctx):
        return BaseType.Integer
    def visitFloatAtom(self, ctx):
        return BaseType.Float
    def visitBooleanAtom(self, ctx):
        return BaseType.Boolean
    def visitIdAtom(self, ctx):
        try:
            valtype = self._memorytypes[ctx.getText()]
            return valtype
        except KeyError:
            self._raiseNonType(ctx,
                               "Undefined variable {}".format(ctx.getText()))
    def visitStringAtom(self, ctx):
        return BaseType.String
    # now visit expr
    def visitAtomExpr(self, ctx):
        return self.visit(ctx.atom())
    def visitOrExpr(self, ctx):
        lvaltype = self.visit(ctx.expr(0))
        rvaltype = self.visit(ctx.expr(1))
        if (BaseType.Boolean == lvaltype) and (BaseType.Boolean == rvaltype):
            return BaseType.Boolean
        else:
            self._raise(ctx, 'boolean operands', lvaltype, rvaltype)
    def visitAndExpr(self, ctx):
        return self.visitOrExpr(ctx)  # Same typing rules
    def visitEqualityExpr(self, ctx):
        lvaltype = self.visit(ctx.expr(0))
        rvaltype = self.visit(ctx.expr(1))
        if lvaltype != rvaltype:
            self._raiseMismatch(ctx, 'equality operands', lvaltype, rvaltype)
        return BaseType.Boolean
    def visitRelationalExpr(self, ctx):
        lvaltype = self.visit(ctx.expr(0))
        rvaltype = self.visit(ctx.expr(1))
        # NOTE(review): a mismatch here is reported with _raise ("invalid
        # type") rather than _raiseMismatch, unlike visitEqualityExpr —
        # confirm which wording the test suite expects.
        if lvaltype != rvaltype:
            self._raise(ctx, 'relational operands', lvaltype, rvaltype)
        if lvaltype not in (BaseType.Integer, BaseType.Float):
            self._raise(ctx, 'relational operands', lvaltype, rvaltype)
        return BaseType.Boolean
    def visitAdditiveExpr(self, ctx):
        lvaltype = self.visit(ctx.expr(0))
        rvaltype = self.visit(ctx.expr(1))
        if lvaltype != rvaltype:
            self._raise(ctx, 'additive operands', lvaltype, rvaltype)
        if lvaltype not in (BaseType.Integer, BaseType.Float, BaseType.String):
            self._raise(ctx, 'additive operands', lvaltype, rvaltype)
        # Strings only support '+' (concatenation), not '-'.
        if ctx.myop.type != MiniCParser.PLUS and lvaltype == BaseType.String:
            self._raise(ctx, 'additive operands', lvaltype, rvaltype)
        return lvaltype
    def visitMultiplicativeExpr(self, ctx):
        lvaltype = self.visit(ctx.expr(0))
        rvaltype = self.visit(ctx.expr(1))
        if lvaltype != rvaltype:
            self._raise(ctx, 'multiplicative operands', lvaltype, rvaltype)
        if lvaltype not in (BaseType.Integer, BaseType.Float):
            self._raise(ctx, 'multiplicative operands', lvaltype, rvaltype)
        return lvaltype
    def visitNotExpr(self, ctx):
        etype = self.visit(ctx.expr())
        if etype != BaseType.Boolean:
            self._raise(ctx, 'not expression', etype)
        else:
            return BaseType.Boolean
    def visitUnaryMinusExpr(self, ctx):
        etype = self.visit(ctx.expr())
        if etype not in (BaseType.Integer, BaseType.Float):
            self._raise(ctx, 'unary minus operand', etype)
        return etype
    # visit statements
    def visitPrintintStat(self, ctx):
        etype = self.visit(ctx.expr())
        if etype not in (BaseType.Integer, BaseType.Boolean):
            self._raise(ctx, 'println_int statement', etype)
    def visitPrintfloatStat(self, ctx):
        etype = self.visit(ctx.expr())
        if etype != BaseType.Float:
            self._raise(ctx, 'println_float statement', etype)
    def visitPrintstringStat(self, ctx):
        etype = self.visit(ctx.expr())
        if etype != BaseType.String:
            self._raise(ctx, 'println_string statement', etype)
    def visitAssignStat(self, ctx):
        valtype = self.visit(ctx.expr())
        name = ctx.ID().getText()
        if name not in self._memorytypes:
            self._raiseNonType(
                ctx, "Undefined variable "+name)
        if self._memorytypes[name] != valtype:
            self._raiseMismatch(
                ctx, name, self._memorytypes[name], valtype)
    def visitWhileStat(self, ctx):
        condtype = self.visit(ctx.expr())
        if condtype != BaseType.Boolean:
            self._raise(ctx, 'while condition', condtype)
        self.visit(ctx.stat_block())
    def visitIfStat(self, ctx):
        condtype = self.visit(ctx.expr())
        if condtype != BaseType.Boolean:
            self._raise(ctx, 'if condition', condtype)
        self.visit(ctx.then_block)
        if ctx.else_block is not None:
            self.visit(ctx.else_block)
| [
"giraud740@gmail.com"
] | giraud740@gmail.com |
98e405fff7ad9fa147d9ed56eddd076e542a2578 | d5e7a3f489c2f4e95204906cd07e44ef812ddd24 | /Part/湮灭之瞳.py | bac547ea7ac3107ff582ee495d66640d0abf6897 | [] | no_license | VV4yne/DNFCalculating | ee57a1901421c7def6e81a29113dec69adde69c9 | 631992a653029d0c95d23abbdba162cd9ebfa4ee | refs/heads/master | 2022-10-04T13:54:52.668409 | 2020-06-09T09:13:24 | 2020-06-09T09:13:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,195 | py | from PublicReference.base import *
class 湮灭之瞳主动技能(技能):
    """Base class for this job's active skills (damage percentages, CD, TP)."""
    # Only three damage components are modelled: the first hit count defaults
    # to 1, the 2nd/3rd default to 0 and must be set manually per skill.
    # Job subclasses may extend further by inheriting; if they do, the
    # equivalent-percentage method below must also be overridden.
    # For fixed-damage skills note that base/growth taken from the skill
    # panel (divided by independent attack) must be multiplied by 100.
    基础 = 0.0
    成长 = 0.0
    攻击次数 = 1.0
    基础2 = 0.0
    成长2 = 0.0
    攻击次数2 = 0.0
    基础3 = 0.0
    成长3 = 0.0
    攻击次数3 = 0.0
    CD = 0.0
    # added by Will
    CD倍率 = 1.0
    TP成长 = 0.0
    TP上限 = 0
    TP等级 = 0
    是否主动 = 1
    是否有伤害 = 1
    元素之力蓄力数量 = 0
    恢复 = 1.0
    倍率 = 1.0
    被动倍率 = 1.0
    基础释放次数 = 0
    演出时间 = 0
    是否有护石 = 0
    关联技能 = ['无']
    关联技能2 = ['无']
    关联技能3 = ['无']
    关联技能4 = ['无']
    # added by Will
    冷却关联技能 = ['无']
    冷却关联技能2 = ['无']
    冷却关联技能3 = ['无']
    def 等效百分比(self, 武器类型):
        """Equivalent damage percentage: sum of the three hit components
        scaled by the TP bonus and the skill multiplier (0 at level 0).
        NOTE(review): relies on self.等级 defined in the 技能 base class."""
        if self.等级 == 0:
            return 0
        else:
            return int((self.攻击次数 * (self.基础 + self.成长 * self.等级) + self.攻击次数2 * (self.基础2 + self.成长2 * self.等级) + self.攻击次数3 * (
                    self.基础3 + self.成长3 * self.等级)) * (1 + self.TP成长 * self.TP等级) * self.倍率)
    def 等效CD(self, 武器类型):
        """Effective cooldown after the recovery stat; staffs (法杖) pay a
        10% cooldown penalty relative to wands (魔杖)."""
        if 武器类型 == '魔杖':
            return round(self.CD / self.恢复 * 1.0, 1)
        if 武器类型 == '法杖':
            return round(self.CD / self.恢复 * 1.1, 1)
class 湮灭之瞳被动技能(技能):
    """Base class for passive skills: never cast directly, deal no damage,
    and by default buff every other skill (关联技能 = ['所有'])."""
    是否主动 = 0
    是否有伤害 = 0
    元素之力蓄力数量 = 0
    关联技能 = ['所有']
    # added by Will
    关联技能2 = ['无']
    关联技能3 = ['无']
    关联技能4 = ['无']
    冷却关联技能 = ['无']
    冷却关联技能2 = ['无']
    冷却关联技能3 = ['无']
class 湮灭之瞳技能0(湮灭之瞳被动技能):
名称 = '元素循环'
所在等级 = 30
等级上限 = 20
基础等级 = 10
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.00 + 0.02 * self.等级, 5)
class 湮灭之瞳技能1(湮灭之瞳被动技能):
名称 = '元素之力'
所在等级 = 20
等级上限 = 11
基础等级 = 1
关联技能 = ['无']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(0.055+0.014*self.等级,2)
class 湮灭之瞳技能2(湮灭之瞳主动技能):
名称 = '元素环绕'
所在等级 = 25
等级上限 = 20
基础等级 = 10
是否有伤害 = 0
def 属强加成(self):
if self.等级 == 0:
return 0
else:
return (6 + self.等级 * 3)
class 湮灭之瞳技能3(湮灭之瞳被动技能):
名称 = '元素融合'
所在等级 = 15
等级上限 = 11
基础等级 = 1
def 加成倍率(self, 武器类型):
return 1.0
def 属强加成(self):
if self.等级 == 0:
return 0
else:
return (37 + self.等级 * 3)
class 湮灭之瞳技能4(湮灭之瞳被动技能):
名称 = '元素爆发'
所在等级 = 48
等级上限 = 40
基础等级 = 20
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
if self.等级 <= 16:
return round(1.015 + 0.015 * self.等级, 5)
else:
return round(1.255 + 0.020 * (self.等级 - 16), 5)
class 湮灭之瞳技能5(湮灭之瞳被动技能):
名称 = '黑瞳'
所在等级 = 75
等级上限 = 40
基础等级 = 11
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.23 + 0.02 * self.等级, 5)
class 湮灭之瞳技能6(湮灭之瞳被动技能):
名称 = '卓越之力'
所在等级 = 95
等级上限 = 40
基础等级 = 4
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.18 + 0.02 * self.等级, 5)
class 湮灭之瞳技能7(湮灭之瞳被动技能):
名称 = '超卓之心'
所在等级 = 95
等级上限 = 11
基础等级 = 1
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.045 + 0.005 * self.等级, 5)
class 湮灭之瞳技能8(湮灭之瞳被动技能):
名称 = '觉醒之抉择'
所在等级 = 100
等级上限 = 40
基础等级 = 2
关联技能 = ['无']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.10 + 0.05 * self.等级, 5)
class 湮灭之瞳技能9(湮灭之瞳主动技能):
名称 = '元素炮'
所在等级 = 15
等级上限 = 11
基础等级 = 1
基础 = 490
成长 = 10
CD = 4.0
class 湮灭之瞳技能10(湮灭之瞳主动技能):
名称 = '属性变换'
所在等级 = 15
等级上限 = 60
基础等级 = 19
是否有伤害 = 1
是否主动 = 1
基础 = 195
成长 = 58.7
TP成长 = 0.08
TP上限 = 7
关联技能 = ['元素炮','魔球连射']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round((1.95 + 0.587 * self.等级 )* (1+0.08 * self.TP等级), 5)
class 湮灭之瞳技能11(湮灭之瞳主动技能):
名称 = '魔球连射'
所在等级 = 5
等级上限 = 11
基础等级 = 1
基础 = 108
成长 = 2
攻击次数 = 5
CD = 2.4
演出时间 = 1.5
class 湮灭之瞳技能12(湮灭之瞳主动技能):
名称 = '幻魔四重奏'
所在等级 = 50
等级上限 = 40
基础等级 = 12
基础 = 42510
成长 = 12850
CD = 145.0
class 湮灭之瞳技能13(湮灭之瞳主动技能):
名称 = '末日湮灭'
所在等级 = 85
等级上限 = 40
基础等级 = 5
基础 = 95595.6
成长 = 28856.4
CD = 180.0
class 湮灭之瞳技能14(湮灭之瞳主动技能):
名称 = '地炎'
所在等级 = 25
等级上限 = 60
基础等级 = 41
基础 = 1753.702
成长 = 198.297
CD = 4.0
TP成长 = 0.04
TP上限 = 7
演出时间 = 1.8
class 湮灭之瞳技能15(湮灭之瞳主动技能):
名称 = '冰晶坠'
所在等级 = 20
等级上限 = 60
基础等级 = 43
基础 = 2956.143
成长 = 333.857
CD = 6.4
TP成长 = 0.10
TP上限 = 7
演出时间 = 1.5
class 湮灭之瞳技能16(湮灭之瞳主动技能):
名称 = '雷光链'
所在等级 = 30
等级上限 = 60
基础等级 = 38
基础 = 3723.636
成长 = 420.364
CD = 9.6
TP成长 = 0.20
TP上限 = 7
演出时间 = 1.6
class 湮灭之瞳技能17(湮灭之瞳主动技能):
名称 = '暗域扩张'
所在等级 = 30
等级上限 = 60
基础等级 = 38
基础 = 5289.705
成长 = 597.295
CD = 12.0
TP成长 = 0.10
TP上限 = 7
演出时间 = 0.4
class 湮灭之瞳技能18(湮灭之瞳主动技能):
名称 = '冰晶之浴'
所在等级 = 35
等级上限 = 60
基础等级 = 36
基础 = 5459.317
成长 = 616.683
CD = 12.0
TP成长 = 0.0
TP上限 = 1
演出时间 = 4.5
def 等效CD(self, 武器类型):
if self.TP等级 == 0:
if 武器类型 == '魔杖':
return round (0.8 * self.CD * self.CD倍率 / self.恢复, 1)
if 武器类型 == '法杖':
return round (0.8 * 1.1 * self.CD * self.CD倍率 / self.恢复, 1)
else:
if 武器类型 == '魔杖':
return round (0.8 * (self.CD - 3.0) * self.CD倍率 / self.恢复, 1)
if 武器类型 == '法杖':
return round (0.8 * 1.1 * (self.CD - 3.0) * self.CD倍率 / self.恢复, 1)
class 湮灭之瞳技能19(湮灭之瞳主动技能):
名称 = '旋炎破'
所在等级 = 35
等级上限 = 60
基础等级 = 36
基础 = 6199.512
成长 = 700.488
CD = 16.0
TP成长 = 0.10
TP上限 = 7
是否有护石 = 1
演出时间 = 2.0
def 装备护石(self):
self.倍率 *= 1.22
class 湮灭之瞳技能20(湮灭之瞳主动技能):
名称 = '雷光屏障'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 6881.948
成长 = 777.052
CD = 16.0
TP成长 = 0.10
TP上限 = 7
是否有护石 = 1
演出时间 = 1.2
def 装备护石(self):
self.倍率 *= 1.23
class 湮灭之瞳技能21(湮灭之瞳主动技能):
名称 = '黑暗禁域'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 6500.105
成长 = 733.895
CD = 16.0
TP成长 = 0.10
TP上限 = 7
演出时间 = 4.0
class 湮灭之瞳技能22(湮灭之瞳主动技能):
名称 = '元素轰炸'
所在等级 = 45
等级上限 = 60
基础等级 = 31
基础 = 16196.139
成长 = 1833.861
CD = 32
TP成长 = 0.10
TP上限 = 7
是否有护石 = 1
演出时间 = 2.0
def 装备护石(self):
self.倍率 *= 1.23
class 湮灭之瞳技能23(湮灭之瞳主动技能):
名称 = '元素浓缩球'
所在等级 = 60
等级上限 = 40
基础等级 = 23
基础 = 14117.087
成长 = 1593.913
CD = 24
TP成长 = 0.10
TP上限 = 7
是否有护石 = 1
演出时间 = 1.0
def 装备护石(self):
self.倍率 *= 1.26
class 湮灭之瞳技能24(湮灭之瞳主动技能):
名称 = '元素幻灭'
所在等级 = 70
等级上限 = 40
基础等级 = 18
基础 = 22054.889
成长 = 2490.111
CD = 40.0
TP成长 = 0.10
TP上限 = 7
是否有护石 = 1
演出时间 = 1.2
def 装备护石(self):
self.倍率 *= 1.23
class 湮灭之瞳技能25(湮灭之瞳主动技能):
名称 = '元素禁域'
所在等级 = 75
等级上限 = 40
基础等级 = 16
基础 = 36737.1875
成长 = 4147.8125
CD = 32.0
演出时间 = 0.4
class 湮灭之瞳技能26(湮灭之瞳主动技能):
名称 = '聚能魔炮'
所在等级 = 80
等级上限 = 40
基础等级 = 13
基础 = 45659.769
成长 = 5155.231
CD = 36.0
演出时间 = 1.5
# Build the skill table by instantiating 湮灭之瞳技能0, 湮灭之瞳技能1, ...
# until a class name is missing (the NameError from exec ends the loop).
# NOTE(review): the bare except also swallows constructor errors, silently
# truncating the list — consider catching NameError only.
湮灭之瞳技能列表 = []
i = 0
while i >= 0:
    try:
        exec('湮灭之瞳技能列表.append(湮灭之瞳技能' + str(i) + '())')
        i += 1
    except:
        i = -1
# Map each skill name to its index in the list.
湮灭之瞳技能序号 = dict()
for i in range(len(湮灭之瞳技能列表)):
    湮灭之瞳技能序号[湮灭之瞳技能列表[i].名称] = i
# Locate the three awakening skills by their unlock level (50/85/100).
湮灭之瞳一觉序号 = 0
湮灭之瞳二觉序号 = 0
湮灭之瞳三觉序号 = 0
for i in 湮灭之瞳技能列表:
    if i.所在等级 == 50:
        湮灭之瞳一觉序号 = 湮灭之瞳技能序号[i.名称]
    if i.所在等级 == 85:
        湮灭之瞳二觉序号 = 湮灭之瞳技能序号[i.名称]
    if i.所在等级 == 100:
        湮灭之瞳三觉序号 = 湮灭之瞳技能序号[i.名称]
# Talisman choices: damaging skills that have a talisman (护石) variant.
湮灭之瞳护石选项 = ['无']
for i in 湮灭之瞳技能列表:
    if i.是否有伤害 == 1 and i.是否有护石 == 1:
        湮灭之瞳护石选项.append(i.名称)
# Rune choices: damaging skills between level 20 and 80, excluding level 50.
湮灭之瞳符文选项 = ['无']
for i in 湮灭之瞳技能列表:
    if i.所在等级 >= 20 and i.所在等级 <= 80 and i.所在等级 != 50 and i.是否有伤害 == 1:
        湮灭之瞳符文选项.append(i.名称)
class 湮灭之瞳角色属性(角色属性):
职业名称 = '湮灭之瞳'
武器选项 = ['魔杖', '法杖']
# '物理百分比','魔法百分比','物理固伤','魔法固伤'
伤害类型选择 = ['魔法百分比']
# 默认
伤害类型 = '魔法百分比'
防具类型 = '布甲'
防具精通属性 = ['智力']
主BUFF = 2.07
# 基础属性(含唤醒)
基础力量 = 774
基础智力 = 976
# 适用系统奶加成
力量 = 基础力量
智力 = 基础智力
# 人物基础 + 唤醒
物理攻击力 = 65.0
魔法攻击力 = 65.0
独立攻击力 = 1045.0
火属性强化 = 13
冰属性强化 = 13
光属性强化 = 13
暗属性强化 = 13
def __init__(self):
self.技能栏 = copy.deepcopy(湮灭之瞳技能列表)
self.技能序号 = copy.deepcopy(湮灭之瞳技能序号)
def 属性强化加成(self):
属性强化值 = 0
for i in self.技能栏:
if i.名称 != '元素环绕':
属性强化值 += 0
else:
属性强化值 += i.属强加成()
return (属性强化值)
def 伤害指数计算(self):
self.冰属性强化 += self.技能栏[self.技能序号['元素环绕']].属强加成()
self.光属性强化 += self.技能栏[self.技能序号['元素环绕']].属强加成()
self.火属性强化 += self.技能栏[self.技能序号['元素环绕']].属强加成()
self.暗属性强化 += self.技能栏[self.技能序号['元素环绕']].属强加成()
self.冰属性强化 += self.技能栏[self.技能序号['元素融合']].属强加成()
self.光属性强化 += self.技能栏[self.技能序号['元素融合']].属强加成()
self.火属性强化 += self.技能栏[self.技能序号['元素融合']].属强加成()
self.暗属性强化 += self.技能栏[self.技能序号['元素融合']].属强加成()
基准倍率 = 1.5 * self.主BUFF * (1 - 443215 / (443215 + 20000))
面板 = (self.面板智力()/250+1) * (self.魔法攻击力 + self.进图魔法攻击力) * (1 + self.百分比三攻)
属性倍率=1.05+0.0045*max(self.火属性强化,self.冰属性强化,self.光属性强化,self.暗属性强化)
增伤倍率=1+self.伤害增加
增伤倍率*=1+self.暴击伤害
增伤倍率*=1+self.最终伤害
增伤倍率*=self.技能攻击力
增伤倍率*=1+self.持续伤害*(1-0.1*self.持续伤害计算比例)
增伤倍率*=1+self.附加伤害+self.属性附加*属性倍率
self.伤害指数=面板*属性倍率*增伤倍率*基准倍率/100
def 被动倍率计算(self):
for i in self.技能栏:
if i.关联技能 != ['无']:
if i.关联技能 == ['所有']:
for j in self.技能栏:
if j.是否有伤害 == 1:
j.被动倍率 *= i.加成倍率(self.武器类型)
else :
for k in i.关联技能:
self.技能栏[self.技能序号[k]].被动倍率 *= i.加成倍率(self.武器类型)
# Will添加
if i.关联技能2 != ['无']:
if i.关联技能2 == ['所有']:
for j in self.技能栏:
if j.是否有伤害 == 1:
j.被动倍率 *= i.加成倍率2(self.武器类型)
else :
for k in i.关联技能2:
self.技能栏[self.技能序号[k]].被动倍率 *= i.加成倍率2(self.武器类型)
# Will添加
if i.关联技能3 != ['无']:
if i.关联技能3 == ['所有']:
for j in self.技能栏:
if j.是否有伤害 == 1:
j.被动倍率 *= i.加成倍率3(self.武器类型)
else :
for k in i.关联技能3:
self.技能栏[self.技能序号[k]].被动倍率 *= i.加成倍率3(self.武器类型)
def 伤害计算(self, x=0):
self.所有属性强化(self.进图属强)
# Will添加
self.CD倍率计算()
self.加算冷却计算()
self.被动倍率计算()
self.伤害指数计算()
技能释放次数 = []
技能单次伤害 = []
技能总伤害 = []
# 技能释放次数计算
for i in self.技能栏:
if i.是否有伤害 == 1:
if self.次数输入[self.技能序号[i.名称]] == '/CD':
技能释放次数.append(int((self.时间输入 - i.演出时间) / i.等效CD(self.武器类型) + 1 + i.基础释放次数))
else:
技能释放次数.append(int(self.次数输入[self.技能序号[i.名称]]) + i.基础释放次数)
else:
技能释放次数.append(0)
for i in self.技能栏:
if i.关联技能4 != ['无']:
for j in i.关联技能4:
i.元素之力蓄力数量 += 技能释放次数[self.技能序号[j]]
# 技能单次伤害计算
for i in self.技能栏:
if i.是否主动 == 1 and i.名称 != '元素炮' :
技能单次伤害.append(i.等效百分比(self.武器类型) * self.伤害指数 * i.被动倍率)
elif i.名称 == '元素炮':
技能单次伤害.append(i.等效百分比(self.武器类型) * self.伤害指数 * i.被动倍率*
self.技能栏[self.技能序号['元素循环']].加成倍率(self.武器类型)*
self.技能栏[self.技能序号['超卓之心']].加成倍率(self.武器类型)*
self.技能栏[self.技能序号['卓越之力']].加成倍率(self.武器类型)*
(1.0 + self.技能栏[self.技能序号['元素之力']].加成倍率(self.武器类型)*5))
else:
技能单次伤害.append(0)
# 单技能伤害合计
for i in self.技能栏:
if i.是否主动 == 1 and 技能释放次数[self.技能序号[i.名称]] != 0:
技能总伤害.append(技能单次伤害[self.技能序号[i.名称]] * 技能释放次数[self.技能序号[i.名称]] * (
1 + self.白兔子技能 * 0.20 + self.年宠技能 * 0.10 * self.宠物次数[self.技能序号[i.名称]] / 技能释放次数[
self.技能序号[i.名称]] + self.斗神之吼秘药 * 0.12))
else:
技能总伤害.append(0)
总伤害 = 0
for i in self.技能栏:
总伤害 += 技能总伤害[self.技能序号[i.名称]]
if x == 0:
return 总伤害
if x == 1:
详细数据 = []
for i in range(0, len(self.技能栏)):
详细数据.append(技能释放次数[i])
详细数据.append(技能总伤害[i])
if 技能释放次数[i] != 0:
详细数据.append(技能总伤害[i] / 技能释放次数[i])
else:
详细数据.append(0)
if 总伤害 != 0:
详细数据.append(技能总伤害[i] / 总伤害 * 100)
else:
详细数据.append(0)
return 详细数据
class 湮灭之瞳(角色窗口):
def 窗口属性输入(self):
self.初始属性 = 湮灭之瞳角色属性()
self.角色属性A = 湮灭之瞳角色属性()
self.角色属性B = 湮灭之瞳角色属性()
self.一觉序号 = 湮灭之瞳一觉序号
self.二觉序号 = 湮灭之瞳二觉序号
self.三觉序号 = 湮灭之瞳三觉序号
self.护石选项 = copy.deepcopy(湮灭之瞳护石选项)
self.符文选项 = copy.deepcopy(湮灭之瞳符文选项)
| [
"wxh_email@yeah.net"
] | wxh_email@yeah.net |
a10d282f4257960d5550150c8ec7340eaa01d943 | b165f9573d1b534a046344b1e933c692b9983bdd | /platform/software/test/IntraProcessJob_SingleEdge.py | 8d55d0f744999f56aa872398b4dcf970f13a3679 | [
"Apache-2.0"
] | permissive | oika/connect | 18a4aa3dccf702f0e90717a864564d225b9a85e7 | 2486b97256d7adcd130f90d5c3e665d90ef1a39d | refs/heads/master | 2020-03-22T09:08:13.614323 | 2018-07-10T03:03:54 | 2018-07-10T03:03:54 | 139,817,722 | 0 | 0 | Apache-2.0 | 2018-07-05T08:18:05 | 2018-07-05T08:18:05 | null | UTF-8 | Python | false | false | 721 | py | # -*- coding: utf-8 -*-
from OperatorInterface import OperatorInterface
from BaseOperator import BaseOperator
from JobInterface import JobInterface
from BaseJob import BaseJob
class OperatorA(OperatorInterface, BaseOperator):
    # Minimal no-op operator used only to exercise dataflow wiring in this
    # single-edge test; every lifecycle hook is intentionally a no-op.
    def prepare(self):
        pass
    def run(self):
        pass
    def pause(self):
        pass
    def cancel(self):
        pass
class UserJob(JobInterface, BaseJob):
    """Test job wiring two OperatorA instances along a single edge."""

    def define_dataflow(self):
        """Build a two-node graph: operator '1' port 0 feeds '2' port 0."""
        source = OperatorA('1')
        sink = OperatorA('2')
        self.df.add_node(source)
        self.df.add_node(sink)
        self.df.add_edge(source, 0, sink, 0)
        # Both operators share one thread, placed on CPU of server 'sv0'.
        thread_group = self.create_thread_local_group(source, sink)
        self.create_device_local_group('sv0', 'CPU', thread_group)
| [
"36325622+rasshai@users.noreply.github.com"
] | 36325622+rasshai@users.noreply.github.com |
44faaac61986ba8141ba558cb7832f673fd17880 | 856af6304cf4fc87df513a3b1d7ce65dc3d626e1 | /FuzzySearch.py | 8fb762de3cf06f59c85b94c4d96f5532947bf244 | [] | no_license | laxman1129/smart-chef-service | 721c08aa2a012f23238ceb2ce153c01b0f581987 | 02bf348a5bd17d7e7d05665a28c790e0e3d703c5 | refs/heads/master | 2020-09-07T00:38:48.768256 | 2019-11-13T16:36:42 | 2019-11-13T16:36:42 | 220,604,797 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | import pandas as pd
import spacy
from fuzzywuzzy import fuzz
import re
from nltk.corpus import stopwords
import collections
import pickle
import os.path
# Load the meals DataFrame: prefer the pickled cache, otherwise parse the
# raw JSON dump and write the cache for the next start-up.
# NOTE(review): pickle.load on a cache file is only safe while the cache is
# written exclusively by this script; never point it at untrusted data.
if os.path.exists('data/datafile.pickle'):
    infile = open('data/datafile.pickle', 'rb')
    data = pickle.load(infile)
    infile.close()
    print('read')
else:
    data = pd.read_json('data/all_meals.json')
    data = data.transpose()
    outfile = open('data/datafile.pickle', 'wb')
    pickle.dump(data, outfile)
    outfile.close()
    print('write')
# spaCy pipeline used by term_tokenizer() for lemmatisation.
parser = spacy.load("en_core_web_sm")
# Column views of the meals table; text columns replace NaN with ''.
indices = data['id']
titles = data['title'].fillna('')
ingredients = data['ingredients'][:].fillna('')
category = data['category'].fillna('')
area = data['area'].fillna('')
tags = data['tags'].fillna('')
# One keyword string per meal; filled by populate_keywords() below.
keywords = []
def populate_keywords():
    """Build one space-joined keyword string per recipe into *keywords*.

    Each entry concatenates the title, ingredients, category, area and
    tags of one recipe, matching the row order of the source columns.
    """
    # Materialise each pandas Series once; the original rebuilt five
    # list(...) copies on every loop iteration (O(n^2) overall).
    title_list = list(titles)
    ingredient_list = list(ingredients)
    category_list = list(category)
    area_list = list(area)
    tag_list = list(tags)
    for i in range(len(title_list)):
        tokens = (title_list[i] + ' ' + ' '.join(ingredient_list[i]) + ' ' +
                  category_list[i] + ' ' + area_list[i] + ' ' + tag_list[i])
        keywords.append(tokens)
# Build the keyword corpus once at import time.
populate_keywords()
def pre_process(text):
    """Strip digits/punctuation and English stop words from *text*.

    Returns '' for falsy input; otherwise a single space-joined string of
    the remaining words.
    """
    if not text:
        return ''
    # text = text.lower()
    # remove special characters and digits
    text = re.sub("(\\d|\\W)+", " ", text)
    # Hoisted: stopwords.words('english') used to be re-evaluated for every
    # single word, reloading the corpus list each time; a set also makes
    # each membership test O(1).
    stop_words = set(stopwords.words('english'))
    filtered_words = [word for word in text.split() if word not in stop_words]
    return ' '.join(filtered_words).strip()
def term_tokenizer(terms):
    # Clean the raw keyword string, run it through the spaCy pipeline and
    # lemmatise each token, returning one lower-cased space-joined string.
    terms = pre_process(terms)
    terms = parser(terms)
    terms = [word.lemma_.lower().strip() for word in terms]
    return ' '.join(terms)
# Module-level buckets: fuzzy-match score -> indices into the searched term
# list. Written by get_ratio() and read by get_closest_match().
score_index_dict = collections.defaultdict(list)
def get_ratio(search, terms):
    """Score every term against *search* and bucket its index by score.

    Results accumulate into the module-level score_index_dict, keyed by
    the fuzzy token-set ratio (0-100).
    """
    # enumerate() yields the true position directly; the previous
    # terms.index(item) lookup was O(n) per item and returned the first
    # occurrence for duplicated keyword strings.
    for position, item in enumerate(terms):
        ratio = fuzz.token_set_ratio(search, term_tokenizer(item))
        score_index_dict[ratio].append(position)
def get_closest_match(search, terms=keywords, count=10):
    """Return indices of the terms that fuzzy-match *search* best.

    Scores every term, then gathers indices from the highest score bucket
    downwards until more than *count* indices are collected; the result is
    truncated to count + 1 entries (the original behaviour).
    """
    # Reset the module-level buckets: previous calls used to leak their
    # scores into every later search.
    score_index_dict.clear()
    get_ratio(search, terms)
    sorted_keys = sorted(score_index_dict.keys(), reverse=True)
    search_indices = []
    i = 0
    # Also stop when the buckets are exhausted (len(terms) <= count used to
    # raise IndexError here).
    while len(search_indices) <= count and i < len(sorted_keys):
        search_indices.extend(score_index_dict[sorted_keys[i]])
        i += 1
    return search_indices[0:count + 1]
def get_titles(items):
    """Map keyword-list indices back to their recipe titles."""
    title_list = list(titles)  # materialise the Series once, not per index
    return [title_list[x] for x in items]
def get_id(items):
    """Map recipe titles back to their meal ids.

    NOTE(review): assumes titles are unique; .index() returns the first
    match for a duplicated title.
    """
    # Materialise both Series once instead of twice per item.
    id_list = list(indices)
    title_list = list(titles)
    return [id_list[title_list.index(x)] for x in items]
| [
"laxman1129@gmail.com"
] | laxman1129@gmail.com |
02ceeabd8f4a616fd11376b96a22c5c3cac3da1b | 93f0664db6ebf1e4806a62c1b750372d82c7e72c | /Problem14.py | b877bc1cb0e544ed69c8d8f681e7a0966ba4e438 | [] | no_license | amoghkapalli/ProjectEuler | 0b3b5eafc4609f1a083f5db83d024ee5bcf0a3e6 | 282ec6270e6be270bb22f73217bee0bb4a9fcd77 | refs/heads/master | 2022-04-27T13:46:54.133751 | 2020-05-04T21:01:40 | 2020-05-04T21:01:40 | 258,296,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | '''The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?'''
import time
# Wall-clock start used to report total runtime at the end of the script.
start_time = time.time()
def collatz_num_gen(n):
    """Return [chain_length, n] for the Collatz sequence starting at n.

    The chain length counts every term from n down to 1 inclusive, using
    the rules n -> n/2 (even) and n -> 3n + 1 (odd).
    """
    chainNumber = 1
    changing_n = n
    while changing_n != 1:
        if changing_n % 2 == 0:
            # Integer floor division: the original used '/', silently
            # turning the working value into a float in Python 3.
            changing_n //= 2
        else:
            changing_n = 3 * changing_n + 1
        chainNumber += 1
    return [chainNumber, n]
# Compute [chain_length, start] for every starting number below one million.
list_of_collatz = []
for i in range(2, 1000000):
    list_of_collatz.append(collatz_num_gen(i))
# Lists compare element-wise, so sorting descending puts the longest chain
# first (ties broken by the larger starting number).
sortedList = sorted(list_of_collatz, reverse=True)
print(sortedList[:1])
print("--- %s seconds ---" % (time.time() - start_time))
"noreply@github.com"
] | amoghkapalli.noreply@github.com |
8115c129a9e9022ff6720b4ed9034694cf96de0c | ab342e0231bd376154937711610fe49bea9d7a51 | /sigpy/mri/rf/linop.py | bb8b71d6141161a7b427e952eab53411ff5f490c | [
"BSD-3-Clause"
] | permissive | Phillistan16/sigpy | cfcf3c053b4d7c82e8841e164aba81632a8215f6 | 4f83c7b7d4560f9f6fc169de301011f541a9be68 | refs/heads/master | 2022-11-26T19:37:39.878810 | 2020-07-17T00:13:55 | 2020-07-17T00:13:55 | 279,999,088 | 0 | 0 | BSD-3-Clause | 2020-07-15T22:58:38 | 2020-07-15T22:58:38 | null | UTF-8 | Python | false | false | 4,563 | py | # -*- coding: utf-8 -*-
"""MRI pulse-design-specific linear operators.
"""
import sigpy as sp
from sigpy import backend
def PtxSpatialExplicit(sens, coord, dt, img_shape, b0=None, ret_array=False):
    """Explicit spatial-domain pulse design linear operator.

    Builds the dense small-tip system matrix relating RF samples to the
    resulting magnetization (equivalent matrix dimensions [Ns, Nt*nc]) and
    wraps it in a SigPy linop with [nc, nt] input and image-shaped output.

    Args:
        sens (array): sensitivity maps. [nc dim dim] (or [nc dim dim dim]
            for a 3D design).
        coord (None or array): excitation k-space coordinates. [nt 2]
            (or [nt 3] in 3D).
        dt (float): hardware sampling dwell time (s).
        img_shape (None or tuple): image shape; a third entry switches the
            construction to 3D.
        b0 (array): B0 inhomogeneity map; adds off-resonance phase accrual.
        ret_array (bool): if true, return the explicit matrix instead of
            the linop.

    Returns:
        SigPy linop with A.repr_str 'pTx spatial explicit', or the raw
        system matrix when 'ret_array' is set.

    References:
        Grissom, W., Yip, C., Zhang, Z., Stenger, V. A., Fessler, J. A.
        & Noll, D. C. (2006). Spatial Domain Method for the Design of RF
        Pulses in Multicoil Parallel Excitation. Magnetic Resonance in
        Medicine, 56, 620-629.
    """
    three_d = False
    if len(img_shape) >= 3:
        three_d = True
    # Run on whatever device (CPU/GPU) currently holds the sensitivities.
    device = backend.get_device(sens)
    xp = device.xp
    with device:
        nc = sens.shape[0]
        dur = dt * coord.shape[0]  # duration of pulse, in s
        # create time vector
        t = xp.expand_dims(xp.linspace(0, dur, coord.shape[0]), axis=1)
        # Spatial grids in row-major order: x runs L to R, y runs T to B.
        x_ = xp.linspace(-img_shape[0] / 2,
                         img_shape[0] - img_shape[0] / 2, img_shape[0])
        y_ = xp.linspace(img_shape[1] / 2,
                         -(img_shape[1] - img_shape[1] / 2), img_shape[1])
        if three_d:
            z_ = xp.linspace(-img_shape[2] / 2,
                             img_shape[2] - img_shape[2] / 2, img_shape[2])
            x, y, z = xp.meshgrid(x_, y_, z_, indexing='ij')
        else:
            x, y = xp.meshgrid(x_, y_, indexing='ij')
        # create explicit Ns * Nt system matrix, for 3d or 2d problem;
        # with b0 given, an off-resonance phase term exp(i*2*pi*b0*(t-dur))
        # is folded into each matrix entry.
        if three_d:
            if b0 is None:
                AExplicit = xp.exp(1j * (xp.outer(x.flatten(), coord[:, 0]) +
                                         xp.outer(y.flatten(), coord[:, 1]) +
                                         xp.outer(z.flatten(), coord[:, 2])))
            else:
                AExplicit = xp.exp(1j * 2 * xp.pi * xp.transpose(b0.flatten()
                                                                 * (t - dur)) +
                                   1j * (xp.outer(x.flatten(), coord[:, 0])
                                         + xp.outer(y.flatten(), coord[:, 1])
                                         + xp.outer(z.flatten(), coord[:, 2])))
        else:
            if b0 is None:
                AExplicit = xp.exp(1j * (xp.outer(x.flatten(), coord[:, 0]) +
                                         xp.outer(y.flatten(), coord[:, 1])))
            else:
                AExplicit = xp.exp(1j * 2 * xp.pi * xp.transpose(b0.flatten()
                                                                 * (t - dur)) +
                                   1j * (xp.outer(x.flatten(), coord[:, 0])
                                         + xp.outer(y.flatten(),
                                                    coord[:, 1])))
        # add sensitivities to system matrix: stack one weighted copy of
        # AExplicit per coil along the column axis. The initial xp.empty
        # block is a placeholder that is stripped off below.
        AFullExplicit = xp.empty(AExplicit.shape)
        for ii in range(nc):
            if three_d:
                tmp = xp.squeeze(sens[ii, :, :, :]).flatten()
            else:
                tmp = sens[ii, :, :].flatten()
            D = xp.transpose(xp.tile(tmp, [coord.shape[0], 1]))
            AFullExplicit = xp.concatenate((AFullExplicit, D * AExplicit),
                                           axis=1)
        # remove 1st empty AExplicit entries
        AFullExplicit = AFullExplicit[:, coord.shape[0]:]
        A = sp.linop.MatMul((coord.shape[0] * nc, 1), AFullExplicit)
        # Finally, adjustment of input/output dimensions to be consistent with
        # the existing Sense linop operator. [nc x nt] in, [dim x dim] out
        Ro = sp.linop.Reshape(ishape=A.oshape, oshape=sens.shape[1:])
        Ri = sp.linop.Reshape(ishape=(nc, coord.shape[0]),
                              oshape=(coord.shape[0] * nc, 1))
        A = Ro * A * Ri
        A.repr_str = 'pTx spatial explicit'
    # output a sigpy linop or a numpy array
    if ret_array:
        return A.linops[1].mat
    else:
        return A
| [
"jon.bach.martin@gmail.com"
] | jon.bach.martin@gmail.com |
aa8b0268bfacab3e0387e1bfd6e62f11f92979a2 | 7626938b9b6eb1f899b27fce05fb3de0b34a5291 | /list_operations.py | 8e5e3c5fbb0648c6f0bcb54e892df5c0f85bb739 | [] | no_license | kingmohanreddy/TYR | 8757b92bdde3bdc73e765291889f1e78c7ed8d8d | 591823c5c10a11cca9a50465881e4f97224ab920 | refs/heads/master | 2022-11-15T12:38:48.368912 | 2020-07-11T07:22:56 | 2020-07-11T07:22:56 | 278,798,457 | 0 | 0 | null | 2020-07-11T07:22:57 | 2020-07-11T05:54:49 | Python | UTF-8 | Python | false | false | 374 | py | #creating list operations
# NOTE(review): the whole demo below is duplicated verbatim a second time.
lst = [2,5,6,7,8,25,64]
#printing lst
print(lst)
#using append
lst.append(56)
print(lst)
#using copy -- the copied list is discarded; copy() does not mutate lst
lst.copy()
print(lst)
#using clear -- empties the list in place
lst.clear()
print(lst)
#creating list operations (duplicate of the block above)
lst = [2,5,6,7,8,25,64]
#printing lst
print(lst)
#using append
lst.append(56)
print(lst)
#using copy
lst.copy()
print(lst)
#using clear
lst.clear()
print(lst)
| [
"kingmohanreddy143@gmail.com"
] | kingmohanreddy143@gmail.com |
b85ee7a2f13106e325a6f6c1b01173397ffd6858 | 88e410b530418e906afecf3d643637f097218d2c | /scripts/lists.py | dec9a2c894d305fa9112a7de0b1d3203fb3a8a81 | [] | no_license | felsewhere1/hello_world | f778efe8f0180a9ea3684f6ab6627ebb0bec90bc | a3efd8c95a726e5b14267ef4392f89f79b091a73 | refs/heads/master | 2020-04-16T05:40:53.244642 | 2019-01-26T15:11:22 | 2019-01-26T15:11:22 | 165,315,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | colors = ["red", "green", "blue", "yello"]
# Basic indexing: first element, last element, whole list.
print(colors[0])
print(colors[-1])
print(colors)
for i in colors:
    print(i)
# Fix the "yello" typo by assigning through an index.
colors[3] = "yellow"
for i in colors:
    print(i)
# Slicing demos: [1:2] one-item slice, [1:] tail, [:4] head.
print(colors[1:2])
print(colors[1:])
print(colors[:4])
colors.reverse() #in-place; does not return the list!
print(colors)
colors.sort()
print(colors)
# Leap-year rule: divisible by 4 but not 100, or divisible by 400.
leapyear = []
for year in range (1900, 1940):
    if (year % 4 == 0 and year % 100 !=0 ) or (year % 400 == 0):
        leapyear.append(year)
print(leapyear)
#list comprehension - expression and loop
leapyear2 = [x for x in range(1900, 1940)]
print (leapyear2)
#list comprehension - expression and loop with condition
leapyear2 = [x for x in range(1900, 1940) if (x % 4 == 0 and x % 100 !=0 ) or (x % 400 == 0)]
print (leapyear2)
| [
"felsewhere1@gmail.com"
] | felsewhere1@gmail.com |
30423417308c1cea6ca75732557706ea8b6ed083 | 331ec8de30dbeff12ce02df3128fcdac4c58cdae | /passwordGenerator.py | 427299202973e7a549198d35ad545083e7c78a7d | [] | no_license | Dylan-Morrissey/Python | f9ce8eda28e56fc0c6fc68321dad97dab236e810 | 58c306bc0bfe39566b87cc9c575dde3ebd0e0bc0 | refs/heads/master | 2022-02-12T23:10:15.369208 | 2022-01-29T22:57:16 | 2022-01-29T22:57:16 | 173,100,773 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | # Script Name : passwordGenerator.py
# Author : Dylan Morrissey
# Created : 11th March 2019
# Description : Script which is used to randomly generate a password.
import random
# NOTE: Python 2 script (print statements, raw_input); runs only under py2.
# NOTE(review): `random` is not cryptographically secure; a py3 rewrite
# should draw password characters from the `secrets` module instead.
def passwordGen(passlen, option):
    # Build a random password of *passlen* characters drawn from the
    # classes selected by *option* (1=upper, 2=lower, 3=upper+lower,
    # 4=+digits, 5=+specials), then print it and wait for a key press.
    password = ''
    pwchars = [['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'],
               ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'],
               ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ],
               ['!', '"', '#', '$', '&', '%', "'", ')', '(', '*', '+', ',', '-', '.', '/', ';', ':', '<', '=', '>', '?', '@', '[', ']', '{', '}', '_', '`', '|', '\\', '^', '~']]
    for i in range(passlen):
        # Each iteration first picks an allowed character class at random,
        # then a character from that class.
        if option == 1:
            ran = random.choice(pwchars[:-3])
            ranchar = random.choice(ran)
            password = password + ranchar
        elif option == 2:
            ran = random.choice(pwchars[1:-2])
            ranchar = random.choice(ran)
            password = password + ranchar
        elif option == 3:
            ran = random.choice(pwchars[:-2])
            ranchar = random.choice(ran)
            password = password + ranchar
        elif option == 4:
            ran = random.choice(pwchars[:-1])
            ranchar = random.choice(ran)
            password = password + ranchar
        elif option == 5:
            ran = random.choice(pwchars)
            ranchar = random.choice(ran)
            password = password + ranchar
    print "Your password is: %s\n" % password
    raw_input("Press any key to continue.")
print '-' * 50
print "Welcome to the password generator."
print '-' * 50
def menu():
    # Interactive loop implemented via recursion: each round calls menu()
    # again. NOTE(review): unbounded recursion eventually hits the
    # interpreter recursion limit on very long sessions.
    try:
        print "1) Generate a password with only uppercase characters.\n2) Generate a password with only lowercase characters.\n3) Generate a password with uppercase and lowercase characters.\n4) Generate a password with uppercase, lowercase and numbers.\n5) Generate a password with uppercase, lowercase, numbers and special characters."
        option = int(raw_input("Please select one of the options: "))
        passlen = int(raw_input("Please enter how long you want the password to be? 12-24 : "))
        passwordGen(passlen, option)
        menu()
    except (ValueError, IndexError):
        # Fall back to the strongest settings when input parsing fails.
        print "Error with input generating password of lenght 24 with upper lower number and special characters!"
        passlen = 24
        option = 5
        passwordGen(passlen, option)
        menu()
menu()
| [
"noreply@github.com"
] | Dylan-Morrissey.noreply@github.com |
551b428503874c903e41834e1c62952b6faaeea5 | 8baa6d8e35a17f331345d9f314cdb2787653d38a | /src/exojax/spec/exomol.py | b13173a276fb13d010cf9c32fe7d85bedb6157c2 | [
"MIT"
] | permissive | bmorris3/exojax | 2fb1dae486a1d4d7a91ee8e9fdd1c9e616fb1b3f | 67d1b6c868d69892d4bbf9e620ed05e432cfe61f | refs/heads/master | 2023-09-04T20:12:32.817699 | 2021-06-12T06:14:00 | 2021-06-12T06:14:00 | 379,588,979 | 0 | 0 | MIT | 2021-06-23T12:03:57 | 2021-06-23T12:03:57 | null | UTF-8 | Python | false | false | 1,356 | py | import numpy as np
def Sij0(A, g, nu_lines, elower, QTref):
    """Reference line strength S0 at Tref = 296 K.

    Args:
        A: Einstein coefficient (s-1)
        g: statistical weight of the upper state
        nu_lines: line-center wavenumber (cm-1)
        elower: lower-state energy (cm-1)
        QTref: partition function evaluated at Tref

    Returns:
        Line strength at the reference temperature (cm)
    """
    ccgs = 29979245800.0         # speed of light in cgs (cm/s)
    hcperk = 1.4387773538277202  # hc/kB in cgs
    Tref = 296.0
    boltzmann = np.exp(-hcperk * elower / Tref)
    stimulated = np.expm1(-hcperk * nu_lines / Tref)
    denom = 8.0 * np.pi * ccgs * nu_lines ** 2 * QTref
    return -A * g * boltzmann * stimulated / denom
def gamma_exomol(P, T, n_air, alpha_ref):
    """Pressure-broadening half-width.

    Args:
        P: pressure (bar)
        T: temperature (K)
        n_air: temperature exponent of the air-broadened half-width
        alpha_ref: broadening parameter at the reference temperature

    Returns:
        pressure-broadening gamma factor (cm-1)
    """
    Tref = 296.0  # reference temperature (K)
    return alpha_ref * P * (Tref / T) ** n_air
def gamma_natural(A):
    """Natural (radiative) line width.

    Uses 1/(4 pi c) = 2.6544188e-12 (cm-1 s).

    Args:
        A: Einstein A-factor (1/s)

    Returns:
        natural width (cm-1)
    """
    inv_four_pi_c = 2.6544188e-12
    return inv_four_pi_c * A
| [
"divrot@gmail.com"
] | divrot@gmail.com |
59e12231b41913126b4620f9bbcd71ae543fabf5 | 8779349e77ff0dacbd48d297f8a3f0a164e18ba4 | /user.py | 840cf55c32af5ee95848c78fc299f4838e510e2b | [] | no_license | YashDRaja/LiveTextly | 67c247ac15dc186b40354eb711726b394b17e39b | 99c6b106aeb99e7b0ee297137e6d518b560bb0c5 | refs/heads/master | 2023-04-20T11:22:05.967899 | 2021-05-11T19:39:13 | 2021-05-11T19:39:13 | 230,683,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | class User:
    def __init__(self, id):
        self.id = id                          # unique identifier of this user
        self.password = None                  # unused placeholder credential
        self.receiver = None                  # id of the current chat partner
        self.received = (None,None)           # pending inbound (text, sender)
        self.history = []                     # inbound messages already delivered
        self.message = (None,self.receiver)   # outbox slot: (text, receiver)
        self.sent = []                        # outbound messages already flushed
def receive(self):
if self.message[0] != None:
self.sent.append(self.message)
self.message = (None,self.receiver)
if self.received[0] != None:
message = self.received
self.history.append(self.received)
self.received = (None,None)
return message
    def send(self,message):
        # Queue *message* for the current receiver; it is flushed to *sent*
        # on the next receive() call.
        self.message = (message,self.receiver)
    def change(self,id):
        # Switch the chat partner; subsequent sends target the new id.
        self.receiver = id
| [
"56655681+YashDRaja@users.noreply.github.com"
] | 56655681+YashDRaja@users.noreply.github.com |
28c3cb75bdc891a7ed08a26cd380d35ccdfe997b | 3b6fd1757e3f382d7adaa0d0d0d8a371dbfe7b26 | /Reconhecimento/app/Controller/Registro.py | 94c4c50ac4ce8a2ed57855e94949d2b1bf722c36 | [] | no_license | viniciusleal34/Api_Chamada | 15d0e786d33db465ff87ce45ea9f938b7daf5add | 174c5b9bc49f38e99d01797d45e9092a1233fd5e | refs/heads/master | 2021-04-23T03:40:50.394220 | 2020-03-31T09:26:34 | 2020-03-31T09:26:34 | 249,895,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py |
from Reconhecimento.app.database import db
class Registro:
    def __init__(self,codigo, date, hour, minute):
        # Raw attendance fields; salvar() coerces date to str and
        # hour/minute to int when inserting into MongoDB.
        self.codigo = codigo
        self.date = date
        self.hour = hour
        self.minute = minute
def salvar(self):
registro = db.registro
registro.insert_one({
'codigo': self.codigo,
'date': str(self.date),
'hour': int(self.hour),
'minute': int(self.minute),
}) | [
"vinicius.nascimento19@fatec.sp.gov.br"
] | vinicius.nascimento19@fatec.sp.gov.br |
1ec7f3f8cafa6a7767d5f64a891aad1645c75fe8 | 4947a81db1d815cf4f442bace643968de94e5afc | /grayscaleScript.py | 02467faf091308e2fc25459749c599a7a4c3bd63 | [] | no_license | Madhusakth/DM_pro | 7a171569baa010713a5988c5f34c2e38af452754 | 217f71ccd4f59e2a790dba817033a51388114023 | refs/heads/master | 2021-04-15T12:09:14.925624 | 2018-05-10T19:28:01 | 2018-05-10T19:28:01 | 126,866,801 | 0 | 0 | null | 2018-05-10T19:28:02 | 2018-03-26T17:44:48 | Jupyter Notebook | UTF-8 | Python | false | false | 2,203 | py | #!/usr/bin/python
# Note to Kagglers: This script will not run directly in Kaggle kernels. You
# need to download it and run it on your local machine.
# Downloads images from the Google Landmarks dataset using multiple threads.
# Images that already exist will not be downloaded again, so the script can
# resume a partially completed download. All images will be saved in the JPG
# format with 90% compression quality.
import sys, os, multiprocessing, urllib.request, csv
from PIL import Image
from io import BytesIO
def ParseData(data_file):
    """Read key/url pairs from the download manifest CSV.

    Returns a list of [key, url] rows with the header row removed. The
    file is closed deterministically via a context manager (the original
    leaked the open handle).
    """
    with open(data_file, 'r') as csvfile:
        key_url_list = [line[:2] for line in csv.reader(csvfile)]
    return key_url_list[1:]  # Chop off header
def DownloadImage(key_url):
    # Pool worker: download one image, convert it to 8-bit grayscale ('L'),
    # resize to 256x256 and save as JPEG (quality 70). Errors are logged
    # and swallowed so one bad URL does not kill the whole pool.
    out_dir = sys.argv[2]
    (key, url) = key_url
    filename = os.path.join(out_dir, '%s.jpg' % key)
    if os.path.exists(filename):
        print('Image %s already exists. Skipping download.' % filename)
        return
    try:
        response = urllib.request.urlopen(url)
        image_data = response.read()
    except Exception as e:
        print(e)
        print('Warning: Could not download image %s from %s' % (key, url))
        return
    try:
        pil_image_rgb = Image.open(BytesIO(image_data)).convert('L')
    except Exception as e:
        print(e)
        print('Warning: Failed to parse image %s' % key)
        return
    # Dead code from the original RGB pipeline, kept as a string literal
    # (it is a no-op expression statement).
    """
    try:
        pil_image_rgb = pil_image.convert('RGB')
    except Exception as e:
        print(e)
        print('Warning: Failed to convert image %s to RGB' % key)
        return
    """
    try:
        pil_image_rgb = pil_image_rgb.resize((256, 256), Image.ANTIALIAS)
        pil_image_rgb.save(filename, format='JPEG', quality=70)
    except Exception as e:
        print(e)
        print('Warning: Failed to save image %s' % filename)
        return
def Run():
    # Entry point: validate CLI args, create the output directory and fan
    # the download list out over 50 worker processes.
    if len(sys.argv) != 3:
        print('Syntax: %s <data_file.csv> <output_dir/>' % sys.argv[0])
        sys.exit(0)
    (data_file, out_dir) = sys.argv[1:]
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    key_url_list = ParseData(data_file)
    pool = multiprocessing.Pool(processes=50)
    pool.map(DownloadImage, key_url_list)
if __name__ == '__main__':
    Run()
| [
"jcai@DESKTOP-QUBSOHM.localdomain"
] | jcai@DESKTOP-QUBSOHM.localdomain |
ff163d13f70faa09c13751bc35eb50971b507a00 | 7167dbee22b6c166cb146ba0c825fd2841519c09 | /2019/7/main.py | 8a7681a2002559d4770cfd8b5847b26a29a79ac0 | [] | no_license | nellamad/AdventOfCode | 3fe41d1d5e0f181c6e7afd474d3ffc42e35b9fee | 6cfbc72d70e186c7e61893f8f0896d090aad9936 | refs/heads/master | 2020-09-25T16:00:57.108963 | 2019-12-08T07:56:49 | 2019-12-08T07:56:49 | 226,039,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,647 | py | from sys import maxsize
from itertools import permutations
INPUT_PATH = "input.txt"

# Parameter count consumed by each Intcode opcode.
op_to_param_length = {
    1: 3,
    2: 3,
    3: 1,
    4: 1,
    5: 2,
    6: 2,
    7: 3,
    8: 3,
    99: 0,
}


def get_program():
    """Read the comma-separated Intcode program from INPUT_PATH."""
    with open(INPUT_PATH) as source:
        return [int(token) for token in source.readline().split(',')]


def parse_instruction(p, i):
    """Decode the instruction at p[i] into (opcode, resolved param values)."""

    def fetch(memory, pos, mode):
        # Mode '0' = position (dereference), '1' = immediate.
        if mode == '0':
            return int(memory[memory[pos]])
        if mode == '1':
            return int(memory[pos])
        print('Unrecognized parameter mode: {0}'.format(mode))

    encoded = str(p[i])
    mode_digits, op = encoded[:-2], int(encoded[-2:])
    assert op in op_to_param_length, "unrecognized op: {0}".format(op)
    values = []
    for offset in range(1, op_to_param_length[op] + 1):
        # Missing mode digits default to position mode.
        mode = mode_digits[-offset] if offset <= len(mode_digits) else '0'
        values.append(fetch(p, i + offset, mode))
    return op, values


def resume_program(p, i, inputs):
    """Run program *p* from pointer *i* until it emits output or halts.

    Returns (next_pointer, output) on opcode 4 and (None, None) on halt
    (opcode 99). Opcode 3 pops its value from the END of *inputs*.
    """
    while i < len(p):
        op, params = parse_instruction(p, i)
        advanced = i + len(params) + 1
        if op == 1:
            p[p[i + 3]] = params[0] + params[1]
        elif op == 2:
            p[p[i + 3]] = params[0] * params[1]
        elif op == 3:
            p[p[i + 1]] = inputs.pop()
        elif op == 4:
            return advanced, p[p[i + 1]]
        elif op == 5:
            # jump-if-true
            if params[0] != 0:
                i = params[1]
                continue
        elif op == 6:
            # jump-if-false
            if params[0] == 0:
                i = params[1]
                continue
        elif op == 7:
            p[p[i + 3]] = int(params[0] < params[1])
        elif op == 8:
            p[p[i + 3]] = int(params[0] == params[1])
        elif op == 99:
            return None, None
        else:
            print("Invalid op: {0}".format(op))
        i = advanced


class Amplifier:
    """One amplifier stage: a resumable Intcode machine with a phase input."""

    def __init__(self, i, program, phase):
        self.id = chr(ord('A') + i)
        self.program = program
        self.inputs = [phase]          # phase is consumed on the first run
        self.instruction_pointer = 0

    def run(self, inputs):
        assert len(self.inputs) == 0 or self.program == get_program(), "Running wrong version of program"
        self.instruction_pointer, output = resume_program(self.program, self.instruction_pointer, inputs + self.inputs)
        self.inputs = []
        return output


def part_one():
    """Best thruster signal for one serial pass (phase settings 0-4)."""
    program = get_program()
    best = -maxsize
    for phase in permutations("01234"):
        signal = 0
        amplifiers = [Amplifier(i, program.copy(), int(phase[i])) for i in range(5)]
        for amp in amplifiers:
            signal = amp.run([signal])
        best = max(best, signal)
    return best


def part_two():
    """Best thruster signal with feedback looping (phase settings 5-9)."""
    program = get_program()
    best = -maxsize
    for phase in permutations("56789"):
        signal = 0
        amplifiers = [Amplifier(i, program.copy(), int(phase[i])) for i in range(5)]
        while signal is not None:
            for amp in amplifiers:
                signal = amp.run([signal])
            if signal is not None:
                best = max(best, signal)
    return best


if __name__ == '__main__':
    print("Part one answer: {0}".format(part_one()))
    print("Part two answer: {0}".format(part_two()))
| [
"allenqdam@gmail.com"
] | allenqdam@gmail.com |
1e56f83eac27b3db5ac605e0618a55236e8a087b | c048599e7673138616019f8be5d7b0c3022cd72d | /AWS Lambda/TelegramBotLambda.py | 77a4a935f5130643edf558a4e3c5d371c0070d86 | [] | no_license | cococoolbean/ET0731-IoT-Security-Safe-Pacerl- | e62a3646c110244ed5f19b43d5a435879cc48843 | e863fcd43ef6b83b02572cc42c4aba929dcf7fa2 | refs/heads/master | 2022-04-02T05:39:29.678774 | 2020-02-16T11:52:04 | 2020-02-16T11:52:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | """
This is the python code that allows user to
use telegram bot to control the function of lock and unlock
"""
import json
import boto3
from botocore.vendored import requests
# Telegram bot token placeholder; replace with the real "botID:token" pair.
TELE_TOKEN = 'bot_chatId:xxxxxxxxxxxxxxxxxxxxxxxxxxx'
URL = "https://api.telegram.org/bot{}/".format(TELE_TOKEN)
# generally, the region_name is "us-east-1"
# (C-style '//' comments were syntax errors in Python; converted to '#'.)
client = boto3.client('iot-data', region_name='us-east-1')
# your chat id -- placeholder name, replace with your numeric Telegram chat id
MyID = xxxxxxxxx
def send_message(text, chat_id):
    """Handle one command from the chat and reply via the Telegram API.

    Only the authorised chat (MyID) may control the box: 'unlock' publishes
    payload 1 and 'lock' publishes payload 0 to the servo topic on AWS IoT;
    anything else produces a usage hint. The resulting status text is sent
    back to the originating chat.

    Fixes the original C-style '//' inline comments, which are floor-division
    syntax errors in Python.
    """
    if MyID == chat_id:
        if text == 'unlock':
            final_text = 'Box is Unlocked'
            # Change topic, qos and payload
            response = client.publish(
                topic='topic/servo',  # replace "topic/servo" subject to the AWS IoT thing
                qos=0,
                payload=json.dumps(1)
            )
        elif text == 'lock':
            final_text = 'Box is locked'
            # Change topic, qos and payload
            response = client.publish(
                topic='topic/servo',  # replace "topic/servo" subject to the AWS IoT thing
                qos=0,
                payload=json.dumps(0)
            )
        else:
            final_text = 'Error! Type "unlock" to unlock the box\n Type "lock" to lock the box'
    else:
        final_text = 'You are not allowed to use this bot'
    url = URL + "sendMessage?text={}&chat_id={}".format(final_text, chat_id)
    requests.get(url)
def lambda_handler(event, context):
    """AWS Lambda entry point for the Telegram webhook.

    Parses the update payload, forwards the message text to send_message()
    and always acknowledges with HTTP 200 so Telegram stops retrying.
    (The original '//' inline comment was a Python syntax error.)
    """
    message = json.loads(event['body'])
    chat_id = message['message']['chat']['id']  # get user id
    reply = message['message']['text']
    send_message(reply, chat_id)
    return {
        'statusCode': 200
    }
| [
"noreply@github.com"
] | cococoolbean.noreply@github.com |
b8f04f11344cd7fcc06b3f1e060a3b7b6ea8fa86 | 10e9ff22b63a7149d503bb7bf92a07d1f1303f47 | /src/tworobot_main.py | 7681f3dedfa4e36f1cbcf4433f31f563eff00b38 | [] | no_license | ngthanhtin/Instruction-Navigation-MultiRobot | 1907ec3e056719a0bffb0cd39f7575c6763ee3cb | 03c8a34d637f9db386640c47965ae27f208152eb | refs/heads/master | 2023-07-21T04:45:10.831918 | 2021-09-01T01:09:17 | 2021-09-01T01:09:17 | 384,711,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,452 | py | #!/usr/bin/env python3
import rospy
from gazebo_msgs.msg import ModelStates
import math
#import gym
import numpy as np
import tensorflow as tf
# from ddpg import *
from mddpg.magent import *
# from tworobot_environment import Env
# from tworobot_environment_getobjects import Env
from multirobot_environment import Env
from pathlib import Path
import argparse
import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"]="1"
exploration_decay_start_step = 50000  # training step after which noise decay was meant to start (used only in commented code)
state_dim = 16   # per-robot observation vector length
action_dim = 2   # (linear velocity, angular velocity)
action_linear_max = 0.25 # m/s
action_angular_max = 0.5 # rad/s
def write_to_csv(item, file_name):
    """Append *item* on its own line to *file_name* (created on demand)."""
    with open(file_name, 'a') as sink:
        sink.write("%s\n" % item)
def parse_args():
    """Build and parse the command-line options for this node."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--train', type=int, default=0, help='1 for training and 0 for testing')
    cli.add_argument('--env_id', type=int, default=2, help='env name')
    cli.add_argument('--sac', type=int, default=0, help='1 for using sac')
    cli.add_argument('--visual_obs', type=int, default=0, help='1 for using image at robot observation')
    cli.add_argument('--test_env_id', type=int, default=2, help='test environment id')
    cli.add_argument('--n_scan', type=int, default=10, help='num of scan sampled from full scan')
    return cli.parse_args()
def main():
    """Train or evaluate a two-robot MADDPG navigation policy.

    Behaviour is selected by the --train flag: training runs episodes with
    Gaussian exploration noise and checkpoints the best-scoring model;
    testing rolls out the policy while accumulating return, path length
    and success-rate statistics from Gazebo model states.
    """
    rospy.init_node('baseline')
    # get arg
    args = parse_args()
    is_training = bool(args.train)
    env_name = 'env' + str(args.env_id)
    trained_models_dir = './src/trained_models/bl-' + env_name + '-models/' if not args.visual_obs else \
        './src/trained_models/vis_obs-' + env_name + '-models/'
    # env = Env(is_training, args.env_id, args.test_env_id, args.visual_obs, args.n_scan)
    # Third positional argument 2 is the number of robots.
    env = Env(is_training, args.env_id, args.test_env_id, 2, args.visual_obs, args.n_scan)
    # agent = DDPG(env, state_dim, action_dim, trained_models_dir)
    # MADDPG hyper-parameters.
    lr_actor = 1e-4
    lr_critic = 1e-4
    lr_decay = .95
    replay_buff_size = 10000
    gamma = .99
    batch_size = 128
    random_seed = 42
    soft_update_tau = 1e-3
    # 2 agents
    agent = MADDPG(state_dim, action_dim, lr_actor, lr_critic, lr_decay, replay_buff_size, gamma, batch_size, random_seed, soft_update_tau)
    # One (linear, angular) action pair per robot from the previous step.
    past_action = np.array([[0., 0.], [0., 0.]])
    print('State Dimensions: ' + str(state_dim))
    print('Action Dimensions: ' + str(action_dim))
    print('Action Max: ' + str(action_linear_max) + ' m/s and ' + str(action_angular_max) + ' rad/s')
    if is_training:
        print('Training mode')
        # path things
        figures_path = './figures/bl-' + env_name + '/' if not args.visual_obs else \
            './figures/vis_obs-' + env_name + '/'
        print(figures_path)
        Path(trained_models_dir + 'actor').mkdir(parents=True, exist_ok=True)
        Path(trained_models_dir + 'critic').mkdir(parents=True, exist_ok=True)
        Path(figures_path).mkdir(parents=True, exist_ok=True)
        avg_reward_his = []
        threshold_init = 20
        total_rewards = []
        avg_scores = []
        max_avg_score = -1
        max_score = -1
        var = 1.
        ep_rets = []
        ep_ret = 0.
        while True:
            states = env.reset()
            one_round_step = 0
            scores = np.zeros(2)
            while True:
                # Exploration: Gaussian noise (std = var) around the policy
                # action, clipped to the valid linear/angular ranges.
                a = agent.act(states)
                a[0][0] = np.clip(np.random.normal(a[0][0], var), 0., 1.)
                a[0][1] = np.clip(np.random.normal(a[0][1], var), -0.5, 0.5)
                a[1][0] = np.clip(np.random.normal(a[1][0], var), 0., 1.)
                a[1][1] = np.clip(np.random.normal(a[1][1], var), -0.5, 0.5)
                state_s, r, dones, arrives = env.step([a[0], a[1]], [past_action[0], past_action[1]])
                time_step = agent.update(states, a, r, state_s, dones)
                if arrives:
                    result = 'Success'
                else:
                    result = 'Fail'
                # if time_step > 0:
                #     total_reward += r
                #     ep_ret += r
                # print("Timestep: ",time_step)
                # if time_step % 10000 == 0 and time_step > 0:
                #     print('---------------------------------------------------')
                #     avg_reward = total_reward / 10000
                #     print('Average_reward = ', avg_reward)
                #     avg_reward_his.append(round(avg_reward, 2))
                #     print('Average Reward:',avg_reward_his)
                #     total_reward = 0
                #     print('Mean episode return over training time step: {:.2f}'.format(np.mean(ep_rets)))
                #     print('Mean episode return over current 10k training time step: {:.2f}'.format(np.mean(ep_rets[-10:])))
                #     write_to_csv(np.mean(ep_rets), figures_path + 'mean_ep_ret_his.csv')
                #     write_to_csv(np.mean(ep_rets[-10:]), figures_path + 'mean_ep_ret_10k_his.csv')
                #     write_to_csv(avg_reward, figures_path + 'avg_reward_his.csv')
                #     print('---------------------------------------------------')
                # if time_step % 5 == 0 and time_step > exploration_decay_start_step:
                #     var *= 0.9999
                scores += np.array(r)
                past_action = a
                states = state_s
                one_round_step += 1
                # if arrive_s:
                #     print('Step: %3i' % one_round_step, '| Var: %.2f' % var, '| Time step: %i' % time_step, '|', result)
                #     one_round_step = 0
                #     if time_step > 0:
                #         ep_rets.append(ep_ret)
                #         ep_ret = 0.
                # if done_s or one_round_step >= 500:
                #     print('Step: %3i' % one_round_step, '| Var: %.2f' % var, '| Time step: %i' % time_step, '|', result)
                #     if time_step > 0:
                #         ep_rets.append(ep_ret)
                #         ep_ret = 0.
                #     break
                # Episode ends when both robots collide, both arrive, or the
                # step budget is exhausted.
                if (dones[0] == 1 and dones[1] == 1) or (arrives[0] == 1 and arrives[1] == 1) or one_round_step >= 500:
                    break
            # Episode score: the better of the two robots' accumulated rewards.
            episode_score = np.max(scores)
            total_rewards.append(episode_score)
            print("Score: {:.4f}".format(episode_score))
            # Checkpoint whenever the episode score ties or beats the best so far.
            if max_score <= episode_score:
                max_score = episode_score
                agent.save('./tworobot_weights.pth')
            if len(total_rewards) >= 100: # record avg score for the latest 100 steps
                latest_avg_score = sum(total_rewards[(len(total_rewards)-100):]) / 100
                print("100 Episodic Everage Score: {:.4f}".format(latest_avg_score))
                avg_scores.append(latest_avg_score)
            # if max_avg_score <= latest_avg_score: # record better results
            #     worsen_tolerance = threshold_init # re-count tolerance
            #     max_avg_score = latest_avg_score
            # else:
            #     if max_avg_score > 0.5:
            #         worsen_tolerance -= 1 # count worsening counts
            #         print("Loaded from last best model.")
            #         agent.load(best_model_path) # continue from last best-model
            #     if worsen_tolerance <= 0: # earliy stop training
            #         print("Early Stop Training.")
            #         break
    else:
        print('Testing mode')
        total_return = 0.
        total_step = 0
        total_path_len = 0.
        arrive_cnt = 0
        robot_name='turtlebot3_burger_1'
        # robot_name = 'robot1'
        while True:
            state = env.reset()
            one_round_step = 0
            # Poll Gazebo until a model-state message arrives.
            data = None
            while data is None:
                try:
                    data = rospy.wait_for_message('gazebo/model_states', ModelStates, timeout=5)
                except:
                    pass
            robot_cur_state = data.pose[data.name.index(robot_name)].position
            while True:
                a = agent.action(state)
                a[0] = np.clip(a[0], 0., 1.)
                a[1] = np.clip(a[1], -0.5, 0.5)
                state_, r, done, arrive = env.step(a, past_action)
                total_return += r
                past_action = a
                state = state_
                one_round_step += 1
                total_step += 1
                data = None
                while data is None:
                    try:
                        data = rospy.wait_for_message('gazebo/model_states', ModelStates, timeout=5)
                    except:
                        pass
                robot_next_state = data.pose[data.name.index(robot_name)].position
                # Accumulate travelled path length from consecutive positions.
                dist = math.hypot(
                    robot_cur_state.x - robot_next_state.x,
                    robot_cur_state.y - robot_next_state.y
                )
                total_path_len += dist
                robot_cur_state = robot_next_state
                if arrive:
                    arrive_cnt += 1
                    print('Step: %3i' % one_round_step, '| Arrive!!!')
                    one_round_step = 0
                    # All test goals consumed: print the summary and exit.
                    if env.test_goals_id >= len(env.test_goals):
                        print('Finished, total return: ', total_return)
                        print('Total step: ', total_step)
                        print('Total path length: ', total_path_len)
                        print('Success rate: ', arrive_cnt / len(env.test_goals))
                        exit(0)
                if done:
                    print('Step: %3i' % one_round_step, '| Collision!!!')
                    if env.test_goals_id >= len(env.test_goals):
                        print('Finished, total return: ', total_return)
                        print('Total step: ', total_step)
                        print('Total path length: ', total_path_len)
                        print('Success rate: ', arrive_cnt / len(env.test_goals))
                        exit(0)
                    break

if __name__ == '__main__':
    main()
| [
"ngthanhtinqn@gmail.com"
] | ngthanhtinqn@gmail.com |
74a5e2868e2ee6e54bc08acfae858d0c3bc8a2f1 | 1b0139d14f31928d7bb74674b0910a7dca2d852e | /Jupyter_notebook/numpy/killed_in_USA.py | 09c85534d024e309028cb2036b3751f3f33e04db | [] | no_license | malhotrasahil/coding_ninjas | 6a7c8c668896850da81f196c1afe83bfc63f16c0 | edf3189caa65bcb780837b4a70eef22fc17eaf3f | refs/heads/master | 2020-08-06T01:50:10.036298 | 2019-10-05T08:55:18 | 2019-10-05T08:55:18 | 212,789,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | import numpy as np
import csv
with open("C:\\Users\\Sahil\\Downloads\\terrorismData.csv" , encoding ='UTF-8') as file_obj:
# csv_obj=csv.reader(file_obj)
csv_obj = csv.DictReader(file_obj,skipinitialspace=True)
list1=list(csv_obj)
# list2=list1[0:][3]
# print(list2)
# arr=np.array(list2)
# for row in list1:
# if row['Country']=='United States':
# if row['Killed'] !='':
# print(int(float(row['Killed'])))
# else:
# print(0)
#
country=list()
killed=list()
for row in list1:
killed.append(row['Killed'])
country.append(row['Country'])
# print(country)
np_country=np.array(country)
np_killed=np.array(killed)
np_killed[np_killed=='']='0.0'
np_killed=np.array(np_killed , dtype=float)
country_us_bool=(np_country=='United States')
# print(country_us_bool)
killed_us=np_killed[country_us_bool]
# killed_us=np.array(killed_us , dtype=int)
for i in killed_us:
print(int(i))
| [
"noreply@github.com"
] | malhotrasahil.noreply@github.com |
572ff9086f75d3bfc5ddd794315dcdd46d41f352 | 6a689e14959b6cc207444a419bbde4f7e40a5222 | /detail/Event.py | 00f144d3b8e3d8c02db5ee1b8347977ce4b6f0ee | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | rainforest-tokyo/AutoNaptPython | 1d2724656590bd08b2595a845183e5d89961be42 | 5c021ca18e7a8280b52fd168ff6c443321ff3e31 | refs/heads/master | 2020-03-23T21:11:38.515278 | 2019-07-19T15:08:19 | 2019-07-19T15:08:19 | 142,088,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Event(object):
def __init__(self, doc = None):
self.handlers = []
self.__doc__ = doc
def __str__(self):
return 'Event<%s>' % str(self.__doc__)
def add(self, handler):
self.handlers.append(handler)
return self
def remove(self, handler):
self.handlers.remove(handler)
return self
def __call__(self, sender, e):
for handler in self.handlers:
handler(sender, e)
__iadd__ = add
__isub__ = remove
| [
"kouichirou_okada@rainforest.tokyo"
] | kouichirou_okada@rainforest.tokyo |
5bc8ca0afc78c19fa36a2ac33968c2e027761c1a | d0004dec928083657d514d21e4f13a04ffdb1079 | /randomnumber.py | 7f723e2ae3623a518520324f79926447cbfccee5 | [] | no_license | harshagl88/pythonbasic | 0e228f0d8904675b0c8f0fd5c28ed9ef701ea167 | 32872e446c33c43db80bed65831b349c0d5c09ef | refs/heads/master | 2023-01-30T02:33:25.986729 | 2020-12-13T05:04:57 | 2020-12-13T05:04:57 | 307,269,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import random
class RandomNumber:
def random_number(self):
print(random.randint(10, 100))
rn = RandomNumber()
rn.random_number()
| [
"harsh.agl88@gmail.com"
] | harsh.agl88@gmail.com |
8388fe4dea5028cd270dc5918c4a86c8b67ac556 | 4678a77ee3e928fc4db79c6b570d538013457181 | /venv/bin/django-admin.py | 2df02c4b03ca8f7f54e92faaddf31f4c616defb6 | [] | no_license | alisarmad/HTML | ee078b49b4a86ea2fdc37d8a319506b0b1e05731 | 22b661782a6fbc814f981e2d1ce8872f67e5b755 | refs/heads/master | 2022-12-05T10:20:32.248317 | 2020-01-10T11:09:04 | 2020-01-10T11:09:04 | 231,580,769 | 0 | 1 | null | 2022-11-26T11:27:12 | 2020-01-03T12:11:02 | Python | UTF-8 | Python | false | false | 139 | py | #!/var/www/html/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"sarmad1305@gmail.com"
] | sarmad1305@gmail.com |
ec9c417e630a5d1e8843d6a2e23386c2db157f4e | d136c10cdd556055717f8b4330066f56052688b5 | /setup.py | 49f3c1e51fc0701380fc53e434bee2a0568658c7 | [
"CC0-1.0"
] | permissive | biomodels/BIOMD0000000083 | 24cfbb23497e0dae2fa764e13bbdfef565bd2a51 | e8221a507f10df4490c4c6dd004368c9384f2de6 | refs/heads/master | 2021-01-01T05:34:54.036641 | 2014-10-16T05:22:13 | 2014-10-16T05:22:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from setuptools import setup, find_packages
setup(name='BIOMD0000000083',
version=20140916,
description='BIOMD0000000083 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000083',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
59eaeba87219790848ae8ac34d77c14d8cc54364 | 1ce2b3721c79baa0b513d2b2a6ac72dbd72cee74 | /resources/thing.py | 477f310c02ef8c6d122588a14f67c7093aa27087 | [] | no_license | BrandonBlair/adventureengine | 09374fde5a1a719ab3a941b260e2efd73cd6a0c3 | e8024280f075ff6f9f2e298c741cc8a45a5f6b03 | refs/heads/master | 2021-06-17T14:12:20.289074 | 2019-06-03T16:22:39 | 2019-06-03T16:22:39 | 189,677,109 | 1 | 0 | null | 2021-03-25T22:39:34 | 2019-06-01T01:25:30 | Python | UTF-8 | Python | false | false | 434 | py | from .container import Container
class Thing(Container):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = kwargs.get('name', 'Generic Thing')
self.short_desc = kwargs.get('short_desc', 'thing')
self.desc = kwargs.get('desc', 'generic thing')
self.contains = kwargs.get('contains', self.contains)
self.gettable = kwargs.get('gettable', False) | [
"cbrandon.blair@gmail.com"
] | cbrandon.blair@gmail.com |
8817047c2c71be9cdae859f54be02ae569fe144c | 724ae861f52fedc9d57f39c30971fab0114dc34b | /cms_content/cms_app.py | 1f95945cb2b4dac8bdf406ce033d0d0148078a6c | [
"BSD-3-Clause"
] | permissive | mmlic/django-cms-content | 36faa82f5303ba151d1f5542d895bbf65aec753a | d6b214542ab618b6fa4645483fa5bf58e8212f86 | refs/heads/master | 2021-01-16T18:06:01.393986 | 2010-09-18T14:54:45 | 2010-09-18T14:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_content.menu import CMSContentMenu
class CMSContentApp(CMSApp):
name = _(u"CMS Content App")
urls = ["cms_content.urls"]
menus = [CMSContentMenu]
apphook_pool.register(CMSContentApp)
| [
"indexofire@gmail.com"
] | indexofire@gmail.com |
ea7a28f829cd31f0bb53e7c6d894881acf4893c1 | 578a7fdc73798b9826b032910346195e3e5a5370 | /import_2.0.py | c8586850f047136719b1c0e2e58387e8567801ce | [] | no_license | giapiazze/Samu_Script | 87c4ab9643e3d11f7fffc839128477c39c37e9c9 | 354bc9841892ec3df3fdf2d59fe1e4083925cc23 | refs/heads/master | 2021-05-09T12:50:16.286384 | 2019-02-04T17:59:11 | 2019-02-04T17:59:11 | 119,018,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | import getopt
import sys
import datetime
import xlrd
# DB SQLAlchemy
from import_db import Session, Base
from import_models import AuthHousesUsers
# Main module to read start option parameter
# Option parameter: -d 'YYYY-mm-dd' => The date to search)
# Option parameter: -x True => If you want export)
if "__main__" == __name__:
# Default params options
date = datetime.datetime.now()
s = Session()
workbook = xlrd.open_workbook('per_importazione.xlsx')
# AuthHousesUsers first
worksheet = workbook.sheet_by_index(2)
for r in range(1, worksheet.nrows):
row = worksheet.row(r)
houseId = int(row[1].value)
userId = int(row[2].value)
isActive = bool(row[3].value)
startDate = date
print(r, houseId)
housesUsers = AuthHousesUsers(None, houseId, userId, isActive, startDate, None)
s.add(housesUsers)
s.commit()
sys.exit(1)
| [
"giapiazze@gmail.com"
] | giapiazze@gmail.com |
a9fa1327a1dab06a3cf16ed62e3b72fc95be7db8 | ca9177204037f9371dbc23ea96f3ad5ff2250e41 | /tag/views.py | 65c32958594acf5b33ca17e3d8e49e31e505eb42 | [] | no_license | luanvuhlu/hlusupportivelearning | 5562ce84e26b6c746cadc44a84b10d5df9f70c86 | 1deeaa0fe1abbe6f8debac49ff55533173edd7a6 | refs/heads/master | 2021-01-15T12:16:17.848073 | 2014-11-20T16:07:41 | 2014-11-20T16:07:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from hlusupportivelearning.views import get_user
from django.template import RequestContext, loader
from student.models import Student
from models import Tag
from entity import StudentTag
from hlusupportivelearning.util import ErrorMessage
import logging
log=logging.getLogger(__name__)
# Create your views here.
def student_tag_view(request, code):
errors=ErrorMessage()
template='tag/student.html'
if request.method=='POST':
tags=request.POST.getlist('tag')
student=get_object_or_404(Student, account=request.user, code=code, activated=True, block=False)
tags_of_student=student.tags.all()
new_tags_of_student=[]
for tag_id in tags:
if not tag_id:
continue
tag=get_object_or_404(Tag, id=tag_id, is_public=True, activated=True)
new_tags_of_student.append(tag)
if tag not in tags_of_student:
student.tags.add(tag)
for tag in tags_of_student:
if tag.id not in tags:
student.tags.remove(tag)
student.save()
all_tags=Tag.objects.filter(activated=True, is_public=True)
# tags_of_student=student.tags.all()
tags=[]
for tag in all_tags:
tags.append(StudentTag(tag, (tag in new_tags_of_student)))
return render(request, template, {
'errors':errors,
'student':student,
'tags':tags,
})
student=get_object_or_404(Student, account=request.user, code=code, activated=True, block=False)
student_tags=student.tags.all()
# log.debug(student_tags)
tags=[]
all_tags=Tag.objects.filter(activated=True, is_public=True)
for tag in all_tags:
tags.append(StudentTag(tag, (tag in student_tags)))
return render(request, template, {
'errors':errors,
'student':student,
'tags':tags,
})
| [
"luanhlu3503@gmail.com"
] | luanhlu3503@gmail.com |
a18916117bbe69ba04686b91913c458e5c100627 | c58ca78e0f86c579ccb9715b18306d045edfe6ce | /bsplines/__init__.py | 485360b7c55ad12ae3f10437a525f043fb0ab284 | [
"MIT"
] | permissive | pschulam/bsplines | 1102652e502064053b1b65fb73104baaaa774dd9 | e24a7a6e613077d58f7501ff0c6105221a12c82d | refs/heads/master | 2021-01-11T19:44:26.917311 | 2017-01-23T18:08:17 | 2017-01-23T18:08:17 | 79,385,727 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from .bsplines import BSplineBasis, QuantileBSplineBasis
| [
"pschulam@gmail.com"
] | pschulam@gmail.com |
238de79091ee6ab3dc170ece675fcc253ffd9020 | be0538698ed652f128720f4ab2c477805dc7030f | /python_design_patterns/SOLID/dependency_inversion.py | a53b1ac59971025baee140e8533ba0efc11bfa85 | [] | no_license | titoeb/python-desing-patterns | a2c8a45d218802ea4e3cc3fe4d1c7522fc991128 | 1e5f835a7fcca2be99d112053a9a1fc10829f2cf | refs/heads/master | 2023-04-14T16:40:48.590152 | 2021-05-05T06:44:09 | 2021-05-05T06:44:09 | 359,351,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | from __future__ import annotations
import abc
from enum import Enum
from typing import List, Tuple
# The depency inversion principle
# High level modules should not depend on low-level module, but they should depend on abstractions.
# Therefore, one should rather depend on interfaces then concrete implementations.
class Relationship(Enum):
PARENT = 0
CHILD = 1
SIBLING = 2
class Person:
def __init__(self, name: str):
self.name: str = name
def __str__(self):
return self.name
# This is the lower level module that does not use other classes and even more importantly
# it handles lower level mechanics like storage.
class Relationships:
def __init__(self):
self.relations: List[Tuple[int]] = []
def add_parent_and_child(self, parent: int, child: int):
self.relations.append((parent, Relationship.PARENT, child))
self.relations.append((child, Relationship.CHILD, parent))
# This is the higher level module in the sense that is uses other classes and does not handle
# lower level machanics.
class Resarch:
def __init__(self, relationships: Relationships):
for person_0, relationship_type, person_1 in relationships.relations:
if person_0.name == "John" and relationship_type == Relationship.PARENT:
print(f"John has a child called {person_1.name}")
# So what is the problem with the Research class?
# Well it depends on the low level object Relationships, in particular on the fact
# that the relations in there are stored as a list. If we decide at a later point to
# use a dictionary or a database instead, we will break the research class!
# So what to do?
# First, we should not use the object `relations` of the `Relationships` class directly,
# but we should use dedicated methods to access the information. Even better we should
# define an interace instead of depending on the lower level class (relationships) directly!
# So let's define the `RelationshipBrowser`!
class RelationshipBrowser:
@abc.abstractmethod
def find_all_children_of(self, name):
pass
class BetterRelationships(RelationshipBrowser):
def __init__(self):
self.relations: List[Tuple[int]] = []
def add_parent_and_child(self, parent: int, child: int):
self.relations.append((parent, Relationship.PARENT, child))
self.relations.append((child, Relationship.CHILD, parent))
def find_all_children_of(self, name):
for person_0, relationship_type, person_1 in self.relations:
if person_0.name == name and relationship_type == Relationship.PARENT:
yield person_1
class BetterResarch:
def __init__(self, browser: RelationshipBrowser):
for person in browser.find_all_children_of("John"):
print(f"John has a child called {person}")
if __name__ == "__main__":
parent = Person("John")
child1 = Person("Chris")
child2 = Person("Matt")
relations = Relationships()
relations.add_parent_and_child(parent=parent, child=child1)
relations.add_parent_and_child(parent=parent, child=child2)
research = Resarch(relationships=relations)
# Using these objects, it satisfies the dependency inversion principle:
better_relations = BetterRelationships()
better_relations.add_parent_and_child(parent=parent, child=child1)
better_relations.add_parent_and_child(parent=parent, child=child2)
better_research = BetterResarch(browser=better_relations)
| [
"timtoebrock@gmail.com"
] | timtoebrock@gmail.com |
ec17da356f713d8e5b8b9e457642a00c09d086eb | 7bab15222a8a22bf0c1e5b9df27c867c2a5257e5 | /utils.py | d03cc447f4e30c703f8a8598c0f28dba95c5801a | [] | no_license | Ade-Pyaar/Auto_predict_app | 096c9f2224e395af4b05e341cb51f19471941b6b | 3e2a2e0192f86154844248d2542fd86e5bc1bf61 | refs/heads/master | 2023-04-28T16:49:06.531883 | 2021-05-22T16:52:27 | 2021-05-22T16:52:27 | 361,749,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import pickle, json
with open ('n_gram_counts_list.txt', 'rb') as fp:
n_gram_counts_list = pickle.load(fp)
with open('vocabulary.txt', 'r') as f:
vocabulary = json.loads(f.read())
def estimate_probability(word, previous_n_gram,
n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0):
previous_n_gram = tuple(previous_n_gram)
previous_n_gram_count = n_gram_counts.get(previous_n_gram, 0)
denominator = previous_n_gram_count + k * vocabulary_size
n_plus1_gram = (previous_n_gram) + (word,)
n_plus1_gram_count = n_plus1_gram_counts.get(n_plus1_gram, 0)
numerator = n_plus1_gram_count + k
probability = numerator/denominator
return probability
def estimate_probabilities(previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0):
previous_n_gram = tuple(previous_n_gram)
vocabulary = vocabulary + ["<e>", "<unk>"]
vocabulary_size = len(vocabulary)
probabilities = {}
for word in vocabulary:
probability = estimate_probability(word, previous_n_gram,
n_gram_counts, n_plus1_gram_counts,
vocabulary_size, k=k)
probabilities[word] = probability
return probabilities
def suggest_a_word(previous_tokens, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0, start_with=None):
n = len(list(n_gram_counts.keys())[0])
previous_n_gram = previous_tokens[-n:]
probabilities = estimate_probabilities(previous_n_gram,
n_gram_counts, n_plus1_gram_counts,
vocabulary, k=k)
suggestion = None
max_prob = 0
for word, prob in probabilities.items():
if start_with:
if not word.startswith(start_with):
continue
if prob > max_prob:
suggestion = word
max_prob = prob
return suggestion, max_prob
def get_suggestions(token, k=1.0, start_with=None):
previous_tokens = token.lower().split( ' ')
model_counts = len(n_gram_counts_list)
suggestions = []
suggestion_dict = {}
for i in range(model_counts-1):
n_gram_counts = n_gram_counts_list[i]
n_plus1_gram_counts = n_gram_counts_list[i+1]
suggestion = suggest_a_word(previous_tokens, n_gram_counts,
n_plus1_gram_counts, vocabulary,
k=k, start_with=start_with)
suggestions.append(suggestion)
for item in suggestions:
suggestion_dict[item[0]] = item[1]
final_dict = {k: v for k, v in sorted(suggestion_dict.items(), key=lambda item: item[1], reverse=True)}
dict_keys = [i for i in final_dict.keys()]
for x in dict_keys:
if x == '<e>' or dict_keys.count(x) > 1:
del final_dict[x]
return final_dict | [
"Adebayoibrahim2468@gmail.com"
] | Adebayoibrahim2468@gmail.com |
871af0fab6d27935b3a9d6894e5b69448e205e49 | e7d4b6fcace1509d37359776d9f72020dad3da41 | /part010/ch08_basemap/sec2_draw/test_2_plot_x_x.py | 955139e8b3ec761b1aa180217c7fb55b866cb87f | [] | no_license | LinyunGH/book_python_gis | b422e350222c4ab5711efb4cc6101e229bd26f7b | 067d75e324c006e2098827ac16ba38d4894b8a21 | refs/heads/master | 2020-04-09T22:25:35.049625 | 2018-10-14T09:56:38 | 2018-10-14T09:56:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | # -*- coding: utf-8 -*-
print('=' * 40)
print(__file__)
from helper.textool import get_tmp_file
################################################################################
from mpl_toolkits.basemap import Basemap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
para = {'projection': 'merc',
'lat_0': 0, 'lon_0': 120,
'resolution': 'h', 'area_thresh': .1,
'llcrnrlon': 116, 'llcrnrlat': 36.6,
'urcrnrlon': 124, 'urcrnrlat': 40.2 }
my_map = Basemap(**para)
my_map.drawcoastlines(); my_map.drawmapboundary()
################################################################################
lon = 121.60001; lat = 38.91027
x, y = my_map(lon, lat)
my_map.plot(x, y, 'bo', markersize=12)
# plt.show()
plt.savefig(get_tmp_file(__file__, '1'), bbox_inches='tight', dpi=600)
plt.savefig(get_tmp_file(__file__, '1', file_ext='pdf'), bbox_inches='tight', dpi=600)
plt.clf()
################################################################################
my_map = Basemap(**para)
my_map.drawcoastlines(); my_map.drawmapboundary()
lons = [121.60001, 121.38617, 117.19723]
lats = [38.91027, 37.53042, 39.12473]
x, y = my_map(lons, lats)
################################################################################
my_map.plot(x, y, 'bo', markersize=10)
# plt.show()
plt.savefig(get_tmp_file(__file__, '2'), bbox_inches='tight', dpi=600)
plt.savefig(get_tmp_file(__file__, '2', file_ext='pdf'), bbox_inches='tight', dpi=600)
plt.clf()
################################################################################
my_map = Basemap(**para)
my_map.drawcoastlines(); my_map.drawmapboundary()
my_map.plot(x, y, marker=None,color='m')
# plt.show()
plt.savefig(get_tmp_file(__file__, '3'), bbox_inches='tight', dpi=600)
plt.savefig(get_tmp_file(__file__, '3', file_ext='pdf'), bbox_inches='tight', dpi=600)
plt.clf()
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
662c71161cfd8d0510f97315e3f4b811738fdcf3 | c1c7214e1f9230f19d74bb9776dac40d820da892 | /examples/django/model一般/FilePathFieldの使い方の例/project/app/views.py | 862e10440074a4c00591ebc229e8546a7d8428c2 | [] | no_license | FujitaHirotaka/djangoruler3 | cb326c80d9413ebdeaa64802c5e5f5daadb00904 | 9a743fbc12a0efa73dbc90f93baddf7e8a4eb4f8 | refs/heads/master | 2020-04-01T13:32:28.078110 | 2018-12-13T00:39:56 | 2018-12-13T00:39:56 | 153,256,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from django.shortcuts import render
import os
from .forms import *
from pathlib import Path
import re
from ajax.views import z
#この部分は本編とは関係なし
########################
d=z()
########################
def index(request):
d["form"]=Form
d["form2"]=Form2
d["form3"]=Form3
d["form4"]=Form4
d["form5"]=Form5
return render(request, 'app/index.html', d)
| [
"fujiozone@msn.com"
] | fujiozone@msn.com |
78bfd40f5139c5f8b353a0d60b790cfc105d405a | a7c7bf8eb6031c77014e1c60f27b6956201c83d7 | /2d.py | 1b66ceb515fb4b0f719404f16e601769dc5edc74 | [
"Apache-2.0"
] | permissive | LuPaSchmitt/quantum-neural-network | 1fe388fae8070b1c2e07d6ee87f5e31450766b3d | 5398e7ece1e21971b5df840af8f20559270b2bc4 | refs/heads/main | 2023-06-07T13:32:26.544123 | 2021-07-04T10:24:30 | 2021-07-04T10:24:30 | 379,517,755 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,003 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 30 12:35:28 2021
@author: Lukas
"""
import numpy as np
import tensorflow as tf
import strawberryfields as sf
from strawberryfields import ops
import basis
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
tf.random.set_seed(2021)
np.random.seed(2021)
#==============================================================
# Trainingsdaten
#==============================================================
#Größe des Trainingssamples
batch = 20
#Größe des Intervalls
a = -1
b = 1
#Trainingsepochen
epochs=1000
#Bestrafung von nicht erwünschten Eigenschaften der Lösung
reg = 1
#Lernrate
lr = 0.03
#Funktionen die gelernt werden sollen
#Rauschen (Normalverteilt)
e=0.0
#2 dimensional
def f1(x,y,e):
return x*y + e*np.random.normal(size=x.shape)
def f2(x,y,e):
return np.sin(x*y) + e*np.random.normal(size=x.shape)
def f3(x,y,e):
return np.sin(x)*np.sin(y) + e*np.random.normal(size=x.shape)
def f4(x,y,e):
return np.sin(x)+ np.sin(y) + e*np.random.normal(size=x.shape)
#Bestimme welche Funktion gelernt werden soll
def f(x,y,e):
return f1(x,y,e)
#Ordner in dem Bilder gespeichert werden
ordner="multiplication/"
#==============================================================
#Erstelle Trainings und Testdaten
train_data_x = np.linspace(a, b, num=batch)
train_data_y = np.linspace(a, b, num=batch)
test_data_x = np.linspace(a-0.01, b+0.01, num=batch)
test_data_y = np.linspace(a-0.01, b+0.01, num=batch)
X,Y = np.meshgrid(train_data_x,train_data_y)
tX,tY = np.meshgrid(test_data_x,test_data_y)
train_data_x=X.flatten()
train_data_y=Y.flatten()
train_Z = f(train_data_x,train_data_y,e)
train_data_x = tf.constant(train_data_x,tf.float32)
train_data_y = tf.constant(train_data_y,tf.float32)
train_Z = tf.constant(train_Z,tf.float32)
testX = tf.constant(tX.flatten(),tf.float32)
testY = tf.constant(tY.flatten(),tf.float32)
#==============================================================
# Netzparameter
#==============================================================
#Größe des Netzes
in_dim = 3
layers = 7
#Genauigkeit
cutoff_dim = 11
#==============================================================
# zum Ausführen des Programms wird ein Simulator benötigt. Hier wird das backend von tensorflow verwendet
#cutoff_dim gibt an wieviele Dimensionen des Fock-Raums für die Simulation benutzt werden sollen
#Je höher die Zahl, desto kleiner ist der Fehler auf Operationen, aber desto mehr Zeit wird benötigt
eng = sf.Engine('tf', backend_options={"cutoff_dim": cutoff_dim, "batch_size": batch**2})
#==============================================================
# Initialisierung
#==============================================================
#Erstelle ein Programm mit N qumodes
qnn = sf.Program(in_dim)
# initialisiere Parameter zufällig
weights = basis.init(in_dim, layers)
anzahl = np.prod(weights.shape) # Gesamtzahl an Parametern
#Erstelle einen Array mit symbolischen Variabeln die im QNN verwendet werden
params = np.arange(anzahl).reshape(weights.shape)
params = params.astype(np.str) #Variablen sind einfach numeriert
par = []
for i in params:
par.append(qnn.params(*i))
params = np.array(par)
#symbolischer Parameter für den Input
x_data = qnn.params("input1")
y_data = qnn.params("input2")
#==============================================================
#Baue die Struktur des Netzes auf
with qnn.context as q:
#Setze den Input des Netzes als Verschiebung im Ortsraum
ops.Dgate(x_data) | q[0]
ops.Dgate(y_data) | q[1]
for l in range(layers):
basis.layer(params[l], q)
#==============================================================
# Kostenfunktion
#==============================================================
def costfunc(weights):
#Um Tensorflow benutzen zu können muss ein Dictionary zwischen den symbolischen
#Variablen und den Tensorflowvariablen erstellt werden
dictio = {}
for symb, var in zip(params.flatten(), tf.reshape(weights, -1)):
dictio[symb.name] = var
dictio["input1"] = train_data_x
dictio["input2"] = train_data_y
# benutze den Tensorflowsimulator
state = eng.run(qnn, args=dictio).state
#Ortsprojektion und Varianz
output = state.quad_expectation(2)[0]
#Größe die minimiert werden soll
loss = tf.reduce_mean(tf.abs(output - train_Z) ** 2)
#Stelle sicher, dass der Trace des Outputs nahe bei 1 bleibt
#Es wird also bestraft, wenn der Circuit Operationen benutzt
#die für große Rechenfehler sorgen (dazu führen, dass der Anteil an höheren Fockstates zunimmt)
trace = tf.abs(tf.reduce_mean(state.trace()))
cost = loss + reg * (tf.abs(trace - 1) ** 2)
return cost, loss, trace, output
"""
#Das Training dieses Netzes dauert mehrere Stunden! zum Testen daher
#den Trainingsteil des Programmes auskommentieren (Gewichte werden aus Datei geladen)
#==============================================================
# Training
#==============================================================
weights = tf.Variable(weights)
history = []
start_time = time.time()
#Nutze einen Optimierer von Tensorflow. Genauer gesagt: Adam (arXiv:1412.6980v9)
opt= tf.keras.optimizers.Adam(learning_rate=lr)
# Führe das Training 1000 mal durch
for i in range(epochs):
# wenn das Programm gelaufen ist, dann resete die Engine
if eng.run_progs:
eng.reset()
with tf.GradientTape() as tape:
cost, loss, trace, output = costfunc(weights)
gradients = tape.gradient(cost, weights)
opt.apply_gradients(zip([gradients], [weights]))
history.append(loss)
#alle 10 Schritte
if i % 10 == 0:
print("Epochen: {} Gesamtkosten: {:.4f} Loss: {:.4f} Trace: {:.4f}".format(i, cost, loss, trace))
#Speichere grafisch den Trainingsfortschritt
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(X, Y, np.reshape(output,(batch,batch)), cmap="RdYlGn", lw=0.5, rstride=1, cstride=1)
ax.plot_surface(X, Y, np.reshape(train_Z,(batch,batch)), cmap="Greys", lw=0.5, rstride=1, cstride=1,alpha=0.2)
fig.set_size_inches(4.8, 5)
name=ordner+str(i)+".png"
fig.savefig(name, format='png', bbox_inches='tight')
plt.close(fig)
#Gebe die Dauer des Trainings aus
end_time = time.time()
print("Dauer: ",np.round(end_time-start_time),"Sekunden")
np.save("weights_mult",weights)
eng.reset()
# %matplotlib inline
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.sans-serif'] = ['Computer Modern Roman']
plt.style.use('default')
#Erstelle einen Plot des Trainingsverlaufes
plt.plot(history)
plt.ylabel('Kosten')
plt.xlabel('Epoche')
plt.show()
"""
#Teste den Algorithmus an nicht gelernten Trainingsdaten
#==============================================================
# Test
#==============================================================
weights=np.load("weights_mult.npy")
"""
#Simuliere fehlerhafte Gates durch Veränderung einzelner Parameter
from random import randint
for fehler in range(1):
print(fehler)
for anz in range(1):
weights=np.load("weights_mult.npy")
for z in range(8):
i=randint(0,6)
j=randint(0,27)
weights[i,j] += 0.1*np.random.normal(size=1)
cost, loss, trace, output = costfunc(weights)
eng.reset()
print(loss)
"""
dictio = {}
for symb, var in zip(params.flatten(), tf.reshape(weights, -1)):
dictio[symb.name] = var
dictio["input1"] = testX
dictio["input2"] = testY
# benutze den Tensorflowsimulator
state = eng.run(qnn, args=dictio).state
#Ortsprojektion der Ausgabe
output = state.quad_expectation(2)[0]
#Visualisiere die Ausgabe für alle Testdaten
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(tX, tY, np.reshape(output,(batch,batch)), cmap="RdYlGn", lw=0.5, rstride=1, cstride=1,alpha=0.8)
#ax.plot_surface(X, Y, np.reshape(output,(batch,batch)), cmap="RdYlGn", lw=0.5, rstride=1, cstride=1,alpha=0.8)
ax.plot_surface(X, Y, np.reshape(train_Z,(batch,batch)), cmap="Greys", lw=0.5, rstride=1, cstride=1,alpha=0.4)
fig.set_size_inches(4.8, 5)
name=ordner+"Test"+".pdf"
ax.set_xlabel('x', fontsize=18)
ax.set_ylabel('y', fontsize=18)
ax.set_zlabel('z', fontsize=18)
fig.savefig(name, format='pdf', bbox_inches='tight')
| [
"noreply@github.com"
] | LuPaSchmitt.noreply@github.com |
db15bf9a47411339c570cde0a71c23fc2c1aba37 | ee732d3f760bcb1218b65256ca050264d839fa1c | /tools/lifepool/src/SalivaDnaParser.py | 7e9ec48339d31f9237cc2a328c125cd503b3220e | [] | no_license | The-Ark-Informatics/ark | d806a6a45489a036ae4556b828d7740a7be83dd3 | 6b711a3f8dd4df384f57d150c4227a49c792f964 | refs/heads/master | 2023-03-10T04:08:24.508553 | 2023-03-06T06:52:39 | 2023-03-06T06:52:39 | 35,088,800 | 5 | 8 | null | 2020-08-09T03:21:16 | 2015-05-05T08:57:56 | JavaScript | UTF-8 | Python | false | false | 1,029 | py | '''
Created on 08/07/2013
@author: thilina
'''
import re
print "----------------------- SALIVA DNA BIOSPECIMEN --------------------------------"
inputFile = open('../resource/DNA_SALIVA_SPECIMEN.csv', 'r')
firstLine=True
output=""
for line in inputFile:
if firstLine :
firstLine=False
continue
tokens = line.split(",")
parentUid = tokens[1].strip()
specimenUid = parentUid+"-800"
initQuantity = tokens[2].replace("OragenePurifier","").replace("ml","")
quantity = tokens[4].strip()
purity = tokens[6].strip()
concentration = tokens[7].strip()
operator = tokens[3].strip()
qubit = tokens[5]
line=parentUid+","+specimenUid+","+initQuantity+","+quantity+","+purity+","+concentration+","+operator+","+qubit
print line
output=output+line+"\n"
inputFile.close()
outputFile = open('../resource/SALIVA_DNA_PROCESSED_BIOSPECIMEN.csv', 'w')
outputFile.write(output)
outputFile.close()
print "----SALIVA DNA BIOSPECIMEN DONE ----------------"
| [
"tranaweera@gmail.com"
] | tranaweera@gmail.com |
66bf729f83145854a2e66ea266c12bab27a2b7bd | 474fc20675272fe93139f3540745c25d3fda709a | /scripts/cma/purecma_reference_test.py | 82e9a23144dcf7c759606289682f17cfe244186d | [
"BSD-3-Clause"
] | permissive | ALHESzpuniWojro/Projekt-1 | c1c1618dd5d476958733fe5781742f89566bd4a4 | f0ee777ffb967f066bcdb2fa310149394bf09bd2 | refs/heads/master | 2020-09-21T08:13:55.307688 | 2020-02-06T11:28:25 | 2020-02-06T11:28:25 | 224,727,716 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 54,051 | py | #!/usr/bin/env python
"""A minimalistic implemention of CMA-ES without using `numpy`.
The Covariance Matrix Adaptation Evolution Strategy, CMA-ES, serves for
numerical nonlinear function minimization.
The **main functionality** is implemented in
1. class `CMAES`, and
2. function `fmin` which is a small single-line-usage wrapper around
`CMAES`.
This code has two **purposes**:
1. for READING and UNDERSTANDING the basic flow and the details of the
CMA-ES *algorithm*. The source code is meant to be read. For a quick
glance, study the first few code lines of `fmin` and the code of
method `CMAES.tell`, where all the real work is done in about 20 lines
of code (search "def tell" in the source). Otherwise, reading from
the top is a feasible option, where the codes of `fmin`,
`CMAES.__init__`, `CMAES.ask`, `CMAES.tell` are of particular
interest.
2. apply CMA-ES when the python module `numpy` is not available.
When `numpy` is available, `cma.fmin` or `cma.CMAEvolutionStrategy` are
preferred to run "serious" simulations. The latter code has many more
lines, but usually executes faster, offers a richer user interface,
better termination options, boundary and noise handling, injection,
automated restarts...
Dependencies: `math.exp`, `math.log` and `random.normalvariate` (modules
`matplotlib.pylab` and `sys` are optional).
Testing: call ``python purecma.py`` at the OS shell. Tested with
Python 2.6, 2.7, 3.3, 3.5, 3.6.
URL: http://github.com/CMA-ES/pycma
Last change: September, 2017, version 3.0.0
:Author: Nikolaus Hansen, 2010-2011, 2017
This code is released into the public domain (that is, you may
use and modify it however you like).
"""
from __future__ import division # such that 1/2 != 0
from __future__ import print_function # available since 2.6, not needed
___author__ = "Nikolaus Hansen"
__license__ = "public domain"
from sys import stdout as _stdout # not strictly necessary
from math import log, exp
from random import normalvariate as random_normalvariate
# Wojciech imports
import random
import test_func
import numpy as np
try:
from .interfaces import OOOptimizer, BaseDataLogger as _BaseDataLogger
except (ImportError, ValueError):
OOOptimizer, _BaseDataLogger = object, object
try:
from .recombination_weights import RecombinationWeights
except (ImportError, ValueError):
RecombinationWeights = None
del division, print_function #, absolute_import, unicode_literals, with_statement
__version__ = '3.0.0'
__author__ = 'Nikolaus Hansen'
__docformat__ = 'reStructuredText'
# ps - population size in each of an interation
# n - how many candidates will be tournamented
# mi - how many 'winners' will be picked from the population
# fn - cec testing function number (1-28)
def fmin(no, xstart, sigma,
         args=(),
         maxfevals='1e3 * N**2', ftarget=None,
         verb_disp=100, verb_log=1, verb_save=1000, ps=7, n=2, mi=5, fn=1):
    """non-linear non-convex minimization procedure, a functional
    interface to CMA-ES, modified to evaluate the CEC-2013 test
    functions via the external `test_func` module instead of a
    user-supplied objective function.

    Parameters
    ==========
    `no`: `int`
        run number, only used to build the result/log file names
    `xstart`: `list` or sequence
        list of numbers (like `[3.2, 2, 1]`), initial solution vector,
        its length defines the search space dimension.
    `sigma`: `float`
        initial step-size, standard deviation in any coordinate
    `args`: `tuple` or sequence
        additional (optional) arguments; unused in this modified
        version (kept for interface compatibility with purecma)
    `maxfevals`: `int` or `str`
        maximal number of function evaluations, a string
        is evaluated with ``N`` as search space dimension
    `ftarget`: `float`
        target function value
    `verb_disp`: `int`
        display on console every `verb_disp` iteration, 0 for never
    `verb_log`: `int`
        data logging every `verb_log` iteration, 0 for never
    `verb_save`: `int`
        save logged data every ``verb_save * verb_log`` iteration
    `ps`: `int`
        population size in each iteration (lambda)
    `n`: `int`
        number of tournament candidates; here only used in file names
    `mi`: `int`
        number of "winners" picked from the population (mu)
    `fn`: `int`
        CEC test function number (1-28)

    Return
    ======
    The `list` ``[xmin, es]``, where ``xmin`` is the best seen
    (evaluated) solution and ``es`` is the corresponding `CMAES`
    instance. Consult ``help(es.result)`` of property `result` for
    further results. Returns `None` on invalid dimension or if
    ``mi`` exceeds the population size.
    """
    # one result file per run; the handle is module-global because
    # CMAES.disp also writes to it
    # NOTE(review): the file is never closed explicitly -- TODO confirm
    # whether relying on interpreter shutdown to flush it is acceptable
    name2 ='result_out_ref/' + str(no) + '_dim_' + str(len(xstart)) + '_cec_' + str(fn) + '_tourcand_' + str(n) + '_data'
    global output
    output = open(name2, 'w')
    es = CMAES(no, xstart, sigma, es_winners_number = mi, popsize=ps, tourcand=n, cecfn=fn, maxfevals=maxfevals, ftarget=ftarget)
    if verb_log:  # prepare data logging
        name ='data_out_ref/' + str(no) + '_dim_' + str(len(xstart)) + '_cec_' + str(fn) + '_tourcand_' + str(n) + '_data'
        es.logger = CMAESDataLogger(name, verb_log).add(es, force=True)
    iterations = 0
    f_worst = True  # True until the first "worst observed value" is set
    while not es.stop():
        X = es.ask()  # gets a list of sampled candidate solutions
        #########################################################################################
        #-------------------------- OUR MODIFICATION - CEC FUNCTIONS ---------------------------#
        #########################################################################################
        dim = len(xstart)
        if(not(dim==2 or dim==5 or dim==10 or dim==20 or dim==30)):
            print('"\nError: CEC test functions are only defined for D=2,5,10,20,30.')
            return
        #fit = [objective_fct(x, *args) for x in X] # evaluate candidates
        # flatten the population into a single vector, as required by
        # the test_func.X interface
        X2 = []
        for i in range(len(X)):
            for j in range(dim):
                X2.append(X[i][j])
        tf = test_func.X()
        tf.set(X2, ps, dim, fn)
        fit = tf.get()  # objective values of all ps candidates
        #print("wylosowana populacja")
        #print(X)
        #print("wartosci f celu")
        #print(fit)
        #print("------------------------------------------------")
        #########################################################################################
        #------------------------- OUR MODIFICATION - PENALTY FUNCTION -------------------------#
        #########################################################################################
        # worst observed value (running maximum over all iterations)
        if (f_worst==True) :
            worst = sorted(fit)[len(fit)-1]
            f_worst = False
        elif (worst < sorted(fit)[len(fit)-1]):
            worst = sorted(fit)[len(fit)-1]
        #print("najgorszy zaobserwowany")
        #print(worst)
        # penalize candidates outside the box [-80, 80]^dim: replace their
        # fitness with the worst observed value plus the squared excess
        for i in range(len(X)):
            outsider = False
            dev = 0
            for j in range(dim):
                if(X[i][j] < -80 or X[i][j] > 80):
                    outsider = True
                    dev += (abs(X[i][j])-80)**2
            if(outsider == True):
                fit[i] = dev + worst
        if mi > es.params.lam:
            print ("Number of winners must be smaller or equal to number of population!")
            return
        # sort candidates by fitness (best first) before the update
        newX = [X[k] for k in argsort(fit)]
        newFit = sorted(fit)
        es.tell(newX, newFit, mi)  # update distribution parameters
        # that's it! The remainder is managing output behavior only.
        es.disp(verb_disp)
        if verb_log:
            if es.counteval / es.params.lam % verb_log < 1:
                es.logger.add(es)
            if verb_save and (es.counteval / es.params.lam
                              % (verb_save * verb_log) < 1):
                es.logger.save()
        iterations += 1
    if verb_disp:  # do not print by default to allow silent verbosity
        es.disp(1)
        output.write('Starting point = ' + str(xstart) + '\n')
        print('Starting point = ', xstart)
        output.write('Population size = ' + str(ps) + '\n')
        print('Population size = ', ps)
        # NOTE(review): `dim` is only bound inside the while loop above, so
        # this raises NameError if the loop body never ran -- TODO confirm
        output.write('Dimension = ' + str(dim) + '\n')
        print('Dimension = ', dim)
        output.write('Tournament size = ' + str(n) + '\n')
        print('Tournament size = ', n)
        output.write('\"Winners" number = ' + str(mi) + '\n')
        print('\"Winners" number = ', mi)
        output.write('CEC test function number = ' + str(fn) + '\n')
        print('CEC test function number = ', fn)
        output.write('Termination by ' + str(es.stop()) + '\n')
        print('Termination by ', es.stop())
        output.write('Iterations = ' + str(iterations) + '\n')
        print('Iterations =', iterations)
        output.write('Best f-value = ' + str(es.result[1]) + '\n')
        print('Best f-value = ', es.result[1])
        output.write('Solution = ' + str(es.result[0]) + '\n')
        print('Solution = ', es.result[0])
        print("------------------------------")
    if verb_log:
        es.logger.add(es, force=True)
        es.logger.save() if verb_save else None
    #return [es.best.x if es.best.f < objective_fct(es.xmean) else
    #        es.xmean, es]
    return [es.result[0], es]
class CMAESParameters(object):
    """Static ("strategy") parameter settings for `CMAES`.

    All attributes are computed once in `__init__` from the problem
    dimension `N`, the number of selected "winners" and the population
    size, and never change afterwards.
    """
    default_popsize = '4 + int(3 * log(N))'

    # Our modification: winners_number replaces the usual mu = lam // 2
    def __init__(self, N, winners_number, popsize=None,
                 RecombinationWeights=None):
        """Compute the fixed strategy parameters once and for all.

        Input parameter ``RecombinationWeights`` may be set to the
        class `RecombinationWeights`; otherwise non-negative weights
        are computed "manually".  `popsize` may be an `int` or a string
        evaluated with ``N`` (the dimension) in scope.
        """
        self.dimension = N
        self.chiN = (1 - 1. / (4 * N) + 1. / (21 * N**2))

        # --- selection: lambda (population size) and mu (parents) ---
        popsize_expr = popsize if popsize else CMAESParameters.default_popsize
        self.lam = eval(safe_str(popsize_expr,
                                 {'int': 'int', 'log': 'log', 'N': N}))
        # Our modification: mu is passed in instead of lam // 2
        self.mu = winners_number

        if RecombinationWeights:
            self.weights = RecombinationWeights(self.lam)
            self.mueff = self.weights.mueff
        else:
            # non-negative recombination weights, normalized to sum to one
            raw = [log(self.lam / 2 + 0.5) - log(i + 1) if i < self.mu else 0
                   for i in range(self.lam)]
            total = sum(raw[:self.mu])
            self.weights = [w / total for w in raw]
            # variance-effectiveness of sum w_i x_i
            self.mueff = sum(self.weights[:self.mu])**2 / \
                sum(w**2 for w in self.weights[:self.mu])

        # --- adaptation: time constants and learning rates ---
        self.cc = (4 + self.mueff/N) / (N+4 + 2 * self.mueff/N)   # cumulation for C
        self.cs = (self.mueff + 2) / (N + self.mueff + 5)         # cumulation for sigma
        self.c1 = 2 / ((N + 1.3)**2 + self.mueff)                 # rank-one update of C
        self.cmu = min([1 - self.c1,                              # rank-mu update of C
                        2 * (self.mueff - 2 + 1/self.mueff) / ((N + 2)**2 + self.mueff)])
        self.damps = 2 * self.mueff/self.lam + 0.3 + self.cs      # sigma damping
        if RecombinationWeights:
            self.weights.finalize_negative_weights(N, self.c1, self.cmu)
        # gap to postpone eigendecomposition to achieve O(N**2) per eval;
        # 0.5 is chosen such that eig takes 2 times the time of tell in >=20-D
        self.lazy_gap_evals = 0.5 * N * self.lam * (self.c1 + self.cmu)**-1 / N**2
class CMAES(OOOptimizer): # could also inherit from object
    """class for non-linear non-convex numerical minimization with CMA-ES.

    The class implements the interface defined in `OOOptimizer`, namely
    the methods `__init__`, `ask`, `tell`, `stop`, `disp` and property
    `result`.

    This is a modified purecma version: the constructor takes extra
    bookkeeping arguments (`no`, `es_winners_number`, `tourcand`,
    `cecfn`) mainly used for the log file name, and `tell` expects
    candidates already sorted by fitness (done in `fmin`).

    The usual iteration loop (see `fmin`)::

        es = CMAES(...)
        while not es.stop():
            X = es.ask()              # sample candidate solutions
            fit = evaluate(X)         # evaluate, penalize, sort
            es.tell(X, fit, mi)       # update m, sigma, C
            es.disp(20)               # display every 20th iteration

    Most of the work is done in the method `tell`. The property
    `result` contains more useful output.

    :See: `fmin`, `OOOptimizer.optimize`
    """
    # Our modification - es_winners_number
    def __init__(self, no, xstart, sigma, es_winners_number, popsize, tourcand, cecfn, # mandatory
                 ftarget=None,
                 maxfevals='100 * popsize + ' # 100 iterations plus...
                           '150 * (N + 3)**2 * popsize**0.5',
                 randn=random_normalvariate):
        """Instantiate `CMAES` object instance using `xstart` and `sigma`.

        Parameters
        ----------
        `no`: `int`
            run number, only used in the logger file name
        `xstart`: `list`
            of numbers (like ``[3, 2, 1.2]``), initial
            solution vector
        `sigma`: `float`
            initial step-size (standard deviation in each coordinate)
        `es_winners_number`: `int`
            number of selected ("mu") solutions used for recombination
        `popsize`: `int` or `str`
            population size, number of candidate samples per iteration
        `tourcand`: `int`
            tournament size, only used in the logger file name
        `cecfn`: `int`
            CEC test function number, only used in the logger file name
        `maxfevals`: `int` or `str`
            maximal number of function evaluations, a string is
            evaluated with ``N`` as search space dimension
        `ftarget`: `float`
            target function value
        `randn`: `callable`
            normal random number generator, by default
            `random.normalvariate`

        Details: this method initializes the dynamic state variables and
        creates a `CMAESParameters` instance for static parameters.
        """
        # process some input parameters and set static parameters
        N = len(xstart)  # number of objective variables/problem dimension
        self.params = CMAESParameters(N, es_winners_number, popsize)
        self.maxfevals = eval(safe_str(maxfevals,
                                       known_words={'N': N, 'popsize': self.params.lam}))
        self.ftarget = ftarget  # stop if fitness <= ftarget
        self.randn = randn
        # Modification: logger file name and remembered population size
        name ='data_out_ref/' + str(no) + '_dim_' + str(N) + '_cec_' + str(cecfn) + '_tourcand_' + str(tourcand) + '_data'
        self.popsz = popsize
        # initializing dynamic state variables
        self.xmean = xstart[:]  # initial point, distribution mean, a copy
        self.sigma = sigma
        self.pc = N * [0]  # evolution path for C
        self.ps = N * [0]  # and for sigma
        self.C = DecomposingPositiveMatrix(N)  # covariance matrix
        self.counteval = 0  # countiter should be equal to counteval / lam
        self.fitvals = []   # for bookkeeping output and termination
        self.best = BestSolution()
        self.logger = CMAESDataLogger(filename = name) # for convenience and output

    def ask(self):
        """sample lambda candidate solutions

        distributed according to::

            m + sigma * Normal(0,C) = m + sigma * B * D * Normal(0,I)
                                    = m + B * D * sigma * Normal(0,I)

        and return a `list` of the sampled "vectors".
        """
        self.C.update_eigensystem(self.counteval,
                                  self.params.lazy_gap_evals)
        candidate_solutions = []
        for k in range(self.params.lam):  # repeat lam times
            z = [self.sigma * eigenval**0.5 * self.randn(0, 1)
                 for eigenval in self.C.eigenvalues]
            y = dot(self.C.eigenbasis, z)
            candidate_solutions.append(plus(self.xmean, y))
        return candidate_solutions

    def tell(self, arx, fitvals, mi):
        """update the evolution paths and the distribution parameters m,
        sigma, and C within CMA-ES.

        Parameters
        ----------
        `arx`: `list` of "row vectors"
            a list of candidate solution vectors, presumably from
            calling `ask`, already sorted by fitness (best first in
            this modified version). ``arx[k][i]`` is the i-th element
            of solution vector k.
        `fitvals`: `list`
            the corresponding objective function values in ascending
            order, to be minimised
        `mi`: `int`
            number of "winners"; not referenced in this method
            (``self.params.mu`` is used instead), kept for interface
            reasons
        """
        ### bookkeeping and convenience short cuts
        self.counteval += self.popsz # evaluations used within tell
        N = len(self.xmean)
        par = self.params
        ## Back to purecma code
        xold = self.xmean # not a copy, xmean is assigned a new later
        ### Sort by fitness
        self.fitvals = fitvals # used for termination and display only
        self.best.update(arx[0], self.fitvals[0], self.counteval)
        ### recombination, compute new weighted mean value
        self.xmean = dot(arx[0:par.mu], par.weights[:par.mu], transpose=True)
        #          = [sum(self.weights[k] * arx[k][i] for k in range(self.mu))
        #                                             for i in range(N)]
        ### Cumulation: update evolution paths
        y = minus(self.xmean, xold)
        z = dot(self.C.invsqrt, y) # == C**(-1/2) * (xnew - xold)
        csn = (par.cs * (2 - par.cs) * par.mueff)**0.5 / self.sigma
        for i in range(N): # update evolution path ps
            self.ps[i] = (1 - par.cs) * self.ps[i] + csn * z[i]
        ccn = (par.cc * (2 - par.cc) * par.mueff)**0.5 / self.sigma
        # turn off rank-one accumulation when sigma increases quickly
        hsig = (sum(x**2 for x in self.ps) / N # ||ps||^2 / N is 1 in expectation
                / (1-(1-par.cs)**(2*self.counteval/par.lam)) # account for initial value of ps
                < 2 + 4./(N+1)) # should be smaller than 2 + ...
        for i in range(N): # update evolution path pc
            self.pc[i] = (1 - par.cc) * self.pc[i] + ccn * hsig * y[i]
        ### Adapt covariance matrix C
        # minor adjustment for the variance loss from hsig
        c1a = par.c1 * (1 - (1-hsig**2) * par.cc * (2-par.cc))
        self.C.multiply_with(1 - c1a - par.cmu * sum(par.weights)) # C *= 1 - c1 - cmu * sum(w)
        self.C.addouter(self.pc, par.c1) # C += c1 * pc * pc^T, so-called rank-one update
        for k, wk in enumerate(par.weights): # so-called rank-mu update
            if wk < 0: # guaranty positive definiteness
                wk *= N * (self.sigma / self.C.mahalanobis_norm(minus(arx[k], xold)))**2
            self.C.addouter(minus(arx[k], xold), # C += wk * cmu * dx * dx^T
                            wk * par.cmu / self.sigma**2)
        ### Adapt step-size sigma
        cn, sum_square_ps = par.cs / par.damps, sum(x**2 for x in self.ps)
        self.sigma *= exp(min(1, cn * (sum_square_ps / N - 1) / 2))
        # self.sigma *= exp(min(1, cn * (sum_square_ps**0.5 / par.chiN - 1)))

    def stop(self):
        """return satisfied termination conditions in a dictionary,

        generally speaking like ``{'termination_reason':value, ...}``,
        for example ``{'tolfun':1e-12}``, or the empty `dict` ``{}``.
        """
        res = {}
        if self.counteval <= 0:
            return res
        if self.counteval >= self.maxfevals:
            res['maxfevals'] = self.maxfevals
        if self.ftarget is not None and len(self.fitvals) > 0 \
                and self.fitvals[0] <= self.ftarget:
            res['ftarget'] = self.ftarget
        if self.C.condition_number > 1e14:
            res['condition'] = self.C.condition_number
        if len(self.fitvals) > 1 \
                and self.fitvals[-1] - self.fitvals[0] < 1e-12:
            res['tolfun'] = 1e-12
        if self.sigma * max(self.C.eigenvalues)**0.5 < 1e-11:
            # remark: max(D) >= max(diag(C))**0.5
            res['tolx'] = 1e-11
        return res

    @property
    def result(self):
        """the `tuple` ``(xbest, f(xbest), evaluations_xbest, evaluations,
        iterations, xmean, stds)``
        """
        return (self.best.x,
                self.best.f,
                self.best.evals,
                self.counteval,
                int(self.counteval / self.params.lam),
                self.xmean,
                [self.sigma * C_ii**0.5 for C_ii in self.C.diag])

    def disp(self, verb_modulo=1):
        """`print` some iteration info to `stdout` and to the
        module-global `output` file (opened in `fmin`); hence `disp`
        must not be called before `fmin` has opened that file
        """
        if verb_modulo is None:
            verb_modulo = 20
        if not verb_modulo:
            return
        iteration = self.counteval / self.params.lam
        if iteration == 1 or iteration % (10 * verb_modulo) < 1:
            output.write('evals: ax-ratio max(std) f-value\n')
            print('evals: ax-ratio max(std) f-value')
        if iteration <= 2 or iteration % verb_modulo < 1:
            output.write(str(self.counteval).rjust(5) + ': ' +
                         ' %6.1f %8.1e ' % (self.C.condition_number**0.5,
                                            self.sigma * max(self.C.diag)**0.5) +
                         str(self.fitvals[0])+'\n')
            print(str(self.counteval).rjust(5) + ': ' +
                  ' %6.1f %8.1e ' % (self.C.condition_number**0.5,
                                     self.sigma * max(self.C.diag)**0.5) +
                  str(self.fitvals[0]))
            _stdout.flush()
# -----------------------------------------------
class CMAESDataLogger(_BaseDataLogger): # could also inherit from object
    """data logger for class `CMAES`, that can record and plot data.

    An instance is attached to `CMAES` as attribute ``logger``; `fmin`
    calls `add` and `save` during the run.  In this modified version a
    `filename` must be passed to the constructor, and `save` is a no-op
    (its body is commented out, see below).

    TODO: the recorded data are kept in memory and keep growing, which
    may well lead to performance issues for (very?) long runs. Ideally,
    it should be possible to dump data to a file and clear the memory and
    also to downsample data to prevent plotting of long runs to take
    forever.
    """
    plotted = 0
    """plot count for all instances"""

    def __init__(self, filename, verb_modulo=1):
        """`verb_modulo` controls whether and when logging takes place
        for each call to the method `add`
        """
        # _BaseDataLogger.__init__(self)  # not necessary
        self.filename = filename
        self.optim = None
        self.modulo = verb_modulo
        # one list per recorded quantity; `add` appends in lockstep
        self._data = {'eval': [], 'iter': [], 'stds': [], 'D': [],
                      'sigma': [], 'fit': [], 'xmean': [], 'more_data': []}
        self.counter = 0  # number of calls of add

    def add(self, es=None, force=False, more_data=None):
        """append some logging data from CMAES class instance `es`,

        if ``number_of_times_called modulo verb_modulo`` equals zero
        """
        es = es or self.optim
        if not isinstance(es, CMAES):
            raise RuntimeWarning('logged object must be a CMAES instance,'
                                 ' was %s' % type(es))
        dat = self._data  # a convenient alias
        self.counter += 1
        if force and self.counter == 1:
            self.counter = 0
        # record if logging is enabled, counteval advanced since the last
        # record, and this call is due (first 3 calls, forced, or modulo)
        if (self.modulo
                and (len(dat['eval']) == 0
                     or es.counteval != dat['eval'][-1])
                and (self.counter < 4 or force
                     or int(self.counter) % self.modulo == 0)):
            dat['eval'].append(es.counteval)
            dat['iter'].append(es.counteval / es.params.lam)
            dat['stds'].append([es.C[i][i]**0.5
                                for i in range(len(es.C))])
            dat['D'].append(sorted(ev**0.5 for ev in es.C.eigenvalues))
            dat['sigma'].append(es.sigma)
            dat['fit'].append(es.fitvals[0] if hasattr(es, 'fitvals')
                                               and es.fitvals
                              else None)
            dat['xmean'].append([x for x in es.xmean])
            if more_data is not None:
                dat['more_data'].append(more_data)
        return self

    def plot(self, fig_number=322):
        """plot the stored data in figure `fig_number`.

        Dependencies: `matlabplotlib.pylab`
        """
        from matplotlib import pylab
        from matplotlib import pyplot
        from matplotlib.pylab import (
            gca, figure, plot, xlabel, grid, semilogy, text, draw, show,
            subplot, tight_layout, rcParamsDefault, xlim, ylim, title, savefig
            )

        # local helpers with consistent default styling
        def title_(*args, **kwargs):
            kwargs.setdefault('size', rcParamsDefault['axes.labelsize'])
            pylab.title(*args, **kwargs)

        def subtitle(*args, **kwargs):
            kwargs.setdefault('horizontalalignment', 'center')
            text(0.5 * (xlim()[1] - xlim()[0]), 0.9 * ylim()[1],
                 *args, **kwargs)

        def legend_(*args, **kwargs):
            kwargs.setdefault('framealpha', 0.3)
            kwargs.setdefault('fancybox', True)
            kwargs.setdefault('fontsize', rcParamsDefault['font.size'] - 2)
            pylab.legend(*args, **kwargs)

        # dead code kept as in the original (figure-title construction;
        # references names like ps/dim/n that are not in scope here)
        '''fig = figure(fig_number)
        fig_title = 'Population size = ' + ps + ', '
        fig_title += 'Dimension = ' + dim + ', '
        if(n!=2):
            'Tournament size = ' + n + ', '
        else:
            fig_title += 'Pairs comaparison' + ', '
        fig_title +='\"Winners" number = ' + mi + ', '
        fig_title += 'CEC test function number = ' + fn + ', '
        fig_title += 'Termination by' + st + ', '
        fig_title += 'Iterations =' + iterations + ', '
        fig_title += 'Best f-value =' + es.result[1] + ', '
        fig_title += 'Solution =' + es.result[0]
        fig.suptitle(fig_title, fontsize=14)'''

        dat = self._data  # dictionary with entries as given in __init__
        if not dat:
            return
        try:  # a hack to get the presumable population size lambda
            strpopsize = ' (evaluations / %s)' % str(dat['eval'][-2] -
                                                     dat['eval'][-3])
        except IndexError:
            strpopsize = ''

        # plot fit, Delta fit, sigma
        subplot(221)
        gca().clear()
        if dat['fit'][0] is None:  # plot is fine with None, but comput-
            dat['fit'][0] = dat['fit'][1]  # tations need numbers
            # should be reverted later, but let's be lazy
        assert dat['fit'].count(None) == 0
        fmin = min(dat['fit'])
        imin = dat['fit'].index(fmin)
        dat['fit'][imin] = max(dat['fit']) + 1  # temporarily mask the min
        fmin2 = min(dat['fit'])                 # second-best value
        dat['fit'][imin] = fmin                 # restore
        semilogy(dat['iter'], [f - fmin if f - fmin > 1e-19 else None
                               for f in dat['fit']],
                 'c', linewidth=1, label='f-min(f)')
        semilogy(dat['iter'], [max((fmin2 - fmin, 1e-19)) if f - fmin <= 1e-19 else None
                               for f in dat['fit']], 'C1*')
        semilogy(dat['iter'], [abs(f) for f in dat['fit']], 'b',
                 label='abs(f-value)')
        semilogy(dat['iter'], dat['sigma'], 'g', label='sigma')
        semilogy(dat['iter'][imin], abs(fmin), 'r*', label='abs(min(f))')
        if dat['more_data']:
            gca().twinx()
            plot(dat['iter'], dat['more_data'])
        grid(True)
        legend_(*[[v[i] for i in [1, 0, 2, 3]]  # just a reordering
                  for v in gca().get_legend_handles_labels()])

        # plot xmean
        subplot(222)
        gca().clear()
        plot(dat['iter'], dat['xmean'])
        for i in range(len(dat['xmean'][-1])):
            text(dat['iter'][0], dat['xmean'][0][i], str(i))
            text(dat['iter'][-1], dat['xmean'][-1][i], str(i))
        subtitle('mean solution')
        grid(True)

        # plot squareroot of eigenvalues
        subplot(223)
        gca().clear()
        semilogy(dat['iter'], dat['D'], 'm')
        xlabel('iterations' + strpopsize)
        title_('Axis lengths')
        grid(True)

        # plot stds
        subplot(224)
        # if len(gcf().axes) > 1:
        #     sca(pylab.gcf().axes[1])
        # else:
        #     twinx()
        gca().clear()
        semilogy(dat['iter'], dat['stds'])
        for i in range(len(dat['stds'][-1])):
            text(dat['iter'][-1], dat['stds'][-1][i], str(i))
        title_('Coordinate-wise STDs w/o sigma')
        grid(True)
        xlabel('iterations' + strpopsize)
        _stdout.flush()
        tight_layout()
        #pyplot.tight_layout(pad=20)
        #pyplot.subplots_adjust(top=0.9)
        #savefig('books_read.png')
        draw()
        show()
        CMAESDataLogger.plotted += 1

    def save(self, name=None):
        """save data to file `name` or ``self.filename``

        (disabled in this modified version: the body is commented out,
        so calling `save` is a no-op)
        """
        #with open(name or self.filename, 'w') as f:
        #    f.write(repr(self._data))

    def load(self, name=None):
        """load data from file `name` or ``self.filename``"""
        from ast import literal_eval
        with open(name or self.filename, 'r') as f:
            self._data = literal_eval(f.read())
        return self
#_____________________________________________________________________
#_________________ Fitness (Objective) Functions _____________________
class ff(object):  # instead of a submodule
    """a versatile collection of test objective functions,
    provided as static methods"""

    @staticmethod
    def elli(x):
        """ellipsoid test objective function"""
        dim = len(x)
        aratio = 1e3  # axis ratio
        return sum(aratio**(2. * i / (dim - 1)) * x[i]**2
                   for i in range(dim))

    @staticmethod
    def sphere(x):
        """sphere, ``sum(x**2)``, test objective function"""
        return sum(xi**2 for xi in x)

    @staticmethod
    def tablet(x):
        """discus test objective function"""
        squares = sum(xi**2 for xi in x)
        return squares + (1e6 - 1) * x[0]**2

    @staticmethod
    def rosenbrock(x):
        """Rosenbrock test objective function"""
        if len(x) < 2:
            raise ValueError('dimension must be greater one')
        total = 0
        for a, b in zip(x[:-1], x[1:]):  # consecutive coordinate pairs
            total += 100 * (a**2 - b)**2 + (a - 1)**2
        return total
#_____________________________________________________________________
#_______________________ Helper Class&Functions ______________________
#
class BestSolution(object):
    """container tracking the best solution seen so far"""

    def __init__(self, x=None, f=None, evals=None):
        """optionally seed with solution `x`, its f-value `f`, and the
        evaluation count `evals` at which it was found"""
        self.x = x
        self.f = f
        self.evals = evals

    def update(self, x, f, evals=None):
        """replace the stored solution if `f` improves on ``self.f``;
        return `self` for chaining"""
        if self.f is None or f < self.f:
            self.x, self.f, self.evals = x, f, evals
        return self

    @property
    def all(self):
        """``(x, f, evals)`` of the best seen solution"""
        return self.x, self.f, self.evals
class SquareMatrix(list):  # inheritance from numpy.ndarray is not recommended
    """rudimentary square matrix, stored as a `list` of row lists"""

    def __init__(self, dimension):
        """initialize as the `dimension` x `dimension` identity matrix"""
        for k in range(dimension):
            row = dimension * [0]
            row[k] = 1
            self.append(row)

    def multiply_with(self, factor):
        """scale every entry in place by `factor`; return `self`"""
        for row in self:
            for col in range(len(row)):
                row[col] *= factor
        return self

    def addouter(self, b, factor=1):
        """Add in place `factor` times outer product of vector `b`,
        without any dimensional consistency checks; return `self`.
        """
        for i, row in enumerate(self):
            scaled_bi = factor * b[i]
            for j in range(len(row)):
                row[j] += scaled_bi * b[j]
        return self

    @property
    def diag(self):
        """diagonal of the matrix as a copy (safe to change)"""
        return [self[i][i] for i in range(len(self)) if i < len(self[i])]
class DecomposingPositiveMatrix(SquareMatrix):
    """Symmetric positive matrix that maintains its own eigendecomposition.

    If ``isinstance(C, DecomposingPositiveMatrix)``, the decomposition
    is kept in the attributes `eigenbasis` and `eigenvalues` such that
    the i-th eigenvector is the i-th *column* of `eigenbasis`, i.e.
    ``[row[i] for row in C.eigenbasis]``, with eigenvalue
    ``C.eigenvalues[i]``, hence::

        C = C.eigenbasis x diag(C.eigenvalues) x C.eigenbasis^T
    """
    def __init__(self, dimension):
        SquareMatrix.__init__(self, dimension)
        self.eigenbasis = eye(dimension)    # B: columns are eigenvectors
        self.eigenvalues = dimension * [1]  # D: eigenvalues
        self.condition_number = 1
        self.invsqrt = eye(dimension)       # C**(-1/2)
        self.updated_eval = 0               # eval count at last decomposition

    def update_eigensystem(self, current_eval, lazy_gap_evals):
        """Execute eigendecomposition of `self` if
        ``current_eval > lazy_gap_evals + last_updated_eval``.

        Assumes (for sake of simplicity) that `self` is positive
        definite and hence raises a `RuntimeError` otherwise.
        """
        if current_eval <= self.updated_eval + lazy_gap_evals:
            return self  # decomposition still fresh, skip O(N**3) work
        self._enforce_symmetry()  # probably not necessary with eig
        self.eigenvalues, self.eigenbasis = eig(self)  # O(N**3)
        if min(self.eigenvalues) <= 0:
            raise RuntimeError(
                "The smallest eigenvalue is <= 0 after %d evaluations!"
                "\neigenvectors:\n%s \neigenvalues:\n%s"
                % (current_eval, str(self.eigenbasis), str(self.eigenvalues)))
        self.condition_number = max(self.eigenvalues) / min(self.eigenvalues)
        # invsqrt(C) = C**(-1/2) = B D**(-1/2) B'; fill both triangles
        # at once -- this is O(n^3) and takes about 25% of the time of eig
        n = len(self)
        for row in range(n):
            for col in range(row + 1):
                value = sum(self.eigenbasis[row][k] * self.eigenbasis[col][k]
                            / self.eigenvalues[k]**0.5 for k in range(n))
                self.invsqrt[row][col] = self.invsqrt[col][row] = value
        self.updated_eval = current_eval
        return self

    def mahalanobis_norm(self, dx):
        """return ``(dx^T * C^-1 * dx)**0.5``
        """
        transformed = dot(self.invsqrt, dx)
        return sum(xi**2 for xi in transformed)**0.5

    def _enforce_symmetry(self):
        # average the two triangles to remove numerical asymmetry
        for row in range(len(self)):
            for col in range(row):
                mean = (self[row][col] + self[col][row]) / 2
                self[row][col] = self[col][row] = mean
        return self
def eye(dimension):
    """return identity matrix as `list` of "vectors" (lists themselves)"""
    # note: ``N * [N * [0]]`` would alias one row object N times
    return [[1 if r == c else 0 for c in range(dimension)]
            for r in range(dimension)]
def dot(A, b, transpose=False):
    """usual dot product of "matrix" A with "vector" b.

    ``A[i]`` is the i-th row of A. With ``transpose=True``, A
    transposed is used. Returns a plain `list`.
    """
    if transpose:
        return [sum(A[j][i] * b[j] for j in range(len(b)))
                for i in range(len(A[0]))]
    return [sum(row[j] * b[j] for j in range(len(b)))
            for row in A]
def plus(a, b):
    """elementwise vector addition; return ``a + b`` as a new list"""
    return [ai + b[i] for i, ai in enumerate(a)]
def minus(a, b):
    """elementwise vector subtraction; return ``a - b`` as a new list"""
    return [ai - b[i] for i, ai in enumerate(a)]
def argsort(a):
    """return the index list that sorts `a`, i.e.
    ``a[argsort(a)[i]] == sorted(a)[i]``
    """
    indices = list(range(len(a)))
    indices.sort(key=lambda i: a[i])  # stable, like sorted()
    return indices
def safe_str(s, known_words=None):
    """return ``s`` as a `str` that is safe to `eval`, or raise an exception.

    Keys of the `dict` `known_words` found in ``s`` are replaced by
    their values surrounded with a space, which the caller considers
    safe to evaluate with `eval` afterwards.

    Known issues (replacement is plain substring substitution):

    >>> try: from cma.purecma import safe_str
    ... except ImportError: from purecma import safe_str
    >>> safe_str('int(p)', {'int': 'int', 'p': 3.1})  # fine
    ' int ( 3.1 )'
    >>> safe_str('int(n)', {'int': 'int', 'n': 3.1})  # unexpected
    ' i 3.1 t ( 3.1 )'
    """
    allowed_chars = ' 0123456789.,+-*()[]e'
    if s != str(s):  # a non-string is just stringified
        return str(s)
    known_words = known_words or {}
    checked = s[:]   # word-stripped copy, validated below
    result = s[:]    # copy with substituted values, returned
    # longest words first so shorter words cannot eat parts of longer ones
    for word in sorted(known_words, key=len, reverse=True):
        checked = checked.replace(word, ' ')
        result = result.replace(word, " %s " % known_words[word])
    for ch in checked:
        if ch not in allowed_chars:
            raise ValueError('"%s" is not a safe string'
                             ' (known words are %s)' % (s, str(known_words)))
    return result
#____________________________________________________________
#____________________________________________________________
#
# C and B are arrays rather than matrices, because they are
# addressed via B[i][j], matrices can only be addressed via B[i,j]
# tred2(N, B, diagD, offdiag);
# tql2(N, diagD, offdiag, B);
# Symmetric Householder reduction to tridiagonal form, translated from
# JAMA package.
def eig(C):
    """eigendecomposition of a symmetric matrix.
    Return the eigenvalues and an orthonormal basis
    of the corresponding eigenvectors, ``(EVals, Basis)``, where
    - ``Basis[i]``: `list`, is the i-th row of ``Basis``
    - the i-th column of ``Basis``, ie ``[Basis[j][i] for j in range(len(Basis))]``
    is the i-th eigenvector with eigenvalue ``EVals[i]``
    Details: much slower than `numpy.linalg.eigh`.
    """
    # class eig(object):
    # def __call__(self, C):
    # Householder transformation of a symmetric matrix V into tridiagonal
    # form.
    # -> n : dimension
    # -> V : symmetric nxn-matrix
    # <- V : orthogonal transformation matrix:
    # tridiag matrix == V * V_in * V^t
    # <- d : diagonal
    # <- e[0..n-1] : off diagonal (elements 1..n-1)
    # Symmetric tridiagonal QL algorithm, iterative
    # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3
    # operations
    # -> n : Dimension.
    # -> d : Diagonale of tridiagonal matrix.
    # -> e[1..n-1] : off-diagonal, output from Householder
    # -> V : matrix output von Householder
    # <- d : eigenvalues
    # <- e : garbage?
    # <- V : basis of eigenvectors, according to d
    # tred2(N, B, diagD, offdiag); B=C on input
    # tql2(N, diagD, offdiag, B);
    #import numpy as np
    #return np.linalg.eigh(C) # return sorted EVs
    try:
        num_opt = False # True doesn't work (yet)
        if num_opt:
            import numpy as np
    except ImportError:
        num_opt = False
    # NOTE: ``num_opt`` selects an unfinished numpy fast path; both nested
    # helpers below read it via closure, so it stays False in practice.
    # private void tred2 (int n, double V[][], double d[], double e[]) {
    def tred2(n, V, d, e):
        # This is derived from the Algol procedures tred2 by
        # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
        # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
        # Fortran subroutine in EISPACK.
        # num_opt = False # factor 1.5 in 30-D
        d[:] = V[n-1][:] # d is output argument
        if num_opt:
            # V = np.asarray(V, dtype=float)
            e = np.asarray(e, dtype=float)
        # Householder reduction to tridiagonal form.
        for i in range(n-1, 0, -1):
            # Scale to avoid under/overflow.
            h = 0.0
            if not num_opt:
                scale = 0.0
                for k in range(i):
                    scale = scale + abs(d[k])
            else:
                scale = sum(np.abs(d[0:i]))
            if scale == 0.0:
                e[i] = d[i-1]
                for j in range(i):
                    d[j] = V[i-1][j]
                    V[i][j] = 0.0
                    V[j][i] = 0.0
            else:
                # Generate Householder vector.
                if not num_opt:
                    for k in range(i):
                        d[k] /= scale
                        h += d[k] * d[k]
                else:
                    d[:i] /= scale
                    h = np.dot(d[:i], d[:i])
                f = d[i-1]
                g = h**0.5
                if f > 0:
                    g = -g
                e[i] = scale * g
                h -= f * g
                d[i-1] = f - g
                if not num_opt:
                    for j in range(i):
                        e[j] = 0.0
                else:
                    e[:i] = 0.0
                # Apply similarity transformation to remaining columns.
                for j in range(i):
                    f = d[j]
                    V[j][i] = f
                    g = e[j] + V[j][j] * f
                    if not num_opt:
                        for k in range(j+1, i):
                            g += V[k][j] * d[k]
                            e[k] += V[k][j] * f
                        e[j] = g
                    else:
                        e[j+1:i] += V.T[j][j+1:i] * f
                        e[j] = g + np.dot(V.T[j][j+1:i], d[j+1:i])
                f = 0.0
                if not num_opt:
                    for j in range(i):
                        e[j] /= h
                        f += e[j] * d[j]
                else:
                    e[:i] /= h
                    f += np.dot(e[:i], d[:i])
                hh = f / (h + h)
                if not num_opt:
                    for j in range(i):
                        e[j] -= hh * d[j]
                else:
                    e[:i] -= hh * d[:i]
                for j in range(i):
                    f = d[j]
                    g = e[j]
                    if not num_opt:
                        for k in range(j, i):
                            V[k][j] -= (f * e[k] + g * d[k])
                    else:
                        V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
                    d[j] = V[i-1][j]
                    V[i][j] = 0.0
            d[i] = h
        # end for i--
        # Accumulate transformations.
        for i in range(n-1):
            V[n-1][i] = V[i][i]
            V[i][i] = 1.0
            h = d[i+1]
            if h != 0.0:
                if not num_opt:
                    for k in range(i+1):
                        d[k] = V[k][i+1] / h
                else:
                    d[:i+1] = V.T[i+1][:i+1] / h
                for j in range(i+1):
                    if not num_opt:
                        g = 0.0
                        for k in range(i+1):
                            g += V[k][i+1] * V[k][j]
                        for k in range(i+1):
                            V[k][j] -= g * d[k]
                    else:
                        g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])
                        V.T[j][:i+1] -= g * d[:i+1]
            if not num_opt:
                for k in range(i+1):
                    V[k][i+1] = 0.0
            else:
                V.T[i+1][:i+1] = 0.0
        if not num_opt:
            for j in range(n):
                d[j] = V[n-1][j]
                V[n-1][j] = 0.0
        else:
            d[:n] = V[n-1][:n]
            V[n-1][:n] = 0.0
        V[n-1][n-1] = 1.0
        e[0] = 0.0
    # Symmetric tridiagonal QL algorithm, taken from JAMA package.
    # private void tql2 (int n, double d[], double e[], double V[][]) {
    # needs roughly 3N^3 operations
    def tql2(n, d, e, V):
        # This is derived from the Algol procedures tql2, by
        # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
        # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
        # Fortran subroutine in EISPACK.
        # num_opt = False # True doesn't work
        if not num_opt:
            for i in range(1, n): # (int i = 1; i < n; i++):
                e[i-1] = e[i]
        else:
            e[0:n-1] = e[1:n]
        e[n-1] = 0.0
        f = 0.0
        tst1 = 0.0
        eps = 2.0**-52.0
        for l in range(n): # (int l = 0; l < n; l++) {
            # Find small subdiagonal element
            tst1 = max(tst1, abs(d[l]) + abs(e[l]))
            m = l
            while m < n:
                if abs(e[m]) <= eps*tst1:
                    break
                m += 1
            # If m == l, d[l] is an eigenvalue,
            # otherwise, iterate.
            if m > l:
                iiter = 0
                while 1: # do {
                    iiter += 1 # (Could check iteration count here.)
                    # Compute implicit shift
                    g = d[l]
                    p = (d[l+1] - g) / (2.0 * e[l])
                    r = (p**2 + 1)**0.5 # hypot(p, 1.0)
                    if p < 0:
                        r = -r
                    d[l] = e[l] / (p + r)
                    d[l+1] = e[l] * (p + r)
                    dl1 = d[l+1]
                    h = g - d[l]
                    if not num_opt:
                        for i in range(l+2, n):
                            d[i] -= h
                    else:
                        d[l+2:n] -= h
                    f = f + h
                    # Implicit QL transformation.
                    p = d[m]
                    c = 1.0
                    c2 = c
                    c3 = c
                    el1 = e[l+1]
                    s = 0.0
                    s2 = 0.0
                    # hh = V.T[0].copy() # only with num_opt
                    for i in range(m-1, l-1, -1):
                        # (int i = m-1; i >= l; i--) {
                        c3 = c2
                        c2 = c
                        s2 = s
                        g = c * e[i]
                        h = c * p
                        r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])
                        e[i+1] = s * r
                        s = e[i] / r
                        c = p / r
                        p = c * d[i] - s * g
                        d[i+1] = h + s * (c * g + s * d[i])
                        # Accumulate transformation.
                        if not num_opt: # overall factor 3 in 30-D
                            for k in range(n): # (int k = 0; k < n; k++){
                                h = V[k][i+1]
                                V[k][i+1] = s * V[k][i] + c * h
                                V[k][i] = c * V[k][i] - s * h
                        else: # about 20% faster in 10-D
                            hh = V.T[i+1].copy()
                            # hh[:] = V.T[i+1][:]
                            V.T[i+1] = s * V.T[i] + c * hh
                            V.T[i] = c * V.T[i] - s * hh
                            # V.T[i] *= c
                            # V.T[i] -= s * hh
                    p = -s * s2 * c3 * el1 * e[l] / dl1
                    e[l] = s * p
                    d[l] = c * p
                    # Check for convergence.
                    if abs(e[l]) <= eps*tst1:
                        break
                # } while (Math.abs(e[l]) > eps*tst1);
            d[l] += f
            e[l] = 0.0
        # Sort eigenvalues and corresponding vectors.
        # NOTE: dead code below — ``11 < 3`` is always False, so the
        # eigenvalues are returned unsorted (see the comment on return).
        if 11 < 3:
            for i in range(n-1): # (int i = 0; i < n-1; i++) {
                k = i
                p = d[i]
                for j in range(i+1, n): # (int j = i+1; j < n; j++) {
                    if d[j] < p: # NH find smallest k>i
                        k = j
                        p = d[j]
                if k != i:
                    d[k] = d[i] # swap k and i
                    d[i] = p
                    for j in range(n): # (int j = 0; j < n; j++) {
                        p = V[j][i]
                        V[j][i] = V[j][k]
                        V[j][k] = p
    # tql2
    N = len(C[0])
    if 11 < 3:
        pass
    V = [C[i][:] for i in range(N)]  # work on a row-wise copy of C
    d = N * [0]
    e = N * [0]
    tred2(N, V, d, e)
    tql2(N, d, e, V)
    return d, V # sorting of V-columns in place is non-trivial
def test():
    """test of the `purecma` module, called ``if __name__ == "__main__"``.
    Currently only based on `doctest`:
    >>> try: import cma.purecma as pcma
    ... except ImportError: import purecma as pcma
    >>> import random
    >>> random.seed(3)
    >>> xmin, es = pcma.fmin(pcma.ff.rosenbrock, 4 * [0.5], 0.5,
    ...                      verb_disp=0, verb_log=1)
    >>> print(es.counteval)
    1680
    >>> print(es.best.evals)
    1664
    >>> assert es.best.f < 1e-12
    >>> random.seed(5)
    >>> es = pcma.CMAES(4 * [0.5], 0.5)
    >>> es.params = pcma.CMAESParameters(es.params.dimension,
    ...                                  es.params.lam,
    ...                                  pcma.RecombinationWeights)
    >>> while not es.stop():
    ...     X = es.ask()
    ...     es.tell(X, [pcma.ff.rosenbrock(x) for x in X])
    >>> print("%s, %s" % (pcma.ff.rosenbrock(es.result[0]) < 1e-13,
    ...                   es.result[2] < 1600))
    True, True
    Large population size:
    >>> random.seed(4)
    >>> es = pcma.CMAES(3 * [1], 1)
    >>> es.params = pcma.CMAESParameters(es.params.dimension, 300,
    ...                                  pcma.RecombinationWeights)
    >>> es.logger = pcma.CMAESDataLogger()
    >>> try:
    ...     es = es.optimize(pcma.ff.elli, verb_disp=0)
    ... except AttributeError:  # OOOptimizer.optimize is not available
    ...     while not es.stop():
    ...         X = es.ask()
    ...         es.tell(X, [pcma.ff.elli(x) for x in X])
    >>> assert es.result[1] < 1e13
    >>> print(es.result[2])
    9300
    """
    # Run every ``>>>`` example above; failures are reported by doctest,
    # not raised as exceptions.
    import doctest
    print('launching doctest...')
    print(doctest.testmod(report=True, verbose=0)) # module test
#_____________________________________________________________________
#_____________________________________________________________________
#
if __name__ == "__main__":
    # Script entry point: run the module's doctest suite.
    test()
    # fmin(ff.rosenbrock, 10 * [0.5], 0.5)
| [
"rokwojtek@gmail.com"
] | rokwojtek@gmail.com |
ede0b947d1f6fd8b57868020e0b6a882b83d4825 | 9066c49e12738fc19fbe747384c12de86e221c67 | /lab exercise 1/question 7.py | fb9a14e0ab40a457ef802f40d55c200dfe61f8b8 | [] | no_license | Samyam412/lab_exercise | 76fe64762471e1e05f0e61f982ebd99eb31a41f0 | 65d724e4a0a2c6ce136cedcd2431f5614d7df437 | refs/heads/master | 2023-04-18T01:09:00.135539 | 2021-05-04T06:16:59 | 2021-05-04T06:16:59 | 363,842,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | """ You live 4 miles from university. The bus drives at 25mph but spends 2 minutes at each
of the 10 stops on the way. How long will the bus journey take? Alternatively, you could
run to university. You jog the first mile at 7mph; then run the next two at15mph; before
jogging the last at 7mph again. Will this be quicker or slower than the bus?"""
distance = 4  # miles from home to university

# Bus: 4 miles at 25 mph, plus 2 minutes waiting at each of the 10 stops.
# Bug fix: the original added the driving time in HOURS to the stop time in
# MINUTES; convert the driving time to minutes before adding.
time_taken_bus = (distance / 25) * 60 + 10 * 2  # minutes

# Running: 1 mile at 7 mph, 2 miles at 15 mph, then 1 mile at 7 mph.
# Bug fix: time = distance / speed per leg; the original divided the total
# distance by a sum of speeds, which is not a time.
time_taken_man = (1 / 7 + 2 / 15 + 1 / 7) * 60  # minutes

# Bug fix: the comparison was inverted — a LARGER running time means the
# bus is faster, not jogging.
if time_taken_man < time_taken_bus:
    print("you can jog to the university faster")
else:
    print("you can reach the university faster by the bus")
"Kcsamy50@gmail.com"
] | Kcsamy50@gmail.com |
f38852aedba093f8b94fb6d1b1bea8938b869023 | 4b9f84b990572a61e668d981c119a92c22362ab3 | /tempest/tempest/services/telemetry/json/alarming_client.py | ce142119b24b566d3dbb97516d5cb3e60484cd3e | [
"Apache-2.0"
] | permissive | bopopescu/openstack_tracing | 02fb245c97ab2ebb6ec0b24768d2b8cb21da700a | 2dd8c6c83096468c7b7fb937a5061f19210e5551 | refs/heads/master | 2022-11-26T11:25:19.655883 | 2019-12-03T19:48:02 | 2019-12-03T19:48:02 | 282,092,327 | 0 | 0 | null | 2020-07-24T01:21:44 | 2020-07-24T01:21:44 | null | UTF-8 | Python | false | false | 3,646 | py | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.common import service_client
class AlarmingClient(service_client.ServiceClient):
    """REST client for the Telemetry v2 alarming API endpoints."""

    version = '2'          # API version served by this client
    uri_prefix = "v2"      # prefix prepended to every request path

    def deserialize(self, body):
        """Decode a JSON response body; embedded newlines are stripped first."""
        return json.loads(body.replace("\n", ""))

    def serialize(self, body):
        """Encode a Python structure as a JSON request body."""
        return json.dumps(body)

    def list_alarms(self, query=None):
        """List alarms, optionally filtered by a (field, op, value) triple."""
        uri = '%s/alarms' % self.uri_prefix
        uri_dict = {}
        if query:
            uri_dict = {'q.field': query[0],
                        'q.op': query[1],
                        'q.value': query[2]}
        if uri_dict:
            uri += "?%s" % urllib.urlencode(uri_dict)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBodyList(resp, body)

    def show_alarm(self, alarm_id):
        """Return the alarm identified by ``alarm_id``."""
        uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBody(resp, body)

    def show_alarm_history(self, alarm_id):
        """Return the change history of the alarm ``alarm_id``."""
        uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBodyList(resp, body)

    def delete_alarm(self, alarm_id):
        """Delete the alarm ``alarm_id`` (expects HTTP 204, body usually empty)."""
        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        if body:
            body = self.deserialize(body)
        return service_client.ResponseBody(resp, body)

    def create_alarm(self, **kwargs):
        """Create an alarm from the given keyword fields (expects HTTP 201)."""
        uri = "%s/alarms" % self.uri_prefix
        body = self.serialize(kwargs)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBody(resp, body)

    def update_alarm(self, alarm_id, **kwargs):
        """Replace the alarm ``alarm_id`` with the given keyword fields."""
        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
        body = self.serialize(kwargs)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBody(resp, body)

    def show_alarm_state(self, alarm_id):
        """Return the current state of the alarm ``alarm_id``."""
        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBodyData(resp, body)

    def alarm_set_state(self, alarm_id, state):
        """Set the state of the alarm ``alarm_id`` to ``state``."""
        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
        body = self.serialize(state)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return service_client.ResponseBodyData(resp, body)
| [
"yves-junior.bationo@polymtl.ca"
] | yves-junior.bationo@polymtl.ca |
3b9ab5748169f40f1b677cd70a4f2f612a57773f | ab90ac26580ad1d129a96026b62ff828d9b5d678 | /ECDSA_address_from_big_int.py | bbb4c6c0c33e217a2ee38e6c46fea0652487452f | [] | no_license | OdaZei/phemex_puzzle | d5ad23af3d0656e1dcc464057555164430664962 | aead2242ed15456020b8f80be9d6a7066623f6b0 | refs/heads/master | 2020-12-20T08:33:14.527527 | 2020-03-08T14:36:18 | 2020-03-08T14:36:18 | 236,017,161 | 2 | 1 | null | 2020-01-25T23:18:19 | 2020-01-24T14:26:05 | Python | UTF-8 | Python | false | false | 2,798 | py | import ecdsa
import binascii
import base58
import hashlib
from itertools import permutations as pmt
"//////////////////////////////////////////////////////////////////////////////////"
global filepath
global kk
"//////////////////////////////////////////////////////////////////////////////////"
solution = ""
match = "1h8BNZkhsPiu6EKazP19WkGxDw3jHf9aT"
partial=27
words = {
"0":"BTC",
"1":"ETH",
"2":"XRP",
"3":"Phemex"
}
prime="957496696762772407663"
base="abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
"///////////////////////////////////////////////////////////////////////////////////////"
class GenAddressGivenInteger():
    """Derive a private-key string and a Base58Check P2PKH address from an
    integer-valued secp256k1 private key."""

    def __init__(self,i):
        self.value = i                          # raw integer private key
        self.integer = self.correct_lenght()    # 32-byte big-endian form
        self.private = self.get_PrivateKey()    # Base58Check-encoded key
        self.pubKey = self.get_PublicKey()      # P2PKH address string

    def correct_lenght(self):
        """Return the key as a fixed 32-byte big-endian byte string
        (method name typo kept for compatibility)."""
        return int.to_bytes(self.value,32,"big")

    def rimped160(self,x):
        """Return a RIPEMD-160 hash object over ``x`` (name typo kept)."""
        hash160 = hashlib.new('ripemd160')
        hash160.update(x)
        return hash160

    def get_PrivateKey(self):
        """Base58Check-encode the private key with a "00" version prefix.
        NOTE(review): standard mainnet WIF uses a 0x80 prefix; "00" looks
        nonstandard — presumably intentional for this puzzle, confirm."""
        key = "00" + binascii.hexlify(self.integer).decode()
        sha256_one = hashlib.sha256(binascii.unhexlify(key)).hexdigest()
        sha256_two = hashlib.sha256(binascii.unhexlify(sha256_one)).hexdigest()
        # checksum = first 4 bytes of double SHA-256
        WIF = base58.b58encode(binascii.unhexlify(key + sha256_two[:8]))
        return WIF.decode()

    def get_PublicKey(self):
        """Return the uncompressed-key P2PKH address (version byte 0x00)."""
        signed_key = ecdsa.SigningKey.from_string(self.integer,curve=ecdsa.SECP256k1)
        verifying_key = signed_key.get_verifying_key()
        # "04" marks an uncompressed SEC public key
        public_key = "04" + binascii.hexlify(verifying_key.to_string()).decode()
        hash160 = self.rimped160(hashlib.sha256(binascii.unhexlify(public_key)).digest()).digest()
        public_addr_one = b"\x00" + hash160
        checksum = hashlib.sha256(hashlib.sha256(public_addr_one).digest()).digest()[:4]
        public_addr_two = base58.b58encode(public_addr_one + checksum)
        return public_addr_two.decode()
def find_base(solt:str):
    """Concatenate the decimal index (in ``base``) of every character of
    ``solt`` that appears in the alphabet; return the digit string as soon
    as its length reaches ``partial``, otherwise return None implicitly."""
    digits = ""
    for ch in solt:
        idx = base.find(ch)
        if idx == -1:
            continue  # character not in the alphabet: skip it
        digits += str(idx)
        if len(digits) == partial:
            return str(digits)
"/////////////////////////////////////////////////////////////////////////////////////////////////////////////"
perm = pmt(words)
for i in perm:
for j in i:
word = words[j]
solution+=word
tt=find_base(solution)
addr_one = GenAddressGivenInteger(int(prime+tt))
addr_two = GenAddressGivenInteger(int(tt+prime))
if(addr_one.pubKey == match or addr_two.pubKey== match):
print((addr_one.private +" "+addr_two.private)*100)
print(addr_one.pubKey + " "+addr_two.pubKey)
break
solution = ""
print(addr_one.pubKey + " "+addr_two.pubKey+"\n"+"/"*80)
| [
"noreply@github.com"
] | OdaZei.noreply@github.com |
4d3a0c272d36b397c444515c4923ebd3de65bfda | 9cf72dc753a7334873a1d61a656dab33ac4703a5 | /VirusTotalAVBot/__init__.py | b8a3cb9e21e8982a2ea723b87ce48bdb153374bb | [
"MIT"
] | permissive | kenanismayilov335/VirusTotal-File-Scan-Bot | f3895eb927dc06279876a4c260fdbe3614227258 | 369e8bba10ce40cf9e9bdaeba018496e8509cd8d | refs/heads/main | 2023-07-12T10:29:57.272000 | 2021-08-21T22:00:40 | 2021-08-21T22:00:40 | 398,662,020 | 0 | 0 | MIT | 2021-08-21T21:47:21 | 2021-08-21T21:47:20 | null | UTF-8 | Python | false | false | 411 | py | import os
# Runtime configuration, read once from environment variables at import time.
BOT_TOKEN = os.environ.get('BOT_TOKEN')          # Telegram bot token
# NOTE(review): int(...) raises TypeError if API_ID/MAX_FILE_SIZE are unset
# — presumably intentional fail-fast behavior, confirm.
API_ID = int(os.environ.get('API_ID'))           # Telegram API id
API_HASH = os.environ.get('API_HASH')            # Telegram API hash
BOT_USERNAME = os.environ.get('BOT_USERNAME')
BOT_OWNER = os.environ.get('BOT_OWNER')
# Plugin root package passed to the bot framework's plugin loader.
PLUG_IN = dict(root="VirusTotalAVBot.modules")
VT_API = os.environ.get('VT_API')                # VirusTotal API key
GROUP_INFO_MSGS = os.environ.get('GROUP_INFO_MSGS')
MAX_FILE_SIZE = int(os.environ.get('MAX_FILE_SIZE'))
| [
"noreply@github.com"
] | kenanismayilov335.noreply@github.com |
e75d9db12b0fc5c4d1d738e16e1b86046f0f1899 | c99e0a59670c1b319d258a7bbc0c0cb84928f9c7 | /GUI/wxpython2/venv/bin/easy_install | 83934b7c9921673f90bb5507478cc27005f04bbf | [] | no_license | Daparrag/ARMV7e-M-Invasive-profile-library-for-cortex-M4-MCU | 879a67368eb07be3187ec4810325789d055b6b7f | 5dbad1ae73a725b3ec856169e36f373e76eb2506 | refs/heads/master | 2021-01-20T08:30:13.084201 | 2018-07-11T06:53:34 | 2018-07-11T06:53:34 | 90,154,033 | 0 | 0 | null | 2018-07-11T06:53:36 | 2017-05-03T13:48:13 | Python | UTF-8 | Python | false | false | 274 | #!/home/homer/PycharmProjects/wxpython2/venv/bin/python2.7
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools for a virtualenv — not
# hand-written; edits here are normally overwritten on reinstall.
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing "-script.py(w)" or ".exe" suffix
    # so the tool reports its canonical name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"daparrag"
] | daparrag | |
fcbab2128e2606c0a8b84ba21afd298b102d0db3 | d1ec1d7c7a955c033471725425d72ab7dcd7455f | /cut_rod.py | 43a0384fdc1030a431943ac135a4367ca51aa23f | [] | no_license | qz5e20/Data-Structure | c252c1f519dbebc03793eb13d8311ade67e6298e | 541f25927a6a2640c78d901a267d3ea7be1ebb91 | refs/heads/main | 2023-06-20T09:37:05.310329 | 2021-07-13T10:25:30 | 2021-07-13T10:25:30 | 385,563,491 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 10:23:42 2021
@author: user
"""
p=[0,1,5,8,9,10,17,17,20,21,23,24,26,27,27,28,30,33,36,39,40]
#p=[0,1,5,8,9,10,17,17,20,24,30]
def cut_rod_recurision_1(p, n):
    """Best revenue for a rod of length ``n`` by naive recursion (O(2^n)).

    Tries every split position on both halves as well as selling the rod
    whole.

    :param p: price list, ``p[i]`` is the price of a piece of length ``i``
    :param n: rod length, ``0 <= n < len(p)``
    :return: maximum obtainable revenue

    Bug fix: the first parameter was named ``self`` and the body read the
    module-level ``p`` instead, so the passed price list was silently
    ignored; all existing call sites pass the prices positionally, so the
    rename is backward-compatible.
    """
    if n == 0:
        return 0
    res = p[n]  # start with "no cut": sell the whole rod
    for i in range(1, n):
        res = max(res,
                  cut_rod_recurision_1(p, i) + cut_rod_recurision_1(p, n - i))
    return res
def c1(p,n):
    """Thin convenience wrapper around ``cut_rod_recurision_1``."""
    return cut_rod_recurision_1(p, n)
#自顶向下
def cut_rod_recurision_2(p, n):
    """Best revenue for a rod of length ``n``, recursing on the first piece.

    Simpler top-down recursion than ``cut_rod_recurision_1``: choose the
    length of the first piece, recurse on the remainder. Still O(2^n).

    :param p: price list, ``p[i]`` is the price of a piece of length ``i``
    :param n: rod length, ``0 <= n < len(p)``
    :return: maximum obtainable revenue

    Bug fix: as in ``cut_rod_recurision_1``, the first parameter was named
    ``self`` and the body read the module-level ``p``, ignoring the
    argument; callers pass positionally, so the rename is compatible.
    """
    if n == 0:
        return 0
    res = 0
    for i in range(1, n + 1):
        res = max(res, p[i] + cut_rod_recurision_2(p, n - i))
    return res
# Bottom-up dynamic programming — no recursion.
def cut_rod_dp(p, n):
    """Best revenue for a rod of length ``n`` in O(n^2) time, O(n) space."""
    best = [0]  # best[i] = optimal revenue for length i
    for length in range(1, n + 1):
        top = 0
        for first in range(1, length + 1):
            top = max(top, p[first] + best[length - first])
        best.append(top)
    return best[n]
def cut_rod_extend(p,n):
r=[0]
s=[0]
for i in range(1,n+1):
res_r=0
res_s=0
for j in range(1,i+1):
if p[j]+r[i-j]>res_r:
res_r=p[j]+r[i-j]
res_s=j
r.append(res_r)
s.append(res_s)
return r[n],s
def cut_rod_solution(p, n):
    """Return the list of piece lengths in an optimal cut of a rod of
    length ``n``, reconstructed from ``cut_rod_extend``'s first-cut table."""
    _, first_cut = cut_rod_extend(p, n)
    pieces = []
    remaining = n
    while remaining > 0:
        pieces.append(first_cut[remaining])
        remaining -= first_cut[remaining]
    return pieces
# Demo: optimal piece sizes for a rod of length 10 with the price table above.
print(cut_rod_solution(p,10))
#print(c1(p,20))
#print(cut_rod_recurision_2(p,20))
"noreply@github.com"
] | qz5e20.noreply@github.com |
4e633c267ce51f581b210db672c0ed1041e02ffd | 37ff29a9a83eafbf0f54e2ce0bf2c0255b1663a1 | /build/husky_control/catkin_generated/generate_cached_setup.py | d7af4a5285b20e1200840d1b9135e75b800eadd2 | [] | no_license | wy7727/husky | f8d9c2a05487f66efbfb58e8fc1c141efc10e177 | 7925bc34ae316639aef88fc3e6a8d36aba12620b | refs/heads/master | 2020-04-09T12:09:41.420418 | 2019-12-01T09:24:24 | 2019-12-01T09:24:24 | 160,337,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | # -*- coding: utf-8 -*-
from __future__ import print_function

import argparse
import os
import stat
import sys

# NOTE: file generated by catkin's build system — not meant to be hand-edited.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/ying/wy_ws/devel;/home/ying/px4/catkin_ws/devel;/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Render the cached environment lines for this package's env.sh.
code = generate_environment_script('/home/ying/wy_ws/devel/.private/husky_control/env.sh')

output_filename = '/home/ying/wy_ws/build/husky_control/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Make the generated shell script executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"wuying277727@gmail.com"
] | wuying277727@gmail.com |
24ff4cfbe9ebc07dc6bf91e7a7bdf56035b30726 | b5402b40b69244380bc0d3f85ff65483d0505181 | /mongodb/factory/execode.py | c462619c70d66714d3b5f4d8d1e6279769e79df5 | [
"MIT"
] | permissive | RxJellyBot/Jelly-Bot | ea7b6bd100431736732f9f4cc739858ec148e3e2 | c7da1e91783dce3a2b71b955b3a22b68db9056cf | refs/heads/master | 2023-08-29T20:41:01.813945 | 2021-10-20T05:27:21 | 2021-10-20T05:27:21 | 189,347,226 | 5 | 1 | MIT | 2020-09-05T00:50:41 | 2019-05-30T04:47:48 | Python | UTF-8 | Python | false | false | 7,976 | py | """Execode-related data controllers."""
from datetime import timedelta
from typing import Type, Optional, Tuple
from bson import ObjectId
from django.http import QueryDict # pylint: disable=wrong-import-order
from extutils.dt import now_utc_aware
from flags import Execode, ExecodeCompletionOutcome, ExecodeCollationFailedReason
from models import ExecodeEntryModel, Model
from models.exceptions import ModelConstructionError
from mongodb.utils import ExtendedCursor
from mongodb.exceptions import NoCompleteActionError, ExecodeCollationError
from mongodb.helper import ExecodeCompletor, ExecodeRequiredKeys
from mongodb.factory.results import (
EnqueueExecodeResult, CompleteExecodeResult, GetExecodeEntryResult,
OperationOutcome, GetOutcome, WriteOutcome
)
from JellyBot.systemconfig import Database
from ._base import BaseCollection
from .mixin import GenerateTokenMixin
__all__ = ("ExecodeManager",)
DB_NAME = "execode"
class _ExecodeManager(GenerateTokenMixin, BaseCollection):
    """MongoDB-backed manager for enqueuing, fetching and completing
    Execode actions. Exposed as the module-level singleton
    ``ExecodeManager``."""

    # Consumed by GenerateTokenMixin to build unique hex tokens
    # (presumably — the mixin body is not in this file; confirm).
    token_length = ExecodeEntryModel.EXECODE_LENGTH
    token_key = ExecodeEntryModel.Execode.key

    database_name = DB_NAME
    collection_name = "main"
    model_class = ExecodeEntryModel

    def build_indexes(self):
        """Create the unique Execode index and the TTL expiry index."""
        self.create_index(ExecodeEntryModel.Execode.key, name="Execode", unique=True)
        self.create_index(ExecodeEntryModel.Timestamp.key,
                          name="Timestamp (for TTL)", expireAfterSeconds=Database.ExecodeExpirySeconds)

    def enqueue_execode(self, root_uid: ObjectId, execode_type: Execode, data_cls: Type[Model] = None,
                        **data_kw_args) \
            -> EnqueueExecodeResult:
        """
        Enqueue an Execode action.

        :param root_uid: user to execute the enqueued Execode
        :param execode_type: type of the execode
        :param data_cls: model class of the additional data class
        :param data_kw_args: arguments to construct the model
        :return: enqueuing result
        """
        execode = self.generate_hex_token()
        now = now_utc_aware(for_mongo=True)

        # Keyword data without a model class to hold it is an error.
        if not data_cls and data_kw_args:
            return EnqueueExecodeResult(WriteOutcome.X_NO_MODEL_CLASS)

        if data_cls:
            try:
                data = data_cls(**data_kw_args).to_json()
            except ModelConstructionError as ex:
                return EnqueueExecodeResult(WriteOutcome.X_INVALID_MODEL, ex)
        else:
            data = {}

        if execode_type == Execode.UNKNOWN:
            return EnqueueExecodeResult(WriteOutcome.X_UNKNOWN_EXECODE_ACTION)

        model, outcome, ex = self.insert_one_data(
            CreatorOid=root_uid, Execode=execode, ActionType=execode_type, Timestamp=now, Data=data)

        return EnqueueExecodeResult(
            outcome, ex, model, execode, now + timedelta(seconds=Database.ExecodeExpirySeconds))

    def get_queued_execodes(self, root_uid: ObjectId) -> ExtendedCursor[ExecodeEntryModel]:
        """
        Get the queued Execodes of ``root_uid``.

        :param root_uid: user OID to get the queued Execodes
        :return: a cursor yielding queued Execodes of the user
        """
        filter_ = {ExecodeEntryModel.CreatorOid.key: root_uid}
        return ExtendedCursor(self.find(filter_), self.count_documents(filter_), parse_cls=ExecodeEntryModel)

    def get_execode_entry(self, execode: str, action: Optional[Execode] = None) -> GetExecodeEntryResult:
        """
        Get the entry of an Execode action.

        Limits the result to only return the Execode with the action type ``action`` if it is not ``None``.

        :param execode: code of the Execode
        :param action: action of the Execode
        :return: result of getting the Execode
        """
        cond = {ExecodeEntryModel.Execode.key: execode}

        if action:
            cond[ExecodeEntryModel.ActionType.key] = action

        ret: ExecodeEntryModel = self.find_one_casted(cond)

        if not ret:
            # Distinguish "wrong action type" from "token does not exist".
            if self.count_documents({ExecodeEntryModel.Execode.key: execode}) > 0:
                return GetExecodeEntryResult(GetOutcome.X_EXECODE_TYPE_MISMATCH)

            return GetExecodeEntryResult(GetOutcome.X_NOT_FOUND_ABORTED_INSERT)

        return GetExecodeEntryResult(GetOutcome.O_CACHE_DB, model=ret)

    def remove_execode(self, execode: str):
        """
        Delete the Execode entry.

        :param execode: execode of the entry to be deleted
        """
        self.delete_one({ExecodeEntryModel.Execode.key: execode})

    def _attempt_complete(self, execode: str, tk_model: ExecodeEntryModel, execode_kwargs: QueryDict) \
            -> Tuple[OperationOutcome, Optional[ExecodeCompletionOutcome], Optional[Exception]]:
        """Run the completion action and map any failure to an outcome
        triple ``(operation outcome, completion outcome, exception)``.
        The entry is removed from the collection only on success."""
        cmpl_outcome = ExecodeCompletionOutcome.X_NOT_EXECUTED
        ex = None

        try:
            cmpl_outcome = ExecodeCompletor.complete_execode(tk_model, execode_kwargs)

            if cmpl_outcome.is_success:
                outcome = OperationOutcome.O_COMPLETED
                self.remove_execode(execode)
            else:
                outcome = OperationOutcome.X_COMPLETION_FAILED
        except NoCompleteActionError as e:
            outcome = OperationOutcome.X_NO_COMPLETE_ACTION
            ex = e
        except ExecodeCollationError as e:
            if e.err_code == ExecodeCollationFailedReason.MISSING_KEY:
                outcome = OperationOutcome.X_MISSING_ARGS
            else:
                outcome = OperationOutcome.X_COLLATION_ERROR

            ex = e
        except Exception as e:
            outcome = OperationOutcome.X_COMPLETION_ERROR
            ex = e

        return outcome, cmpl_outcome, ex

    def complete_execode(self, execode: str, execode_kwargs: dict, action: Optional[Execode] = None) \
            -> CompleteExecodeResult:
        """
        Finalize the pending Execode.

        :param execode: execode of the action to be completed
        :param execode_kwargs: arguments may be needed to complete the Execode action
        :param action: type of the Execode action
        :return: result of the completion attempt
        """
        ex = None
        tk_model: Optional[ExecodeEntryModel] = None

        # Force type to be dict because the type of `execode_kwargs` might be django QueryDict
        if isinstance(execode_kwargs, QueryDict):
            execode_kwargs = execode_kwargs.dict()

        if not execode:
            outcome = OperationOutcome.X_EXECODE_EMPTY
            return CompleteExecodeResult(outcome, None, None, set(), ExecodeCompletionOutcome.X_NOT_EXECUTED)

        # Not using self.find_one_casted for catching `ModelConstructionError`
        get_execode = self.get_execode_entry(execode, action)

        if get_execode.success:
            tk_model = get_execode.model

            # Check for missing keys
            if missing_keys := ExecodeRequiredKeys.get_required_keys(tk_model.action_type).difference(execode_kwargs):
                return CompleteExecodeResult(OperationOutcome.X_MISSING_ARGS, None, tk_model, missing_keys,
                                             ExecodeCompletionOutcome.X_MISSING_ARGS)

            try:
                outcome, cmpl_outcome, ex = self._attempt_complete(execode, tk_model, execode_kwargs)
            except ModelConstructionError as e:
                outcome = OperationOutcome.X_CONSTRUCTION_ERROR
                cmpl_outcome = ExecodeCompletionOutcome.X_MODEL_CONSTRUCTION
                ex = e
        else:
            cmpl_outcome = ExecodeCompletionOutcome.X_EXECODE_NOT_FOUND

            if get_execode.outcome == GetOutcome.X_NOT_FOUND_ABORTED_INSERT:
                outcome = OperationOutcome.X_EXECODE_NOT_FOUND
            elif get_execode.outcome == GetOutcome.X_EXECODE_TYPE_MISMATCH:
                outcome = OperationOutcome.X_EXECODE_TYPE_MISMATCH
            else:
                outcome = OperationOutcome.X_ERROR

        return CompleteExecodeResult(outcome, ex, tk_model, set(), cmpl_outcome)


# Module-level singleton used by the rest of the application.
ExecodeManager = _ExecodeManager()
| [
"raenonx0710@gmail.com"
] | raenonx0710@gmail.com |
bc311155799542ad602305eb319bcfe862940578 | f37978530be6cf40bd7b4e5dbaf63f779114ff95 | /src/bioregistry/curation/add_descriptions_from_gs.py | 04f3759ae75bb8aaa0a3fc81e7d23e8b6bb18533 | [
"MIT",
"CC0-1.0",
"CC-PDDC",
"CC-BY-4.0"
] | permissive | biopragmatics/bioregistry | 03d983e96b65681352d0eddbe39902059d299e6d | a05af7e42f60109f01133e3072bb673423b74dd3 | refs/heads/main | 2023-08-30T21:02:44.854342 | 2023-08-30T01:10:16 | 2023-08-30T01:10:16 | 319,481,281 | 77 | 28 | MIT | 2023-09-12T08:21:24 | 2020-12-08T00:33:21 | Python | UTF-8 | Python | false | false | 940 | py | # -*- coding: utf-8 -*-
"""Add descriptions from a google curation sheet."""
import click
import pandas as pd
import bioregistry
# Published TSV export of the curation Google Sheet (read-only).
URL = (
    "https://docs.google.com/spreadsheets/d/e/2PACX-1vQVw4odnZF34f267p9WqdQOhi"
    "Y9tewD-jbnATgpi5W9smbkemvbOcVZSdeboXknoWxDhPyvtcxUYiQO/pub?gid=1947246172&single=true&output=tsv"
)
@click.command()
def main():
    """Add descriptions from a google curation sheet."""
    sheet = pd.read_csv(URL, sep="\t")
    # Drop the spreadsheet's leading index column.
    del sheet[sheet.columns[0]]
    # Keep only rows that were actually curated with a description.
    sheet = sheet[sheet.description.notna()]
    # Restrict to prefixes that do not already have a description ...
    needs_description = sheet["prefix"].map(lambda p: bioregistry.get_description(p) is None)
    sheet = sheet[needs_description]
    # ... and that are not governed by an OBO Foundry entry.
    not_obofoundry = sheet["prefix"].map(lambda p: bioregistry.get_obofoundry_prefix(p) is None)
    sheet = sheet[not_obofoundry]
    click.echo(sheet.to_markdown())
    registry = dict(bioregistry.read_registry())
    for prefix, description in sheet[["prefix", "description"]].values:
        registry[prefix].description = description
    bioregistry.write_registry(registry)
if __name__ == "__main__":
main()
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
01185e9f3d7396684bd2bb377e68a82b736fa683 | b30a71755dfd71c85a5bd9cb97d68c9ee400185b | /tests/test_get_indexes_digits.py | 2ddb04ea36ddae5a5406d5b317a835360399d901 | [] | no_license | rugeer/image_number_sequence_generator | 495804170ee18edad8a1ae6247099a359f92cd22 | 3fb758ede7fa7ab596867563f3c2359ed4fd6f4a | refs/heads/master | 2020-03-17T21:11:16.035881 | 2018-05-18T11:57:28 | 2018-05-18T11:57:28 | 133,947,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | import mnist
from script import get_indexes_digits
from collections import Counter
# Module-level fixtures shared by every test below.
# NOTE(review): importing this module triggers an MNIST download/load via the
# `mnist` package; train_images is loaded but unused by the tests — confirm
# whether it can be dropped.
train_images = mnist.train_images()
train_labels = mnist.train_labels()
# Digits whose index lookup the tests validate.
digits = [1, 3]
indexes = get_indexes_digits(digits)
def test_output_type():
    """The digit-to-indexes lookup must be returned as a dictionary."""
    assert isinstance(indexes, dict)
def test_keys():
    """Test whether all and only the selected digits are in the dictionary keys"""
    all_present = all(str(digit) in indexes for digit in digits)
    exact_count = len(indexes.keys()) == len(digits)
    assert all_present and exact_count
def test_correct_indexes():
    """Test whether all indexes for each key correspond to the correct label"""
    checks = [all(train_labels[indexes[str(digit)]] == digit)
              for digit in digits]
    assert all(checks)
def test_correct_size():
    """Test that all indexes add up to the size of the dataset"""
    # dict(...) on purpose: a missing digit should raise KeyError, exactly
    # like the original, rather than silently comparing against 0.
    label_counts = dict(Counter(train_labels.tolist()))
    matches = [len(indexes[str(digit)]) == label_counts[digit]
               for digit in digits]
    assert all(matches)
| [
"robin.hornak.14@ucl.ac.uk"
] | robin.hornak.14@ucl.ac.uk |
e1e3e0a6195e8962484a4fa4111f09eb936c7802 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_each148.py | 382a04d946964f99cf0e1924c7cf471beca627c8 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | from xcp2k.inputsection import InputSection
class _each148(InputSection):
    """EACH input section (auto-generated wrapper listing its keywords)."""

    def __init__(self):
        InputSection.__init__(self)
        # Attribute name -> keyword name emitted in the input file.
        keyword_map = {'Just_energy': 'JUST_ENERGY', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Xas_scf': 'XAS_SCF', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Rot_opt': 'ROT_OPT', 'Cell_opt': 'CELL_OPT', 'Band': 'BAND', 'Ep_lin_solver': 'EP_LIN_SOLVER', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Replica_eval': 'REPLICA_EVAL', 'Bsse': 'BSSE', 'Shell_opt': 'SHELL_OPT', 'Tddft_scf': 'TDDFT_SCF'}
        # One public attribute per keyword, created in declaration order and
        # initially unset, exactly as in the generated original.
        for attribute in keyword_map:
            setattr(self, attribute, None)
        self._name = "EACH"
        self._keywords = keyword_map
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.