content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import json
import os
import re
import shutil
import uuid
from dataclasses import dataclass, field
from typing import Callable, Optional, Set, List, Dict
import blobfile as bf
import numpy as np
import torch
from mpi4py import MPI
import summarize_from_feedback
from summarize_from_feedback.model_layout import ModelLayout
from summarize_from_feedback.models import sample_fns
from summarize_from_feedback.models.loss_functions import softmax_xent_loss_fn
from summarize_from_feedback.models.transformer import Hyperparams
from summarize_from_feedback.models.transformer import build_with_random_weights
from summarize_from_feedback.utils import exact_div, hyperparams
from summarize_from_feedback.utils import blobs
from summarize_from_feedback.utils.dist_utils import (
setup_cuda_device_and_dist,
create_data_parallel_comm,
create_within_replica_comm,
)
from summarize_from_feedback.utils.nested import map_nested
from summarize_from_feedback.utils.torch_utils import nans, to_numpy
@dataclass
@dataclass
def save_exported_model(layout, model, model_H: Hyperparams, save_dir, save_heads: Set[str]):
"""
Exporting a model allows it to be run with a different layout than it was trained with.
Currently, uploading/loading an exported model is slower than saving/restoring a checkpoint,
but if we can get exporting to be sufficiently fast, then we could replace legacy_checkpoints.py with
this "exporting" approach.
"""
if blobs.is_blob_url(save_dir):
local_dir = os.path.join("/tmp", str(uuid.uuid4()))
else:
local_dir = save_dir
os.makedirs(os.path.join(local_dir, "checkpoint"), exist_ok=True)
# Export the embeddings
if model.include_input_embeddings:
export_fine_piece(model.embedding.state_dict(), "input_embeddings")
if model.include_pos_embeddings:
export_fine_piece(model.position_embedding.state_dict(), "position_embedding")
# Export the resblocks
for resblock_idx, resblock in enumerate(model.torso.resblocks):
export_fine_piece(resblock.state_dict(), f"resblock_{resblock_idx:04d}")
# Export the final_layer_norm
if model.include_final_layer_norm:
export_fine_piece(model.ln_f.state_dict(), "final_layer_norm")
# Export the unembeddings
if model.include_output_unembeddings:
export_fine_piece({"unembedding_weights": model.unembedding_weights}, "output_unembeddings")
for head in save_heads:
export_fine_piece(model.scalar_heads[head].state_dict(), f"output_head_{head}")
if blobs.is_blob_url(save_dir):
blobs.parallel_copy_recursive(local_dir, save_dir)
shutil.rmtree(local_dir)
def parameter_name_to_sharding_dim(name: str) -> Optional[int]:
"""
:returns: None if all parameters are same on all shards, otherwise the dimension to
split upon.
"""
if name in ["embedding.weight", "position_embedding.weight", "unembedding_weights"]:
return -1
if name.startswith("torso.resblocks"):
match = re.search(r"torso\.resblocks\.\d+\.(.*)", name)
torso_part = match.group(1)
if torso_part.startswith("ln_1.") or torso_part.startswith("ln_2."):
return None
if _matches_any_prefix(
torso_part, ["attn.q_proj", "attn.k_proj", "attn.v_proj", "mlp.c_fc"]
):
return -1
if _matches_any_prefix(torso_part, ["attn.c_proj.weight", "mlp.c_proj.weight"]):
return -2
if _matches_any_prefix(torso_part, ["attn.c_proj.bias", "mlp.c_proj.bias"]):
return None
raise RuntimeError(f"Unexpected parameter name: {name}")
if name in ["ln_f.weight", "ln_f.bias"]:
return None
raise RuntimeError(f"Unexpected parameter name: {name}")
def load_exported_model(
layout: ModelLayout,
model,
model_H: Hyperparams,
load_path: str,
load_heads_map: Dict[str, str],
use_cache: bool = False,
):
"""
:param load_heads_map: maps name in model -> name to load from
"""
if use_cache and blobs.is_blob_url(load_path):
load_path = blobs.download_directory_cached(load_path)
with bf.BlobFile(os.path.join(load_path, "info.json")) as f:
info = json.load(f)
old_model_H = Hyperparams(**info["model_hparams"])
original_n_shards = old_model_H.n_shards
if "n_shards" in info:
assert info["n_shards"] == original_n_shards
assert layout.n_shards == model_H.n_shards
# print("orig n_shards", original_n_shards, "new n_shards", layout.n_shards)
if original_n_shards % layout.n_shards == 0:
n_chkpt_shards_per_rank = exact_div(original_n_shards, layout.n_shards)
shard_idx_start = n_chkpt_shards_per_rank * layout.shard_idx
load_shard_idxs = range(shard_idx_start, shard_idx_start + n_chkpt_shards_per_rank)
elif layout.n_shards % original_n_shards == 0:
n_ranks_per_chkpt_shard = exact_div(layout.n_shards, original_n_shards)
shard_idx_to_load = layout.shard_idx // n_ranks_per_chkpt_shard
shard_slice_idx = layout.shard_idx % n_ranks_per_chkpt_shard
else:
raise NotImplementedError(
f"Tried running a model that was originally created with "
f"{original_n_shards} shards with {layout.n_shards} shards. The new number "
f"of shards must evenly divide or be divisible by the original number of shards."
)
if model.include_input_embeddings:
model.embedding.load_state_dict(fetch("input_embeddings", "embedding"))
if model.include_pos_embeddings:
model.position_embedding.load_state_dict(
fetch("position_embedding", "position_embedding")
)
# fetch the resblocks
for resblock_idx, resblock in enumerate(model.torso.resblocks):
d = fetch(f"resblock_{resblock_idx:04d}", f"torso.resblocks.{resblock_idx}")
if not model_H.get("key_bias"):
d = {k: v for (k, v) in d.items() if "attn.k_proj.bias" not in k}
resblock.load_state_dict(d)
# fetch the final_layer_norm
if model.include_final_layer_norm:
model.ln_f.load_state_dict(fetch("final_layer_norm", "ln_f"))
# fetch the unembeddings
if model.include_output_unembeddings:
# Pull in the one piece
model.load_state_dict(fetch("output_unembeddings"), strict=False)
for model_head, save_head in load_heads_map.items():
model.scalar_heads[model_head].load_state_dict(
fetch(f"output_head_{save_head}", f"scalar_heads.{model_head}")
)
def _split_query_response_output_parts(x, query_length, response_padding_mask):
"""
Given an output x with shape [batch, num_responses, query_length + response_length, *rest],
returns a dictionary with it split into query/response parts with shapes
[batch, query_length + 1, *rest] and [batch, num_responses, response_length + 1, *rest]
"""
assert x.ndim >= 3
rest_shape = x.size()[3:]
d = dict()
# Add this back if it's ever actually useful
# d["query"] = torch.cat(
# [nans([x.size(0), 1, *rest_shape], dtype=x.dtype, device=x.device), x[:, 0, :query_length]],
# dim=1,
# )
if query_length > 0:
d["response"] = x[:, :, query_length - 1 :]
else:
d["response"] = torch.cat(
[
nans([x.size(0), x.size(1), 1, *rest_shape], dtype=x.dtype, device=x.device),
x[:, :, :query_length],
],
dim=2,
)
for _ in range(len(rest_shape)):
response_padding_mask = response_padding_mask.unsqueeze(-1)
# fill with NaNs in places where response had padding
d["response"].masked_fill_(
torch.cat(
[
torch.zeros(
[x.size(0), x.size(1), 1] + [1 for _ in range(len(rest_shape))],
dtype=torch.bool,
device=x.device,
),
response_padding_mask,
],
dim=2,
),
np.nan,
)
return d
PADDING_TOKEN = -1
@dataclass
class QueryResponseModel:
"""
Handles sampling, eval, and training with shared queries.
"""
def load(self, load_path, run_params=None, init_heads=(), map_heads={}, use_cache=False):
"""
Rebuilds everything, but keeps API semantics: model has same layout, and is on the same device, and all heads are the same (although some may be random init)
"""
if use_cache and blobs.is_blob_url(load_path):
load_path = blobs.download_directory_cached(load_path)
with bf.BlobFile(os.path.join(load_path, "info.json")) as f:
info = json.load(f)
self.model_hparams = Hyperparams(info["model_hparams"])
if run_params is not None:
extra_model_H = {k: v for k, v in run_params.to_json().items() if v is not None}
self.model_hparams.update(**extra_model_H)
self.encoder = summarize_from_feedback.encoder
model = build_with_random_weights(
layout=self.layout,
n_vocab=self.encoder.n_vocab,
device=self.device,
model_H=self.model_hparams,
)
self.model = self._update_model_with_head_info(model)
init_heads = set(init_heads or ())
# Load heads from where map_heads says, or the normal head name by default
load_heads_map = {
head: map_heads.get(head, head) for head in self.heads if head not in init_heads
}
load_exported_model(
self.layout,
self.model,
self.model_hparams,
load_path,
load_heads_map=load_heads_map,
use_cache=use_cache,
)
params_to_init = []
for head in init_heads:
params_to_init.append(self.model.scalar_heads[head].weight)
params_to_init.append(self.model.scalar_heads[head].bias)
self._sync_params(params_to_init, heads_to_init=init_heads)
self.barrier("load_finished")
def barrier(self, name=""):
"""
When called on all ranks, waits until all ranks are done
"""
self.in_replica_comm.barrier(name)
self.dp_comm.barrier(name)
def _eval(
self, queries, responses, eval_fn: Callable = None, eval_inputs=None, **model_call_kwargs
):
"""
Run a forward pass. Return all the head values, broadcasted within each replica. If an
eval_fn is passed, return its output across all replicas.
:return: A dict with structure:
eval_stats: structure from eval_fn
[head]: {
# disabled for now: query: [batch, query_len+1]
response: [batch, num_responses, sample_len+1]
}
"""
queries = queries.to(self.device)
responses = responses.to(self.device)
if eval_inputs is not None:
eval_inputs = map_nested(eval_inputs, lambda x: x.to(self.device))
mask, responses = _zero_padding_tokens(responses)
responses_per_query = responses.size(1)
# NOTE: could make this more efficient by sharing context work
tiled_queries = queries.unsqueeze(1).repeat(1, responses_per_query, 1)
run_tokens = torch.cat([tiled_queries, responses], dim=2).flatten(0, 1)
self.model.eval()
with torch.no_grad():
outputs = self.model(run_tokens, **model_call_kwargs)
outputs_mb = dict()
ret = dict()
for k in list(self.heads) + (["logits"] if self.logit_head else []):
reshaped = outputs[k].view(-1, responses_per_query, *outputs[k].size()[1:])
d = _split_query_response_output_parts(reshaped, queries.size(1), mask)
outputs_mb[k] = d
if k in self.heads:
ret[k] = d
if eval_fn is not None:
ret["eval_stats"] = eval_fn(outputs_mb, eval_inputs)
return ret
def _sample(
self,
context_tokens,
sample_len,
partial_responses=None,
responses_per_query=1,
sample_H=None,
**model_call_kwargs,
):
"""
:return: A dict with structure:
samples: [batch, num_responses, sample_len]
logprobs: [batch, num_responses, sample_len]
[head]: {
response: [batch, num_responses, sample_len+1]
}
"""
context_tokens = context_tokens.to(self.device)
self.model.eval()
n_batch, query_length = context_tokens.size()
assert self.logit_head, f"Cannot sample without logit_head"
# NOTE: could do this more efficiently by sharing context work
repeated_context_tokens = context_tokens.unsqueeze(1).repeat(1, responses_per_query, 1)
# Combine query and response so far into new query to be passed to _sample()
if partial_responses is not None:
partial_responses = partial_responses.to(self.device)
repeated_context_tokens = torch.cat((repeated_context_tokens, partial_responses), 2)
sample_fn = _get_sample_fn(sample_H)
flat_context_tokens = repeated_context_tokens.flatten(0, 1)
flat_n_batch, context_len = flat_context_tokens.shape
assert sample_len + context_len <= self.model_hparams["n_ctx"] + 1, (
f"Requested completion {sample_len} is too long for"
f"context {context_len} and model context_len {self.model_hparams.n_ctx}"
)
results = sample(
self.model,
flat_context_tokens,
sample_len=sample_len,
sample_fn=sample_fn,
model_output_keys=self.heads,
**model_call_kwargs,
)
samples = results["tokens"]
logprobs = results["logprobs"]
assert samples.size(-2) == n_batch * responses_per_query
assert logprobs.size(-2) == n_batch * responses_per_query
assert samples.size(-1) == sample_len, f"{samples.size()} vs {sample_len}"
assert logprobs.size(-1) == sample_len, f"{logprobs.size()} vs {sample_len}"
samples = samples.view(n_batch, responses_per_query, sample_len)
logprobs = logprobs.view(n_batch, responses_per_query, sample_len)
output = dict(contexts=context_tokens, samples=samples, logprobs=logprobs)
mask, _ = _zero_padding_tokens(output["samples"])
# NOTE: sample doesn't return eval'ed values on final token
mask = mask[:, :, :-1]
for k in self.heads:
reshaped = results[k].view(n_batch, responses_per_query, *results[k].shape[1:])
output[k] = _split_query_response_output_parts(reshaped, query_length, mask)
return output
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
4423,
346,
198,
11748,
334,
27112,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
4889,
540,
11,
32233,
11,
5345,
11,
7343,
11,
360,
... | 2.256973 | 6,561 |
print("123","10")
| [
201,
198,
4798,
7203,
10163,
2430,
940,
4943,
201,
198
] | 2.1 | 10 |
import argparse
import os
import os.path as osp
import pickle
import re
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from modelindex.load_model_index import load
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
console = Console()
MMCLS_ROOT = Path(__file__).absolute().parents[2]
METRICS_MAP = {
'Top 1 Accuracy': 'accuracy_top-1',
'Top 5 Accuracy': 'accuracy_top-5'
}
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
2298,
293,
198,
11748,
302,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
13... | 3.023392 | 171 |
#!/usr/bin/python
#"fserve" by Karim Sultan, September 2020
# fsend is a server (receiver) for the FBomb Protocol
# It communicates with a client (fsend) to transfer a file.
# This protocol makes it easy to drop f-bombs across machines.
#
# This design handles a single connection at a time but accepts
# backlogs. It is meant for individual use, not to serve
# concurrent users.
#
# FBOMB Particulars:
# -> Connect to server
# Client Server
# HI <user:pwd>
# OK <SID> | NOK <Message>
# FSEND <META>
# OK <Proceed> | NOK <Message>
# <DATA>
# OK <Received> | NOK <Message>
# BYE <CID> [CLOSE]
# [CLOSE]
#
# Messages are request / response based.
# Format is:
# [OPCODE] [TOKEN] " | " (optional message separator) <message> (optional message)
# Except for data which is sent in serial chunks.
# See FBomb documentation for details.
# NOTE: Avoid using "print" when possible. Instead, use one of the
# following. They apply formatting, trap the verbose flag, and log data:
#
# pip(msg) for an important success message (green on green)
# pip(msg, alert=True) for important warning messages (red on red)
# note(msg) for loggable verbose mode only messages
# notex(msg) for loggable all times message
import re
import types
import errno
import datetime
import ntpath
import socket
import select
import signal
import os
import hashlib
import getopt
from gamzia.timer import Timer
from gamzia.colours import Colours as C
from gamzia.filedescriptor import *
from gamzia.accountmanager import AccountManager
import sys
argv=sys.argv
argc=len(argv)
# App Info Constants
APP_NAME = "fserve"
APP_VERSION = 1.0
APP_AUTHOR = "Karim Sultan"
APP_DATE = "September 2020"
APP_EMAIL = "karimsultan@hotmail.com"
APP_BLURB = "Server program for file transfer using FBOMB protocol.\n" \
"Non-threaded version; blocking, uses connection queueing."
APP_SYNTAX = "Syntax: fserve [options] <inbound directory>"
# Settings defaults
DEF_ENC = "utf-8" # Default text encoding type
DEF_HOST = "localhost" # The server's hostname or IP address
DEF_PORT = 33333 # The port used by the server
DEF_OVERWRITE = False # Abort if file already exists in inbound dir.
DEF_ALLOWSUBS = False # Abort if inbound filename includes a subdir
DEF_MAXDEPTH = 3 # If allow sub dirs, max hierarchy depth permitted
DEF_VERBOSE = False # T/F for extra detail
DEF_LOGGING = False # Logs output to file
DEF_LOGFILE = "fbomb.log" # Default log file
DEF_HOME = "pub" # Default home directory for inbound files
DEF_ACCOUNTDB = "fb_accounts.db" # Default accounts database
DEF_AUTH = True # Require login?
# Global
FLAG_KILL = False
FLAG_LOGOPEN = False
logfilehandle = None
#Enums
#*************************************************************************
# The configuration class houses parameter and initialization data
# which configures the client.
# NOTE: Private variables must be prefixed with "_". ToDictionary() relies
# on this.
# Uses reflection to create a dictionary of public atributes
# Skips any methods or functions or internals.
#*************************************************************************
# Traps control C for a smooth exit
# Outputs a message to a log file.
# Strips the ANSI colour codes out of the string to stop log clutter.
# Caches file handle so that it is only opened once, and closed on exit.
# Applies a header to new logging session (captured to same logfile).
# Format of a log is:
# [time since program start] message
# Outputs a message for a serious error, and terminates program
# Use this for fatal errors only!
# "Pips up" to let you know something minor happened, doesn't impact
# program flow. This method is intended for non-fatal errors.
# Outputs a message to screen only if in verbose mode OR if show==true
# IE: note() -> only shown if verbose mode enabled
# Just outputs a message regardless of verboseness
# IE: notex() -> always shown
# Adds a user and password to database.
# Note that password is salted with username.
# The default action is "add", but "delete", "list" and "update" are
# also supported.
# KAS 210302 Fixed a password creation bug in add user
# Receives inbound instructions as a string
# Transmits outbound data as bytes
# Handles ASCII files an os dependent line terminators.
# Computes and compares hash values.
# Receives binary data
# OS indepedently strips path returning filename
# Extracts a user and password from string if present
# Returns boolean, string, string.
# NOTE: This method does not use a good pattern. It can
# be decomposed and refactored into a nicer approach.
# BUT it was implemented like this during agile development
# and works so was kept.
# TODO: Refactor / Decompose and clean
# Handles the protocol
# TODO: Convert from linear to state machine
# From:
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta/9532586
# There was a problem with ERROR_INVALID_FILE not being defined under NT (but fine in Linux) so
# this was replaced with the value 123 to ensure NT portability.
def isSafeFilename(pathname: str) -> bool:
'''
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
'''
ERROR_INVALID_NAME=123
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
try:
if not isinstance(pathname, str) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname)
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
# Only the following exceptions indicate invalid pathnames:
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
# NOTE: ERROR_INVALID_NAME=123 and the constant isn't portable.
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError as exc:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid.
else:
return True
# Shows the info, blurb, syntax and options screen.
# Parses options, switches and args. Does validation. Populates the config
# structure which contains the info need to do the file transfer.
# Main function sets up server socket, and handles connections until CTRL-C or error.
# 210302 Cross-platform portability issue detected. Socket.accept() is blocking by default.
# However, on linux it will accept a CTRL-C and abort. In Windows 10, it does not
# interrupt the blocking call, and instead buffers the CTRL-C to be processed after the
# accept() call returns! This makes it hard to exit the server on Windows. Python
# does allow you to set sockets to non-blocking, but I wanted blocking reads and writes.
# So the solution was to use select.select() (which is blocking as well, BUT can accept
# a timeout value).
# Run program
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
1,
69,
2655,
303,
1,
416,
9375,
320,
37399,
11,
2693,
12131,
198,
2,
277,
21280,
318,
257,
4382,
357,
260,
39729,
8,
329,
262,
13186,
2381,
20497,
198,
2,
632,
48556,
351,
257,
... | 2.89114 | 3,307 |
#!/usr/bin/python
import sys
import os
import logging
log = logging.getLogger( __name__ )
logging.basicConfig()
import hdbfs
import higu.config
MAX_TEXT_LEN = 2**18
if( __name__ == '__main__' ):
import optparse
parser = optparse.OptionParser( usage = 'Usage: %prog [options] files...' )
parser.add_option( '-c', '--config',
dest = 'config',
help = 'Configuration File' )
parser.add_option( '-p', '--pretend',
dest = 'pretend', action = 'store_true', default = False,
help = 'Pretend, don\'t actually do anything' )
parser.add_option( '-r', '--recovery',
dest = 'recovery', action = 'store_true', default = False,
help = 'Recovery mode' )
parser.add_option( '-a', '--album',
dest = 'album',
help = 'Create album and add files to album' )
parser.add_option( '-x', '--text',
dest = 'text_data',
help = 'Add text description to album (txt file)' )
parser.add_option( '-t', '--tags',
dest = 'taglist',
help = 'List of tags (\',\' separated) to apply' )
parser.add_option( '-T', '--newtags',
dest = 'taglist_new',
help = 'Same as -t, but creates tags if they don\'t exist' )
parser.add_option( '-n', '--name-policy',
dest = 'name_policy',
help = 'Policy for persisting names ("noreg", "noset", "setundef", "setall")' )
opts, files = parser.parse_args()
if( len( files ) < 1 ):
parser.print_help()
sys.exit( 0 )
if( opts.config is not None ):
cfg = higu.config.init( opts.config )
hdbfs.init( cfg.get_path( 'library' ) )
else:
hdbfs.init()
h = hdbfs.Database()
h.enable_write_access()
if( opts.recovery ):
h.recover_files( files )
sys.exit( 0 )
tags = opts.taglist.split( ',' ) if( opts.taglist is not None ) else []
tags_new = opts.taglist_new.split( ',' ) if( opts.taglist_new is not None ) else []
create_album = opts.album is not None
album_name = opts.album if( opts.album != '-' ) else None
if( create_album and opts.text_data is not None ):
textfile = open( opts.text_data, 'r' )
text_data = unicode( textfile.read( MAX_TEXT_LEN ), 'utf-8' )
assert textfile.read( 1 ) == '', 'Text file too long'
else:
text_data = None
name_policy = hdbfs.NAME_POLICY_SET_IF_UNDEF
if( opts.name_policy == "noreg" ):
name_policy = hdbfs.NAME_POLICY_DONT_REGISTER
elif( opts.name_policy == "noset" ):
name_policy = hdbfs.NAME_POLICY_DONT_SET
elif( opts.name_policy == "setundef" ):
name_policy = hdbfs.NAME_POLICY_SET_IF_UNDEF
elif( opts.name_policy == "setall" ):
name_policy = hdbfs.NAME_POLICY_SET_ALWAYS
h.batch_add_files( files, tags, tags_new, name_policy,
create_album, album_name, text_data )
# vim:sts=4:et:sw=4
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
11748,
18931,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
11593,
3672,
834,
1267,
198,
6404,
2667,
13,
35487,
16934,
3419,
198,
198,
11748,
... | 2.247295 | 1,294 |
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import hashlib
import hmac
import os
import sys
import unittest
import datahog
from datahog import error
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
if __name__ == '__main__':
unittest.main()
| [
2,
43907,
25,
2393,
12685,
7656,
28,
40477,
23,
25,
316,
25,
2032,
28,
19,
25,
912,
28,
23,
25,
6448,
28,
19,
198,
198,
11748,
12234,
8019,
198,
11748,
289,
20285,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
555,
715,
395,
1... | 2.516129 | 124 |
#
#
# Jesse Livezey 2014-04-19
#
import numpy as np
#Initialize settings for inference
def infer(basis, stimuli, eta, lamb, nIter, adapt, coeffs=None, softThresh=0):
"""Infers sparse coefficients for dictionary elements when representing a stimulus using LCA algorithm.
Args:
basis: Dictionary used to represent stimuli. Should be arranged along rows.
coeffs: Values to start pre-threshold dictionary coefficients at for all stimuli.
stimuli: Goals for dictionary representation. Should be arranged along rows.
eta: Controls rate of inference. Equals to 1/tau in 2018 Olshausen paper.
thresh: Threshold used in calculation of output variable of model neuron.
lamb: Minimum value for thresh.
nIter: Numer of times to run inference loop.
softThresh: Boolean choice of threshold type.
adapt: Amount to change thresh by per run.
Results:
a: Post-threshold dictionary coefficients.
u: Pre-threshold internal *voltage.*
thresh: Final value of thresh variable.
Raises:
"""
numDict = basis.shape[0] # number of elements in dictionary
numStim = stimuli.shape[0] # number of stimuli
dataSize = basis.shape[1] # size of a dictionary element
#Initialize u and a
u = np.zeros((numStim, numDict))
# Don't understand what this does yet
if coeffs is not None:
u[:] = np.atleast_2d(coeffs)
a = np.zeros_like(u)
ci = np.zeros((numStim, numDict))
# Calculate G: overlap of basis functions with each other minus identity
# Row-wise correlation matrix - identity matrix to eliminate self correlation
G = basis.dot(basis.T) - np.eye(numDict)
#b[i,j] is the overlap from stimuli:i and basis:j
b = stimuli.dot(basis.T)
thresh = np.absolute(b).mean(1)
#Update u[i] and a[i] for nIter time steps
for kk in range(nIter):
#Calculate ci: amount other neurons are stimulated times overlap with rest of basis
ci[:] = a.dot(G)
# Update u using Rozell et al. (2008) eqn.
u[:] = eta*(b-ci)+(1-eta)*u
if softThresh == 1:
# shrinkage with thresh
a[:] = np.sign(u)*np.maximum(0.,np.absolute(u)-thresh[:,np.newaxis])
else: # hard thresh
a[:] = u
# Converts 'thresh' from 1D vector to 2D where each element is a row value.
# Compares every element of a row of 'a' with the element of the same row in 'thresh'.
# Hard threshold
a[np.absolute(a) < thresh[:,np.newaxis]] = 0.
# Multiply threshold values bigger than 'lamb' with 'adapt' to change thresh per run
thresh[thresh>lamb] = adapt*thresh[thresh>lamb]
# Soft thresholding - asude
# a[:] = np.sign(u) * np.maximum(0., np.absolute(u) - lamb)
return (a,u)
| [
2,
198,
2,
198,
2,
18033,
7547,
2736,
88,
1946,
12,
3023,
12,
1129,
198,
2,
628,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
24243,
1096,
6460,
329,
32278,
198,
4299,
13249,
7,
12093,
271,
11,
25973,
11,
2123,
64,
11,
19343,
... | 2.327957 | 1,302 |
"""
OpenVINO DL Workbench
Class for ORM model described a Environment
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from typing import List, Dict
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import Session
from config.constants import ENVIRONMENTS_FOLDER
from wb.main.models.base_model import BaseModel
| [
37811,
198,
4946,
53,
46016,
23641,
5521,
26968,
198,
5016,
329,
6375,
44,
2746,
3417,
257,
9344,
628,
15069,
357,
66,
8,
33448,
8180,
10501,
628,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
19... | 3.973333 | 225 |
import os
import glob
import subprocess
import re
if __name__ == "__main__":
glob_remove("_test/*_mifs.*")
for file in glob.glob("_test/*"):
glob_remove("_test/*_mifs.*")
output = run(f'py mifs.py "{file}"')
filename, extension = os.path.splitext(os.path.basename(file))
output_file = glob.glob(f'_test/*_mifs.*')[0]
size_input = os.stat(file).st_size
size_output = os.stat(output_file).st_size
print( f'{file} {size_input}=>{size_output} ({(1 if size_input < size_output else -1)*round((size_input/size_output)*100, 2)}%)' )
for key, value in re.findall(r"(?m)^(.*?)\: ([^\s]+)", output):
print(f' {key}: {value}') | [
11748,
28686,
198,
11748,
15095,
198,
11748,
850,
14681,
198,
11748,
302,
198,
197,
197,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
197,
4743,
672,
62,
28956,
7203,
62,
9288,
15211,
62,
76,
361,
82,
15885,
4943... | 2.301075 | 279 |
"""Firebase token authentication classes"""
from typing import *
from django.contrib import auth
from django.utils import timezone
from rest_framework import authentication, exceptions
import firebase_admin
from firebase_admin import auth as firebase_auth
from .settings import api_settings
from . import models
FIREBASE_APP_NAME = 'drf_firebase_token_auth'
_User = auth.get_user_model()
class FirebaseTokenAuthentication(authentication.TokenAuthentication):
    """Firebase token authentication class.

    Verifies the Firebase ID token supplied in the request's
    Authorization header, then resolves (and optionally creates) the
    matching local Django user.
    """
    keyword = api_settings.AUTH_HEADER_TOKEN_KEYWORD

    @staticmethod
    def _extract_email_from_firebase_user(
            firebase_user: firebase_auth.UserRecord,
            ignore_unverified_email=api_settings.IGNORE_FIREBASE_UNVERIFIED_EMAIL,
    ) -> Union[str, None]:
        """Extract user email from a Firebase user.

        Args:
            firebase_user: A Firebase user.
            ignore_unverified_email: Is a verified email required.

        Returns:
            User's email address or None if not found.
        """
        if ignore_unverified_email:
            if firebase_user.email_verified and firebase_user.email:
                return firebase_user.email
            else:
                return None

        # Make best effort to extract an email address.
        # BUG FIX: the primary email was previously included whenever the
        # user object itself was truthy, which could place None ahead of
        # valid provider emails; only include it when it is actually set.
        emails = [firebase_user.email] if firebase_user.email else []
        emails += [data.email for data in firebase_user.provider_data if data.email]
        return emails[0] if emails else None

    def authenticate_firebase_user(self,
                                   token: str) -> firebase_auth.UserRecord:
        """Authenticate a Firebase user using a given token

        Args:
            token: A Firebase token.

        Returns:
            A firebase user

        Raises:
            exceptions.AuthenticationFailed: The token is malformed,
                invalid, expired or revoked.
        """
        try:
            # NOTE(review): ``self._firebase_app`` is never assigned in
            # this class — presumably set by a subclass or elsewhere in
            # the module; verify.
            decoded_token = firebase_auth.verify_id_token(
                token,
                app=self._firebase_app,
                check_revoked=api_settings.VERIFY_FIREBASE_TOKEN_NOT_REVOKED
            )
        except ValueError:
            raise exceptions.AuthenticationFailed(
                'JWT was found to be invalid, or the App’s project ID cannot '
                'be determined.'
            )
        except (firebase_auth.InvalidIdTokenError,
                firebase_auth.ExpiredIdTokenError,
                firebase_auth.RevokedIdTokenError,
                firebase_auth.CertificateFetchError) as exc:
            if exc.code == 'ID_TOKEN_REVOKED':
                raise exceptions.AuthenticationFailed(
                    'Token revoked, inform the user to reauthenticate or '
                    'signOut().'
                )
            else:
                raise exceptions.AuthenticationFailed(
                    'Token is invalid.'
                )
        return firebase_auth.get_user(decoded_token['uid'],
                                      app=self._firebase_app)

    def get_local_user(self, firebase_user: firebase_auth.UserRecord) -> _User:
        """Get a local user from a Firebase user.

        Args:
            firebase_user: A Firebase user.

        Returns:
            A local user model object.

        Raises:
            User.DoesNotExist: Could not find a local user matching to the
                given Firebase user.
        """
        # Try getting from a local firebase user.
        try:
            return models.FirebaseUser.objects.select_related('user').\
                get(uid=firebase_user.uid).user
        except models.FirebaseUser.DoesNotExist:
            pass
        # Try getting user by email.
        email = self._extract_email_from_firebase_user(firebase_user)
        if email:
            try:
                return _User.objects.get(**{_User.EMAIL_FIELD: email})
            except _User.DoesNotExist:
                pass
        # Try getting user by uid, and let User.DoesNotExist raise if not found.
        return _User.objects.get(**{_User.USERNAME_FIELD: firebase_user.uid})

    def create_local_user(self,
                          firebase_user: firebase_auth.UserRecord) -> _User:
        """Create a local user for a given Firebase user

        Args:
            firebase_user: A Firebase user.

        Returns:
            The created local user model object.
        """
        email = self._extract_email_from_firebase_user(firebase_user)
        # Prefer the email as the username; fall back to the Firebase uid.
        username = email if email else firebase_user.uid
        user = _User.objects.create_user(**{_User.USERNAME_FIELD: username})
        if email:
            user.email = email
        if firebase_user.display_name:
            # Last word becomes the last name, the rest the first name.
            words = firebase_user.display_name.split(' ')
            user.first_name = ' '.join(words[:-1])
            user.last_name = words[-1]
        user.save()
        return user

    @staticmethod
    def get_or_create_local_firebase_user(
        firebase_user: firebase_auth.UserRecord,
        local_user
    ) -> models.FirebaseUser:
        """Get or create a local firebase user.

        Args:
            firebase_user: A Firebase user.
            local_user: User model object.

        Returns:
            The created local Firebase user.
        """
        local_firebase_user, created = \
            models.FirebaseUser.objects.get_or_create(
                uid=firebase_user.uid,
                defaults={'user': local_user}
            )
        return local_firebase_user

    def authenticate_credentials(self, token: str) -> Tuple[_User, None]:
        """Authenticate the token against Firebase

        Args:
            token: Firebase authentication token.

        Returns:
            The local user matching the Firebase authenticated user.
        """
        # Authenticate the Firebase token.
        firebase_user = self.authenticate_firebase_user(token)
        # Get or create local user that matches the Firebase user.
        try:
            local_user = self.get_local_user(firebase_user)
        except _User.DoesNotExist:
            if api_settings.SHOULD_CREATE_LOCAL_USER:
                local_user = self.create_local_user(firebase_user)
            else:
                raise exceptions.AuthenticationFailed(
                    'User is not registered to the application.'
                )
        # Update user last login.
        local_user.last_login = timezone.now()
        local_user.save()
        # Get or create a local Firebase user.
        self.get_or_create_local_firebase_user(firebase_user=firebase_user,
                                               local_user=local_user)
        return local_user, None
| [
37811,
13543,
8692,
11241,
18239,
6097,
37811,
198,
198,
6738,
19720,
1330,
1635,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6284,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
1334,
62,
30604,
1330,
18239,
11,
13269... | 2.148922 | 3,062 |
from pathlib import Path
from astropy.io import fits
import numpy as np
from PIL import Image
def load_hdf(file_name):
    '''load HDF image

    Parameters
    ----------
    file_name : str
        full file name of HDF5 file

    Returns
    -------
    list of scans read from the dataset at /entry/data/data
    '''
    # Local import: h5py is only required when HDF input is actually used.
    import h5py

    # BUG FIX: the function previously read an undefined name ``path``
    # instead of its ``file_name`` argument, raising NameError on every
    # call. Also replaced the removed ``.value`` accessor (gone in
    # h5py >= 3.0) with the documented ``dataset[()]`` read.
    hdf = h5py.File(file_name, 'r')['entry']['data']['data'][()]
    tmp = []
    for iScan in hdf:
        tmp.append(iScan)
    return tmp
def load_fits(file_name):
    '''load fits image

    Parameters
    ----------
    full file name of fits image
    '''
    try:
        # Read the primary HDU; tolerate files with a missing END card.
        hdu_data = fits.open(file_name, ignore_missing_end=True)[0].data
        if len(hdu_data.shape) == 3:
            # Collapse a singleton leading axis to a 2-D image.
            hdu_data = hdu_data.reshape(hdu_data.shape[1:])
        return hdu_data
    except OSError:
        raise OSError("Unable to read the FITS file provided!")
def load_tiff(file_name):
    '''load tiff image

    Parameters:
    -----------
    full file name of tiff image

    Returns
    -------
    [data, metadata] : numpy array of pixel data and a dict of TIFF tags
    '''
    try:
        _image = Image.open(file_name)
        metadata = dict(_image.tag_v2)
        data = np.asarray(_image)
        _image.close()
        return [data, metadata]
    except Exception as exc:
        # Narrowed from a bare ``except`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chained so the original
        # failure is preserved for debugging.
        raise OSError("Unable to read the TIFF file provided!") from exc
6738,
3108,
8019,
1330,
10644,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
4299,
3440,
62,
71,
7568,
7,
7753,
62,
3672,
2599,
198,
220,
220,
220,
705,
... | 2.187023 | 524 |
import warnings
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.feature_selection import RFECV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
confusion_matrix,
classification_report,
make_scorer,
accuracy_score
)
def cost_score(y, y_pred, fp_cost=25, fn_cost=125):
    '''Business cost of the misclassifications in a prediction.

    Parameters
    ----------
    y : array-like of 0/1 ground-truth labels
    y_pred : array-like of 0/1 predicted labels
    fp_cost : cost charged per false positive (default 25)
    fn_cost : cost charged per false negative (default 125)

    Returns
    -------
    int
        fp_cost * (#false positives) + fn_cost * (#false negatives)
    '''
    # Accept lists/Series as well as ndarrays.
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    # get the misclassifications (idiomatic ``!=`` instead of
    # ``np.equal(...) == False``)
    misclass_idx = np.where(y != y_pred)[0]
    # false positives: predicted 1 where the truth is 0
    n_fp = np.count_nonzero(y_pred[misclass_idx] == 1)
    # false negatives: predicted 0 where the truth is 1
    n_fn = np.count_nonzero(y_pred[misclass_idx] == 0)
    return n_fp * fp_cost + n_fn * fn_cost
warnings.filterwarnings('ignore')
# pd.options.display.max_columns = 100
random_state = 42
random_generator = np.random.RandomState(random_state)
cost_scorer = make_scorer(cost_score, greater_is_better=False)
data = pd.read_csv('../final_project.csv')
y = data['y']
X = data.drop(['y'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=random_state, stratify=y)
print('X_train: ', X_train.shape,
'\ny_train: ', y_train.shape,
'\nX_test: ', X_test.shape,
'\ny_test: ', y_test.shape)
# fix spelling error
X_test['x24'] = X_test['x24'].str.replace('euorpe', 'europe')
# remove %
X_test['x32'] = pd.to_numeric(X_test['x32'].str.replace('%', ''))
# remove $
X_test['x37'] = pd.to_numeric(X_test['x37'].str.replace('$', ''))
# repeat process for training set
X_train['x24'] = X_train['x24'].str.replace('euorpe', 'europe')
X_train['x32'] = pd.to_numeric(X_train['x32'].str.replace('%', ''))
X_train['x37'] = pd.to_numeric(X_train['x37'].str.replace('$', ''))
# remake objects
objects = X_train.select_dtypes(['O'])
objects_test = X_test.select_dtypes(['O'])
# imputing with mode from training data
X_train['x24'].fillna('asia', inplace=True)
X_train['x29'].fillna('July', inplace=True)
X_train['x30'].fillna('wednesday', inplace=True)
X_test['x24'].fillna('asia', inplace=True)
X_test['x29'].fillna('July', inplace=True)
X_test['x30'].fillna('wednesday', inplace=True)
names = [i for i in list(objects.columns)]
le = LabelEncoder()
for i in names:
le.fit(objects[i].astype(str))
X_train[i] = le.transform(X_train[i])
X_test[i] = le.transform(X_test[i])
KNNimp = KNNImputer(n_neighbors=3)
X_train = KNNimp.fit_transform(X_train)
X_test = KNNimp.transform(X_test)
# define the estimator
logistic = LogisticRegression()
# provide the parameters of the feature selection process
feature_selector = RFECV(logistic,
step = 1,
min_features_to_select= 1,
cv = 5,
n_jobs = -1)
feature_selector = feature_selector.fit(X_train, y_train)
X_train = feature_selector.transform(X_train)
X_test = feature_selector.transform(X_test)
print('X_train shape: ', X_train.shape,
'\nX_test shape: ', X_test.shape)
xgb_params = {
'n_estimators': np.arange(100, 500, 10, dtype='int'),
'learning_rate': np.linspace(0.01, 1, num=1000, dtype='float'),
'gamma':np.geomspace(0.001, 10, num=1000, dtype='float'),
'max_depth':[d for d in range(1, 11)],
'subsample':np.linspace(0.1, 1, num=100, dtype='float'),
'colsample_bytree':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'colsample_bylevel':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'colsample_bynode':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'lambda': np.geomspace(0.001, 10, num=100, dtype='float'),
'alpha': np.geomspace(0.001, 10, num=100, dtype='float')
}
xgb = XGBClassifier(booster='gbtree',
early_stopping_rounds=10,
random_state=random_state)
xgb_search = RandomizedSearchCV(xgb,
xgb_params,
random_state=random_state,
scoring=cost_scorer,
n_iter=100,
cv=5,
verbose=0,
n_jobs=-1)
xgb_search.fit(X_train, y_train)
y_pred = xgb_search.best_estimator_.predict(X_train)
print('\n\n\nTraining Performance')
print('Best model Score:', -xgb_search.best_score_) # negate since 'greater_is_better=False'
print('Best model Accuracy:', accuracy_score(y_train, y_pred) )
y_pred = xgb_search.best_estimator_.predict(X_test)
test_cost = cost_score(y_test, y_pred)
test_acc = accuracy_score(y_test, y_pred)
print('\n\n\nTest Performance')
print('Best Model Test Cost', test_cost)
print('Best Model Test Accuracy', test_acc)
print('\n\n\nBest Parameters')
print(xgb_search.best_params_)
| [
198,
11748,
14601,
198,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
11748,
384,
397,
1211,... | 2.23266 | 2,278 |
import interface
import arquivo
import classes
from time import sleep
# Load the state and city .txt data files before showing the menu.
arquivo.carregarArqEst()
arquivo.carregarArqCid()
# Functions
# Menu: loop until the user picks option 1 (exit).
while True:
    resposta = interface.menu(['Finalizar o Programa', 'Cadastrar Estados', 'Cadastrar Cidades', 'Relatório de Estados', 'Relatório de Cidades', 'Atualizar números de casos'])
    if resposta == 1:
        interface.cabecalho('Saindo do Sistema.....Até logo e lave as mãos!')
        break
    elif resposta == 2:
        # NOTE(review): cadastroDeEstado() and the handlers below are not
        # defined in this excerpt — presumably defined earlier in the
        # file; verify.
        cadastroDeEstado()
    elif resposta == 3:
        cadastroDeCidade()
    elif resposta == 4:
        relatorioEstado()
    elif resposta == 5:
        relatorioCidade()
    elif resposta == 6:
        atualizacaoCasos()
    else:
        # Invalid option: warn in red (ANSI escape codes), pause, redisplay.
        print('\033[31mOpção inválida. Digite novamente!\033[m')
        sleep(2)
| [
11748,
7071,
201,
198,
11748,
610,
421,
23593,
201,
198,
11748,
6097,
201,
198,
6738,
640,
1330,
3993,
201,
198,
220,
201,
198,
2,
43,
263,
28686,
943,
421,
452,
418,
256,
742,
13,
201,
198,
283,
421,
23593,
13,
7718,
2301,
283,
3... | 1.98627 | 437 |
# A3.b1
import numpy as np
import matplotlib.pyplot as plt
n = 30
# np.random.seed(1)
x = np.random.uniform(0,1,n)
x_mean = np.mean(x)
x_sd = np.std(x)
# x = (x-x_mean) # x after standardization
y = 4*np.sin(np.pi*x)*np.cos(6*np.pi*(x**2)) + np.random.standard_normal(n)
# y = (y - x_mean) / x_sd
error_validation_list = []
lamb = 500
lamb_list = []
d_list = []
for lamb in list(500 * (1/2)**(np.arange(0,20))):
for d in list(range(0, 51)):
error_validation = 0
print("Lam: ", lamb, ", d: ", d)
for i in range(n):
x_train = np.append(x[0:i], x[i+1:n])
y_train = np.append(y[0:i], y[i+1:n])
x_validation = x[i]
y_validation = y[i]
K = k_poly(x_train[:, np.newaxis], x_train[:, np.newaxis], d)
alpha = np.linalg.pinv(K + lamb) @ y_train
# in predicted y formula
k_xi_x = (1 + x_validation * x_train[np.newaxis, :]) ** d # use this when polynomial kernel
# k_xi_x = np.exp(-gamma*np.linalg.norm(x_validation - x_train[np.newaxis, :], 2))
y_predicted = alpha @ k_xi_x.T
error_validation += (y_predicted - y_validation).T @ (y_predicted- y_validation)
# error_validation = error_validation[0][0]
error_validation /= n
print("error_validation: ", error_validation)
error_validation_list.append(error_validation)
lamb_list.append(lamb)
d_list.append(d)
min_error = min(error_validation_list)
index_boostrap_sample_min_error = error_validation_list.index(min(error_validation_list))
lamb_best_poly = lamb_list[index_boostrap_sample_min_error]
d_best = d_list[index_boostrap_sample_min_error]
print("Best lamb: ", lamb_best_poly, ", Best d: ", d_best)
# lamb_best_poly = 0.48828125
d_best = 30
# plots the comparaison
# np.random.seed(1)
x_fine = np.array(list(np.arange(min(x),max(x), 0.01)) )
n = len(x_fine)
y_fine_true = 4*np.sin(np.pi*x_fine)*np.cos(6*np.pi*(x_fine**2))
y_fine_grid = y_fine_true + np.random.standard_normal(n)
f_poly_predicted = []
for xi in x_fine:
K = k_poly(x_fine[:, np.newaxis], x_fine[:, np.newaxis], d_best)
alpha = np.linalg.pinv(K + lamb_best_poly) @ y_fine_grid
k_xi_x = (1 + xi * x_fine[np.newaxis, :]) ** d_best # use this when polynomial kernel
y_predicted = alpha @ k_xi_x.T
f_poly_predicted.append(y_predicted)
plt.plot(x_fine, y_fine_true, label='True')
plt.plot(x_fine, f_poly_predicted, label='Poly Kernel')
plt.plot(x, y,'bo', label='Observed')
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3b_1_test.png")
plt.show()
# A3.c1
B = 300
n = 30
n_fine = len(x_fine)
# np.random.seed(0)
boostrap_predicted_poly_matrix = []
for j in range(B):
index_boostrap_sample = np.random.choice(n,n)
x_training = x[index_boostrap_sample]
y_training = y[index_boostrap_sample]
K = k_poly(x_training[:,np.newaxis],x_training[:,np.newaxis], d_best)
alpha = np.linalg.solve((K + lamb_best_poly*np.eye(n, n)), y_training)
y_predicted_boostrap_ploy = []
for xi in x_fine:
y_predicted_boostrap_ploy.append(np.sum((1+xi*x_training[np.newaxis,:]) ** d_best @ alpha))
boostrap_predicted_poly_matrix.append(y_predicted_boostrap_ploy)
boostrap_predicted_poly_matrix = np.array(boostrap_predicted_poly_matrix)
percent_5_list_poly = []
percent_95_list_poly = []
for i in range(n_fine):
sorted_xi_from_300_B_sample = np.sort(boostrap_predicted_poly_matrix[:, i])
x_percentile_5 = sorted_xi_from_300_B_sample[int(B * 0.05)]
x_percentile_95 = sorted_xi_from_300_B_sample[int(B * 0.95)]
percent_5_list_poly.append(x_percentile_5)
percent_95_list_poly.append(x_percentile_95)
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_poly_predicted, label = 'Poly Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.fill_between(x_fine, percent_5_list_poly, percent_95_list_poly, alpha=0.3, label="90% CI")
plt.ylim(-6, 6)
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3c_1_test.png")
plt.show()
#######################################################################################################################
# A3.b2
n = 30
# np.random.seed(0)
# x = np.random.rand(n)
x = np.random.uniform(0,1,n)
y_true = 4*np.sin(np.pi*x)*np.cos(6*np.pi*(x**2))
y = y_true + np.random.randn(n)
error_validation_list = []
lamb_list = []
gamma_list = []
d_list =[]
lamb = 1
for lamb in list(500 * (1/2)**(np.arange(0,30))):
for gamma in list(50 * (1/1.1)**(np.arange(0,30))):
print("Lam: ", lamb, ", gamma: ", gamma)
error_validation = 0
for i in range(n):
x_train = np.append(x[0:i], x[i+1:n])
y_train = np.append(y[0:i], y[i+1:n])
x_validation = x[i]
y_validation = y[i]
K = k_rbf(x_train[:,np.newaxis],x_train[np.newaxis,:], gamma)
alpha = np.linalg.pinv(K + lamb) @ y_train
k_xi_x = np.exp(-gamma*(x_validation-x_train[np.newaxis,:])**2)
error_validation += (k_xi_x@alpha - y_validation).T@(k_xi_x@alpha - y_validation)
error_validation_list.append(error_validation)
print("error_validation: ", error_validation)
lamb_list.append(lamb)
gamma_list.append(gamma)
min_error = min(error_validation_list)
index_boostrap_sample_min_error = error_validation_list.index(min_error)
lamb_best_rbf = lamb_list[index_boostrap_sample_min_error]
gamma_best = gamma_list[index_boostrap_sample_min_error]
print('Best gamma for RBF kernel is : ', gamma_best)
print('Best Lambda for RBF kernel is :', lamb_best_rbf)
gamma_best= 10.175399541327897
lamb_best_rbf= 9.313225746154785e-07
# np.random.seed(10)
x_fine = np.arange(min(x),max(x),0.001)
n = len(x_fine)
y_fine_true = 4*np.sin(np.pi*x_fine)*np.cos(6*np.pi*(x_fine**2))
y_fine_grid = y_fine_true + np.random.standard_normal(n)
f_rbf_predicted = []
K_rbf = k_rbf(x_fine[:,np.newaxis],x_fine[np.newaxis,:], gamma_best)
alpha = np.linalg.solve((K_rbf + lamb_best_rbf*np.eye(n, n)), y_fine_grid)
for xi in x_fine:
f_rbf_predicted.append(np.sum(alpha * np.exp(-gamma_best*(xi-x_fine)**2)))
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_rbf_predicted, label = 'RBF Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3b_2.png")
plt.show()
# A3.c2
B = 300
n=30
n_fine = len(x_fine)
# np.random.seed(0)
boostrap_predicted_rbf_matrix = []
# user x, y from previous
for j in range(B):
index_boostrap_sample = np.random.choice(n,n)
x_training = x[index_boostrap_sample]
y_training = y[index_boostrap_sample]
K_rbf = k_rbf(x_training[:, np.newaxis], x_training[np.newaxis, :], gamma_best)
alpha = np.linalg.solve((K_rbf + lamb_best_rbf * np.eye(n, n)), y_training)
y_predicted_boostrap_rbf = []
for xi in x_fine:
y_predicted_boostrap_rbf.append(np.sum(alpha * np.exp(-gamma_best*(xi-x_training)**2)))
boostrap_predicted_rbf_matrix.append(y_predicted_boostrap_rbf)
boostrap_predicted_rbf_matrix = np.array(boostrap_predicted_rbf_matrix)
percent_5_list_rbf = []
percent_95_list_rbf = []
for i in range(n_fine):
sorted_xi_from_300_B_sample = np.sort(boostrap_predicted_rbf_matrix[:, i])
x_percentile_5 = sorted_xi_from_300_B_sample[int(B * 0.05)]
x_percentile_95 = sorted_xi_from_300_B_sample[int(B * 0.95)]
percent_5_list_rbf.append(x_percentile_5)
percent_95_list_rbf.append(x_percentile_95)
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_rbf_predicted, label = 'rbf Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.fill_between(x_fine, percent_5_list_rbf, percent_95_list_rbf, alpha=0.3, label="90% CI")
plt.ylim(-6, 6)
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3c_2_test.png")
plt.show()
#######################################################################################################################
| [
2,
317,
18,
13,
65,
16,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
77,
796,
1542,
198,
2,
45941,
13,
25120,
13,
28826,
7,
16,
8,
198,
87,
796,
45941,
13,
25120,
13... | 2.160657 | 3,772 |
#!/usr/bin/env python
#
# Make software environment from a yml database.
#
import getopt
import yaml
import sys
import shlex
def get_entry(spec=None, distrib=None, distrib_version=None, pkg=None,
              section=None):
    """
    Look up the entry for *pkg* in *spec[section]*.

    Precedence: exact "distrib-version" key > plain distrib key > the
    distribution it maps to via spec['match'] > the wildcard key.
    Returns None when the package is unknown or nothing applies.
    """
    if pkg not in spec[section]:
        return None
    candidates = spec[section][pkg]
    # Most specific first: "<distrib>-<version>", then plain "<distrib>".
    for key in ('{0}-{1}'.format(distrib, distrib_version), distrib):
        if key in candidates:
            return candidates[key]
    # Fall back to the distribution this one is declared equivalent to.
    if distrib in spec['match']:
        matched = spec['match'][distrib]
        if matched in candidates:
            return candidates[matched]
    # ``wildcard`` is a module-level name set after the spec file is read.
    if wildcard in candidates:
        return candidates[wildcard]
def pkg_entries(spec=None, distrib=None, distrib_version=None, pkg=None):
    """
    Find recursively entries for pkg and distribution distrib in a
    specification spec.

    Returns a list: [None] when the package has no entry, [entry] when
    the entry is a dict, otherwise the flattened result of expanding
    each listed name recursively.
    """
    result = None
    if pkg in spec['pkgs']:
        result = get_entry(spec, distrib, distrib_version, pkg, 'pkgs')
    if result is None:
        # No entry at all: signal with [None] so callers can distinguish
        # "unknown package" from an empty expansion.
        return [result]
    elif is_dict(result):
        # NOTE(review): is_dict/is_atom/is_list are not defined in this
        # excerpt — presumably type-test helpers defined elsewhere in
        # the file; verify.
        return [result]
    else:
        if is_atom(result):
            # Normalize a single entry to a one-element list.
            result = [result]
        r = list()
        for e in result:
            # Skipping e == pkg presumably guards against self-referencing
            # entries recursing forever — confirm intent.
            if e != pkg:
                ne = pkg_entries(spec=spec, distrib=distrib,
                                 distrib_version=distrib_version, pkg=e)
                if ne == [None]:
                    # Name with no further expansion: keep it literally.
                    r.append(e)
                else:
                    r += ne
        return r
def begin(distrib=None, distrib_version=None, output_mode=None):
    """
    Write the distribution preamble: a Docker FROM line when generating
    a Dockerfile, nothing for the other output modes.
    """
    if output_mode != OutputMode.Docker:
        return
    sys.stdout.write('FROM {0}:{1}\n'.format(distrib, distrib_version))
def env(definitions=None, output_mode=None):
    """
    Write the environment-variable definitions as one line-continued
    entry, prefixed with ENV when targeting Docker. Writes nothing for
    an empty definition list.
    """
    if len(definitions) == 0:
        return
    prefix = ['ENV'] if output_mode == OutputMode.Docker else []
    items = prefix + list(definitions)
    sys.stdout.write('{0}\n'.format(' \\ \n '.join(items)))
def run(installer=None, command=None, pkg=None, pkgs=None,
        output_mode=OutputMode.Script):
    """
    Format an install command according to output mode.

    Writes a single Docker RUN line or a plain script line to stdout;
    exits the process for any unsupported output mode.
    """
    if output_mode == OutputMode.Docker:
        items = ['RUN']
    else:
        if output_mode == OutputMode.Script:
            items = []
        else:
            # Unsupported mode (e.g. Vagrant): report and abort.
            sys.stderr.write('output mode {0} is not implemented\n'.format(
                output_mode))
            exit(1)
    if installer is not None:
        items.append(installer)
    if command is not None:
        if '&&' in command:
            # Split a chained shell command so each part gets its own
            # continuation line, re-appending '&&' to all but the last.
            coms = command.split('&&')
            items += ['{0} &&'.format(c.lstrip().rstrip()) for c in coms[:-1]]
            items.append(coms[-1].lstrip().rstrip())
        else:
            items.append(command)
    if pkg is not None:
        items.append(pkg)
    if pkgs is not None:
        items += pkgs
    # Join all fragments with backslash line continuations.
    sys.stdout.write('{0}\n'.format(' \\ \n '.join(items)))
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['pkg=',
'pkgs=',
'script',
'docker',
'vagrant',
'split=',
'distrib='])
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
usage()
exit(2)
distrib = None
distrib_version = None
pkgs = list()
output_mode = OutputMode.Script
split = False
for o, a in opts:
if o == '--distrib':
if ':' in a:
distrib, distrib_version = a.split(':')
else:
distrib = a
elif o == '--pkg':
pkgs.append(a)
elif o == '--pkgs':
pkgs += a.split(',')
elif o == '--script':
output_mode = OutputMode.Script
elif o == '--docker':
output_mode = OutputMode.Docker
elif o == '--vagrant':
output_mode = OutputMode.Vagrant
elif o == '--split':
split = a.lower() in ['true', 'yes', '1']
specfilename = args[0]
with open(specfilename) as specfile:
spec = yaml.load(specfile.read())
wildcard = None
if 'wildcard' in spec:
wildcard = spec['wildcard']
else:
wildcard = 'any'
by_installer = list()
by_command = list()
definitions = list()
for pkg in pkgs:
definition = get_entry(spec, distrib, distrib_version, pkg, 'env')
if definition is not None:
if is_list(definition):
for iter_def in definition:
definitions.append(iter_def)
else:
definitions.append(definition)
entries = pkg_entries(spec=spec, distrib=distrib,
distrib_version=distrib_version, pkg=pkg)
for entry in entries:
if entry is not None:
if hasattr(entry, 'has_key'):
if 'command' in entry:
by_command.append(entry['command'])
elif hasattr(entry, 'sort'):
by_installer += entry
else:
by_installer.append(entry)
else:
by_installer.append(pkg)
begin(distrib=distrib, distrib_version=distrib_version,
output_mode=output_mode)
installer = get_entry(spec, distrib, distrib_version, wildcard,
'installer')
assert installer is not None
updater = get_entry(spec, distrib, distrib_version, wildcard, 'updater')
if updater:
installer = '{0} && {1}'.format(updater, installer)
if split:
for pkg in by_installer:
run(installer=installer,
pkg=pkg, output_mode=output_mode)
else:
run(installer=installer,
pkgs=by_installer, output_mode=output_mode)
for command in by_command:
run(command=command, output_mode=output_mode)
env(definitions, output_mode)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
198,
2,
6889,
3788,
2858,
422,
257,
331,
4029,
6831,
13,
198,
2,
198,
198,
11748,
651,
8738,
198,
11748,
331,
43695,
198,
11748,
25064,
198,
11748,
427,
2588,
628,
628,
628,
... | 2.015248 | 3,148 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AssumeRole',
]
@pulumi.output_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.344828 | 116 |
import random
from pygamescratch.pygs import *
| [
11748,
4738,
198,
198,
6738,
12972,
19966,
6098,
963,
13,
9078,
14542,
1330,
1635,
628
] | 3.266667 | 15 |
"""The class for managing more environment
The class requires the follow properties:
'id' (str): the suffix name of resource created
'ec2_params' (dict): the dictionary of the EC2 custom parameters
'lambda_params' (dict): the dictionary of the Lambda custom parameters
All properties are mandatory. See the unit tests for an example.
The class extendes the class named Basic.
# license MIT
# support https://github.com/bilardi/aws-simple-pipeline/issues
"""
from aws_cdk import (core, aws_ec2 as ec2,
aws_cloudwatch as cloudwatch)
from sample.basic import Basic
import json | [
37811,
464,
1398,
329,
11149,
517,
2858,
198,
198,
464,
1398,
4433,
262,
1061,
6608,
25,
198,
220,
220,
220,
705,
312,
6,
357,
2536,
2599,
262,
35488,
1438,
286,
8271,
2727,
198,
220,
220,
220,
705,
721,
17,
62,
37266,
6,
357,
116... | 3.274194 | 186 |
import os
import glob
import shutil
import re
import json
from traitlets import Bool
from .baseapp import TransferApp, transfer_aliases, transfer_flags
# Command-line aliases: start from the shared transfer aliases; this app
# adds none of its own (the update below is intentionally empty).
aliases = {}
aliases.update(transfer_aliases)
aliases.update({
})
# Command-line flags: shared transfer flags plus list-specific toggles.
# Each entry maps a flag name to (traitlets config overrides, help text).
flags = {}
flags.update(transfer_flags)
flags.update({
    'inbound': (
        {'ListApp' : {'inbound': True}},
        "List inbound files rather than outbound."
    ),
    'cached': (
        {'ListApp' : {'cached': True}},
        "List cached files rather than inbound/outbound."
    ),
    'remove': (
        {'ListApp' : {'remove': True}},
        "Remove an assignment from the exchange."
    ),
    'json': (
        {'ListApp' : {'as_json': True}},
        "Print out assignments as json."
    ),
})
| [
11748,
28686,
198,
11748,
15095,
198,
11748,
4423,
346,
198,
11748,
302,
198,
11748,
33918,
198,
198,
6738,
1291,
2578,
912,
1330,
347,
970,
198,
198,
6738,
764,
8692,
1324,
1330,
20558,
4677,
11,
4351,
62,
7344,
1386,
11,
4351,
62,
3... | 2.446667 | 300 |
import socket
import re
import yaml
from time import sleep
config = yaml.safe_load(open('config.yml', 'rb'))
HOST = config['HOST']
PORT = config['PORT']
NICK = config['NICK']
PASS = config['PASS']
class Bot(object):
""""""
| [
11748,
17802,
198,
11748,
302,
198,
11748,
331,
43695,
198,
198,
6738,
640,
1330,
3993,
628,
198,
11250,
796,
331,
43695,
13,
21230,
62,
2220,
7,
9654,
10786,
11250,
13,
88,
4029,
3256,
705,
26145,
6,
4008,
198,
39,
10892,
796,
4566,
... | 2.761905 | 84 |
from setuptools import setup, find_packages
from runfolder import __version__
import os
def read_file(file_name):
    """Return the text contents of *file_name* next to this setup.py.

    BUG FIX: ``read_file`` was called below but never defined or
    imported, so running this setup.py raised NameError.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, file_name), encoding='utf-8') as f:
        return f.read()


setup(
    name='runfolder',
    version=__version__,
    description="Microservice for managing runfolders",
    long_description=read_file('README.md'),
    keywords='bioinformatics',
    author='SNP&SEQ Technology Platform, Uppsala University',
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': ['runfolder-ws = runfolder.app:start']
    }
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
1057,
43551,
1330,
11593,
9641,
834,
198,
11748,
28686,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
5143,
43551,
3256,
198,
220,
220,
220,
2196,
28,
834,
96... | 2.888235 | 170 |
from django_jekyll.lib.configparse import config | [
6738,
42625,
14208,
62,
73,
988,
25727,
13,
8019,
13,
11250,
29572,
1330,
4566
] | 3.428571 | 14 |
from flask import redirect, url_for, request, session
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.contrib import sqla
from flask_login import current_user
from .auth import current_user_is_roadie
from .db import postgres
from .account.models import OAuth
from .members.models import User, EmailAddress
from .projects.models import (
Project,
ProjectCredential,
ProjectUpload,
ProjectMembership,
)
| [
6738,
42903,
1330,
18941,
11,
19016,
62,
1640,
11,
2581,
11,
6246,
198,
6738,
42903,
62,
28482,
1330,
32053,
11,
32053,
15732,
7680,
11,
15651,
198,
6738,
42903,
62,
28482,
13,
3642,
822,
1330,
19862,
5031,
198,
6738,
42903,
62,
38235,
... | 3.427481 | 131 |
# Python 2 demo: print-statement syntax, so this file cannot run on
# Python 3 as written.
try:
    # Deliberately trigger a ZeroDivisionError.
    6/0
except Exception as e:
    # Print the exception message, then re-raise it.
    print "%s" % e
    raise e

# Never reached: the re-raise above terminates the script first.
print "Despues"
| [
28311,
25,
198,
220,
220,
220,
718,
14,
15,
198,
16341,
35528,
355,
304,
25,
198,
220,
220,
220,
3601,
36521,
82,
1,
4064,
304,
198,
220,
220,
220,
5298,
304,
198,
198,
4798,
366,
5960,
79,
947,
1,
198
] | 2.1 | 40 |
# -*- coding: utf-8 -*-
import socket
import platform
import getpass
import psutil | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
17802,
198,
11748,
3859,
198,
11748,
651,
6603,
198,
11748,
26692,
22602
] | 2.964286 | 28 |
from aca.client import ACAClient
| [
6738,
936,
64,
13,
16366,
1330,
7125,
2246,
75,
1153,
628
] | 3.090909 | 11 |
# When run directly (not via a test runner), make the source-checkout
# roots importable so the hamcrest packages below resolve.
if __name__ == "__main__":
    import sys
    sys.path.insert(0, "..")
    sys.path.insert(0, "../..")
import unittest
from hamcrest.core.core.allof import *
from hamcrest.core.core.isequal import equal_to
from hamcrest_unit_test.matcher_test import MatcherTest
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
# NOTE(review): no TestCase subclass is visible in this excerpt, so
# unittest.main() depends on tests defined elsewhere in the file — verify.
if __name__ == "__main__":
    unittest.main()
| [
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1330,
25064,
628,
220,
220,
220,
25064,
13,
6978,
13,
28463,
7,
15,
11,
366,
492,
4943,
198,
220,
220,
220,
25064,
13,
6978,
13,
28463,
7,
15,
11,
366,
... | 2.579268 | 164 |
#!/usr/bin/env python
# Copyright 2016 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
import subprocess
import sys
import time
import utils
HOST_OS = utils.GuessOS()
HOST_ARCH = utils.GuessArchitecture()
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
AVAILABLE_ARCHS = utils.ARCH_FAMILY.keys()
GN = os.path.join(DART_ROOT, 'buildtools', 'gn')
# Environment variables for default settings.
DART_USE_TOOLCHAIN = "DART_USE_TOOLCHAIN" # Use instread of --toolchain-prefix
DART_USE_SYSROOT = "DART_USE_SYSROOT" # Use instead of --target-sysroot
DART_USE_CRASHPAD = "DART_USE_CRASHPAD" # Use instead of --use-crashpad
# use instead of --platform-sdk
DART_MAKE_PLATFORM_SDK = "DART_MAKE_PLATFORM_SDK"
DART_GN_ARGS = "DART_GN_ARGS"
# The C compiler's target.
# The Dart compiler's target.
# Where string_map is formatted as X1=Y1,X2=Y2 etc.
# If key is X1, returns Y1.
def AddCommonGnOptionArgs(parser):
"""Adds arguments that will change the default GN arguments."""
parser.add_argument('--goma', help='Use goma', action='store_true')
parser.add_argument('--no-goma',
help='Disable goma',
dest='goma',
action='store_false')
parser.set_defaults(goma=True)
parser.add_argument('--verify-sdk-hash',
help='Enable SDK hash checks (default)',
dest='verify_sdk_hash',
action='store_true')
parser.add_argument('-nvh',
'--no-verify-sdk-hash',
help='Disable SDK hash checks',
dest='verify_sdk_hash',
action='store_false')
parser.set_defaults(verify_sdk_hash=True)
parser.add_argument('--clang', help='Use Clang', action='store_true')
parser.add_argument('--no-clang',
help='Disable Clang',
dest='clang',
action='store_false')
parser.set_defaults(clang=True)
parser.add_argument(
'--platform-sdk',
help='Directs the create_sdk target to create a smaller "Platform" SDK',
default=MakePlatformSDK(),
action='store_true')
parser.add_argument('--use-crashpad',
default=False,
dest='use_crashpad',
action='store_true')
parser.add_argument('--use-qemu',
default=False,
dest='use_qemu',
action='store_true')
parser.add_argument('--exclude-kernel-service',
help='Exclude the kernel service.',
default=False,
dest='exclude_kernel_service',
action='store_true')
parser.add_argument('--arm-float-abi',
type=str,
help='The ARM float ABI (soft, softfp, hard)',
metavar='[soft,softfp,hard]',
default='')
parser.add_argument('--code-coverage',
help='Enable code coverage for the standalone VM',
default=False,
dest="code_coverage",
action='store_true')
parser.add_argument('--debug-opt-level',
'-d',
help='The optimization level to use for debug builds',
type=str)
parser.add_argument('--gn-args',
help='Set extra GN args',
dest='gn_args',
action='append')
parser.add_argument(
'--toolchain-prefix',
'-t',
type=str,
help='Comma-separated list of arch=/path/to/toolchain-prefix mappings')
parser.add_argument('--ide',
help='Generate an IDE file.',
default=os_has_ide(HOST_OS),
action='store_true')
parser.add_argument(
'--target-sysroot',
'-s',
type=str,
help='Comma-separated list of arch=/path/to/sysroot mappings')
def AddCommonConfigurationArgs(parser):
"""Adds arguments that influence which configuration will be built."""
parser.add_argument("-a",
"--arch",
type=str,
help='Target architectures (comma-separated).',
metavar='[all,' + ','.join(AVAILABLE_ARCHS) + ']',
default=utils.GuessArchitecture())
parser.add_argument('--mode',
'-m',
type=str,
help='Build variants (comma-separated).',
metavar='[all,debug,release,product]',
default='debug')
parser.add_argument('--os',
type=str,
help='Target OSs (comma-separated).',
metavar='[all,host,android,fuchsia]',
default='host')
parser.add_argument('--sanitizer',
type=str,
help='Build variants (comma-separated).',
metavar='[all,none,asan,lsan,msan,tsan,ubsan]',
default='none')
def AddOtherArgs(parser):
"""Adds miscellaneous arguments to the parser."""
parser.add_argument("-v",
"--verbose",
help='Verbose output.',
default=False,
action="store_true")
if __name__ == '__main__':
sys.exit(Main(sys.argv))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1584,
383,
29032,
1628,
7035,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
... | 1.890464 | 3,104 |
from pathlib import Path
from typing import Dict, List, Optional, Set
| [
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
5345,
628
] | 3.944444 | 18 |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
This web app uses a random forest regressor to estimate out-of-pocket medical costs based on [Medicare Payment Data](https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Inpatient.html).
Consumers often have no clue how much a diagnosis or procedure could end up costing them beforehand. The complicated nature of the human body usually means that doctors don't even have a clue how much it might end up costing you. This uncertainty and lack of transparency is just one of the reasons our health care costs in the United States are so high (about 17% of our GDP compared with the average of about 10-12%).
This tool takes in a category of medical diagnosis and your health care provider's state, then returns an estimate of the out-of-pocket costs one would expect with Medicare coverage.
To create this, I sorted the top 100 most common diagnoses and procedures into the general categories:
* Cardiac/Circulatory
* Cranial/Neurological
* Digestive
* Orthopedic
* Respiratory
* Other
These categories were tuned to be general enough so that one could reasonably guess which category your diagnosis might fall into without a doctor's opinion, yet detailed enough to capture the wide variance among out-of-pocket costs.
Out-of-pocket costs can be found by subtracting medicare coverage from net price for each diagnosis. Using the category of diagnosis and the provider's state, I trained a random forest regressor to predict the out-of-pocket costs.
Due to the simplicity of the inputs, this model has a mean absolute error of about $680. So take these estimates with a grain of salt. This tool does well to get you in the ballpark of what you might expect to pay. Models trained with uncategorized diagnoses (assumes the consumer knows their exact diagnosis) only reduced mean absolute error to about $630, so we didn't lose much resolution by categorizing the diagnoses.
There's a remarkable lack of transparency in the health care system, which is further confounded by the inherent uncertainty in the nature of medicine. Hopefully this tool can provide a bit more information to Medicare beneficiaries about their expected costs.
"""
),
],
)
layout = dbc.Row([column1]) | [
11748,
14470,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
6738,
14470,
13,
45841,
3976,
... | 3.112691 | 914 |
import math
import logging
from datetime import date
nTypes = {0: "Nordic Event",
1: "Nordic Main Header",
2: "Nordic Macroseismic Header",
3: "Nordic Comment Header",
5: "Nordic Error Header",
6: "Nordic Waveform Header",
8: "Nordic Phase Data"}
| [
11748,
10688,
198,
11748,
18931,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
77,
31431,
796,
1391,
15,
25,
366,
45,
585,
291,
8558,
1600,
198,
197,
197,
16,
25,
366,
45,
585,
291,
8774,
48900,
1600,
198,
197,
197,
17,
25,
366,
45... | 2.543689 | 103 |
import glob
import os
import shutil
from conans import ConanFile
from conans.model import Generator
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
6738,
369,
504,
1330,
31634,
8979,
198,
6738,
369,
504,
13,
19849,
1330,
35986,
628,
628,
628,
628
] | 3.724138 | 29 |
from twitter.common import app
from kazoo.client import KazooClient
from twitter.common.zookeeper.group.kazoo_group import KazooGroup
app.main()
| [
6738,
17044,
13,
11321,
1330,
598,
198,
198,
6738,
479,
1031,
2238,
13,
16366,
1330,
16385,
2238,
11792,
198,
6738,
17044,
13,
11321,
13,
10872,
2088,
5723,
13,
8094,
13,
74,
1031,
2238,
62,
8094,
1330,
16385,
2238,
13247,
628,
198,
1... | 3.170213 | 47 |
from __future__ import print_function
import boto3
import json
import CloudCanvas
import service
from cgf_utils import aws_utils
from cgf_utils import custom_resource_utils
# import errors
#
# raise errors.ClientError(message) - results in HTTP 400 response with message
# raise errors.ForbiddenRequestError(message) - results in 403 response with message
# raise errors.NotFoundError(message) - results in HTTP 404 response with message
#
# Any other exception results in HTTP 500 with a generic internal service error message.
workflow = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('Workflow'))
workflow_domain_name = workflow + '-domain'
swf_client = boto3.client('swf', region_name=aws_utils.current_region)
@service.api
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
275,
2069,
18,
198,
11748,
33918,
198,
198,
11748,
10130,
6090,
11017,
198,
11748,
2139,
198,
198,
6738,
269,
70,
69,
62,
26791,
1330,
3253,
82,
62,
26791,
198,
6738,
269... | 3.534884 | 215 |
import prometheus.tftpd
'''
prime the device with:
nc64 -c 192.168.4.1 9195
connect ssid:password
to run:
set PYTHONPATH=p:\lanot\src\core
'''
files = [
'main.py',
'test02.py'
]
prometheus.tftpd.tftp_client('10.20.2.116', *files)
| [
11748,
1552,
36916,
13,
83,
701,
30094,
198,
198,
7061,
6,
198,
35505,
262,
3335,
351,
25,
198,
10782,
2414,
532,
66,
17817,
13,
14656,
13,
19,
13,
16,
860,
22186,
198,
8443,
264,
30255,
25,
28712,
198,
198,
1462,
1057,
25,
198,
2... | 2.132743 | 113 |
import os
import shutil
import tempfile
import unittest
from unittest import mock
from dataverk.connectors import NaisS3Connector, GoogleStorageConnector
from dataverk.connectors.storage import storage_connector_factory
from dataverk.connectors.storage.file_storage import FileStorageConnector
from dataverk.connectors.storage.storage_connector_factory import StorageType
from tests.dataverk.connectors.storage.test_resources.google_storage_common import GOOGLE_SERVICE_ACCOUNT, GCS_BUCKET_NAME
from tests.dataverk.connectors.storage.test_resources.mock_google_cloud_api import MockGoogleClient, MockGoogleCredentials
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
198,
6738,
555,
715,
395,
1330,
15290,
198,
6738,
1366,
332,
74,
13,
8443,
669,
1330,
11013,
271,
50,
18,
34525,
11,
3012,
31425,
34525,
198... | 3.522727 | 176 |
#Visuafy a Spotify visualiser by Aryan Bhajanka
from os import name
from flask import Flask, request, url_for, session, redirect, render_template
from flask.helpers import get_template_attribute
import spotipy
from spotipy.exceptions import SpotifyException
from spotipy.oauth2 import SpotifyOAuth, SpotifyOauthError
app = Flask(__name__, static_folder='static')
app.secret_key = "7490017841visuafy7490017841"
app.config['SESSION_COOKIE_NAME'] = 'spotify-user-read-currently-playing'
scope = "user-read-currently-playing"
@app.route('/', methods =["GET", "POST"])
@app.route('/colours')
@app.route('/moonlight')
@app.route('/leo_the_cat')
@app.route('/ship')
@app.route('/homework')
@app.route('/by_the_window')
@app.route('/on_the_road')
@app.route('/comfy_night')
@app.route('/custom_image', methods =["GET", "POST"])
@app.route('/custom_theme')
@app.route('/la')
@app.route('/nyc')
@app.route('/info')
@app.route('/select_theme')
@app.route('/help')
@app.route('/credits')
if __name__ == '__main__':
app.run() | [
2,
15854,
84,
1878,
88,
257,
26778,
5874,
5847,
416,
317,
29038,
16581,
1228,
15927,
201,
198,
201,
198,
6738,
28686,
1330,
1438,
201,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
19016,
62,
1640,
11,
6246,
11,
18941,
11,
8543,
62,
... | 2.427293 | 447 |
# Generated by Django 2.0.9 on 2018-10-31 09:05
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
24,
319,
2864,
12,
940,
12,
3132,
7769,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
'''
Settings
========
Methods described in this section relate to the the Settings API.
These methods can be accessed at ``Nessus.settings``.
.. rst-class:: hide-signature
.. autoclass:: SettingsAPI
:members:
'''
from typing import List, Dict, Optional
from typing_extensions import Literal
from restfly.utils import dict_clean
from tenable.base.endpoint import APIEndpoint
from .schema.settings import SettingsListSchema
| [
7061,
6,
198,
26232,
198,
2559,
198,
198,
46202,
3417,
287,
428,
2665,
15124,
284,
262,
262,
16163,
7824,
13,
198,
4711,
5050,
460,
307,
17535,
379,
7559,
45,
408,
385,
13,
33692,
15506,
13,
198,
198,
492,
374,
301,
12,
4871,
3712,
... | 3.575 | 120 |
#!/usr/bin/env python3
import boto3,json,os,urllib3
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
275,
2069,
18,
11,
17752,
11,
418,
11,
333,
297,
571,
18,
198
] | 2.12 | 25 |
__author__ = 'nightfade'
from example.echo_service_pb2 import IEchoService, IEchoClient_Stub
import logger
| [
834,
9800,
834,
796,
705,
3847,
69,
671,
6,
198,
198,
6738,
1672,
13,
30328,
62,
15271,
62,
40842,
17,
1330,
28157,
6679,
16177,
11,
28157,
6679,
11792,
62,
1273,
549,
198,
11748,
49706,
628
] | 3.114286 | 35 |
# Generated by Django 2.2.13 on 2021-07-22 14:49
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1485,
319,
33448,
12,
2998,
12,
1828,
1478,
25,
2920,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyCreateParameters(Model):
"""The key create parameters.
:param kty: The type of key to create. Valid key types, see
JsonWebKeyType. Supported JsonWebKey key types (kty) for Elliptic Curve,
RSA, HSM, Octet. Possible values include: 'EC', 'RSA', 'RSA-HSM', 'oct'
:type kty: str or :class:`JsonWebKeyType
<azure.keyvault.models.JsonWebKeyType>`
:param key_size: The key size in bytes. e.g. 1024 or 2048.
:type key_size: int
:param key_ops:
:type key_ops: list of str or :class:`JsonWebKeyOperation
<azure.keyvault.models.JsonWebKeyOperation>`
:param key_attributes:
:type key_attributes: :class:`KeyAttributes
<azure.keyvault.models.KeyAttributes>`
:param tags: Application-specific metadata in the form of key-value pairs
:type tags: dict
"""
_validation = {
'kty': {'required': True, 'min_length': 1},
}
_attribute_map = {
'kty': {'key': 'kty', 'type': 'str'},
'key_size': {'key': 'key_size', 'type': 'int'},
'key_ops': {'key': 'key_ops', 'type': '[JsonWebKeyOperation]'},
'key_attributes': {'key': 'attributes', 'type': 'KeyAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 2.852941 | 612 |
import os
import json
import time
import subprocess
import contextlib
import dateutil.parser
import datetime
import logging
from tempfile import TemporaryDirectory
# See https://github.com/containers/podman/issues/10173
CGROUP_WORKAROUND = False
RUNTIME = "crun"
def invokePodmanCommandPoll(command, output):
"""
Invoke podman command and continuously output stdout and stderr via a callback
"""
command = podmanBaseCommand(command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(p.stdout.readline, b''):
output(line.decode("utf-8"))
output(p.stdout.read().decode("utf-8"))
exitcode = p.wait()
if exitcode != 0:
raise PodmanError(f"{' '.join(command)}")
def imageExists(name):
"""
Return if given image exists
"""
p = subprocess.run(["podman", "image", "exists", name],
capture_output=True)
return p.returncode == 0
def buildImage(dockerfile, tag, args, cpuLimit=None, memLimit=None, noCache=False, onOutput=None):
"""
Build image for given dockerfile (string). Return the logs of the build.
"""
with TemporaryDirectory() as d:
dockerfilePath = os.path.join(d, "Dockerfile")
with open(dockerfilePath, "w") as f:
f.write(dockerfile)
command = ["build", "-t", tag]
for k, v in args.items():
command.extend(["--build-arg", f"{k}={v}"])
if memLimit is not None:
command.extend(["--memory", str(memLimit)])
if cpuLimit is not None:
command.extend(["--cpu-period", "100000"])
command.extend(["--cpu-quota", str(100000 * cpuLimit)])
if noCache:
command.append("--no-cache")
# Use docker format to support extensions like SHELL
command.extend(["--format", "docker"])
command.extend(["-f", dockerfilePath])
command.append(d)
if onOutput is not None:
return invokePodmanCommandPoll(command, onOutput)
else:
return invokePodmanCommand(command)[0]
def createContainer(image, command, mounts=[], cpuLimit=None, memLimit=None,
cgroup=None, name=None):
"""
Create container, return its identifier
"""
podmanCmd = ["container", "create", "--runtime", RUNTIME]
for m in mounts:
podmanCmd.extend(["--mount", f"type=bind,src={m['source']},target={m['target']}"])
if cpuLimit is not None:
podmanCmd.extend(["--cpus", str(cpuLimit)])
if memLimit is not None:
podmanCmd.extend(["--memory", str(memLimit)])
podmanCmd.extend(["--memory-swap", str(memLimit)])
if cgroup is not None:
podmanCmd.extend(["--cgroup-parent", cgroup.path])
if name is not None:
podmanCmd.extend(["--name", name])
podmanCmd.append(image)
podmanCmd.extend(command)
if CGROUP_WORKAROUND:
r, w = os.pipe()
pid = os.fork()
if pid > 0:
os.close(w)
with os.fdopen(r) as r:
os.waitpid(pid, 0)
s = r.read()
return s.strip()
else:
os.close(r)
cgroup.addProcess(pid)
with os.fdopen(w, 'w') as w:
res = invokePodmanCommand(podmanCmd)[0]
w.write(res)
w.close()
os._exit(0)
else:
return invokePodmanCommand(podmanCmd)[0].strip()
def containerRunTime(inspection):
"""
Return container runtime in microseconds
"""
started = dateutil.parser.parse(inspection["State"]["StartedAt"])
finished = dateutil.parser.parse(inspection["State"]["FinishedAt"])
if datetime.datetime.timestamp(finished) < 0:
finished = datetime.datetime.now(datetime.timezone.utc)
delta = finished - started
return delta.seconds * 1000000 + delta.microseconds
def runAndWatch(container, cgroup, watchCgroup, notify=None, wallClockLimit=None,
cpuClockLimit=None, pollInterval=1, notifyInterval=10):
"""
Run a container and watch it for time limits. Returns a dictionary with
container statistics.
"""
inspection = inspectContainer(container)
command = ["container", "start", "--runtime", RUNTIME, container]
if CGROUP_WORKAROUND:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
cgroup.addProcess(pid)
invokePodmanCommand(command)
os._exit(0)
else:
invokePodmanCommand(command)
timeout = False
ticks = 0
maxMemoryUsage = 0
while True:
time.sleep(pollInterval)
ticks += 1
if ticks % notifyInterval == 0 and notify is not None:
notify()
inspection = inspectContainer(container)
if containerStatus(inspection) != "running":
break
wTime = containerRunTime(inspection)
maxMemoryUsage = max(maxMemoryUsage, watchCgroup.currentMemoryUsage())
cTime = watchCgroup.cpuStats()["usage_usec"]
if wTime >= wallClockLimit * 1000000 or cTime >= cpuClockLimit * 1000000:
stopContainer(container, timeout=1)
timeout = True
inspection = inspectContainer(container)
stats = {
"cpuStat": watchCgroup.cpuStats(),
"memStat": watchCgroup.memoryStats(),
"maxMemory": maxMemoryUsage,
"wallTime": containerRunTime(inspection),
"exitCode": containerExitCode(inspection),
"outOfMemory": containerOomKilled(inspection),
"timeout": timeout,
"output": containerLogs(container)
}
return stats
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
850,
14681,
198,
11748,
4732,
8019,
198,
11748,
3128,
22602,
13,
48610,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
6738,
20218,
7753,
1330,
46042,
43055,
198,
198,
2,
4091... | 2.3372 | 2,414 |
import sys, time, os
| [
11748,
25064,
11,
640,
11,
28686,
198
] | 3 | 7 |
########## Script 2: Different Costs ###################
import sys
from RK_IO_model import RK_IO_methods
from Generalized_RK_Framework import generalized_RK_framework
import pdb #for debugging
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
from pyomo.opt import SolverStatus, TerminationCondition
import pyomo.mpec as pyompec #for the complementarity
import math
from scipy.io import savemat, loadmat
import pandas
import time
import matplotlib.pyplot as plt
import pickle
############### Step 1: Importing the class object ######################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
file_to_be_read = open("class_object_1","rb")
generalized_framework_object = pickle.load(file_to_be_read)
file_to_be_read.close()
############# Step 2: Inverse Optimization Step #####################
generalized_framework_object.running_IO_code_to_obtain_costs_different_costs()
########### Step 3: Saving the Object Again ###################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
name_of_file = "class_object_2"
test = open(name_of_file,'wb')
pickle.dump(generalized_framework_object,test)
test.close()
| [
7804,
2235,
12327,
362,
25,
20615,
36845,
1303,
14468,
2235,
198,
198,
11748,
25064,
198,
198,
6738,
371,
42,
62,
9399,
62,
19849,
1330,
371,
42,
62,
9399,
62,
24396,
82,
198,
6738,
3611,
1143,
62,
49,
42,
62,
21055,
6433,
1330,
382... | 3.078481 | 395 |
import pygame
from pygame.font import Font
from sprites import Sprite
from utils.types import Color, WindowSize, SpritePosition
| [
11748,
12972,
6057,
201,
198,
6738,
12972,
6057,
13,
10331,
1330,
24060,
201,
198,
6738,
42866,
1330,
33132,
201,
198,
6738,
3384,
4487,
13,
19199,
1330,
5315,
11,
26580,
10699,
11,
33132,
26545,
201,
198
] | 3.771429 | 35 |
from scene_manager import SceneManager
| [
6738,
3715,
62,
37153,
1330,
28315,
13511,
198
] | 4.875 | 8 |
import json
import os
import socket
import sys
if len(sys.argv) != 2:
print("Usage: python bn_rpyc.py <script>")
exit(1)
script = sys.argv[1]
if os.path.exists(script):
script = os.path.abspath(script)
else:
print("Can't find: %s" % script)
exit(1)
py3 = sys.version_info[0] >= 3
if not py3:
input = raw_input
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(os.path.expanduser('~/.bn_rpc.sock'))
if py3:
sin = s.makefile('r', buffering=1, encoding='utf8')
else:
sin = s.makefile('r', bufsize=1)
done = False
while True:
m = recv()
cmd = m['cmd']
if cmd == 'prompt':
if done:
s.shutdown(socket.SHUT_RDWR)
break
prompt = m['prompt']
try:
line = "exec(open(\"%s\").read())" % script
send('input', text=line + '\n')
except KeyboardInterrupt:
send('reset')
except EOFError:
s.shutdown(socket.SHUT_RDWR)
break
done = True
elif cmd == 'print':
print(m['text'].rstrip('\n'))
elif cmd == 'exit':
break
print
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
25064,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
362,
25,
198,
220,
220,
220,
3601,
7203,
28350,
25,
21015,
275,
77,
62,
81,
9078,
66,
13,
9078,
1279,
1204... | 2.014363 | 557 |
# ASGI server using starlette & uvicorn
from starlette.applications import Starlette
from starlette.responses import FileResponse
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.staticfiles import StaticFiles
from os import path, getenv
from uvicorn import run as serve_app
CUR_DIR = path.realpath(path.dirname(__file__))
app = Starlette(debug=True, middleware=[Middleware(GZipMiddleware)])
app.mount("/static", StaticFiles(directory="dist/static", html=True), name="static")
@app.route("/favicon.ico", methods=["GET"])
@app.route("/", methods=["GET"])
if __name__ == "__main__":
serve_app(app, host="127.0.0.1", port=getenv("PORT", 8080))
| [
2,
7054,
18878,
4382,
1262,
3491,
21348,
1222,
334,
25531,
1211,
198,
6738,
3491,
21348,
13,
1324,
677,
602,
1330,
2907,
21348,
198,
6738,
3491,
21348,
13,
16733,
274,
1330,
9220,
31077,
198,
6738,
3491,
21348,
13,
27171,
1574,
1330,
60... | 3.07265 | 234 |
#!/usr/bin/env python
""" Contains standard io reading & writing code
See the file "LICENSE" for the full license governing this code.
Copyright 2017 Ken Farmer
"""
import os
import sys
import csv
import io
import random
import errno
from os.path import isfile
from pprint import pprint
from typing import Union, Dict, List, Tuple, Any, Optional
from pprint import pprint as pp
import datagristle.csvhelper as csvhelper
import datagristle.file_type as file_type
class OutputHandler(object):
""" Handles all aspects of writing to output files: opening file,
writing records, managing random writes, keeping counts,
closing the file, etc.
"""
def write_rec(self,
record: List[str]) -> None:
""" Write a record to output.
If silent arg was provided, then write is suppressed.
If randomout arg was provided, then randomly determine
whether or not to write record.
"""
if self.dry_run:
return
if self.random_out != 1.0:
if random.random() > self.random_out:
return
try:
self.writer.writerow(record)
except csv.Error:
print('Invalid record: %s' % record)
raise
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
49850,
3210,
33245,
3555,
1222,
3597,
2438,
628,
220,
220,
220,
4091,
262,
2393,
366,
43,
2149,
24290,
1,
329,
262,
1336,
5964,
15030,
428,
2438,
13,
198,
220,
220,
220,
15069,
... | 2.547431 | 506 |
from graphene import relay, AbstractType, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Calendar, Day
class CalendarNode(DjangoObjectType):
"""
how does this work?
"""
class CalendarQuery(AbstractType):
"""
how does this work?
"""
calendar = relay.Node.Field(CalendarNode)
calendars = DjangoFilterConnectionField(CalendarNode)
class DayNode(DjangoObjectType):
"""
how does this work?
"""
image_large_url = String()
image_small_url = String()
def resolve_image_large_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_large_url()
)
def resolve_image_small_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_small_url()
)
class DayQuery(AbstractType):
"""
how does this work?
"""
day = relay.Node.Field(DayNode)
days = DjangoFilterConnectionField(DayNode)
| [
6738,
42463,
1330,
24248,
11,
27741,
6030,
11,
10903,
198,
6738,
42463,
62,
28241,
14208,
1330,
37770,
10267,
6030,
198,
6738,
42463,
62,
28241,
14208,
13,
24455,
1330,
37770,
22417,
32048,
15878,
198,
198,
6738,
764,
27530,
1330,
26506,
... | 2.627753 | 454 |
import random
import time
import re
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from unidecode import unidecode
from nordvpn_randomizer import logIn, chooseRandom, getCountries
# Used for some scientists in the list that have special characters in their names
translator = {'á': ('%C3%A1', '=aacute='), #, '\xc3\xa1'),
'é': ('%C3%A9', '=eacute='), #, '\xc3\xa9'),
'í': ('%C3%AD', '=iacute='), #, '\xc3\xad'),
'ó': ('%C3%B3', '=oacute='), #, '\xc3\xb3'),
'ú': ('%C3%BA', '=uacute='), #, '\xc3\xba'),
'ý': ('%C3%BD', '=yacute='), #, '\xc3\xbd'),
'è': ('%C3%A8', '=egrave='), #, '\xc3\xa8'),
'ò': ('%C3%B2', '=ograve='), #, '\xc3\xb2'),
'ê': ('%C3%AA', '=ecirc=' ), #, '\xc3\xaa'),
'ô': ('%C3%B4', '=ocirc=' ), #, '\xc3\xb4'),
'ä': ('%C3%A4', '=auml=' ), #, '\xc3\xa4'),
'ë': ('%C3%AB', '=euml=' ), #, '\xc3\xab'),
'ï': ('%C3%AF', '=iuml=' ), #, '\xc3\xaf'),
'ö': ('%C3%B6', '=ouml=' ), #, '\xc3\xb6'),
'ü': ('%C3%BC', '=uuml=' ), #, '\xc3\xbc'),
'ã': ('%C3%A3', '=atilde='), #, '\xc3\xa3'),
'õ': ('%C3%B5', '=otilde='), #, '\xc3\xb5'),
'ñ': ('%C3%B1', '=ntilde=')} #, '\xc3\xb1')}
# List of different "user agents" (mimics browser usage)
user_agents = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0 Firefox 68.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:70.0) Gecko/20100101 Firefox/70.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36']
# Initialize some useful variables
user_agent = user_agents[0] # Initialize user agent (changed if human/robot test)
delays = [7, 4, 6, 2, 10, 19] # Random delays between url requests, to avoid overloading google servers (and avoid human/robot test)
countries = getCountries()[1:] # List of countries for different IPs (changed if human/robot test), using NordVPN
countries[0] = countries[0][1:] # The first country (Albania) starts with a weird character
# Get number of citations per each year, for any scientist featured on google scholar
# Try to find scientific has a dedicated scholar publication page
# Make sure names with special characters can be found
| [
11748,
4738,
198,
11748,
640,
198,
11748,
302,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19390,
11,
19016,
9654,
198,
6738,
2956,
297,
571,
13,
18224,
1330,
14626,
12331,
198,
6738,
555,
485,
8189,
1330,
555,
485,
8189,
198,
6738,
2... | 1.976768 | 1,980 |
from functools import lru_cache
from typing import Tuple
import pygame
| [
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
12972,
6057,
628
] | 3.47619 | 21 |
import cv2
import os # handling directories
alg="haarcascade_frontalface_default.xml"# importing algorithm
har=cv2.CascadeClassifier(alg)# reading & storing the algorithm in a variable
cam=cv2.VideoCapture(0)
dataset="dataset"
name="sociallion"
path=os.path.join(dataset,name)
if not os.path.isdir(path):
os.makedirs(path)#creates a new directory for the sequence of folder
# resizing image using cv2
(width,height)=(150,150)
count=0
n=int(input("enter number of pictures to be taken"))
while (count<=n):
_,img=cam.read()
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces=har.detectMultiScale(gray,1.3,4)#detecting the face and scakling the image
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
only_face=gray[y:y+h,x:x+w]#only used to process and store the face part from image
res=cv2.resize(only_face,(width,height))
cv2.imwrite("%s/%s.jpg" %(path,count),res)# having thwo %s , 1%s-path,2.%s-represents the number(count)
count+=1
print(count)
cv2.imshow("face detection",img)
key=cv2.waitKey(10)
if key == 27:# press escape button to exit
break
cam.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
28686,
1303,
9041,
29196,
198,
14016,
2625,
3099,
5605,
28966,
62,
8534,
1604,
558,
62,
12286,
13,
19875,
1,
2,
33332,
11862,
198,
9869,
28,
33967,
17,
13,
34,
28966,
9487,
7483,
7,
14016,
8,
2,
3555,... | 2.343137 | 510 |
from .libindy import do_call, create_cb
from typing import Optional
from ctypes import *
import logging
async def create_wallet(pool_name: str,
name: str,
xtype: Optional[str],
config: Optional[str],
credentials: str) -> None:
"""
Creates a new secure wallet with the given unique name.
:param pool_name: Name of the pool that corresponds to this wallet.
:param name: Name of the wallet.
:param xtype: (optional) Type of the wallet. Defaults to 'default'.
Custom types can be registered with indy_register_wallet_type call.
:param config: (optional) Wallet configuration json. List of supported keys are defined by wallet type.
if NULL, then default config will be used.
:param credentials: Wallet credentials json: {
"key": <wallet_key>
}
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("create_wallet: >>> pool_name: %r, name: %r, xtype: %r, config: %r, credentials: %r",
pool_name,
name,
xtype,
config,
credentials)
if not hasattr(create_wallet, "cb"):
logger.debug("create_wallet: Creating callback")
create_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_pool_name = c_char_p(pool_name.encode('utf-8'))
c_name = c_char_p(name.encode('utf-8'))
c_xtype = c_char_p(xtype.encode('utf-8')) if xtype is not None else None
c_config = c_char_p(config.encode('utf-8')) if config is not None else None
c_credentials = c_char_p(credentials.encode('utf-8'))
await do_call('indy_create_wallet',
c_pool_name,
c_name,
c_xtype,
c_config,
c_credentials,
create_wallet.cb)
logger.debug("create_wallet: <<<")
async def open_wallet(name: str,
runtime_config: Optional[str],
credentials: str) -> int:
"""
Opens the wallet with specific name.
Wallet with corresponded name must be previously created with indy_create_wallet method.
It is impossible to open wallet with the same name more than once.
:param name: Name of the wallet.
:param runtime_config: (optional) Runtime wallet configuration json.
if NULL, then default runtime_config will be used. Example:
{
"freshness_time": string (optional), Amount of minutes to consider wallet value as fresh. Defaults to 24*60.
... List of additional supported keys are defined by wallet type.
}
:param credentials: Wallet credentials json: {
"key": <wallet_key>
}
:return: Handle to opened wallet to use in methods that require wallet access.
"""
logger = logging.getLogger(__name__)
logger.debug("open_wallet: >>> name: %r, runtime_config: %r, credentials: %r",
name,
runtime_config,
credentials)
if not hasattr(open_wallet, "cb"):
logger.debug("open_wallet: Creating callback")
open_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))
c_name = c_char_p(name.encode('utf-8'))
c_runtime_config = c_char_p(runtime_config.encode('utf-8')) if runtime_config is not None else None
c_credentials = c_char_p(credentials.encode('utf-8'))
res = await do_call('indy_open_wallet',
c_name,
c_runtime_config,
c_credentials,
open_wallet.cb)
logger.debug("open_wallet: <<< res: %r", res)
return res
async def close_wallet(handle: int) -> None:
    """
    Closes opened wallet and frees allocated resources.

    :param handle: wallet handle returned by indy_open_wallet.
    :return: Error code
    """
    logger = logging.getLogger(__name__)
    logger.debug("close_wallet: >>> handle: %i", handle)

    # Create the native callback once and cache it on the function object so
    # ctypes keeps a live reference to it across calls.
    try:
        cb = close_wallet.cb
    except AttributeError:
        logger.debug("close_wallet: Creating callback")
        cb = close_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))

    await do_call('indy_close_wallet',
                  c_int32(handle),
                  cb)

    logger.debug("close_wallet: <<<")
async def delete_wallet(name: str,
                        credentials: str) -> None:
    """
    Deletes created wallet.

    :param name: Name of the wallet to delete.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return:
    """
    logger = logging.getLogger(__name__)
    logger.debug("delete_wallet: >>> name: %r, credentials: %r",
                 name,
                 credentials)

    # Create the native callback once and cache it on the function object so
    # ctypes keeps a live reference to it across calls.
    try:
        cb = delete_wallet.cb
    except AttributeError:
        logger.debug("delete_wallet: Creating callback")
        cb = delete_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))

    await do_call('indy_delete_wallet',
                  c_char_p(name.encode('utf-8')),
                  c_char_p(credentials.encode('utf-8')),
                  cb)

    logger.debug("delete_wallet: <<<")
| [
6738,
764,
8019,
521,
88,
1330,
466,
62,
13345,
11,
2251,
62,
21101,
198,
198,
6738,
19720,
1330,
32233,
198,
6738,
269,
19199,
1330,
1635,
198,
198,
11748,
18931,
628,
198,
292,
13361,
825,
2251,
62,
44623,
7,
7742,
62,
3672,
25,
9... | 2.246581 | 2,340 |
# @ Copyright Inria, Ecole Polytechnique
# Shared under the MIT license https://opensource.org/licenses/mit-license.php
# This file contains all the functions that are used in the comparison/aggregation detection
# The main part of the code is the function find_aggregators, that will be used elsewhere in the code
# The other functions are auxiliary that are being used in the main one
### IMPORT
# Python libraries import
import nltk
from nltk.parse import CoreNLPParser
from nltk.parse.corenlp import CoreNLPDependencyParser
from nltk.tag.stanford import StanfordNERTagger
import os
# Utils import
from parsing_analysis import get_nodes, get_subtrees
from utils import catch_words, cut_after, get_index
# Generic analysis functions import
from area_extraction import find_areas
from time_extraction import find_time, date_figures
### PARSERS
# Base directory used to locate the bundled Stanford NER files.
path = os.getcwd()
# NOTE(review): hard-coded Windows Java path (to be changed) -- the Stanford
# NER tagger shells out to this JVM; adjust per machine/OS.
java_path = "C:/Program Files (x86)/Java/jre1.8.0_251/bin/java.exe"
os.environ['JAVAHOME'] = java_path
# Jar and trained model files of the Stanford NER (7-class MUC model).
jar = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/stanford-ner.jar")
model = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/classifiers/english.muc.7class.distsim.crf.ser.gz")
# Loading the parsers.
# NOTE(review): the CoreNLP parsers require a CoreNLP server already running
# on localhost:9000 -- confirm it is started before importing this module.
parser = CoreNLPParser(url='http://localhost:9000')
dep_parser = CoreNLPDependencyParser(url='http://localhost:9000')
ner_tagger = StanfordNERTagger(model, jar, encoding='utf8')
pos_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='pos')

### FUNCTIONS
# The function cut_in_clause (defined elsewhere in the full file, not in this
# chunk) takes the whole sentence (as a list of tokens named tok)
# and the comparative words that have been detected so far (comp_words)
# and tries to cut the sentence into as many clauses as there are comparative
# words, so that each clause contains one and only one comparison.
# The cuts have to be made at specific words (specified in cut_words).
cut_words = ["and", "or", "but", "while"]

# The function get_threshold (defined elsewhere in the full file) takes as
# input a comparative word (cp_word) and looks whether the word is associated
# with a numerical value (a threshold). To do that, we look at the contextual
# words around cp_word to find a number. We also make sure that the number is
# not already tagged as a date (in date_figures). Finally, we check if the
# number is potentially linked with a unit multiplier.
# Mapping from unit-multiplier words/suffixes to their numeric factor.
unit_m = {"hundred" : 100, "hundreds" : 100, "thousand" : 1000, "thousands" : 1000, "million" : 1000000, "millions" : 1000000, "billion" : 1000000000, "billions" : 1000000000,
        "k" : 1000, 'm' : 1000000, "b" : 1000000000, "bn" : 1000000000, "bil" : 1000000000}

# The function find_aggregators (defined elsewhere in the full file) takes the
# parses of the sentence and tries to find every comparison and aggregation in
# it. It also takes as input the type of return the user wants (list of
# countries or list of years) and the words in the sentence giving that
# information.
| [
2,
220,
2488,
15069,
554,
7496,
11,
38719,
293,
12280,
23873,
2350,
198,
2,
220,
39403,
739,
262,
17168,
5964,
3740,
1378,
44813,
1668,
13,
2398,
14,
677,
4541,
14,
2781,
12,
43085,
13,
10121,
198,
198,
2,
770,
2393,
4909,
477,
262,... | 3.304046 | 865 |
from collections import namedtuple

# A single entry of a navigation tree, keyed by a named urlpattern.
Node = namedtuple('Node', ['url_name', 'label', 'parent', 'context'])
"""Represents a node or item in a navigation.

url_name -- (string) the name of a named-urlpattern
label    -- (string) the label displayed for the item
parent   -- (string) the url_name of its parent, or ''. Extra kwargs to be
            met on the parent may be defined through:
            'url_name|kw1:val1,kw2:val2'
context  -- (dict, optional) extra context for the item, usable in the
            templates (if needed) for customization purposes.
"""
| [
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
19667,
796,
3706,
83,
29291,
10786,
19667,
3256,
705,
6371,
62,
3672,
6167,
2560,
4732,
11537,
198,
37811,
1432,
6629,
257,
10139,
393,
2378,
287,
257,
16408,
198,
6371,
62,
3672,
220,
220,
... | 2.805687 | 211 |
# Third-party dependencies (numpy/pandas/scikit-image/scipy) grouped in one
# try-block so a missing package is reported at import time.
try:
    import numpy as np
    import pandas as pd
    from skimage import morphology
    from skimage.measure import regionprops
    from skimage.measure import label
    from scipy import ndimage
    from skimage.graph import route_through_array
    from scipy.ndimage import binary_closing, binary_hit_or_miss
    from scipy.spatial import distance
except ImportError:
    # NOTE(review): this hides WHICH import failed and lets the module load
    # with missing names -- consider re-raising or logging the exception.
    print ('Import error!')
| [
28311,
25,
201,
198,
220,
220,
220,
1330,
299,
32152,
355,
45941,
201,
198,
220,
220,
220,
1330,
19798,
292,
355,
279,
67,
201,
198,
220,
220,
220,
422,
1341,
9060,
1330,
46320,
201,
198,
220,
220,
220,
422,
1341,
9060,
13,
1326,
... | 2.311881 | 202 |
# has complex polygon as tile shape, and lots of moving objects

import turtle
import engine
import math

# Screen size in pixels.
WIDTH = 640
HEIGHT = 480

S = 25 # base unit size for ship
B = 50 # base unit size for tiles

# Number of B-sized tiles needed to span the screen, plus one extra
# column/row on each side so motion never exposes a bare edge.
GRIDCOLS = 1 + math.ceil(WIDTH / B) + 1
GRIDROWS = 1 + math.ceil(HEIGHT / B) + 1

# Ship motion state: speed in pixels per step, heading in degrees.
SPEED = 3
HEADINGSTEP = 5
heading = 180
deltax = None # set based on heading and SPEED
deltay = None # set based on heading and SPEED

# add last, after tiles, and don't make it a static object
# and it stays on top, even if it's not moving - it gets
# re-rendered each time step
#
# complication: this means that tile objects in tile grid
# need to be recycled so they stay early in the object list,
# or add_obj needs to be extended to allow insertion at the
# head of the object list

# tile is a compound shape and can have multiple colors

if __name__ == '__main__':
    # NOTE(review): input_cb, recalcdeltas, makeshipshape, maketileshape,
    # maketilegrid and Me are defined elsewhere in the full file (not
    # visible in this chunk).
    engine.init_screen(WIDTH, HEIGHT)
    engine.init_engine(delay=0) # no delay needed with so many objects!
    engine.set_keyboard_handler(input_cb)
    recalcdeltas()
    makeshipshape()
    maketileshape()
    maketilegrid()
    engine.add_obj(Me()) # needs to be after tile grid created
    engine.engine()
| [
2,
468,
3716,
7514,
14520,
355,
17763,
5485,
11,
290,
6041,
286,
3867,
5563,
198,
198,
11748,
28699,
198,
11748,
3113,
198,
11748,
10688,
198,
198,
54,
2389,
4221,
796,
33759,
198,
13909,
9947,
796,
23487,
198,
198,
50,
796,
1679,
197... | 2.929471 | 397 |
from setuptools import find_packages, setup

# Packaging metadata for the wafer-sensor prediction project.
setup(
    name='src',
    version='0.1.0',
    description='To predict the values of wafer sensor',
    author='Pavantelugura',
    license='MIT',
    # Auto-discover every package under the project root.
    packages=find_packages(),
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
10677,
3256,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
22784,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,... | 2.710843 | 83 |
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import PIL
import zernike as zk
try:
from cv2 import resize, INTER_AREA
except:
print('Problem importing opencv..')
def generate_SED_elems(SED, sim_psf_toolkit, n_bins=20):
    r"""Generate the SED elements needed for using the TF_poly_PSF.

    sim_psf_toolkit: An instance of the SimPSFToolkit class with the correct
    initialization values.
    """
    # Bin the SED into wavelengths and normalized weights, then compute the
    # feasible sampling N for each wavelength.
    feasible_wv, SED_norm = sim_psf_toolkit.calc_SED_wave_values(SED, n_bins)
    feasible_N = np.array(
        [sim_psf_toolkit.feasible_N(wavelength) for wavelength in feasible_wv]
    )
    return feasible_N, feasible_wv, SED_norm
def generate_packed_elems(SED, sim_psf_toolkit, n_bins=20):
    r"""Generate the packed values for using the TF_poly_PSF."""
    elems = generate_SED_elems(SED, sim_psf_toolkit, n_bins=n_bins)
    # Pack each element (N, wavelengths, norms -- in that order) as a
    # float64 tensor.
    return [tf.convert_to_tensor(elem, dtype=tf.float64) for elem in elems]
def calc_poly_position_mat(pos, x_lims, y_lims, d_max):
    r""" Calculate a matrix with position polynomials.

    Scale positions from the rectangle
    [x_lims[0], x_lims[1]] x [y_lims[0], y_lims[1]]
    to the square [-1,1] x [-1,1], then evaluate every monomial
    x^(d-p) * y^p for total degree d = 0..d_max.

    Parameters
    ----------
    pos: array-like (n_points, 2)
        Positions; column 0 is x, column 1 is y.
    x_lims, y_lims: sequence of 2 floats
        Lower/upper bounds used for the rescaling.
    d_max: int
        Maximum total polynomial degree.

    Returns
    -------
    tf.Tensor, float32, shape ((d_max+1)(d_max+2)/2, n_points)
        One row per monomial, ordered by total degree then power of y.
    """
    # Scale positions to [-1, 1] in each axis.
    scaled_pos_x = (pos[:, 0] - x_lims[0]) / (x_lims[1] - x_lims[0])
    scaled_pos_x = (scaled_pos_x - 0.5) * 2
    scaled_pos_y = (pos[:, 1] - y_lims[0]) / (y_lims[1] - y_lims[0])
    scaled_pos_y = (scaled_pos_y - 0.5) * 2

    poly_list = []
    # Note: the previous version computed an unused `row_idx` here; removed.
    for d in range(d_max + 1):
        for p in range(d + 1):
            poly_list.append(scaled_pos_x**(d - p) * scaled_pos_y**p)

    return tf.convert_to_tensor(poly_list, dtype=tf.float32)
def decimate_im(input_im, decim_f):
    r"""Decimate image by a factor of ``decim_f``.

    Based on the PIL library using the default interpolator
    (PIL.Image.BICUBIC).
    """
    pil_im = PIL.Image.fromarray(input_im)
    # Integer-divide both dimensions by the decimation factor.
    new_size = (pil_im.width // decim_f, pil_im.height // decim_f)
    return np.array(pil_im.resize(new_size))
def downsample_im(input_im, output_dim):
    r"""Downsample image to a square of side ``output_dim``.

    Based on opencv's resize with the `INTER_AREA` method: each output pixel
    averages the input pixels its footprint intersects, weighted by the
    intersected area fraction. This matches tensorflow's resize-area op, so
    this version should be consistent with the tensorflow one. See
    [link](https://medium.com/@wenrudong/what-is-opencvs-inter-area-actually-doing-282a626a09b3)
    for an explanation of INTER_AREA.

    Parameters
    ----------
    input_im: np.ndarray (dim_x, dim_y)
        input image
    output_dim: int
        Contains the dimension of the square output image.
    """
    target_shape = (output_dim, output_dim)
    return resize(input_im, target_shape, interpolation=INTER_AREA)
def zernike_generator(n_zernikes, wfe_dim):
    r"""
    Generate Zernike maps.

    Based on the zernike github repository.
    https://github.com/jacopoantonello/zernike

    Parameters
    ----------
    n_zernikes: int
        Number of Zernike modes desired.
    wfe_dim: int
        Dimension of the Zernike map [wfe_dim x wfe_dim].

    Returns
    -------
    zernikes: list of np.ndarray
        List containing the Zernike modes.
        The values outside the unit circle are filled with NaNs.
    """
    # Smallest radial order n (of the (n, m) convention) whose triangular
    # count (n+1)(n+2)/2 covers the requested number of coefficients.
    order = int(np.ceil((-3 + np.sqrt(1 + 8 * n_zernikes)) / 2))
    cart = zk.RZern(order)

    # Evaluate the basis on a [-1, 1] x [-1, 1] Cartesian mesh.
    axis = np.linspace(-1.0, 1.0, wfe_dim)
    xv, yv = np.meshgrid(axis, axis)
    cart.make_cart_grid(xv, yv)

    coeffs = np.zeros(cart.nk)
    zernikes = []
    # One map per mode: activate a single coefficient at a time.
    for mode in range(n_zernikes):
        coeffs *= 0.0
        coeffs[mode] = 1.0
        zernikes.append(cart.eval_grid(coeffs, matrix=True))

    return zernikes
def add_noise(image, desired_SNR):
    """ Add zero-mean Gaussian noise to an image to obtain a desired SNR. """
    # Choose sigma so that sum(image^2) / (n_pixels * sigma^2) == desired_SNR.
    n_pixels = image.shape[0] * image.shape[1]
    sigma_noise = np.sqrt(np.sum(image**2) / (desired_SNR * n_pixels))
    return image + np.random.standard_normal(image.shape) * sigma_noise
class NoiseEstimator(object):
    """ Noise estimator.

    Parameters
    ----------
    img_dim: tuple of int
        Image size
    win_rad: int
        window radius in pixels
    """

    @staticmethod
    def sigma_mad(x):
        r"""Robust standard-deviation estimate of a Gaussian sample.

        Uses the MAD (Median Absolute Deviation) scaled by 1.4826.
        """
        deviations = np.abs(x - np.median(x))
        return 1.4826 * np.median(deviations)

    def estimate_noise(self, image):
        r"""Estimate the noise level of the image."""
        # Restrict to the pixels selected by self.window (set in __init__,
        # not shown in this chunk) and take their robust std estimate.
        windowed_pixels = image[self.window]
        return self.sigma_mad(windowed_pixels)
class ZernikeInterpolation(object):
    """ Interpolate zernikes

    Helps to interpolate Zernike coefficients using only the closest K
    elements of a given dataset, via an RBF interpolation.

    Parameters
    ----------
    tf_pos: Tensor (n_sources, 2)
        Positions
    tf_zks: Tensor (n_sources, n_zernikes)
        Zernike coefficients for each position
    k: int
        Number of elements to use for the interpolation.
        Default is 50
    order: int
        Order of the RBF interpolation.
        Default is 2, corresponds to thin plate interp (r^2*log(r))
    """

    def interpolate_zk(self, single_pos):
        """ Interpolate the Zernike coefficients at one position. """
        # Negate the distances so that top_k picks the k *closest* samples.
        neg_dist = tf.math.reduce_euclidean_norm(self.tf_pos - single_pos, axis=1) * -1.
        nearest = tf.math.top_k(neg_dist, k=self.k)

        # Gather the k nearest training positions and their coefficients.
        knn_pos = tf.gather(
            self.tf_pos, nearest.indices, validate_indices=None, axis=0, batch_dims=0,
        )
        knn_zks = tf.gather(
            self.tf_zks, nearest.indices, validate_indices=None, axis=0, batch_dims=0,
        )

        # RBF (polyharmonic spline) interpolation on the local neighborhood.
        interp_zk = tfa.image.interpolate_spline(
            train_points=tf.expand_dims(knn_pos, axis=0),
            train_values=tf.expand_dims(knn_zks, axis=0),
            query_points=tf.expand_dims(single_pos[tf.newaxis, :], axis=0),
            order=self.order,
            regularization_weight=0.0
        )

        # Drop the batch dimension that interpolate_spline requires.
        return tf.squeeze(interp_zk, axis=0)

    def interpolate_zks(self, interp_positions):
        """ Interpolate at every requested position (vectorized). """
        stacked = tf.map_fn(
            self.interpolate_zk,
            interp_positions,
            parallel_iterations=10,
            fn_output_signature=tf.float32,
            swap_memory=True
        )

        # map_fn leaves a singleton axis per query point; drop it.
        return tf.squeeze(stacked, axis=1)
class IndependentZernikeInterpolation(object):
    """ Interpolate each Zernike polynomial independently

    The interpolation is done independently for each Zernike polynomial.

    Parameters
    ----------
    tf_pos: Tensor (n_sources, 2)
        Positions
    tf_zks: Tensor (n_sources, n_zernikes)
        Zernike coefficients for each position
    order: int
        Order of the RBF interpolation.
        Default is 2, corresponds to thin plate interp (r^2*log(r))
    """

    def interp_one_zk(self, zk_prior):
        """ RBF-interpolate a single Zernike mode over all target positions. """
        spline = tfa.image.interpolate_spline(
            train_points=tf.expand_dims(self.tf_pos, axis=0),
            train_values=tf.expand_dims(zk_prior[:, tf.newaxis], axis=0),
            query_points=tf.expand_dims(self.target_pos, axis=0),
            order=self.order,
            regularization_weight=0.0
        )

        # Drop the batch dimension that interpolate_spline requires.
        return tf.squeeze(spline, axis=0)

    def interpolate_zks(self, target_pos):
        """ Interpolate every Zernike mode, each independently of the others. """
        # Stash the targets so interp_one_zk can read them inside map_fn.
        self.target_pos = target_pos

        per_mode = tf.map_fn(
            self.interp_one_zk,
            tf.transpose(self.tf_zks, perm=[1, 0]),
            parallel_iterations=10,
            fn_output_signature=tf.float32,
            swap_memory=True
        )

        # Remove the null dimension and transpose back to batch-first layout.
        return tf.transpose(tf.squeeze(per_mode, axis=2), perm=[1, 0])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
39996,
355,
256,
13331,
198,
11748,
350,
4146,
198,
11748,
1976,
1142,
522,
355,
1976,
74,
198,
28311,
25,
198,
220,
220,
220,
... | 2.272684 | 4,232 |
'''
Physical constants
'''
from scipy.constants import c, e, epsilon_0, physical_constants
# Re-exported physical constants (SI units), derived from scipy.constants.
C = c
ELEM_CHARGE = e
VACUUM_IMPEDANCE = 1.0 / (c * epsilon_0)
ELECTRON_MASS_EV = physical_constants['electron mass energy equivalent in MeV'][0] * 1e6

if __name__ == "__main__":
    # Echo every constant for a quick sanity check.
    for label, value in (
        ("Speed of light (m/s):", C),
        ("Elementary charge (C):", ELEM_CHARGE),
        ("Speed of light x elementary charge (C*m/s):", C * ELEM_CHARGE),
        ("Vacuum impedance (ohm):", VACUUM_IMPEDANCE),
        ("Electron mass (eV):", ELECTRON_MASS_EV),
    ):
        print(label, value)
| [
7061,
6,
198,
220,
220,
220,
16331,
38491,
198,
7061,
6,
198,
6738,
629,
541,
88,
13,
9979,
1187,
1330,
269,
11,
304,
11,
304,
862,
33576,
62,
15,
11,
3518,
62,
9979,
1187,
198,
198,
34,
796,
269,
198,
36,
2538,
44,
62,
38019,
... | 1.990415 | 313 |
'''
REST endpoints exposed by this module:

# GET /companies/
    - list all companies
# POST /companies/
    - Create new company
# DELETE /companies/{company_id}
    - Delete a company by company_id
# GET /companies/{company_id}
    - Get list of ic in company
# GET /companies/{company_id}/ic/
    - Get list of ic in company
# POST /companies/{company_id}/ic/{ic_number}/
    - Add new ic to company
# DELETE /companies/{company_id}/ic/{ic_number}/
    - Delete ic in company_id
'''
from flask import Flask
from flask_restplus import Api, Resource, fields
from web_utils import *

app = Flask(__name__)
api = Api(app)

# Swagger/OpenAPI models describing request and response payloads.
company_model = api.model('List of Companies', {'company_id' : fields.Integer('Company ID')})
ic_list = api.model('IC List', {
    'ic': fields.String('IC number')
})
company_ic_model = api.model('List of ICs', {
    'company_id': fields.Integer(required=True, description='Company ID'),
    'ic_list': fields.Nested(ic_list, description='List of IC in the company')
})

# NOTE(review): everything from here to the __main__ guard appears truncated
# in this chunk -- decorators followed by stray string literals and bodiless
# `def get(self):` lines are not valid Python. Restore the full Resource
# class bodies (Companies, Company, IC, ListIC) from version history.
@api.route('/companies')
'''
    def post(self):
'''
@api.route('/companies/<int:company_id>')
'''
@api.route('/companies/<int:company_id>/ic')
class IC(Resource):
    def get(self):
@api.route('/companies/<int:company_id>/ic/<int:ic_number')
class ListIC(Resource):
    def get(self):
'''
if __name__ == '__main__':
    app.run(debug=True)
| [
7061,
6,
198,
2,
17151,
1220,
34390,
444,
14,
198,
220,
220,
220,
532,
1351,
477,
2706,
198,
198,
2,
24582,
1220,
34390,
444,
14,
198,
220,
220,
220,
532,
13610,
649,
1664,
198,
198,
2,
5550,
2538,
9328,
1220,
34390,
444,
14,
90,
... | 2.585317 | 504 |
import os
import logging
from collections import OrderedDict
from typing import List, Dict
from transformers import BertTokenizer
from .serializer import Serializer
from .vocab import Vocab
import sys
# Make the project root importable so the top-level `utils` module resolves
# when this file is executed from inside the package directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from utils import save_pkl, load_csv

logger = logging.getLogger(__name__)

# Explicit public API of this preprocessing module (the underscored helpers
# are deliberately exported for reuse by sibling modules).
__all__ = [
    "_handle_pos_limit",
    "_add_pos_seq",
    "_convert_tokens_into_index",
    "_serialize_sentence",
    "_lm_serialize",
    "_add_attribute_data",
    "_handle_attribute_data",
    "preprocess"
]
| [
11748,
28686,
198,
11748,
18931,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
198,
6738,
6121,
364,
1330,
22108,
30642,
7509,
198,
6738,
764,
46911,
7509,
1330,
23283,
7509,
198,
6738,
764,
1... | 2.699531 | 213 |
from typing import Callable, Dict, List
import pandas as pd
from pandarallel import pandarallel
def _apply_to_row(row: pd.Series, column: str, func: Callable) -> pd.Series:
    """ Return a copy of `row` with `func` applied to its `column` entry. """
    updated = row.copy()
    updated[column] = func(updated[column])
    return updated
def apply(df: pd.DataFrame, column: str, func: Callable) -> pd.DataFrame:
    """ Apply `func` to data in `column` of dataframe `df`, row by row. """
    # Note: intentionally shadows the builtin `apply`-style naming of pandas.
    def _transform(row: pd.Series) -> pd.Series:
        return _apply_to_row(row, column, func)

    return df.apply(_transform, axis=1)
def parallel_apply(df: pd.DataFrame, column: str, func: Callable) -> pd.DataFrame:
    """ Parallel apply `func` to data in `column` of dataframe `df`. """
    # Spin up 4 pandarallel workers with a progress bar on every call.
    pandarallel.initialize(nb_workers=4, progress_bar=True)

    def _transform(row: pd.Series) -> pd.Series:
        return _apply_to_row(row, column, func)

    return df.parallel_apply(_transform, axis=1)
def rename_columns(df: pd.DataFrame, columns: Dict[str, str]) -> pd.DataFrame:
    """ Return `df` with columns renamed according to the `columns` mapping. """
    return df.rename(columns=columns)
def drop_columns(df: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
    """ Return `df` without the given `columns`. """
    return df.drop(labels=columns, axis=1)
def merge_dfs(df_left: pd.DataFrame, df_right: pd.DataFrame, left_on: str, right_on: str, how='inner') -> pd.DataFrame:
    """ Merge two given dataframes on `left_on` = `right_on`.

    Columns duplicated between the two frames are removed (the left-hand
    copy wins).
    """
    merged = pd.merge(df_left, df_right, how=how, left_on=left_on, right_on=right_on, suffixes=('', '_extra'))
    # Right-hand columns that collided were suffixed with '_extra'; drop them.
    duplicated = merged.filter(regex='_extra$').columns.tolist()
    return merged.drop(duplicated, axis=1)
def read_df(path: str) -> pd.DataFrame:
    """ Load a dataframe from the given .csv file. """
    return pd.read_csv(path)
def write_df(df: pd.DataFrame, path: str):
    """ Save `df` to the given .csv file (index omitted). """
    df.to_csv(path, index=False)
def append_df(df: pd.DataFrame, path: str):
    """ Append the rows of `df` to the given .csv file (no header, no index). """
    df.to_csv(path, index=False, mode='a', header=False)
| [
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
7343,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
283,
29363,
1330,
19798,
283,
29363,
628,
198,
4299,
4808,
39014,
62,
1462,
62,
808,
7,
808,
25,
279,
67,
13,
27996,
... | 2.631242 | 781 |
import math
from datetime import time, timedelta, datetime, date
from typing import List
def time_to_seconds(time_: time) -> int:
    """Return total seconds from time object.

    Counts hours, minutes AND seconds. The previous implementation dropped
    the `.second` component, which broke the round trip with
    seconds_to_time (its inverse).
    """
    minutes = time_.hour * 60
    return (time_.minute + minutes) * 60 + time_.second
def seconds_to_time(seconds: int) -> time:
    """Return time object from total seconds.

    Uses integer arithmetic throughout; the previous float divisions
    (int(seconds / 60)) could lose precision for very large inputs.
    Negative input now surfaces as a ValueError from time() instead of
    silently producing a wrong value.
    """
    minutes, secs = divmod(seconds, 60)
    hours, mins = divmod(minutes, 60)
    return time(hours, mins, secs)
def calc_max_step_size(obj_list: List[time]) -> int:
    """Return largest possible step size in seconds to include every time from list."""
    # Deduplicate and sort, then take the GCD of consecutive gaps: the
    # coarsest step that still lands on every distinct time.
    ordered = sorted(set(obj_list))
    as_seconds = [time_to_seconds(t) for t in ordered]
    gaps = [later - earlier for earlier, later in zip(as_seconds, as_seconds[1:])]
    return math.gcd(*gaps)
| [
11748,
10688,
198,
198,
6738,
4818,
8079,
1330,
640,
11,
28805,
12514,
11,
4818,
8079,
11,
3128,
198,
6738,
19720,
1330,
7343,
628,
198,
4299,
640,
62,
1462,
62,
43012,
7,
2435,
62,
25,
640,
8,
4613,
493,
25,
198,
220,
220,
220,
3... | 2.752089 | 359 |
import cv2
import numpy as np

# Track moving blobs in a video via MOG2 background subtraction.
cap = cv2.VideoCapture("images/Circle.mp4")

# Option descriptions: http://layer0.authentise.com/segment-background-using-computer-vision.html
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=500, detectShadows=0)

while(1):
    ret, frame = cap.read()

    # Foreground mask for the current frame.
    fgmask = fgbg.apply(frame)

    # Label connected foreground regions; stats holds (x, y, w, h, area).
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)

    for index, centroid in enumerate(centroids):
        # Skip the component whose bounding box starts at the origin
        # (typically the background component), and any NaN centroid.
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue

        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])

        # Only annotate sufficiently large blobs (filters out noise).
        if area > 100:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))

    cv2.imshow('mask',fgmask)
    cv2.imshow('frame',frame)

    # Exit on ESC (key code 27); waitKey is masked to 8 bits.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows() | [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
11128,
796,
269,
85,
17,
13,
10798,
49630,
7203,
17566,
14,
31560,
293,
13,
3149,
19,
4943,
198,
198,
2,
23821,
246,
113,
168,
227,
246,
23821,
226,
97,
167,
1... | 2.157025 | 484 |
# `$ python3 simple_ast.py --help` for more information
# MIT License
#
# Copyright (c) 2020 John Scott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import json
import re
if __name__ == "__main__":
    # Script entry point; `main` is defined elsewhere in this file
    # (not visible in this chunk).
    main()
| [
198,
2,
4600,
3,
21015,
18,
2829,
62,
459,
13,
9078,
1377,
16794,
63,
329,
517,
1321,
198,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
1757,
4746,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
... | 3.704142 | 338 |
from pyinfra.api import FactBase
| [
6738,
12972,
10745,
430,
13,
15042,
1330,
19020,
14881,
628
] | 3.4 | 10 |
"""
File-based cache for text retrieved from URL-based resources.
"""
import functools
import typing

from typeguard import typechecked

from devvyn.cache.web_cache import WebCache
# Signature shared by the fetcher and its cached wrapper: str -> str.
StringFunction = typing.Callable[[str], str]


@typechecked
def cached(function: StringFunction) -> StringFunction:
    """
    Wrap the decorated function in a cache handler.

    Example:
    ```
    import requests

    @cached
    def get_content(url: str) -> str:
        return requests.get(url).text

    content_fresh = get_content('https://example.com/')  # save file to
    cache after fetching
    content_again = get_content('https://example.com/')  # load file from
    cache instead of fetching
    ```

    :param function: the URL fetch function to wrap
    :return: Wrapped function
    """
    if not callable(function):
        raise TypeError(
            f'`function` is type {type(function)}, but it must be callable.')

    cache = WebCache()

    # functools.wraps preserves the wrapped function's name/docstring, so
    # introspection and debugging still see the original fetcher.
    @functools.wraps(function)
    def wrapped(key: str) -> str:
        """
        Attempt to get the value stored in `key`, and if the key doesn't
        exist, store it with the value returned from `function` before
        returning it.

        :param key: URL specifying the document location, which also
            identifies the page in the cache
        :return: Page content
        """
        try:
            return cache.get(key)
        except KeyError:
            text = function(key)
            cache.set(key, text)
            return text

    return wrapped
| [
37811,
198,
8979,
12,
3106,
12940,
329,
2420,
29517,
422,
10289,
12,
3106,
4133,
13,
198,
37811,
198,
11748,
19720,
198,
198,
6738,
2099,
14864,
1330,
2099,
26752,
198,
198,
6738,
1614,
85,
2047,
13,
23870,
13,
12384,
62,
23870,
1330,
... | 2.701465 | 546 |
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket utilities.
"""
import array
import errno
# Import hash classes from a module available and recommended for each Python
# version and re-export those symbol. Use sha and md5 module in Python 2.4, and
# hashlib module in Python 2.6.
try:
import hashlib
md5_hash = hashlib.md5
sha1_hash = hashlib.sha1
except ImportError:
import md5
import sha
md5_hash = md5.md5
sha1_hash = sha.sha
import StringIO
import logging
import os
import re
import socket
import traceback
import zlib
try:
from mod_pywebsocket import fast_masking
except ImportError:
pass
def get_stack_trace():
    """Get the current stack trace as string.

    This is needed to support Python 2.3.

    TODO: Remove this when we only support Python 2.4 and above.
          Use traceback.format_exc instead.
    """
    buf = StringIO.StringIO()
    traceback.print_exc(file=buf)
    return buf.getvalue()
def prepend_message_to_exception(message, exc):
    """Prepend message to the exception."""
    # Rebuild args so that str(exc) shows the context message first.
    exc.args = (message + str(exc),)
def __translate_interp(interp, cygwin_path):
    """Translate interp program path for Win32 python to run cygwin program
    (e.g. perl). Note that it doesn't support path that contains space,
    which is typically true for Unix, where #!-script is written.
    For Win32 python, cygwin_path is a directory of cygwin binaries.

    Args:
        interp: interp command line
        cygwin_path: directory name of cygwin binary, or None

    Returns:
        translated interp command line.
    """
    if not cygwin_path:
        return interp
    m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
    if m:
        cmd = os.path.join(cygwin_path, m.group(1))
        # group(2) is None when the interp line has no arguments
        # (e.g. "/usr/bin/perl"); the previous version raised TypeError
        # on `cmd + None` in that case.
        return cmd + (m.group(2) or '')
    return interp
def get_script_interp(script_path, cygwin_path=None):
    """Gets #!-interpreter command line from the script.

    It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
    it could run "/usr/bin/perl -wT hello.pl".
    When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
    "/usr/bin/perl" to "<cygwin_path>\perl.exe".

    Args:
        script_path: pathname of the script
        cygwin_path: directory name of cygwin binary, or None

    Returns:
        #!-interpreter command line, or None if it is not #!-script.
    """
    # Only the first line can carry a shebang.
    fp = open(script_path)
    first_line = fp.readline()
    fp.close()

    m = re.match('^#!(.*)', first_line)
    if not m:
        return None
    return __translate_interp(m.group(1), cygwin_path)
def wrap_popen3_for_win(cygwin_path):
    """Wrap popen3 to support #!-script on Windows.

    Args:
        cygwin_path: path for cygwin binary if command path is needed to be
            translated.  None if no translation required.
    """

    # NOTE(review): this local binding is unused here; presumably it is
    # captured by __wrap_popen3, which is defined elsewhere in the full file
    # (not visible in this chunk) -- confirm. os.popen3 is Python 2 only.
    __orig_popen3 = os.popen3
    os.popen3 = __wrap_popen3
class NoopMasker(object):
    """A masking object that has the same interface as RepeatedXorMasker but
    just returns the string passed in without making any change.
    """
    # NOTE(review): the mask() method is not visible in this chunk.
class RepeatedXorMasker(object):
    """A masking object that applies XOR on the string given to mask method
    with the masking bytes given to the constructor repeatedly. This object
    remembers the position in the masking bytes the last mask method call
    ended and resumes from that point on the next mask method call.
    """

    # Pick the SWIG-accelerated masking implementation when the optional
    # fast_masking extension imported successfully; otherwise fall back to
    # the pure-Python array-based version.
    # NOTE(review): _mask_using_swig / _mask_using_array are not visible in
    # this chunk -- confirm they are defined above this selector.
    if 'fast_masking' in globals():
        mask = _mask_using_swig
    else:
        mask = _mask_using_array
# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as
# deflate library. DICTID won't be added as far as we don't set dictionary.
# LZ77 window of 32K will be used for both compression and decompression.
# For decompression, we can just use 32K to cover any windows size. For
# compression, we use 32K so receivers must use 32K.
#
# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
# to decode.
#
# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
# Python. See also RFC1950 (ZLIB 3.3).
# Compresses/decompresses given octets using the method introduced in RFC1979.
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
class _RFC1979Inflater(object):
"""A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
# Size of the buffer passed to recv to receive compressed data.
_RECV_SIZE = 4096
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
# TODO(tyoshino): Allow call with size=0. It should block until any
# decompressed data is available.
if size <= 0:
raise Exception('Non-positive size passed')
while True:
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
return ''
self._inflater.append(read_data)
# vi:sts=4 sw=4 et
| [
2,
15069,
2813,
11,
3012,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
1708,
3403,
389,
198,
... | 2.876117 | 2,462 |
#!/usr/bin/env python
sol = Solution()
print(sol.generateParenthesis(2))
print(sol.generateParenthesis(3))
ret = sol.generateParenthesis(4)
set1 = set(ret)
set2 = set(["(((())))","((()()))","((())())","((()))()","(()(()))","(()()())","(()())()","(())(())","(())()()","()((()))","()(()())","()(())()","()()(())","()()()()"])
print(set2-set1)
print(len(set(ret)))
print(sorted(ret))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
34453,
796,
28186,
3419,
198,
4798,
7,
34453,
13,
8612,
378,
24546,
8497,
7,
17,
4008,
198,
4798,
7,
34453,
13,
8612,
378,
24546,
8497,
7,
18,
4008,
198,
1186,
796,
1540,
13,
... | 2.343558 | 163 |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements command to describe an ops agents policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.compute.instances.ops_agents import exceptions as ops_agents_exceptions
from googlecloudsdk.api_lib.compute.instances.ops_agents.converters import guest_policy_to_ops_agents_policy_converter as to_ops_agents
from googlecloudsdk.api_lib.compute.instances.ops_agents.validators import guest_policy_validator
from googlecloudsdk.api_lib.compute.os_config import utils as osconfig_api_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.compute.instances.ops_agents.policies import parser_utils
from googlecloudsdk.command_lib.compute.os_config import utils as osconfig_command_utils
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
"""Describe a Google Cloud's operations suite agents (Ops Agents) policy.
*{command}* describes a policy that facilitates agent management across
Compute Engine instances based on user specified instance filters. This policy
installs, specifies versioning, enables autoupgrade, and removes Ops Agents.
The command returns the content of one policy. For instance:
agent_rules:
- enable_autoupgrade: true
package_state: installed
type: ops-agent
version: latest
assignment:
group_labels:
- app: myapp
env: prod
os_types:
- short_name: ubuntu
version: '18.04'
zones:
- us-central1-a
create_time: '2021-02-02T02:10:25.344Z'
description: A test policy to install agents
etag: <ETAG>
id: projects/<PROJECT_NUMBER>/guestPolicies/ops-agents-test-policy
update_time: '2021-02-02T02:10:25.344Z'
If no policies are found, it returns a ``NOT_FOUND'' error.
"""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To describe an Ops Agents policy named ``ops-agents-test-policy'' in
the current project, run:
$ {command} ops-agents-test-policy
""",
}
@staticmethod
def Args(parser):
"""See base class."""
parser_utils.AddSharedArgs(parser)
def Run(self, args):
"""See base class."""
release_track = self.ReleaseTrack()
project = properties.VALUES.core.project.GetOrFail()
guest_policy_uri_path = osconfig_command_utils.GetGuestPolicyUriPath(
'projects', project, args.POLICY_ID)
client = osconfig_api_utils.GetClientInstance(
release_track, api_version_override='v1beta')
service = client.projects_guestPolicies
messages = osconfig_api_utils.GetClientMessages(
release_track, api_version_override='v1beta')
get_request = messages.OsconfigProjectsGuestPoliciesGetRequest(
name=guest_policy_uri_path)
try:
get_response = service.Get(get_request)
except apitools_exceptions.HttpNotFoundError:
raise ops_agents_exceptions.PolicyNotFoundError(
policy_id=args.POLICY_ID)
if not guest_policy_validator.IsOpsAgentPolicy(get_response):
raise ops_agents_exceptions.PolicyNotFoundError(
policy_id=args.POLICY_ID)
try:
ops_agents_policy = to_ops_agents.ConvertGuestPolicyToOpsAgentPolicy(
get_response)
except calliope_exceptions.BadArgumentException:
raise ops_agents_exceptions.PolicyMalformedError(
policy_id=args.POLICY_ID)
return ops_agents_policy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
2,
15069,
13130,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198... | 2.859406 | 1,515 |
# -*- coding: utf-8 -*-
'''
Module for gathering and managing network information
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import hashlib
import datetime
import socket
import salt.utils.network
import salt.utils.validate.net
try:
import salt.utils.winapi
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# Import 3rd party libraries
try:
import wmi # pylint: disable=W0611
except ImportError:
HAS_DEPENDENCIES = False
# Define the module's virtual name
__virtualname__ = 'network'
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows() and HAS_DEPENDENCIES is True:
return __virtualname__
return False
def ping(host):
'''
Performs a ping to a host
CLI Example:
.. code-block:: bash
salt '*' network.ping archlinux.org
'''
cmd = ['ping', '-n', '4', salt.utils.network.sanitize_host(host)]
return __salt__['cmd.run'](cmd, python_shell=False)
def netstat():
'''
Return information on open ports and states
CLI Example:
.. code-block:: bash
salt '*' network.netstat
'''
ret = []
cmd = ['netstat', '-nao']
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
comps = line.split()
if line.startswith(' TCP'):
ret.append({
'local-address': comps[1],
'proto': comps[0],
'remote-address': comps[2],
'state': comps[3],
'program': comps[4]})
if line.startswith(' UDP'):
ret.append({
'local-address': comps[1],
'proto': comps[0],
'remote-address': comps[2],
'state': None,
'program': comps[3]})
return ret
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
CLI Example:
.. code-block:: bash
salt '*' network.traceroute archlinux.org
'''
ret = []
cmd = ['tracert', salt.utils.network.sanitize_host(host)]
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
if ' ' not in line:
continue
if line.startswith('Trac'):
continue
if line.startswith('over'):
continue
comps = line.split()
complength = len(comps)
# This method still needs to better catch rows of other lengths
# For example if some of the ms returns are '*'
if complength == 9:
result = {
'count': comps[0],
'hostname': comps[7],
'ip': comps[8],
'ms1': comps[1],
'ms2': comps[3],
'ms3': comps[5]}
ret.append(result)
elif complength == 8:
result = {
'count': comps[0],
'hostname': None,
'ip': comps[7],
'ms1': comps[1],
'ms2': comps[3],
'ms3': comps[5]}
ret.append(result)
else:
result = {
'count': comps[0],
'hostname': None,
'ip': None,
'ms1': None,
'ms2': None,
'ms3': None}
ret.append(result)
return ret
def nslookup(host):
'''
Query DNS for information about a domain or ip address
CLI Example:
.. code-block:: bash
salt '*' network.nslookup archlinux.org
'''
ret = []
addresses = []
cmd = ['nslookup', salt.utils.network.sanitize_host(host)]
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
if addresses:
# We're in the last block listing addresses
addresses.append(line.strip())
continue
if line.startswith('Non-authoritative'):
continue
if 'Addresses' in line:
comps = line.split(":", 1)
addresses.append(comps[1].strip())
continue
if ":" in line:
comps = line.split(":", 1)
ret.append({comps[0].strip(): comps[1].strip()})
if addresses:
ret.append({'Addresses': addresses})
return ret
def dig(host):
'''
Performs a DNS lookup with dig
Note: dig must be installed on the Windows minion
CLI Example:
.. code-block:: bash
salt '*' network.dig archlinux.org
'''
cmd = ['dig', salt.utils.network.sanitize_host(host)]
return __salt__['cmd.run'](cmd, python_shell=False)
def interfaces_names():
'''
Return a list of all the interfaces names
CLI Example:
.. code-block:: bash
salt '*' network.interfaces_names
'''
ret = []
with salt.utils.winapi.Com():
c = wmi.WMI()
for iface in c.Win32_NetworkAdapter(NetEnabled=True):
ret.append(iface.NetConnectionID)
return ret
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
CLI Example:
.. code-block:: bash
salt '*' network.interfaces
'''
return salt.utils.network.win_interfaces()
def hw_addr(iface):
'''
Return the hardware address (a.k.a. MAC address) for a given interface
CLI Example:
.. code-block:: bash
salt '*' network.hw_addr 'Wireless Connection #1'
'''
return salt.utils.network.hw_addr(iface)
# Alias hwaddr to preserve backward compat
hwaddr = hw_addr
def subnets():
'''
Returns a list of subnets to which the host belongs
CLI Example:
.. code-block:: bash
salt '*' network.subnets
'''
return salt.utils.network.subnets()
def in_subnet(cidr):
'''
Returns True if host is within specified subnet, otherwise False
CLI Example:
.. code-block:: bash
salt '*' network.in_subnet 10.0.0.0/16
'''
return salt.utils.network.in_subnet(cidr)
def ip_addrs(interface=None, include_loopback=False):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
CLI Example:
.. code-block:: bash
salt '*' network.ip_addrs
'''
return salt.utils.network.ip_addrs(interface=interface,
include_loopback=include_loopback)
ipaddrs = ip_addrs
def ip_addrs6(interface=None, include_loopback=False):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
CLI Example:
.. code-block:: bash
salt '*' network.ip_addrs6
'''
return salt.utils.network.ip_addrs6(interface=interface,
include_loopback=include_loopback)
ipaddrs6 = ip_addrs6
def connect(host, port=None, **kwargs):
'''
Test connectivity to a host using a particular
port from the minion.
.. versionadded:: Boron
CLI Example:
.. code-block:: bash
salt '*' network.connect archlinux.org 80
salt '*' network.connect archlinux.org 80 timeout=3
salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4
salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
'''
ret = {'result': None,
'comment': ''}
if not host:
ret['result'] = False
ret['comment'] = 'Required argument, host, is missing.'
return ret
if not port:
ret['result'] = False
ret['comment'] = 'Required argument, port, is missing.'
return ret
proto = kwargs.get('proto', 'tcp')
timeout = kwargs.get('timeout', 5)
family = kwargs.get('family', None)
if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):
address = host
else:
address = '{0}'.format(salt.utils.network.sanitize_host(host))
try:
if proto == 'udp':
__proto = socket.SOL_UDP
else:
__proto = socket.SOL_TCP
proto = 'tcp'
if family:
if family == 'ipv4':
__family = socket.AF_INET
elif family == 'ipv6':
__family = socket.AF_INET6
else:
__family = 0
else:
__family = 0
(family,
socktype,
_proto,
garbage,
_address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]
skt = socket.socket(family, socktype, _proto)
skt.settimeout(timeout)
if proto == 'udp':
# Generate a random string of a
# decent size to test UDP connection
md5h = hashlib.md5()
md5h.update(datetime.datetime.now().strftime('%s'))
msg = md5h.hexdigest()
skt.sendto(msg, _address)
recv, svr = skt.recvfrom(255)
skt.close()
else:
skt.connect(_address)
skt.shutdown(2)
except Exception as exc:
ret['result'] = False
ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'\
.format(host, _address[0], proto, port)
return ret
ret['result'] = True
ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'\
.format(host, _address[0], proto, port)
return ret
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
26796,
329,
11228,
290,
11149,
3127,
1321,
198,
7061,
6,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
2,
17267,
8268,
9195,
82,
198,
11... | 2.164393 | 4,471 |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == '__main__':
main() | [
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
... | 2.785714 | 42 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 16:23:55 2020
@author: dmattox
"""
import os
import dill
import lec_gly as LecGly
from bSiteResiFeatures import plipFile
os.chdir(LecGly.homeDir)
##########################
outDir = './data/structures/bsites/batchLists/'
if not os.path.exists(outDir):
os.makedirs(outDir)
##########################
maxBatchLstLength = 50 # generates 28 lists from 1365 PDB IDs, 27 lists of 50 and 1 of 15
##########################
with open(plipFile, "rb") as pickleFH:
allPLIP = dill.load(pickleFH)
def chunks(lst, n): # Function borrowed from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""Yield successive n-sized chunks from a list"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
splitLst = chunks(list(allPLIP.keys()), maxBatchLstLength)
for i,lst in enumerate(splitLst):
with open(outDir + 'pdbList_' + str(i) + '.txt', 'w') as outFH:
for pdb in lst:
outFH.write(pdb + '\n')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
2447,
220,
352,
1467,
25,
1954,
25,
2816,
12131,
198,
198,
31,
9800,
25,
288,
76,
... | 2.395973 | 447 |
"""
Title: Density estimation using Real NVP
Authors: [Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/), [Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/), [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/)
Date created: 2020/08/10
Last modified: 2020/08/10
Description: Estimating the density distribution of the "double moon" dataset.
"""
"""
## Introduction
The aim of this work is to map a simple distribution - which is easy to sample
and whose density is simple to estimate - to a more complex one learned from the data.
This kind of generative model is also known as "normalizing flow".
In order to do this, the model is trained via the maximum
likelihood principle, using the "change of variable" formula.
We will use an affine coupling function. We create it such that its inverse, as well as
the determinant of the Jacobian, are easy to obtain (more details in the referenced paper).
**Requirements:**
* Tensorflow 2.3
* Tensorflow probability 0.11.0
**Reference:**
[Density estimation using Real NVP](https://arxiv.org/pdf/1605.08803.pdf)
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
"""
## Load the data
"""
data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.experimental.preprocessing.Normalization()
norm.adapt(data)
normalized_data = norm(data)
"""
## Affine coupling layer
"""
# Creating a custom layer with keras API.
output_dim = 256
reg = 0.01
"""
## Real NVP
"""
"""
## Model training
"""
model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
history = model.fit(
normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)
"""
## Performance evaluation
"""
plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", label="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])
| [
37811,
198,
19160,
25,
360,
6377,
31850,
1262,
6416,
399,
8859,
198,
30515,
669,
25,
685,
49846,
43232,
402,
1504,
27769,
14200,
16151,
5450,
1378,
2503,
13,
25614,
259,
13,
785,
14,
259,
14,
70,
1504,
27769,
12,
3876,
544,
12,
22249,... | 2.755319 | 1,128 |
# Copyright 2018 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Co-developed by Tier IV, Inc. and Apex.AI, Inc.
import os
import socket
| [
2,
15069,
2864,
262,
5231,
322,
533,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 3.72067 | 179 |
#!/usr/bin/env python
# encoding: utf-8
'''
#-------------------------------------------------------------------
# CONFIDENTIAL --- CUSTOM STUDIOS
#-------------------------------------------------------------------
#
# @Project Name : 下载《笔趣看》网小说
#
# @File Name : biqukan.py
#
# @Programmer : autofelix
#
# @Start Date : 2022/01/10 13:14
#
# @Last Update : 2022/01/10 13:14
#
#-------------------------------------------------------------------
'''
from urllib import request
from bs4 import BeautifulSoup
import collections, re, os, sys
class biqukan:
'''
This is a main Class, the file contains all documents.
One document contains paragraphs that have several sentences
It loads the original file and converts the original file to new content
Then the new content will be saved by this class
'''
def hello(self):
'''
This is a welcome speech
:return: self
'''
print('*' * 50)
print(' ' * 15 + '下载《笔趣看》网小说')
print(' ' * 5 + '作者: autofelix Date: 2022-01-10 13:14')
print(' ' * 5 + '主页: https://autofelix.blog.csdn.net')
print('*' * 50)
return self
def get_download_url(self, target_url):
'''
get download url
'''
charter = re.compile(u'[第弟](.+)章', re.IGNORECASE)
target_req = request.Request(url = target_url, headers = self.header)
target_response = request.urlopen(target_req)
target_html = target_response.read().decode('gbk','ignore')
list_main_soup = BeautifulSoup(target_html,'lxml')
chapters = list_main_soup.find_all('div',class_ = 'listmain')
download_soup = BeautifulSoup(str(chapters), 'lxml')
novel_name = str(download_soup.dl.dt).split("》")[0][5:]
flag_name = "《" + novel_name + "》" + "正文卷"
numbers = (len(download_soup.dl.contents) - 1) / 2 - 8
download_dict = collections.OrderedDict()
begin_flag = False
numbers = 1
for child in download_soup.dl.children:
if child != '\n':
if child.string == u"%s" % flag_name:
begin_flag = True
if begin_flag == True and child.a != None:
download_url = "https://www.biqukan.com" + child.a.get('href')
download_name = child.string
names = str(download_name).split('章')
name = charter.findall(names[0] + '章')
if name:
download_dict['第' + str(numbers) + '章 ' + names[1]] = download_url
numbers += 1
return novel_name + '.txt', numbers, download_dict
def downloader(self, url):
'''
download the text
'''
download_req = request.Request(url = url, headers = self.header)
download_response = request.urlopen(download_req)
download_html = download_response.read().decode('gbk','ignore')
soup_texts = BeautifulSoup(download_html, 'lxml')
texts = soup_texts.find_all(id = 'content', class_ = 'showtxt')
soup_text = BeautifulSoup(str(texts), 'lxml').div.text.replace('\xa0','')
return soup_text
def writer(self, name, path, text):
'''
write to file
'''
write_flag = True
with open(path, 'a', encoding='utf-8') as f:
f.write(name + '\n\n')
for each in text:
if each == 'h':
write_flag = False
if write_flag == True and each != ' ':
f.write(each)
if write_flag == True and each == '\r':
f.write('\n')
f.write('\n\n')
def run(self):
'''
program entry
'''
target_url = str(input("请输入小说目录下载地址:\n"))
# 实例化下载类
d = self.downloader(target_url)
name, numbers, url_dict = d.get_download_url(target_url)
if name in os.listdir():
os.remove(name)
index = 1
# 下载中
print("《%s》下载中:" % name[:-4])
for key, value in url_dict.items():
d.Writer(key, name, d.Downloader(value))
sys.stdout.write("已下载:%.3f%%" % float(index / numbers) + '\r')
sys.stdout.flush()
index += 1
print("《%s》下载完成!" % name[:-4])
if __name__ == '__main__':
biqukan().hello().run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
7061,
6,
198,
2,
10097,
6329,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
7102,
37,
2... | 2.242583 | 1,719 |
# Turtle Objects
from turtle import Screen, Turtle
# Game Objects
from defender import Defender
from invaders import Invaders
from scoreboard import ScoreBoard
# Utilities
from time import sleep
from PIL import Image, ImageTk
# Initialize Screen
main_screen = Screen()
main_screen.title("Space Invaders")
main_screen.setup(width=800, height=550, startx= 250, starty=10)
# Setup Background
background_canvas = main_screen.getcanvas()
## Resize Image
image = Image.open("./images/space-bg.gif")
background_image = image.resize((800,550), Image.ANTIALIAS)
background_image = ImageTk.PhotoImage(background_image)
## Put image into background
background_canvas.create_image(0,0, image=background_image)
# Register shapes -> you need to resize the image first using PIL
main_screen.register_shape('./images/defender.gif')
main_screen.register_shape('./images/invader.gif')
# Add event listener
main_screen.tracer(0)
main_screen.listen()
# Menu indicator
is_game = False
# Menu drawing
menu = Turtle()
menu.hideturtle()
menu.penup()
menu.color('white')
draw_menu()
main_screen.onkey(key='s', fun=space_invaders)
main_screen.onkey(key='q', fun=main_screen.bye)
main_screen.mainloop() | [
2,
33137,
35832,
198,
6738,
28699,
1330,
15216,
11,
33137,
198,
2,
3776,
35832,
198,
6738,
13191,
1330,
25533,
198,
6738,
39417,
1330,
41671,
198,
6738,
50198,
1330,
15178,
29828,
198,
2,
41086,
198,
6738,
640,
1330,
3993,
198,
6738,
35... | 3.191375 | 371 |
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from torchvision import datasets
from torch.utils.data import DataLoader
from PIL import Image
mtcnn = MTCNN(image_size=240, margin=0, min_face_size=20) # initializing mtcnn for face detection
resnet = InceptionResnetV1(pretrained='vggface2').eval() # initializing resnet for face img to embeding conversion
dataset=datasets.ImageFolder('train') # photos folder path
idx_to_class = {i:c for c,i in dataset.class_to_idx.items()} # accessing names of peoples from folder names
loader = DataLoader(dataset, collate_fn=collate_fn)
face_list = [] # list of cropped faces from photos folder
name_list = [] # list of names corrospoing to cropped photos
embedding_list = [] # list of embeding matrix after conversion from cropped faces to embedding matrix using resnet
for img, idx in loader:
face, prob = mtcnn(img, return_prob=True)
if face is not None and prob>0.90: # if face detected and porbability > 90%
emb = resnet(face.unsqueeze(0)) # passing cropped face into resnet model to get embedding matrix
embedding_list.append(emb.detach()) # resulten embedding matrix is stored in a list
name_list.append(idx_to_class[idx]) # names are stored in a list
data = [embedding_list, name_list]
torch.save(data, 'dataface2.pt') # saving data.pt file | [
6738,
1777,
268,
316,
62,
9078,
13165,
354,
1330,
337,
4825,
6144,
11,
554,
4516,
4965,
3262,
53,
16,
201,
198,
11748,
28034,
201,
198,
6738,
28034,
10178,
1330,
40522,
201,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
20... | 2.921444 | 471 |
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""Tests for rss2kml.py."""
__author__ = 'arb@google.com (Anthony Baxter)'
import xml.etree.ElementTree as ElementTree
# Allow relative imports within the app. # pylint: disable=W0403
import mox
import rss2kml
import test_utils
from google.appengine.api import memcache
from google.appengine.api import urlfetch
class Rss2KmlTest(test_utils.BaseTest):
"""Tests for rss2kml.py."""
if __name__ == '__main__':
test_utils.main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
2321,
3012,
3457,
13,
220,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
198,
2,
779... | 3.31746 | 315 |
# run these tests with python -m pytest from src/ dir
| [
2,
1057,
777,
5254,
351,
21015,
532,
76,
12972,
9288,
422,
12351,
14,
26672,
198
] | 3.6 | 15 |
import math
angulo = float(input('Digite um angulo: '))
sen = math.sin(math.radians(angulo))
cos = math.cos(math.radians(angulo))
tan = math.tan(math.radians(angulo))
print('O seno é {:.2f}\nO Cosseno é {:.2f}\ne a tangente é {:.2f}'.format(sen, cos, tan))
| [
11748,
10688,
198,
648,
43348,
796,
12178,
7,
15414,
10786,
19511,
578,
23781,
3550,
43348,
25,
705,
4008,
198,
6248,
796,
10688,
13,
31369,
7,
11018,
13,
6335,
1547,
7,
648,
43348,
4008,
198,
6966,
796,
10688,
13,
6966,
7,
11018,
13,... | 2.424528 | 106 |
#import debugtest
#import imp
#imp.reload(debugtest)
#Testing script for various writing setups
#import redirect_print
import unreal_engine as ue
import time
import sys
import upythread as ut
from threading import Thread
#imp.reload(redirect_print)
#the test function
#test simple fire and forget
#Test with callback
#progress callback example functions
#test basic progress bar | [
2,
11748,
14257,
9288,
198,
2,
11748,
848,
198,
2,
11011,
13,
260,
2220,
7,
24442,
9288,
8,
198,
198,
2,
44154,
4226,
329,
2972,
3597,
44266,
198,
198,
2,
11748,
18941,
62,
4798,
198,
11748,
22865,
62,
18392,
355,
334,
68,
198,
11... | 3.583333 | 108 |
"""
Test suite for 'find.py' (Fall 2015 version)
"""
from find15F import matches
| [
37811,
198,
14402,
18389,
329,
705,
19796,
13,
9078,
6,
357,
24750,
1853,
2196,
8,
198,
37811,
198,
198,
6738,
1064,
1314,
37,
1330,
7466,
628,
220,
220,
220,
220,
628,
220,
220,
220,
220,
628,
198
] | 2.594595 | 37 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clustering_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
# A simple test that can be verified by hand.
@test_util.run_all_in_graph_and_eager_modes
# A test with large inputs.
if __name__ == "__main__":
np.random.seed(0)
test.main()
| [
2,
15069,
1584,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
198,
2,
779,
428,
2393,
2845,
287,
11846,
... | 3.331183 | 465 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
max_length: Optional[int] = field(
default=512, metadata={"help": "The maximum input sequence length"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: Optional[str] = field(
default=None, metadata={"help": "The input directory for training data."}
)
big_file_path: Optional[str] = field(
default=None, metadata={"help": "Big file with all cells in it"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
plm_probability: float = field(
default=1 / 6,
metadata={
"help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
},
)
max_span_length: int = field(
default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
eval_frac: float = field(
default= 0.1,
metadata={
"help": "Fraction of dataset reserved for evaluation"
},
)
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
32233,
198,
198,
31,
19608,
330,
31172,
198,
4871,
9104,
28100,
2886,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
20559,
2886,
27113,
284,
543,
2... | 2.768908 | 952 |
"""
Planarity Calculators
=====================
#. :class:`.PlanarityCalculator`
Methods to calculate planarity measures of a molecule.
"""
import logging
import numpy as np
from ..calculators import Calculator
from ..results import PlanarityResults
logger = logging.getLogger(__name__)
class PlanarityCalculator(Calculator):
"""
Calculates measures of planarity of a molecule.
Measures based on plane deviation from Angew. paper [1]_ and a
ChemRxiv paper [2]_.
Plane deviation: sum of the shortest distance to the plane of best
fit of all deviation atoms (sum abs(d_i)).
Plane deviation span: d_max - d_min (SDP in [2]_)
Planarity parameter: defined as
sqrt((1/num_atoms) * (sum d_i ** 2)) (MPP in [2]_)
Examples
--------
.. code-block:: python
import stk
import stko
# Create a molecule whose torsions we want to know.
mol1 = stk.BuildingBlock('c1ccccc1')
# Create the calculator.
pc = stko.PlanarityCalculator()
# Extract the measures.
pc_results = pc.get_results(mol1)
plane_deviation = pc_results.get_plane_deviation()
plane_deviation_span = pc_results.get_plane_deviation_span()
planarity_parameter = pc_results.get_planarity_parameter()
References
----------
.. [1] https://onlinelibrary.wiley.com/doi/10.1002/anie.202106721
.. [2] https://chemrxiv.org/engage/chemrxiv/article-details/
60c73cbf9abda2e0c5f8b5c6
"""
def _shortest_distance_to_plane(self, plane, point):
"""
Calculate the perpendicular distance from a point and a plane.
"""
top = (
plane[0]*point[0] + plane[1]*point[1] +
plane[2]*point[2] - plane[3]
)
bottom = np.sqrt(plane[0]**2 + plane[1]**2 + plane[2]**2)
distance = top / bottom
return distance
def calculate(
self,
mol,
plane_atom_ids=None,
deviation_atom_ids=None,
):
"""
Perform calculation on `mol`.
Parameters
----------
mol : :class:`.Molecule`
The :class:`.Molecule` whose planarity is to be calculated.
plane_atom_ids : iterable of :class:`int`, optional
The atom ids to use to define the plane of best fit.
deviation_atom_ids : iterable of :class:`int`, optional
The atom ids to use to calculate planarity.
Yields
------
:class:`function`
The function to perform the calculation.
"""
if plane_atom_ids is None:
plane_atom_ids = list(range(len(list(mol.get_atoms()))))
else:
plane_atom_ids = list(plane_atom_ids)
if deviation_atom_ids is None:
deviation_atom_ids = list(
range(len(list(mol.get_atoms())))
)
else:
deviation_atom_ids = list(deviation_atom_ids)
atom_plane = self._get_plane_of_best_fit(mol, plane_atom_ids)
deviations = self._calculate_deviations(
mol=mol,
atom_plane=atom_plane,
deviation_atom_ids=deviation_atom_ids,
)
yield {
'plane_deviation': (
self._plane_deviation(deviations)
),
'plane_deviation_span': (
self._plane_deviation_span(deviations)
),
'planarity_parameter': (
self._planarity_parameter(deviations)
),
}
def get_results(
self,
mol,
plane_atom_ids=None,
deviation_atom_ids=None,
):
"""
Calculate the planarity of `mol`.
Parameters
----------
mol : :class:`.Molecule`
The :class:`.Molecule` whose planarity is to be calculated.
plane_atom_ids : iterable of :class:`int`, optional
The atom ids to use to define the plane of best fit.
deviation_atom_ids : iterable of :class:`int`, optional
The atom ids to use to calculate planarity.
Returns
-------
:class:`.PlanarityResults`
The planarity measures of the molecule.
"""
return PlanarityResults(self.calculate(
mol=mol,
plane_atom_ids=plane_atom_ids,
deviation_atom_ids=deviation_atom_ids,
))
| [
37811,
198,
20854,
6806,
27131,
2024,
198,
4770,
1421,
28,
198,
198,
2,
13,
1058,
4871,
25,
44646,
20854,
6806,
9771,
3129,
1352,
63,
198,
198,
46202,
284,
15284,
1410,
6806,
5260,
286,
257,
27756,
13,
198,
198,
37811,
198,
198,
11748... | 2.182673 | 2,020 |
'''
Created on 2012-01-19
@author: innovation
'''
import unittest
import numpy as np
from tests.simualtors.joint_binomial import JointSnvMixSimulator
from joint_snv_mix.counter import JointBinaryCountData, JointBinaryQualityData
from joint_snv_mix.models.joint_snv_mix import JointSnvMixModel, JointSnvMixPriors, JointSnvMixParameters
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| [
7061,
6,
198,
41972,
319,
2321,
12,
486,
12,
1129,
198,
198,
31,
9800,
25,
11044,
198,
7061,
6,
198,
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
5254,
13,
14323,
723,
83,
669,
13,
73,
1563,
62,
8800,
... | 2.794872 | 156 |
import os
DEBUG=True
TEMPLATE_PATH = '%s/templates/' % os.path.dirname(os.path.realpath(__file__))
STATIC_BUCKET = 'nytint-stg-newsapps'
REMOTE_STORAGE_PATH = 'apps/screenshotter' | [
11748,
28686,
198,
198,
30531,
28,
17821,
198,
51,
3620,
6489,
6158,
62,
34219,
796,
705,
4,
82,
14,
11498,
17041,
14,
6,
4064,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
4008,
198,
198,... | 2.291139 | 79 |
from typing import List, Tuple, Optional
from .symbols import *
from .errors import NLPError
class AnnotationError(NLPError):
'''Any error related to Annotation and subclasses'''
class TextData(object):
'''Container for text data.
All Annotations reference it and use it as a source of text data.
'''
# TODO: Why do I need this class? why cant it be Text with additional fields
# hide this class inside Text and delegate some methods?
@classmethod
class Annotation(object):
'''
Annotation is a base class for all other types more specific types of
annotations. Ideally, one does not need to create an object of type
Annotation directly.
An annotation does not contain text data directly but by proxy of
a TextData object.
'''
def __len__(self):
'''Length in bytes (not in characters)'''
if self.end:
return self.end - self.start
return 0
def is_blank(self):
'''Test if the annotation is blank or not. A blank annotation has no
text or consists of whitespace characters only.'''
return len(self) == 0 or self.text.isspace()
def __lt__(self, other):
'''By offsets, from left to right'''
# TODO: allow comparing different types of Annotations?
# does it make sense?
assert type(self) is type(other), \
("Can compare annotations of the same type only but got:"
" {} and {}".format(type(self), type(other)))
res = self.end <= other.start
# print("Comparing: {} vs {} = {}".format(self.end, other.start, res))
return res
def __contains__(self, other):
'''Test of other annotation is within boundaries of the current annotation'''
return self.start <= other.start and self.end >= other.end
@property
# TODO: what should happen when source gets set?
@source.setter
@property
@property
@property
def reoffset(self, val: int):
'''Shift annotation position by given number of characters.'''
self.start += val
self.end += val
def annotations(self, target=None):
'''List *all* annotations or annotations of given type <target> that
occur within the boundaries of the current annotation.
Target can be a tuple of type names: (Token, WSWord)'''
if target:
anns = [ann for ann in self.source.annotations
if isinstance(ann, target)]
else:
anns = self.source.annotations
anns = [ann for ann in anns if ann in self]
return anns
def paragraphs(self):
'''Return a list of annotations of type Paragraph'''
ann_type = Paragraph
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def sentences(self):
'''Return a list of annotations of type Sentence'''
ann_type = Sentence
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def lines(self):
'''Return a list of annotations of type Line'''
ann_type = Line
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def wswords(self):
'''Return a list of annotations of type WSWord'''
ann_type = WSWord
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def tokens(self):
'''Return a list of annotations of type Token'''
ann_type = Token
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def next(self, scope: 'Annotation' = None):
'''Get the next adjacent annotation of the same type.
If <scope> is given (and is another annotation), also ensure that
the annotation found is contained in the annotation <scope>.
This is intended to be used for finding another annotation within
the same super annotation, for example, finding next word that
belongs to the same sentence.
Example:
>> word.next(word.line())
'''
scope = scope or self.root
anns = sorted(scope.annotations(target=self.__class__))
idx = anns.index(self)
ann = None
if idx is not None and 1+idx < len(anns):
ann = anns[1+idx]
return ann
def line(self) -> Optional['Line']:
'''Find annotation of type Line that contains current annotation'''
anns = [line for line in self.root.lines() if self in line]
assert len(anns) < 2, \
("ERROR: an annotation can be contained in one Line annotation"
" only but found {} Lines".format(len(anns)))
return anns[0] if anns else None
def _must_be_annotated(self, anntype):
'''Check if current object has been annotated for specific phenomena
(e.g. Paragraphs, Sentences, Tokens, etc) and thrown an error if not.
'''
anntypes = self.source.annotation_types
if anntype not in anntypes:
raise AnnotationError(
f"Annotations of type {anntype} not available in the current"
" object. Was appropriate annotator applied to the text?"
)
def tokenized(self):
'''Return the current annotation as a single tokenized line.'''
return " ".join(map(str, self.tokens()))
class Text(Annotation):
'''Annotation that represents the whole text'''
@classmethod
def load_from_file(cls, filepath: str, **kwargs):
'''Load plain text from given file <filepath> and annotate it for
- lines (annotation.Line)
- words (annotation.WSWord, separated by white spaces)
Return:
annotation.Text
'''
textdata = TextData.load_from_file(filepath)
# print("--- Text annotation ---")
text = cls()
text.source = textdata
text.language = kwargs.get('lang', 'deu')
# print(text.offsets)
# print(repr(text))
textdata.root = text
# print("--- Annotating Lines ---")
LineAnnotator().annotate(text)
# for ann in text.lines():
# print(repr(ann))
# print("--- Annotating WSWords ---")
WSWordAnnotator().annotate(text)
# for ann in text.wswords():
# print(repr(ann))
return text
@property
@source.setter
def source(self, obj: TextData):
'''Set source and create annotation of class Text'''
self._source = obj
self.start = 0
self.end = len(self.source.content)
def tokenized(self):
'''Serialize the current object to a string. The text is fully
tokenized, that is:
- one sentence per line;
- paragraphs are separated by an empty line;
- words are separated from punctuation marks
'''
lines = []
for par in self.paragraphs():
for sent in par.sentences():
words = sent.tokens()
# words = sent.wswords()
line = " ".join(map(str, words))
lines.append(line)
lines.append('') # paragraph separator
if lines:
lines.pop()
return "\n".join(lines)
class Line(Annotation):
'''Annotation that holds one line of a Text'''
pass
class Sentence(Annotation):
'''Annotation that holds a sentence.'''
pass
class WSWord(Annotation):
'''A substring of text between two space characters.
It can be a word, a punctuation mark or combination of them.
Not to be confused with a Token.
WSWord stands for WhiteSpace Word.
'''
pass
class Token(Annotation):
'''A Token is either a lexeme (stripped off punctuation marks) or
a punctuation mark.'''
pass
# These are necessary in Text.load_from_file() and are imported here at
# the end of the file to fix circular dependencies.
from .line_annotator import LineAnnotator
from .wsword_annotator import WSWordAnnotator
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
32233,
198,
6738,
764,
1837,
2022,
10220,
1330,
1635,
198,
6738,
764,
48277,
1330,
399,
19930,
12331,
628,
198,
4871,
1052,
38983,
12331,
7,
45,
19930,
12331,
2599,
198,
220,
220,
220,
705,
... | 2.515313 | 3,200 |
# Import the required libraries
import cv2
import numpy as np
import Projects.VirtualCanvas.utils as utils
cv2.destroyAllWindows()
if __name__ == "__main__":
# mode = int(input("Debug mode -- 1 or Normal Run ---0: "))
drawOnCanvas(debug_mode=True)
"""
LOG:
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 103 45 0 120 255 255
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 0 80 148 255 255 255
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 0 89 178 255 238 255
for Dark blue, Orange, Yellow Sparx pens respectively..
"""
"""
Improvements:
Add the facility of saving the HSV values -- either via numpy or pkl
"""
"""
Backup code of def detect_and_draw_as_marker(self):
"""
"""
This function is made for testing purposes.
The part of this function's code is used in some other functions with some optimizations
:return:
"""
"""
while True:
# Required variables
count = 0
# Get camera feed..
image = self.get_camera_feed()
# convert to HSV.. so that we can filter out the image from our captured HSV values for our markers previously..
HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)
# loop through all marker's HSV values
for marker_HSV in self.markers_HSV:
lower_boundary = np.array(marker_HSV[0])
upper_boundary = np.array(marker_HSV[1])
# Get the mask image that satisfies the lower and upper HSV values..
maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)
'''Draw the contours for the mask image detected, marker point for the marker'''
# Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)
x, y, width, height = self.draw_contours(image, maskImg)
if self.debug_mode:
cv2.rectangle(img=image, pt1=(x, y), pt2=(x+width, y+height), color=(0, 0, 0), thickness=3)
# Select the marker point..
marker_point_center = (x+width//2, y)
# Draw the marker point..
# cv2.circle(img=image, center=marker_point_center, radius=5, color=(2, 255, 10), thickness=cv2.FILLED)
cv2.circle(img=image, center=marker_point_center, radius=5, color=self.marker_colors[count], thickness=cv2.FILLED)
count += 1
cv2.imshow("Virtual Canvas", image)
#print("Working....")
if cv2.waitKey(1) == 27:
break
"""
"""
0 26 255 255 255 255 # orange
101 35 0 255 255 255 # Blue
0 76 25 255 255 255 # yellow
"""
| [
2,
17267,
262,
2672,
12782,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
29898,
13,
37725,
6090,
11017,
13,
26791,
355,
3384,
4487,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
19... | 2.207363 | 1,331 |
#!/home/kevinml/anaconda3/bin/python3.7
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 12:41:01 2019
@author: juangabriel and kevin Meza
"""
# Upper Confidence Bound (UCB)
# =======================================================================================================
# PASOS
#
# NOTAS: Se considera que cada recompensa puede ser diferente en cada ronda.
# Entre MAYOR es la "n", MENOR es la amplitud del intervalo de confianza.
#
# 1.- En cada ronda "n" se consideran 2 numeros para cada anuncio "i":
# N(n) = Numero de veces que el anuncio "i" se selecciona en la ronda "n".
# R(n) = La suma de recompensas del anuncio "i" hasta la ronda "n".
# 2.- A partir de estos 2 numeros, se calcula:
# - La recompensa media del anuncio "i" hasta la ronda "n".
# r(n)= R(n)/ N(n)
#
# - El intervalo de confianza de la ronda "n".
# ( r(n)-Δ(n) , r(n)+Δ(n) ); Donde:
# Δ(n) = sqrt( 3*log(n) / 2*N(n) )
#
# 3.- Se selecciona el anuncio "i" con mayor limite superior del intervalo de confianza (UCB)
#
# En un inicio, se parte del supuesto de que las medias y los intervalos de confianza de cada una de las
# distribuciones son iguales y con al paso del tiempo al juntar observaciones, se va definiendo el valor
# medio de recompensa de cada una, al igua que los intervalos de confianza. Recordando que ntre mayor sea
# la "n", mrnor sera la amplitud del intervalo de confianza.
#
# Primero se comienza a tirar en todas las maquinas (muestreando asi todas las distribuciones) y despues
# de ciertas iteraciones, se comienza a tirar (muestrear) la maquina (la distribucion) con el mayor limite
# superior del intervalo de confianza (UCB), hasta que el algoritmo converja.
#
# =======================================================================================================
# Importar las librerías
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
################################################
### IMPORTAR EL DATA SET ###
################################################
# El dataset tiene en las filas las ronas en las que se presentan los anuncios y en las columnas estan los 20 anuncios.
# los 1's y 0's representan si el usuario dio "click" en el anuncio.
dataset = pd.read_csv("Ads_CTR_Optimisation.csv")
################################################
# Implementacion del Algoritmo de UCB #
################################################
import math
N = 10000 # no. de observaciones
d = 10 # no. de anuncios
# aqui se guardara el numero de veces que se muestreo cada anuncio. Vectro inicializado con 0's de tamaño igual al no. de observaciones.
number_of_selections = [0] * d
sums_of_rewards = [0] * d # aqui se guardara la recompenza de cada anuncio
ads_selected = [] # vector con el numero de anuncio elegido en cda ronda
total_reward = 0 # recompenza total
for n in range(0, N):
max_upper_bound = 0 # Contiene el UCB de la ronda
ad = 0 # Contiene el numero del anuncio con el mayor intervalo de confianza
# En la ronda actual, para cada anuncio, se obtiene la "recompensa media" y el limite superior del intervalo de confianza
# y se actualiza el UCB si es necesario
for i in range(0, d):
if(number_of_selections[i] > 0):
# Se obtiene la recompensa media
average_reward = sums_of_rewards[i] / number_of_selections[i]
# Se obtiene Δn, para sacar el intervalo de confianza; sumamos 1 para no dividir entre cero
delta_i = math.sqrt(3 / 2 * math.log(n + 1) /
number_of_selections[i])
# Se obtiene el limite superior del intervalo de confianza
upper_bound = average_reward + delta_i
else:
# Para las primeras rondas cuando no se ha seleccionado el anuncio, se le asigna como como "upper confidence bound" el numero 10^400
# Asi ningun anuncio sera mejor que otro en la primera ronda.
# En la primera ronda se eligira el primer anuncio, en la siguiente ronda el segundo, despues el tercero y asi sucesivamente,
# esto con la intencion de que al menos todos sean muestreados 1 vez, por eso el numero "10^400".
upper_bound = 1e400
# Si el limite superior del intervalo de confianza del actual anuncio supera al UCB, este pasa a ser el nuevo UCB
if upper_bound > max_upper_bound:
max_upper_bound = upper_bound
ad = i
# se añade a la lista correspondiente el anuncio "elegido", es decir, con el UCB hasta esa ronda
ads_selected.append(ad)
# se le suma 1 al vector que contiene cuantas veces se ha elegido el anuncio
number_of_selections[ad] = number_of_selections[ad] + 1
# Se guarda la recompensa de seleccionar ese anuncio
reward = dataset.values[n, ad]
# A la recompenza previa del anuncio "elegido", se le suma la recompenza conseguida en esta ronda
sums_of_rewards[ad] = sums_of_rewards[ad] + reward
# Se suma la recompenza de esta ronda a la recompenza total
total_reward = total_reward + reward
# En cada ronda, siempre se va seleccionar el anuncio con el UCB
################################################
# VISUALIZACION DE RESULTADOS #
################################################
# Histograma de resultados
plt.hist(ads_selected)
plt.title("Histograma de anuncios")
plt.xlabel("ID del Anuncio")
plt.ylabel("Frecuencia de visualización del anuncio")
plt.show()
| [
2,
48443,
11195,
14,
365,
7114,
4029,
14,
272,
330,
13533,
18,
14,
8800,
14,
29412,
18,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
2758,
220,
767,
1105,
25,
3901,
25... | 2.615677 | 2,105 |
# -*- time-stamp-pattern: "changed[\s]+:[\s]+%%$"; -*-
# AUTHOR INFORMATION ##########################################################
# file : csv_data_loader.py
# author : Marcel Arpogaus <marcel dot arpogaus at gmail dot com>
#
# created : 2022-01-07 09:02:38 (Marcel Arpogaus)
# changed : 2022-01-07 09:02:38 (Marcel Arpogaus)
# DESCRIPTION #################################################################
# ...
# LICENSE #####################################################################
# Copyright 2022 Marcel Arpogaus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pandas as pd
| [
2,
532,
9,
12,
640,
12,
301,
696,
12,
33279,
25,
366,
40985,
58,
59,
82,
48688,
33250,
59,
82,
48688,
16626,
3,
8172,
532,
9,
12,
198,
2,
44746,
38044,
1303,
29113,
14468,
7804,
2,
198,
2,
2393,
220,
220,
220,
1058,
269,
21370,
... | 3.913621 | 301 |
from tensorflow.keras.layers import Dropout, Activation, Flatten
from tensorflow.keras.layers import Dense, Input, BatchNormalization
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.activations import relu, softmax
| [
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
14258,
448,
11,
13144,
341,
11,
1610,
41769,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
360,
1072,
11,
23412,
11,
347,
963,
26447,
1634,
198,
6738,
11... | 3.380435 | 92 |
import sys
import cv2
import numpy as np
import tensorflow as tf
| [
11748,
25064,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
628
] | 3.142857 | 21 |
from .clean_command import CleanCommand
| [
6738,
764,
27773,
62,
21812,
1330,
5985,
21575,
198
] | 4.444444 | 9 |
import pytest
from collections import OrderedDict
from optopus import (
Parser,
Result,
)
| [
11748,
12972,
9288,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
2172,
25790,
1330,
357,
198,
220,
220,
220,
23042,
263,
11,
198,
220,
220,
220,
25414,
11,
198,
8,
628
] | 2.941176 | 34 |